replace common qcom sources with samsung ones

Author: SaschaNes
Date: 2025-08-12 22:13:00 +02:00
parent ba24dcded9
commit 6f7753de11
5682 changed files with 2450203 additions and 103634 deletions

View File

@@ -5,7 +5,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_APS_DLKM_PLATFORMS_LIST := pineapple
RMNET_APS_DLKM_PLATFORMS_LIST += sun
RMNET_APS_DLKM_PLATFORMS_LIST += parrot
RMNET_APS_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_APS_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -9,6 +9,3 @@ define_aps("sun", "perf")
define_aps("parrot", "consolidate")
define_aps("parrot", "perf")
define_aps("tuna", "consolidate")
define_aps("tuna", "perf")

View File

@@ -13,25 +13,20 @@ ifeq ($(TARGET_DATARMNET_EXT_ENABLE), true)
DATA_OFFLOAD_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_OFFLOAD_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_OFFLOAD_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_OFFLOAD_DLKM_BOARD_PLATFORMS_LIST += tuna
DATA_SHS_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_SHS_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_SHS_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_SHS_DLKM_BOARD_PLATFORMS_LIST += tuna
DATA_APS_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_APS_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_APS_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_APS_DLKM_BOARD_PLATFORMS_LIST += tuna
DATA_WLAN_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_WLAN_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_WLAN_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_WLAN_DLKM_BOARD_PLATFORMS_LIST += monaco
DATA_WLAN_DLKM_BOARD_PLATFORMS_LIST += tuna
DATA_MEM_DLKM_BOARD_PLATFORMS_LIST := pineapple
DATA_MEM_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_MEM_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_MEM_DLKM_BOARD_PLATFORMS_LIST += monaco
DATA_MEM_DLKM_BOARD_PLATFORMS_LIST += tuna
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(DATA_OFFLOAD_DLKM_BOARD_PLATFORMS_LIST)),true)

View File

@@ -6,8 +6,6 @@ RMNET_MEM_DLKM_PLATFORMS_LIST := pineapple
RMNET_MEM_DLKM_PLATFORMS_LIST += sun
RMNET_MEM_DLKM_PLATFORMS_LIST += parrot
RMNET_MEM_DLKM_PLATFORMS_LIST += monaco
RMNET_MEM_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_MEM_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

View File

@@ -13,9 +13,6 @@ define_mem("parrot", "perf")
define_mem("monaco", "consolidate")
define_mem("monaco", "perf")
define_mem("tuna", "consolidate")
define_mem("tuna", "perf")
package(
default_visibility = [
"//visibility:public", ],

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
/* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* SPDX-License-Identifier: GPL-2.0-only
*/
@@ -10,16 +10,19 @@
#define DATARMNET19337c1bbf (0xdb7+6670-0x2634)
extern struct delayed_work pool_adjust_work;extern struct workqueue_struct*
mem_wq;int DATARMNET291f036d31(struct sk_buff*skb,struct genl_info*
DATARMNET54338da2ff){u8 mode=(0xd2d+202-0xdf7);struct DATARMNET5d6175c98d
mem_info;struct nlattr*na;if(DATARMNET54338da2ff->attrs[DATARMNETe5184c7a76]){na
=DATARMNET54338da2ff->attrs[DATARMNETe5184c7a76];if(nla_memcpy(&mem_info,na,
sizeof(mem_info))>(0xd2d+202-0xdf7)){rm_err("%s(): modeinfo %u\n",__func__,
mem_info.DATARMNET3a4d9ad400);}rm_err(
DATARMNET54338da2ff){u8 mode=(0xd2d+202-0xdf7);struct sk_buff*
DATARMNETa13fcf9070;struct DATARMNET5d6175c98d mem_info;struct nlattr*na;if(
DATARMNET54338da2ff->attrs[DATARMNETe5184c7a76]){na=DATARMNET54338da2ff->attrs[
DATARMNETe5184c7a76];if(nla_memcpy(&mem_info,na,sizeof(mem_info))>
(0xd2d+202-0xdf7)){rm_err("%s(): modeinfo %u\n",__func__,mem_info.
DATARMNET3a4d9ad400);}rm_err(
"\x25\x73\x28\x29\x3a\x20\x6d\x6f\x64\x65\x20\x25\x75" "\n",__func__,mode);
DATARMNETe85d734d4f(DATARMNETa967925c7a,DATARMNET54338da2ff);}else{
DATARMNETe85d734d4f(DATARMNET19337c1bbf,DATARMNET54338da2ff);}return
(0xd2d+202-0xdf7);}int DATARMNET8e48a951e4(struct sk_buff*skb,struct genl_info*
DATARMNET54338da2ff){struct DATARMNET5d23779a8f mem_info;struct nlattr*na;int i;
DATARMNETa13fcf9070=nlmsg_new(NLMSG_DEFAULT_SIZE,GFP_KERNEL);if(!
DATARMNETa13fcf9070)return-ENOMEM;DATARMNETe85d734d4f(DATARMNETa967925c7a,
DATARMNET54338da2ff);}else{DATARMNETe85d734d4f(DATARMNET19337c1bbf,
DATARMNET54338da2ff);}return(0xd2d+202-0xdf7);}int DATARMNET8e48a951e4(struct
sk_buff*skb,struct genl_info*DATARMNET54338da2ff){struct sk_buff*
DATARMNETa13fcf9070;struct DATARMNET5d23779a8f mem_info;struct nlattr*na;int i;
unsigned long DATARMNET28085cfd14;u8 DATARMNET205e85dea0=(0xd2d+202-0xdf7);u8
DATARMNET4f9cb7ce34=(0xd2d+202-0xdf7);DATARMNETa293261aea[DATARMNET95fc2e84cc]++
;if(DATARMNET54338da2ff->attrs[DATARMNETb0428b7575]){na=DATARMNET54338da2ff->
@@ -38,14 +41,16 @@ DATARMNETe87b937bb6[(0xd1f+216-0xdf5)],mem_info.DATARMNETe87b937bb6[
(0xd18+223-0xdf4)]);if(DATARMNET205e85dea0&&mem_wq){DATARMNET28085cfd14=
msecs_to_jiffies(DATARMNET675090896c);cancel_delayed_work_sync(&pool_adjust_work
);queue_delayed_work(mem_wq,&pool_adjust_work,(DATARMNET4f9cb7ce34)?
(0xd2d+202-0xdf7):DATARMNET28085cfd14);}DATARMNETe85d734d4f(DATARMNETa967925c7a,
DATARMNET54338da2ff);}else{DATARMNETe85d734d4f(DATARMNET19337c1bbf,
DATARMNET54338da2ff);}return(0xd2d+202-0xdf7);}int DATARMNET803d42739e(struct
sk_buff*skb,struct genl_info*DATARMNET54338da2ff){struct DATARMNET5d23779a8f
mem_info;struct nlattr*na;int i;DATARMNETa293261aea[DATARMNETe581523c0b]++;if(
DATARMNET54338da2ff->attrs[DATARMNETb0428b7575]){na=DATARMNET54338da2ff->attrs[
DATARMNETb0428b7575];if(nla_memcpy(&mem_info,na,sizeof(mem_info))>
(0xd2d+202-0xdf7)){rm_err(
(0xd2d+202-0xdf7):DATARMNET28085cfd14);}DATARMNETa13fcf9070=nlmsg_new(
NLMSG_DEFAULT_SIZE,GFP_KERNEL);if(!DATARMNETa13fcf9070)return-ENOMEM;
DATARMNETe85d734d4f(DATARMNETa967925c7a,DATARMNET54338da2ff);}else{
DATARMNETe85d734d4f(DATARMNET19337c1bbf,DATARMNET54338da2ff);}return
(0xd2d+202-0xdf7);}int DATARMNET803d42739e(struct sk_buff*skb,struct genl_info*
DATARMNET54338da2ff){struct sk_buff*DATARMNETa13fcf9070;struct
DATARMNET5d23779a8f mem_info;struct nlattr*na;int i;DATARMNETa293261aea[
DATARMNETe581523c0b]++;if(DATARMNET54338da2ff->attrs[DATARMNETb0428b7575]){na=
DATARMNET54338da2ff->attrs[DATARMNETb0428b7575];if(nla_memcpy(&mem_info,na,
sizeof(mem_info))>(0xd2d+202-0xdf7)){rm_err(
"\x25\x73\x28\x29\x3a\x20\x6d\x6f\x64\x65\x69\x6e\x66\x6f\x20\x25\x75" "\n",
__func__,mem_info.DATARMNET855b934a37);}rm_err(
"\x25\x73\x28\x29\x3a\x20\x70\x62\x69\x6e\x64\x20\x70\x6f\x6f\x6c\x5f\x73\x69\x7a\x65\x20\x25\x75" "\n"
@@ -53,6 +58,7 @@ __func__,mem_info.DATARMNET855b934a37);}rm_err(
(0xd2d+202-0xdf7);i<POOL_LEN;i++){if(mem_info.DATARMNET855b934a37&
(0xd26+209-0xdf6)<<i){if(mem_info.DATARMNETe87b937bb6[i]>(0xd2d+202-0xdf7)&&
mem_info.DATARMNETe87b937bb6[i]<=MAX_STATIC_POOL)DATARMNETf85ebffa7a[i]=mem_info
.DATARMNETe87b937bb6[i];}}DATARMNETe85d734d4f(DATARMNETa967925c7a,
DATARMNET54338da2ff);}else{DATARMNETe85d734d4f(DATARMNET19337c1bbf,
DATARMNET54338da2ff);}return(0xd2d+202-0xdf7);}
.DATARMNETe87b937bb6[i];}}DATARMNETa13fcf9070=nlmsg_new(NLMSG_DEFAULT_SIZE,
GFP_KERNEL);if(!DATARMNETa13fcf9070)return-ENOMEM;DATARMNETe85d734d4f(
DATARMNETa967925c7a,DATARMNET54338da2ff);}else{DATARMNETe85d734d4f(
DATARMNET19337c1bbf,DATARMNET54338da2ff);}return(0xd2d+202-0xdf7);}
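
What the obfuscated hunk above adds to each generic-netlink doit handler is a struct sk_buff * reply that is allocated with nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL) and turned into -ENOMEM on failure before the ack helper runs. A minimal sketch of that handler shape, with illustrative struct and attribute names that are not taken from this driver:

#include <linux/printk.h>
#include <net/genetlink.h>

/* Illustrative names; the driver's own identifiers are obfuscated. */
#define MEM_ATTR_MODE 1 /* placeholder attribute index */
struct mem_mode_req { u32 mode; };

static int mem_mode_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct mem_mode_req req;

	if (info->attrs[MEM_ATTR_MODE]) {
		/* Copy the fixed-size request out of the netlink attribute. */
		if (nla_memcpy(&req, info->attrs[MEM_ATTR_MODE], sizeof(req)) > 0)
			pr_debug("%s(): modeinfo %u\n", __func__, req.mode);

		/* The step added in this version: pre-allocate a reply skb. */
		reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (!reply)
			return -ENOMEM;
		/* ... fill via genlmsg_put()/nla_put() and send with genlmsg_reply() ... */
	}
	return 0;
}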

View File

@@ -5,7 +5,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_OFFLOAD_DLKM_PLATFORMS_LIST := pineapple
RMNET_OFFLOAD_DLKM_PLATFORMS_LIST += sun
RMNET_OFFLOAD_DLKM_PLATFORMS_LIST += parrot
RMNET_OFFLOAD_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_OFFLOAD_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -9,6 +9,3 @@ define_offload("sun", "perf")
define_offload("parrot", "consolidate")
define_offload("parrot", "perf")
define_offload("tuna", "consolidate")
define_offload("tuna", "perf")

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,8 +56,8 @@ DATARMNET5fe4c722a8->DATARMNET144d119066.DATARMNET388842c721==(0xd03+244-0xdf1))
{__be32 DATARMNET25ffacbec5,DATARMNETb351dd927f;__be32 DATARMNET9422f16113;
DATARMNET25ffacbec5=DATARMNETaa568481cf->DATARMNET78fd20ce0e.DATARMNETabd58f7f89
;DATARMNETb351dd927f=DATARMNET5fe4c722a8->DATARMNET144d119066.
DATARMNETabd58f7f89;DATARMNET9422f16113=DATARMNET25ffacbec5^DATARMNETb351dd927f;
if(DATARMNET9422f16113&htonl(267386880))return true;}return false;}static bool
DATARMNETabd58f7f89;;DATARMNET9422f16113=DATARMNET25ffacbec5^DATARMNETb351dd927f
;if(DATARMNET9422f16113&htonl(267386880))return true;}return false;}static bool
DATARMNET6895620058(struct DATARMNETd7c9631acd*DATARMNETaa568481cf,struct
DATARMNETd812bcdbb5*DATARMNET5fe4c722a8){struct DATARMNET4287f07234*
DATARMNET699c2c62cd,*DATARMNET8814564ab9;DATARMNET699c2c62cd=&

View File

@@ -23,10 +23,10 @@
#include "rmnet_offload_state.h"
#include "rmnet_offload_engine.h"
#include "rmnet_offload_stats.h"
static char*verinfo[]={"\x37\x39\x37\x32\x32\x35\x34\x63",
"\x33\x36\x66\x30\x64\x38\x62\x31","\x35\x38\x61\x61\x39\x62\x65\x65",
"\x63\x38\x61\x63\x61\x66\x38\x35","\x65\x32\x31\x38\x66\x34\x35\x31",
"\x32\x61\x34\x34\x66\x36\x62\x65","\x37\x34\x31\x35\x39\x32\x31\x63"};
static char*verinfo[]={"\x33\x36\x66\x30\x64\x38\x62\x31",
"\x35\x38\x61\x61\x39\x62\x65\x65","\x63\x38\x61\x63\x61\x66\x38\x35",
"\x65\x32\x31\x38\x66\x34\x35\x31","\x32\x61\x34\x34\x66\x36\x62\x65",
"\x37\x34\x31\x35\x39\x32\x31\x63","\x34\x31\x32\x39\x36\x62\x30\x39"};
module_param_array(verinfo,charp,NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(
verinfo,
"\x56\x65\x72\x73\x69\x6f\x6e\x20\x6f\x66\x20\x74\x68\x65\x20\x64\x72\x69\x76\x65\x72"

View File

@@ -5,7 +5,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_PERF_DLKM_PLATFORMS_LIST := pineapple
RMNET_PERF_DLKM_PLATFORMS_LIST += sun
RMNET_PERF_DLKM_PLATFORMS_LIST += parrot
RMNET_PERF_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_PERF_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -9,6 +9,3 @@ define_perf("sun", "perf")
define_perf("parrot", "consolidate")
define_perf("parrot", "perf")
define_perf("tuna", "consolidate")
define_perf("tuna", "perf")

View File

@@ -5,7 +5,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_PERF_TETHER_DLKM_PLATFORMS_LIST := pineapple
RMNET_PERF_TETHER_DLKM_PLATFORMS_LIST += sun
RMNET_PERF_TETHER_DLKM_PLATFORMS_LIST += parrot
RMNET_PERF_TETHER_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_PERF_TETHER_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -9,6 +9,3 @@ define_perf_tether("sun", "perf")
define_perf_tether("parrot", "consolidate")
define_perf_tether("parrot", "perf")
define_perf_tether("tuna", "consolidate")
define_perf_tether("tuna", "perf")

View File

@@ -5,7 +5,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_SCH_DLKM_PLATFORMS_LIST := pineapple
RMNET_SCH_DLKM_PLATFORMS_LIST += sun
RMNET_SCH_DLKM_PLATFORMS_LIST += parrot
RMNET_SCH_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_SCH_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -9,6 +9,3 @@ define_sch("sun", "perf")
define_sch("parrot", "consolidate")
define_sch("parrot", "perf")
define_sch("tuna", "consolidate")
define_sch("tuna", "perf")

View File

@@ -3,7 +3,6 @@ ifneq ($(TARGET_BOARD_PLATFORM),qssi)
RMNET_SHS_DLKM_PLATFORMS_LIST := pineapple
RMNET_SHS_DLKM_PLATFORMS_LIST += sun
RMNET_SHS_DLKM_PLATFORMS_LIST += parrot
RMNET_SHS_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_SHS_DLKM_PLATFORMS_LIST)),true)
#Make file to create RMNET_SHS DLKM

View File

@@ -8,6 +8,3 @@ define_shs("sun", "perf")
define_shs("parrot", "consolidate")
define_shs("parrot", "perf")
define_shs("tuna", "consolidate")
define_shs("tuna", "perf")

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,7 +68,7 @@ DATARMNET30a3e83974,DATARMNET0763436b8d){if(!ep->DATARMNET4a4e6f66b5)continue;if
(0xd2d+202-0xdf7);DATARMNETecc0627c70.map_len=(0xd2d+202-0xdf7);return;}else if(
DATARMNETecc0627c70.map_mask!=mask){DATARMNETecc0627c70.map_mask=mask;
DATARMNETecc0627c70.map_len=DATARMNET310c3eb16e(mask);pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x20\x6d\x61\x73\x6b\x3a\x20\x30\x78\x25\x78\x20\x6d\x61\x70\x6c\x65\x6e\x3a\x20\x25\x64" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x20\x6d\x61\x73\x6b\x3a\x20\x30\x78\x25\x78\x20\x6d\x61\x70\x6c\x65\x6e\x3a\x20\x25\x64"
,DATARMNETecc0627c70.map_mask,DATARMNETecc0627c70.map_len);}}void
DATARMNETde8ee16f92(struct DATARMNET63d7680df2*DATARMNET63b1a086d5){
DATARMNETda96251102(DATARMNETd5eb6398da,DATARMNETe9a79499ac,(0x16e8+787-0xc0c),

View File

@@ -59,7 +59,7 @@ DATARMNETf5157a9b85]++;return NOTIFY_DONE;}if(!(strncmp(dev->name,
DATARMNETd6ee05f1b4(dev);break;case NETDEV_UNREGISTER:DATARMNET2cb9ae589c--;if(!
DATARMNET2cb9ae589c&&DATARMNETecc0627c70.DATARMNETfc89d842ae){unsigned int
DATARMNET9f4bc49c6f;pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x64\x65\x69\x6e\x69\x74\x20\x25\x73\x20\x67\x6f\x69\x6e\x67\x20\x64\x6f\x77\x6e" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x64\x65\x69\x6e\x69\x74\x20\x25\x73\x20\x67\x6f\x69\x6e\x67\x20\x64\x6f\x77\x6e\x20"
,dev->name);DATARMNET203752febd();DATARMNETa871eeb7e7();
rmnet_module_hook_unregister_no_sync(&DATARMNETf6217b20b8,(0xd26+209-0xdf6));
qmi_rmnet_ps_ind_deregister(DATARMNETecc0627c70.port,&DATARMNETecc0627c70.
@@ -73,15 +73,15 @@ DATARMNET9f4bc49c6f);trace_rmnet_shs_high(DATARMNET1790979ccf,
DATARMNET443dab7031,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);}break;case NETDEV_REGISTER:DATARMNET2cb9ae589c++;
if(DATARMNET2cb9ae589c&&!DATARMNETecc0627c70.DATARMNETfc89d842ae){pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x69\x74\x69\x61\x6c\x69\x7a\x69\x6e\x67\x20\x25\x73" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x69\x74\x69\x61\x6c\x69\x7a\x69\x6e\x67\x20\x25\x73"
,dev->name);priv=netdev_priv(dev);port=rmnet_get_port(priv->real_dev);if(!port){
pr_err(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x72\x6d\x6e\x65\x74\x5f\x70\x6f\x72\x74" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x72\x6d\x6e\x65\x74\x5f\x70\x6f\x72\x74"
);break;}DATARMNET45d8cdb224(priv->real_dev,dev);DATARMNET3ae0d614d6();
DATARMNETe1f95274f1();}break;case NETDEV_UP:if(!DATARMNETecc0627c70.
DATARMNET9c869c1ec2&&DATARMNETecc0627c70.DATARMNETfc89d842ae){port=
DATARMNETecc0627c70.port;if(!port){pr_err(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x72\x6d\x6e\x65\x74\x5f\x63\x66\x67\x5f\x70\x6f\x72\x74" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x72\x6d\x6e\x65\x74\x5f\x63\x66\x67\x5f\x70\x6f\x72\x74"
);break;}DATARMNETecc0627c70.DATARMNET08dbb5ab35.priority=RMNET_SHS;
DATARMNETecc0627c70.DATARMNET6b783c98fe.priority=RMNET_SHS;if(port->data_format&
RMNET_INGRESS_FORMAT_DL_MARKER_V2){DATARMNETecc0627c70.DATARMNET08dbb5ab35.

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -233,41 +233,41 @@ DATARMNET110549da6f--;DATARMNET75ae82094a--;rm_err(
);}void DATARMNET1e918c8e0d(struct DATARMNET0331d6732d*DATARMNET63b1a086d5){
struct DATARMNETbf4d34b241*DATARMNET54338da2ff=&DATARMNET63b1a086d5->
DATARMNET54338da2ff;pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x70\x72\x6f\x74\x6f\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x20\x73\x72\x63\x20\x61\x64\x64\x72\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x64\x65\x73\x74\x20\x61\x64\x64\x72\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x64\x65\x73\x74\x20\x70\x6f\x72\x74\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x73\x72\x63\x70\x6f\x72\x74\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x69\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x25\x75\x20\x73\x65\x71\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x70\x72\x6f\x74\x6f\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x20\x73\x72\x63\x20\x61\x64\x64\x72\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x64\x65\x73\x74\x20\x61\x64\x64\x72\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x64\x65\x73\x74\x20\x70\x6f\x72\x74\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x73\x72\x63\x70\x6f\x72\x74\x20\x76\x61\x6c\x69\x64\x20\x25\x75\x2c\x20\x69\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x25\x75\x20\x73\x65\x71\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNET8b5ace4a98,DATARMNET54338da2ff->
DATARMNET1819cae4a3,DATARMNET54338da2ff->DATARMNETb035edcfb9,DATARMNET54338da2ff
->DATARMNET1c959e10ca,DATARMNET54338da2ff->DATARMNET5a5907dd87,
DATARMNET54338da2ff->DATARMNET602389fe52,DATARMNET54338da2ff->seq);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x70\x5f\x76\x65\x72\x73\x69\x6f\x6e\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x69\x70\x5f\x76\x65\x72\x73\x69\x6f\x6e\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNET602389fe52);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x70\x72\x6f\x74\x6f\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x70\x72\x6f\x74\x6f\x20\x25\x75"
,DATARMNET54338da2ff->proto);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x70\x6f\x72\x74\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x70\x6f\x72\x74\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNET1e49bc75c8);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x70\x6f\x72\x74\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x70\x6f\x72\x74\x20\x25\x75"
,DATARMNET54338da2ff->src_port);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNETb035edcfb9);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNET1819cae4a3);pr_info(
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x65\x71\x20\x25\x75" "\n"
"\x53\x48\x53\x5f\x4c\x4c\x3a\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x65\x71\x20\x25\x75"
,DATARMNET54338da2ff->seq);if(DATARMNET54338da2ff->DATARMNET602389fe52==
(0xd11+230-0xdf3)&&(DATARMNET54338da2ff->DATARMNETb035edcfb9)&&(
DATARMNET54338da2ff->DATARMNET1819cae4a3)){pr_info(
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75" "\n"
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75\x20"
,DATARMNET54338da2ff->DATARMNETea422561ef.daddr);pr_info(
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75" "\n"
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75"
,DATARMNET54338da2ff->DATARMNET53d5f671f0.saddr);}if(DATARMNET54338da2ff->
DATARMNET602389fe52==(0xd03+244-0xdf1)&&(DATARMNET54338da2ff->
DATARMNETb035edcfb9)&&(DATARMNET54338da2ff->DATARMNET1819cae4a3)){pr_info(
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75\x20\x25\x75\x20\x25\x75\x20\x25\x75" "\n"
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x64\x65\x73\x74\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x25\x75\x20\x25\x75\x20\x25\x75\x20\x25\x75\x20"
,DATARMNET63b1a086d5->DATARMNET54338da2ff.DATARMNETea422561ef.v6_daddr.in6_u.
u6_addr32[(0xd18+223-0xdf4)],DATARMNET63b1a086d5->DATARMNET54338da2ff.
DATARMNETea422561ef.v6_daddr.in6_u.u6_addr32[(0xd1f+216-0xdf5)],
DATARMNET63b1a086d5->DATARMNET54338da2ff.DATARMNETea422561ef.v6_daddr.in6_u.
u6_addr32[(0xd26+209-0xdf6)],DATARMNET63b1a086d5->DATARMNET54338da2ff.
DATARMNETea422561ef.v6_daddr.in6_u.u6_addr32[(0xd2d+202-0xdf7)]);pr_info(
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x20\x25\x75\x20\x25\x75\x20\x25\x75\x20\x25\x75" "\n"
"\x4e\x65\x77\x20\x66\x6c\x6f\x77\x20\x69\x6e\x66\x6f\x2d\x3e\x73\x72\x63\x5f\x61\x64\x64\x72\x5f\x76\x61\x6c\x69\x64\x20\x20\x25\x75\x20\x25\x75\x20\x25\x75\x20\x25\x75"
,DATARMNET63b1a086d5->DATARMNET54338da2ff.DATARMNET53d5f671f0.v6_saddr.in6_u.
u6_addr32[(0xd18+223-0xdf4)],DATARMNET63b1a086d5->DATARMNET54338da2ff.
DATARMNET53d5f671f0.v6_saddr.in6_u.u6_addr32[(0xd1f+216-0xdf5)],

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,12 +11,12 @@
*
*/
#ifndef DATARMNETf391f4bee3
#define DATARMNETf391f4bee3
#ifndef DATARMNETf391f4bee3
#define DATARMNETf391f4bee3
int DATARMNETf5821256ad(struct sk_buff*skb,struct rmnet_shs_clnt_s*
DATARMNET0bf01e7c6f);void DATARMNET44499733f2(void);void DATARMNET90fe3a4b56(
void);void DATARMNET2ac305d296(struct DATARMNET0331d6732d*DATARMNET63b1a086d5);
void DATARMNETd52d50282d(struct DATARMNET0331d6732d*DATARMNET63b1a086d5);int
DATARMNETe24386452c(struct sk_buff*skb);
#endif
#endif

View File

@@ -49,12 +49,12 @@ LINUX_VERSION_CODE)
LINUX_VERSION_CODE)
#include <net/netdev_rx_queue.h>
#endif
static char*verinfo[]={"\x37\x37\x65\x66\x34\x32\x32\x36",
"\x37\x30\x32\x35\x63\x61\x30\x66","\x32\x30\x64\x64\x38\x35\x65\x31",
"\x30\x38\x31\x31\x36\x62\x33\x63","\x33\x33\x64\x31\x37\x34\x34\x66",
"\x31\x34\x38\x61\x38\x63\x39\x63","\x31\x61\x63\x63\x38\x33\x36\x32",
"\x66\x38\x35\x35\x65\x39\x63\x30","\x30\x62\x31\x34\x30\x66\x35\x65",
"\x65\x36\x31\x39\x64\x64\x30\x38","\x36\x37\x30\x32\x34\x34\x63\x35",};
static char*verinfo[]={"\x30\x38\x31\x31\x36\x62\x33\x63",
"\x33\x33\x64\x31\x37\x34\x34\x66","\x31\x34\x38\x61\x38\x63\x39\x63",
"\x31\x61\x63\x63\x38\x33\x36\x32","\x66\x38\x35\x35\x65\x39\x63\x30",
"\x30\x62\x31\x34\x30\x66\x35\x65","\x65\x36\x31\x39\x64\x64\x30\x38",
"\x36\x37\x30\x32\x34\x34\x63\x35","\x65\x34\x64\x30\x39\x61\x38\x66",
"\x35\x38\x61\x61\x39\x62\x65\x65","\x62\x62\x32\x65\x66\x37\x30\x39",};
module_param_array(verinfo,charp,NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(
verinfo,
"\x56\x65\x72\x73\x69\x6f\x6e\x20\x6f\x66\x20\x74\x68\x65\x20\x64\x72\x69\x76\x65\x72"
@@ -139,11 +139,12 @@ GFP_ATOMIC);if(!map){free_cpumask_var(mask);return-ENOMEM;}map->cpus[
DATARMNETbc3c416b77)DATARMNETb7ddf3c5dd[DATARMNET975060d6b5]++;else
DATARMNETb7ddf3c5dd[DATARMNET0e398136dc]++;rcu_read_lock();DATARMNETaeb4918e65=
rcu_dereference(DATARMNETb4180393e4->rps_map);rcu_assign_pointer(
DATARMNETb4180393e4->rps_map,map);kfree(DATARMNETaeb4918e65);free_cpumask_var(
mask);rcu_read_unlock();return(0xd2d+202-0xdf7);}int DATARMNET9303cec796(struct
sk_buff*skb,u8*ret){int DATARMNETbd864aa442=(0xd2d+202-0xdf7);struct iphdr*ip4h,
DATARMNETc00baf31c3;struct ipv6hdr*ip6h,DATARMNETcf1d9e2c1e;const struct
ipv6_opt_hdr*DATARMNET7b34b7b5be;struct ipv6_opt_hdr DATARMNET1688a97aa4;s64
DATARMNETb4180393e4->rps_map,map);if(DATARMNETaeb4918e65)kfree(
DATARMNETaeb4918e65);free_cpumask_var(mask);rcu_read_unlock();return
(0xd2d+202-0xdf7);}int DATARMNET9303cec796(struct sk_buff*skb,u8*ret){int
DATARMNETbd864aa442=(0xd2d+202-0xdf7);struct iphdr*ip4h,DATARMNETc00baf31c3;
struct ipv6hdr*ip6h,DATARMNETcf1d9e2c1e;const struct ipv6_opt_hdr*
DATARMNET7b34b7b5be;struct ipv6_opt_hdr DATARMNET1688a97aa4;s64
DATARMNETe30c7cdaf5;struct timespec64 time;int DATARMNET441081ddc0;if(
rmnet_module_hook_perf_ingress(&DATARMNET441081ddc0,skb)){if(!
DATARMNET441081ddc0){goto done;}}switch(skb->protocol){case htons(ETH_P_IP):ip4h
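
The visible change in the rps_map hunk above (@@ -139,11 +139,12 @@) is a NULL check added before the kfree() of the old map; the surrounding sequence — fetch the old map with rcu_dereference(), publish the new one with rcu_assign_pointer(), free the old one — is unchanged. A sketch of that swap with generic names (assumes CONFIG_RPS; on recent kernels struct netdev_rx_queue comes from <net/netdev_rx_queue.h>):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* The rps_map swap as in the hunk; kfree(NULL) would be a no-op anyway. */
static void replace_rps_map(struct netdev_rx_queue *rxq, struct rps_map *new_map)
{
	struct rps_map *old_map;

	rcu_read_lock();
	old_map = rcu_dereference(rxq->rps_map);
	rcu_assign_pointer(rxq->rps_map, new_map);
	if (old_map)		/* the added check */
		kfree(old_map);
	rcu_read_unlock();
}
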
@@ -526,111 +527,110 @@ NULL);if(DATARMNETaef946bb68(DATARMNET63b1a086d5->map_index,map)!=
DATARMNET63b1a086d5->map_cpu){DATARMNET5643f7b5e9=DATARMNET04e8d1b862(
DATARMNET63b1a086d5->map_cpu,map);if(DATARMNET5643f7b5e9>=(0xd2d+202-0xdf7)){
DATARMNET63b1a086d5->map_index=DATARMNET5643f7b5e9;DATARMNET63b1a086d5->map_cpu=
DATARMNETaef946bb68(DATARMNET5643f7b5e9,map);}else{int map_cpu=
DATARMNETaef946bb68(DATARMNET2edff26954,map);DATARMNET63b1a086d5->map_index=
DATARMNET2edff26954;if(map_cpu<(0xd2d+202-0xdf7))DATARMNET63b1a086d5->map_cpu=
DATARMNET2edff26954;else DATARMNET63b1a086d5->map_cpu=map_cpu;}
DATARMNETd87669e323=(0xd26+209-0xdf6);DATARMNET68d84e7b98[DATARMNETa1f9420686]++
;DATARMNET015fb2ba0e(DATARMNET720469c0a9,DATARMNET998c6de143,(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);}if(
DATARMNETe488c1c396(DATARMNET63b1a086d5,DATARMNETd87669e323,DATARMNET5447204733)
){DATARMNETcba2f7f7b6(DATARMNET63b1a086d5,DATARMNET5447204733,
DATARMNETc88d0a6cdd);DATARMNETbd864aa442=(0xd26+209-0xdf6);}DATARMNET52de1f3dc0(
DATARMNET4510abc30d,DATARMNET64b02f64c6,DATARMNETbd864aa442,DATARMNETd87669e323,
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),DATARMNET63b1a086d5,NULL);return
DATARMNETbd864aa442;}static void DATARMNETc54e431bbc(u8 DATARMNET42a992465f,u8
DATARMNET7845d39672,u32 DATARMNETea08087fc0){u32 DATARMNETba6017a3a9=(!
DATARMNET227d2ff866)?(0xd26+209-0xdf6):DATARMNET227d2ff866;int
DATARMNETd9d8392917;if((DATARMNETd9d8392917=DATARMNET2af09ccd0b(
DATARMNET42a992465f,DATARMNET7845d39672,DATARMNETea08087fc0))&&
DATARMNET362b15f941(DATARMNET42a992465f)&&!DATARMNET0997c5650d[
DATARMNET42a992465f].DATARMNETef866573e0&&DATARMNETecc0627c70.
DATARMNET6625085b71!=DATARMNET42a992465f&&DATARMNET42a992465f!=(0xd26+209-0xdf6)
){DATARMNETba6017a3a9=(!DATARMNET7845d39672)?DATARMNET249a927510:
DATARMNETba6017a3a9;DATARMNET0997c5650d[DATARMNET42a992465f].DATARMNETef866573e0
=(0xd26+209-0xdf6);DATARMNETfb7007f025();if(hrtimer_active(&DATARMNETba5ea4329f(
DATARMNET42a992465f)))hrtimer_cancel(&DATARMNETba5ea4329f(DATARMNET42a992465f));
hrtimer_start(&DATARMNETba5ea4329f(DATARMNET42a992465f),ns_to_ktime(
DATARMNETba6017a3a9*DATARMNET68fc0be252),HRTIMER_MODE_REL);DATARMNETb7ddf3c5dd[
DATARMNETd9d8392917]++;}}void DATARMNETe377e0368d(u8 DATARMNETded3da1a77,u8
DATARMNET5447204733){struct DATARMNET63d7680df2*DATARMNET3f85732c70=NULL;struct
DATARMNET63d7680df2*DATARMNETbb236c7d08=NULL;struct list_head*
DATARMNET7b34b7b5be=NULL,*next=NULL;struct sk_buff*DATARMNETc88d0a6cdd=NULL;int
DATARMNET42a992465f;u32 DATARMNET61ab18a4bd;u32 DATARMNETed7800fc72=
(0xd2d+202-0xdf7);u32 DATARMNETa0df5de99d=(0xd2d+202-0xdf7);u32
DATARMNETe56f4fbbe6=(0xd2d+202-0xdf7);u32 DATARMNET8bf94cc2f7=(0xd2d+202-0xdf7);
u32 DATARMNET76192fa639=(0xd2d+202-0xdf7);u32 DATARMNET870611bedd=
(0xd2d+202-0xdf7);u32 DATARMNETa6424e3c4e=(0xd2d+202-0xdf7);u8
DATARMNET77e7f4db43=(0xd2d+202-0xdf7);struct sk_buff*skb=NULL;struct sk_buff*
DATARMNETcebafc57a4=NULL;struct DATARMNETe600c5b727*DATARMNETa4055affd5=NULL;
DATARMNETe074a09496();DATARMNET52de1f3dc0(DATARMNET4510abc30d,
DATARMNETde91850c28,DATARMNETecc0627c70.DATARMNETa2e32cdd3a,DATARMNETecc0627c70.
DATARMNETc252a1f55d,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);for(
DATARMNET42a992465f=(0xd2d+202-0xdf7);DATARMNET42a992465f<DATARMNETc6782fed88;
DATARMNET42a992465f++){DATARMNET61ab18a4bd=DATARMNETeb3978575d(
DATARMNET42a992465f);DATARMNET870611bedd=(0xd2d+202-0xdf7);DATARMNETe56f4fbbe6=
(0xd2d+202-0xdf7);list_for_each_safe(DATARMNET7b34b7b5be,next,&
DATARMNETaef946bb68(DATARMNET5643f7b5e9,map);}else{DATARMNET63b1a086d5->
map_index=DATARMNET2edff26954;DATARMNET63b1a086d5->map_cpu=DATARMNETaef946bb68(
DATARMNET2edff26954,map);if(DATARMNET63b1a086d5->map_cpu<(0xd2d+202-0xdf7))
DATARMNET63b1a086d5->map_cpu=DATARMNET2edff26954;}DATARMNETd87669e323=
(0xd26+209-0xdf6);DATARMNET68d84e7b98[DATARMNETa1f9420686]++;DATARMNET015fb2ba0e
(DATARMNET720469c0a9,DATARMNET998c6de143,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);}if(DATARMNETe488c1c396(
DATARMNET63b1a086d5,DATARMNETd87669e323,DATARMNET5447204733)){
DATARMNETcba2f7f7b6(DATARMNET63b1a086d5,DATARMNET5447204733,DATARMNETc88d0a6cdd)
;DATARMNETbd864aa442=(0xd26+209-0xdf6);}DATARMNET52de1f3dc0(DATARMNET4510abc30d,
DATARMNET64b02f64c6,DATARMNETbd864aa442,DATARMNETd87669e323,(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),DATARMNET63b1a086d5,NULL);return DATARMNETbd864aa442;}static
void DATARMNETc54e431bbc(u8 DATARMNET42a992465f,u8 DATARMNET7845d39672,u32
DATARMNETea08087fc0){u32 DATARMNETba6017a3a9=(!DATARMNET227d2ff866)?
(0xd26+209-0xdf6):DATARMNET227d2ff866;int DATARMNETd9d8392917;if((
DATARMNETd9d8392917=DATARMNET2af09ccd0b(DATARMNET42a992465f,DATARMNET7845d39672,
DATARMNETea08087fc0))&&DATARMNET362b15f941(DATARMNET42a992465f)&&!
DATARMNET0997c5650d[DATARMNET42a992465f].DATARMNETef866573e0&&
DATARMNETecc0627c70.DATARMNET6625085b71!=DATARMNET42a992465f&&
DATARMNET42a992465f!=(0xd26+209-0xdf6)){DATARMNETba6017a3a9=(!
DATARMNET7845d39672)?DATARMNET249a927510:DATARMNETba6017a3a9;DATARMNET0997c5650d
[DATARMNET42a992465f].DATARMNETef866573e0=(0xd26+209-0xdf6);DATARMNETfb7007f025(
);if(hrtimer_active(&DATARMNETba5ea4329f(DATARMNET42a992465f)))hrtimer_cancel(&
DATARMNETba5ea4329f(DATARMNET42a992465f));hrtimer_start(&DATARMNETba5ea4329f(
DATARMNET42a992465f),ns_to_ktime(DATARMNETba6017a3a9*DATARMNET68fc0be252),
HRTIMER_MODE_REL);DATARMNETb7ddf3c5dd[DATARMNETd9d8392917]++;}}void
DATARMNETe377e0368d(u8 DATARMNETded3da1a77,u8 DATARMNET5447204733){struct
DATARMNET63d7680df2*DATARMNET3f85732c70=NULL;struct DATARMNET63d7680df2*
DATARMNETbb236c7d08=NULL;struct list_head*DATARMNET7b34b7b5be=NULL,*next=NULL;
struct sk_buff*DATARMNETc88d0a6cdd=NULL;int DATARMNET42a992465f;u32
DATARMNET61ab18a4bd;u32 DATARMNETed7800fc72=(0xd2d+202-0xdf7);u32
DATARMNETa0df5de99d=(0xd2d+202-0xdf7);u32 DATARMNETe56f4fbbe6=(0xd2d+202-0xdf7);
u32 DATARMNET8bf94cc2f7=(0xd2d+202-0xdf7);u32 DATARMNET76192fa639=
(0xd2d+202-0xdf7);u32 DATARMNET870611bedd=(0xd2d+202-0xdf7);u32
DATARMNETa6424e3c4e=(0xd2d+202-0xdf7);u8 DATARMNET77e7f4db43=(0xd2d+202-0xdf7);
struct sk_buff*skb=NULL;struct sk_buff*DATARMNETcebafc57a4=NULL;struct
DATARMNETe600c5b727*DATARMNETa4055affd5=NULL;DATARMNETe074a09496();
DATARMNET52de1f3dc0(DATARMNET4510abc30d,DATARMNETde91850c28,DATARMNETecc0627c70.
DATARMNETa2e32cdd3a,DATARMNETecc0627c70.DATARMNETc252a1f55d,(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);for(DATARMNET42a992465f=(0xd2d+202-0xdf7);
DATARMNET42a992465f<DATARMNETc6782fed88;DATARMNET42a992465f++){
DATARMNET61ab18a4bd=DATARMNETeb3978575d(DATARMNET42a992465f);DATARMNET870611bedd
=(0xd2d+202-0xdf7);DATARMNETe56f4fbbe6=(0xd2d+202-0xdf7);list_for_each_safe(
DATARMNET7b34b7b5be,next,&DATARMNET0997c5650d[DATARMNET42a992465f].
DATARMNET3dc4262f53){DATARMNET3f85732c70=list_entry(DATARMNET7b34b7b5be,struct
DATARMNET63d7680df2,DATARMNET04c88b8191);DATARMNETe56f4fbbe6+=
DATARMNET3f85732c70->DATARMNETae4b27456e.DATARMNET35234676d4;}if(
DATARMNET12565c8f98){DATARMNETc54e431bbc(DATARMNET42a992465f,DATARMNET20dc6bb36b
,DATARMNETe56f4fbbe6);}list_for_each_safe(DATARMNET7b34b7b5be,next,&
DATARMNET0997c5650d[DATARMNET42a992465f].DATARMNET3dc4262f53){
DATARMNET3f85732c70=list_entry(DATARMNET7b34b7b5be,struct DATARMNET63d7680df2,
DATARMNET04c88b8191);DATARMNETe56f4fbbe6+=DATARMNET3f85732c70->
DATARMNETae4b27456e.DATARMNET35234676d4;}if(DATARMNET12565c8f98){
DATARMNETc54e431bbc(DATARMNET42a992465f,DATARMNET20dc6bb36b,DATARMNETe56f4fbbe6)
;}list_for_each_safe(DATARMNET7b34b7b5be,next,&DATARMNET0997c5650d[
DATARMNET42a992465f].DATARMNET3dc4262f53){DATARMNET3f85732c70=list_entry(
DATARMNET7b34b7b5be,struct DATARMNET63d7680df2,DATARMNET04c88b8191);if(
DATARMNET3f85732c70->DATARMNETae4b27456e.DATARMNET6215127f48){if(
DATARMNET3f85732c70->DATARMNET85c698ec34)DATARMNETbb236c7d08=DATARMNET3f85732c70
;DATARMNETed7800fc72=DATARMNET3f85732c70->DATARMNETae4b27456e.
DATARMNET6215127f48;DATARMNETa0df5de99d=DATARMNET3f85732c70->DATARMNETae4b27456e
.DATARMNETbaa5765693;DATARMNETa6424e3c4e=DATARMNET3f85732c70->
DATARMNETae4b27456e.DATARMNET35234676d4;DATARMNET77e7f4db43=DATARMNETe02152c351(
DATARMNET3f85732c70,DATARMNETded3da1a77,DATARMNET5447204733,&DATARMNETc88d0a6cdd
);if(DATARMNET77e7f4db43){DATARMNET870611bedd+=DATARMNETa6424e3c4e;
DATARMNET8bf94cc2f7+=DATARMNETed7800fc72;DATARMNET76192fa639+=
DATARMNETa0df5de99d;DATARMNET0997c5650d[DATARMNET3f85732c70->map_cpu].
DATARMNET4133fc9428-=DATARMNETed7800fc72;DATARMNET3f85732c70->
DATARMNETae4b27456e.DATARMNET35234676d4=(0xd2d+202-0xdf7);if(DATARMNET3f85732c70
->map_cpu==DATARMNET42a992465f){DATARMNET61ab18a4bd+=DATARMNETed7800fc72;
DATARMNET3f85732c70->queue_head=DATARMNET61ab18a4bd;}}}}if(DATARMNET12565c8f98){
if(DATARMNET362b15f941(DATARMNET42a992465f)&&!DATARMNET0997c5650d[
DATARMNET42a992465f].DATARMNETef866573e0)DATARMNETfa919d00dc(DATARMNET42a992465f
,DATARMNET870611bedd);DATARMNETc54e431bbc(DATARMNET42a992465f,
DATARMNET20dc6bb36b,(0xd2d+202-0xdf7));}if(DATARMNET0997c5650d[
DATARMNET42a992465f].DATARMNET4133fc9428<(0xd2d+202-0xdf7))DATARMNET68d84e7b98[
DATARMNET33a9d4759f]++;if(DATARMNETeea3cef5b6(DATARMNET42a992465f)>=
DATARMNET84413d1257[DATARMNET42a992465f])DATARMNET84413d1257[DATARMNET42a992465f
]=DATARMNETeea3cef5b6(DATARMNET42a992465f);}DATARMNETecc0627c70.
DATARMNETc252a1f55d-=DATARMNET76192fa639;DATARMNETecc0627c70.DATARMNETa2e32cdd3a
-=DATARMNET8bf94cc2f7;if(DATARMNETc88d0a6cdd&&DATARMNETbb236c7d08){
DATARMNETa4055affd5=&DATARMNET0997c5650d[DATARMNETbb236c7d08->map_cpu];
DATARMNETecc0627c70.DATARMNET75af9f3c31=(0xd26+209-0xdf6);spin_unlock_bh(&
DATARMNET3764d083f0);DATARMNETbb236c7d08->DATARMNET0371465875=(0xd2d+202-0xdf7);
for((skb=DATARMNETc88d0a6cdd);skb!=NULL;skb=DATARMNETcebafc57a4){
DATARMNETcebafc57a4=skb->next;skb->next=NULL;DATARMNETde8ee16f92(
DATARMNETbb236c7d08);rmnet_rx_handler(&skb);DATARMNET3e37ad2816(
DATARMNETbb236c7d08,&DATARMNETa4055affd5->DATARMNET3dc4262f53);}spin_lock_bh(&
DATARMNET3764d083f0);DATARMNETa871eeb7e7();DATARMNETecc0627c70.
DATARMNET75af9f3c31=(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNETfeee6933fc=
(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNET6625085b71=DATARMNETecc0627c70.
DATARMNET7d667e828e;}DATARMNET52de1f3dc0(DATARMNET4510abc30d,DATARMNET576793621a
,DATARMNETecc0627c70.DATARMNETa2e32cdd3a,DATARMNETecc0627c70.DATARMNETc252a1f55d
,DATARMNET8bf94cc2f7,DATARMNET76192fa639,NULL,NULL);if((DATARMNETecc0627c70.
DATARMNETc252a1f55d<=(0xd2d+202-0xdf7))||(DATARMNETecc0627c70.
DATARMNETa2e32cdd3a<=(0xd2d+202-0xdf7))){DATARMNETecc0627c70.DATARMNETc252a1f55d
=(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNETa2e32cdd3a=(0xd2d+202-0xdf7);
DATARMNETecc0627c70.DATARMNETd9cfd2812b=(0xd2d+202-0xdf7);DATARMNETecc0627c70.
DATARMNET34097703c8=DATARMNET8dcf06727b;}}void DATARMNETa4bf9fbf64(u8
DATARMNETded3da1a77,u8 DATARMNET5447204733){spin_lock_bh(&DATARMNET3764d083f0);
DATARMNETe377e0368d(DATARMNETded3da1a77,DATARMNET5447204733);spin_unlock_bh(&
DATARMNET3764d083f0);if(DATARMNET5447204733==DATARMNET5b5927fd7e){if(
DATARMNET365ddeca1c&&DATARMNETecc0627c70.DATARMNETc252a1f55d&&
DATARMNETecc0627c70.DATARMNETa2e32cdd3a){if(hrtimer_active(&DATARMNETecc0627c70.
DATARMNET6fd692fc7a))hrtimer_cancel(&DATARMNETecc0627c70.DATARMNET6fd692fc7a);
hrtimer_start(&DATARMNETecc0627c70.DATARMNET6fd692fc7a,ns_to_ktime(
DATARMNET4ac8af832c*DATARMNET68fc0be252),HRTIMER_MODE_REL);}DATARMNET14ed771dfb[
DATARMNETd45c383019]++;}}void DATARMNET495dab3d72(struct sk_buff*skb,struct
DATARMNET63d7680df2*DATARMNET63b1a086d5,struct rmnet_shs_clnt_s*
DATARMNET0bf01e7c6f){u8 DATARMNET7d63e92341=(0xd2d+202-0xdf7);
DATARMNET04c88b8191);if(DATARMNET3f85732c70->DATARMNETae4b27456e.
DATARMNET6215127f48){if(DATARMNET3f85732c70->DATARMNET85c698ec34)
DATARMNETbb236c7d08=DATARMNET3f85732c70;DATARMNETed7800fc72=DATARMNET3f85732c70
->DATARMNETae4b27456e.DATARMNET6215127f48;DATARMNETa0df5de99d=
DATARMNET3f85732c70->DATARMNETae4b27456e.DATARMNETbaa5765693;DATARMNETa6424e3c4e
=DATARMNET3f85732c70->DATARMNETae4b27456e.DATARMNET35234676d4;
DATARMNET77e7f4db43=DATARMNETe02152c351(DATARMNET3f85732c70,DATARMNETded3da1a77,
DATARMNET5447204733,&DATARMNETc88d0a6cdd);if(DATARMNET77e7f4db43){
DATARMNET870611bedd+=DATARMNETa6424e3c4e;DATARMNET8bf94cc2f7+=
DATARMNETed7800fc72;DATARMNET76192fa639+=DATARMNETa0df5de99d;DATARMNET0997c5650d
[DATARMNET3f85732c70->map_cpu].DATARMNET4133fc9428-=DATARMNETed7800fc72;
DATARMNET3f85732c70->DATARMNETae4b27456e.DATARMNET35234676d4=(0xd2d+202-0xdf7);
if(DATARMNET3f85732c70->map_cpu==DATARMNET42a992465f){DATARMNET61ab18a4bd+=
DATARMNETed7800fc72;DATARMNET3f85732c70->queue_head=DATARMNET61ab18a4bd;}}}}if(
DATARMNET12565c8f98){if(DATARMNET362b15f941(DATARMNET42a992465f)&&!
DATARMNET0997c5650d[DATARMNET42a992465f].DATARMNETef866573e0)DATARMNETfa919d00dc
(DATARMNET42a992465f,DATARMNET870611bedd);DATARMNETc54e431bbc(
DATARMNET42a992465f,DATARMNET20dc6bb36b,(0xd2d+202-0xdf7));}if(
DATARMNET0997c5650d[DATARMNET42a992465f].DATARMNET4133fc9428<(0xd2d+202-0xdf7))
DATARMNET68d84e7b98[DATARMNET33a9d4759f]++;if(DATARMNETeea3cef5b6(
DATARMNET42a992465f)>=DATARMNET84413d1257[DATARMNET42a992465f])
DATARMNET84413d1257[DATARMNET42a992465f]=DATARMNETeea3cef5b6(DATARMNET42a992465f
);}DATARMNETecc0627c70.DATARMNETc252a1f55d-=DATARMNET76192fa639;
DATARMNETecc0627c70.DATARMNETa2e32cdd3a-=DATARMNET8bf94cc2f7;if(
DATARMNETc88d0a6cdd&&DATARMNETbb236c7d08){DATARMNETa4055affd5=&
DATARMNET0997c5650d[DATARMNETbb236c7d08->map_cpu];DATARMNETecc0627c70.
DATARMNET75af9f3c31=(0xd26+209-0xdf6);spin_unlock_bh(&DATARMNET3764d083f0);
DATARMNETbb236c7d08->DATARMNET0371465875=(0xd2d+202-0xdf7);for((skb=
DATARMNETc88d0a6cdd);skb!=NULL;skb=DATARMNETcebafc57a4){DATARMNETcebafc57a4=skb
->next;skb->next=NULL;DATARMNETde8ee16f92(DATARMNETbb236c7d08);rmnet_rx_handler(
&skb);DATARMNET3e37ad2816(DATARMNETbb236c7d08,&DATARMNETa4055affd5->
DATARMNET3dc4262f53);}spin_lock_bh(&DATARMNET3764d083f0);DATARMNETa871eeb7e7();
DATARMNETecc0627c70.DATARMNET75af9f3c31=(0xd2d+202-0xdf7);DATARMNETecc0627c70.
DATARMNETfeee6933fc=(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNET6625085b71=
DATARMNETecc0627c70.DATARMNET7d667e828e;}DATARMNET52de1f3dc0(DATARMNET4510abc30d
,DATARMNET576793621a,DATARMNETecc0627c70.DATARMNETa2e32cdd3a,DATARMNETecc0627c70
.DATARMNETc252a1f55d,DATARMNET8bf94cc2f7,DATARMNET76192fa639,NULL,NULL);if((
DATARMNETecc0627c70.DATARMNETc252a1f55d<=(0xd2d+202-0xdf7))||(
DATARMNETecc0627c70.DATARMNETa2e32cdd3a<=(0xd2d+202-0xdf7))){DATARMNETecc0627c70
.DATARMNETc252a1f55d=(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNETa2e32cdd3a=
(0xd2d+202-0xdf7);DATARMNETecc0627c70.DATARMNETd9cfd2812b=(0xd2d+202-0xdf7);
DATARMNETecc0627c70.DATARMNET34097703c8=DATARMNET8dcf06727b;}}void
DATARMNETa4bf9fbf64(u8 DATARMNETded3da1a77,u8 DATARMNET5447204733){spin_lock_bh(
&DATARMNET3764d083f0);DATARMNETe377e0368d(DATARMNETded3da1a77,
DATARMNET5447204733);spin_unlock_bh(&DATARMNET3764d083f0);if(DATARMNET5447204733
==DATARMNET5b5927fd7e){if(DATARMNET365ddeca1c&&DATARMNETecc0627c70.
DATARMNETc252a1f55d&&DATARMNETecc0627c70.DATARMNETa2e32cdd3a){if(hrtimer_active(
&DATARMNETecc0627c70.DATARMNET6fd692fc7a))hrtimer_cancel(&DATARMNETecc0627c70.
DATARMNET6fd692fc7a);hrtimer_start(&DATARMNETecc0627c70.DATARMNET6fd692fc7a,
ns_to_ktime(DATARMNET4ac8af832c*DATARMNET68fc0be252),HRTIMER_MODE_REL);}
DATARMNET14ed771dfb[DATARMNETd45c383019]++;}}void DATARMNET495dab3d72(struct
sk_buff*skb,struct DATARMNET63d7680df2*DATARMNET63b1a086d5,struct
rmnet_shs_clnt_s*DATARMNET0bf01e7c6f){u8 DATARMNET7d63e92341=(0xd2d+202-0xdf7);
#ifdef DATARMNETdddae6afa8
if(!(DATARMNET0bf01e7c6f->config&DATARMNET2a76d433b3)&&skb->cb[
DATARMNET8fadb49f38]){DATARMNETe377e0368d((0xd2d+202-0xdf7),DATARMNET0b15fd8b54)
@@ -686,34 +686,36 @@ DATARMNETecc0627c70.DATARMNET34097703c8==DATARMNETb3e3b2a799){
DATARMNET52de1f3dc0(DATARMNET4510abc30d,DATARMNET83147a2e7d,DATARMNETecc0627c70.
DATARMNET34097703c8,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
NULL,NULL);schedule_work((struct work_struct*)&shs_rx_work);}}return ret;}enum
hrtimer_restart DATARMNET2ba63fbd90(struct hrtimer*DATARMNET6e4292679f){
__pm_relax(DATARMNETecc0627c70.ws);DATARMNET064fbe9e3a=(0xd26+209-0xdf6);return
HRTIMER_NORESTART;}enum hrtimer_restart DATARMNETc2b0dbbb32(struct hrtimer*
DATARMNET6e4292679f){struct DATARMNET177911299b DATARMNET9f249e95ee;struct
timespec64 time;DATARMNET8d0d510d45(DATARMNETe4a6d22824,&DATARMNET9f249e95ee);
DATARMNETb5d58adbe7(&DATARMNET9f249e95ee);ktime_get_boottime_ts64(&time);
DATARMNETecc0627c70.DATARMNETdd3caf9200=ktime_set(time.tv_sec,time.tv_nsec);
return HRTIMER_NORESTART;}enum hrtimer_restart DATARMNET4ce9744605(struct
hrtimer*DATARMNET6e4292679f){struct DATARMNETa6f2d499b2*DATARMNET4b39dc1574=
container_of(DATARMNET6e4292679f,struct DATARMNETa6f2d499b2,DATARMNET758a55f103)
;DATARMNET371703c28d();schedule_work(&DATARMNET4b39dc1574->DATARMNET33110a3ff5);
return HRTIMER_NORESTART;}enum hrtimer_restart DATARMNETbfbe1f5cd0(struct
hrtimer*DATARMNET6e4292679f){DATARMNETda96251102(DATARMNETb77d87790d,
DATARMNET623224aa0a,(0xd2d+202-0xdf7),(0xd2d+202-0xdf7),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);DATARMNETf20806b279();return HRTIMER_NORESTART;}
void DATARMNETe1f95274f1(void){int i;for(i=(0xd2d+202-0xdf7);i<
DATARMNETc6782fed88;i++){DATARMNETecc0627c70.DATARMNET132b9c7dc4[i].
DATARMNET42c3ecbd5e=i;INIT_WORK(&DATARMNETecc0627c70.DATARMNET132b9c7dc4[i].
DATARMNET33110a3ff5,DATARMNET056c939591);hrtimer_init(&DATARMNETecc0627c70.
DATARMNET132b9c7dc4[i].DATARMNET758a55f103,CLOCK_MONOTONIC,HRTIMER_MODE_REL);
DATARMNETecc0627c70.DATARMNET132b9c7dc4[i].DATARMNET758a55f103.function=
DATARMNET4ce9744605;}hrtimer_init(&DATARMNETecc0627c70.DATARMNET6fd692fc7a,
CLOCK_MONOTONIC,HRTIMER_MODE_REL);DATARMNETecc0627c70.DATARMNET6fd692fc7a.
function=DATARMNET0a73c53ae6;hrtimer_init(&DATARMNETecc0627c70.
DATARMNET645fb3b468,CLOCK_MONOTONIC,HRTIMER_MODE_REL);DATARMNETecc0627c70.
DATARMNET645fb3b468.function=DATARMNETc2b0dbbb32;hrtimer_init(&
DATARMNETecc0627c70.hrtimer_wake,CLOCK_MONOTONIC,HRTIMER_MODE_REL);
DATARMNETecc0627c70.hrtimer_wake.function=DATARMNET2ba63fbd90;hrtimer_init(&
hrtimer_restart DATARMNET2ba63fbd90(struct hrtimer*DATARMNET6e4292679f){const
enum hrtimer_restart ret=HRTIMER_NORESTART;__pm_relax(DATARMNETecc0627c70.ws);
DATARMNET064fbe9e3a=(0xd26+209-0xdf6);return ret;}enum hrtimer_restart
DATARMNETc2b0dbbb32(struct hrtimer*DATARMNET6e4292679f){const enum
hrtimer_restart ret=HRTIMER_NORESTART;struct DATARMNET177911299b
DATARMNET9f249e95ee;struct timespec64 time;DATARMNET8d0d510d45(
DATARMNETe4a6d22824,&DATARMNET9f249e95ee);DATARMNETb5d58adbe7(&
DATARMNET9f249e95ee);ktime_get_boottime_ts64(&time);DATARMNETecc0627c70.
DATARMNETdd3caf9200=ktime_set(time.tv_sec,time.tv_nsec);return ret;}enum
hrtimer_restart DATARMNET4ce9744605(struct hrtimer*DATARMNET6e4292679f){const
enum hrtimer_restart ret=HRTIMER_NORESTART;struct DATARMNETa6f2d499b2*
DATARMNET4b39dc1574=container_of(DATARMNET6e4292679f,struct DATARMNETa6f2d499b2,
DATARMNET758a55f103);DATARMNET371703c28d();schedule_work(&DATARMNET4b39dc1574->
DATARMNET33110a3ff5);return ret;}enum hrtimer_restart DATARMNETbfbe1f5cd0(struct
hrtimer*DATARMNET6e4292679f){const enum hrtimer_restart ret=HRTIMER_NORESTART;
DATARMNETda96251102(DATARMNETb77d87790d,DATARMNET623224aa0a,(0xd2d+202-0xdf7),
(0xd2d+202-0xdf7),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);
DATARMNETf20806b279();return ret;}void DATARMNETe1f95274f1(void){int i;for(i=
(0xd2d+202-0xdf7);i<DATARMNETc6782fed88;i++){DATARMNETecc0627c70.
DATARMNET132b9c7dc4[i].DATARMNET42c3ecbd5e=i;INIT_WORK(&DATARMNETecc0627c70.
DATARMNET132b9c7dc4[i].DATARMNET33110a3ff5,DATARMNET056c939591);hrtimer_init(&
DATARMNETecc0627c70.DATARMNET132b9c7dc4[i].DATARMNET758a55f103,CLOCK_MONOTONIC,
HRTIMER_MODE_REL);DATARMNETecc0627c70.DATARMNET132b9c7dc4[i].DATARMNET758a55f103
.function=DATARMNET4ce9744605;}hrtimer_init(&DATARMNETecc0627c70.
DATARMNET6fd692fc7a,CLOCK_MONOTONIC,HRTIMER_MODE_REL);DATARMNETecc0627c70.
DATARMNET6fd692fc7a.function=DATARMNET0a73c53ae6;hrtimer_init(&
DATARMNETecc0627c70.DATARMNET645fb3b468,CLOCK_MONOTONIC,HRTIMER_MODE_REL);
DATARMNETecc0627c70.DATARMNET645fb3b468.function=DATARMNETc2b0dbbb32;
hrtimer_init(&DATARMNETecc0627c70.hrtimer_wake,CLOCK_MONOTONIC,HRTIMER_MODE_REL)
;DATARMNETecc0627c70.hrtimer_wake.function=DATARMNET2ba63fbd90;hrtimer_init(&
DATARMNETecc0627c70.DATARMNET533dba0f29,CLOCK_MONOTONIC,HRTIMER_MODE_REL);
DATARMNETecc0627c70.DATARMNET533dba0f29.function=DATARMNETbfbe1f5cd0;INIT_WORK(&
shs_rx_work.DATARMNET33110a3ff5,DATARMNETa7afc8cdaa);}unsigned int
@@ -764,7 +766,7 @@ DATARMNET23f8730007,DATARMNET4ebbc01257,dlhdr->le.seq,dlhdr->le.pkts,
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);if(!DATARMNET016351c9e4||
DATARMNET016351c9e4<dlhdr->le.seq)DATARMNET016351c9e4=dlhdr->le.seq;else{if(
DATARMNET756bdd424a)pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x4f\x46\x4f\x20\x64\x6c\x20\x73\x65\x71\x20\x25\x75\x20\x62\x65\x66\x6f\x72\x65\x20\x25\x75" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x3a\x20\x4f\x46\x4f\x20\x64\x6c\x20\x73\x65\x71\x20\x25\x75\x20\x62\x65\x66\x6f\x72\x65\x20\x25\x75"
,DATARMNET016351c9e4,dlhdr->le.seq);DATARMNET016351c9e4=dlhdr->le.seq;
DATARMNET68d84e7b98[DATARMNETf352684b06]++;}if(!spin_is_locked(&
DATARMNET3764d083f0)){if(DATARMNETecc0627c70.DATARMNETa2e32cdd3a>
@@ -797,9 +799,9 @@ DATARMNETc6782fed88;DATARMNET0e4304d903++)INIT_LIST_HEAD(&DATARMNET0997c5650d[
DATARMNET0e4304d903].DATARMNET3dc4262f53);DATARMNETe6e8431304();
DATARMNET44499733f2();rc=register_oom_notifier(&DATARMNET105c85d84c);if(rc<
(0xd2d+202-0xdf7)){pr_info(
"\x52\x6d\x6e\x65\x74\x5f\x73\x68\x73\x5f\x6f\x6f\x6d\x20\x72\x65\x67\x69\x73\x74\x65\x72\x20\x66\x61\x69\x6c\x75\x72\x65" "\n"
"\x52\x6d\x6e\x65\x74\x5f\x73\x68\x73\x5f\x6f\x6f\x6d\x20\x72\x65\x67\x69\x73\x74\x65\x72\x20\x66\x61\x69\x6c\x75\x72\x65"
);}pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x69\x74\x20\x77\x69\x74\x68\x20\x25\x78" "\n"
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x69\x74\x20\x77\x69\x74\x68\x20\x25\x78"
,DATARMNETecc0627c70.map_mask);DATARMNETecc0627c70.ws=wakeup_source_register(
NULL,"\x52\x4d\x4e\x45\x54\x5f\x53\x48\x53");DATARMNETecc0627c70.
DATARMNETfc89d842ae=(0xd26+209-0xdf6);}void DATARMNETeacad8334e(void){struct
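
A large part of the rmnet_shs_main.c hunks above is hrtimer boilerplate that merely got re-wrapped: every expiry callback still returns HRTIMER_NORESTART (the Samsung variant just routes it through a const local), and the timers are still set up with hrtimer_init() plus a .function assignment and armed with hrtimer_start(). A generic sketch of that pattern, with an illustrative timer name and a 2 ms delay that is not taken from the driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer wake_timer;

static enum hrtimer_restart wake_timer_fn(struct hrtimer *t)
{
	const enum hrtimer_restart ret = HRTIMER_NORESTART;

	/* ... release the wakeup source, push stats, etc. ... */
	return ret;	/* one-shot: the timer is not restarted */
}

static void wake_timer_setup(void)
{
	hrtimer_init(&wake_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wake_timer.function = wake_timer_fn;
	hrtimer_start(&wake_timer, ms_to_ktime(2), HRTIMER_MODE_REL);
}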

View File

@@ -842,7 +842,7 @@ DATARMNET3764d083f0);rcu_read_unlock();}void DATARMNETe69c918dc8(struct
DATARMNET9b44b71ee9*ep){struct rps_map*map;u8 len=(0xd2d+202-0xdf7);if(!ep||!ep
->ep){DATARMNET68d84e7b98[DATARMNETb8fe2c0e64]++;return;}rcu_read_lock();if(!ep
->ep){pr_info(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x73\x74\x61\x74\x65\x20\x25\x70" "\n"
"\x20\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x73\x74\x61\x74\x65\x20\x25\x70"
,ep->ep);DATARMNET68d84e7b98[DATARMNETb8fe2c0e64]++;return;}map=rcu_dereference(
ep->ep->_rx->rps_map);ep->DATARMNET9fb369ce5f=(0xd2d+202-0xdf7);if(map!=NULL){
for(len=(0xd2d+202-0xdf7);len<map->len;len++)ep->DATARMNET9fb369ce5f|=(
@@ -932,49 +932,46 @@ DATARMNET0763436b8d.prev=NULL;kfree(ep);}}void DATARMNETf7dcab9a9e(void){if(!
DATARMNETf141197982||!DATARMNET9dc7755be5)return;DATARMNET28d33bd09f();
DATARMNET5945236cd3(DATARMNET19092afcc2);trace_rmnet_shs_wq_high(
DATARMNETc1e19aa345,DATARMNET7cf840e991,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);DATARMNET4063c95208();
cancel_delayed_work_sync(&DATARMNET9dc7755be5->DATARMNET1150269da2);
drain_workqueue(DATARMNETf141197982);destroy_workqueue(DATARMNETf141197982);
kfree(DATARMNET9dc7755be5);DATARMNET9dc7755be5=NULL;DATARMNETf141197982=NULL;
DATARMNET39391a8bc5(DATARMNETc5db038c35);DATARMNET5fb4151598();
trace_rmnet_shs_wq_high(DATARMNETc1e19aa345,DATARMNETa5cdfd53b3,
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);cancel_delayed_work_sync(&
DATARMNET9dc7755be5->DATARMNET1150269da2);drain_workqueue(DATARMNETf141197982);
destroy_workqueue(DATARMNETf141197982);kfree(DATARMNET9dc7755be5);
DATARMNET9dc7755be5=NULL;DATARMNETf141197982=NULL;DATARMNET39391a8bc5(
DATARMNETc5db038c35);DATARMNET5fb4151598();trace_rmnet_shs_wq_high(
DATARMNETc1e19aa345,DATARMNETa5cdfd53b3,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);}void DATARMNETd3d1d13f44(void)
{u8 DATARMNET42a992465f;struct DATARMNET228056d4b7*DATARMNET7bea4a06a6;for(
DATARMNET42a992465f=(0xd2d+202-0xdf7);DATARMNET42a992465f<DATARMNETc6782fed88;
DATARMNET42a992465f++){trace_rmnet_shs_wq_high(DATARMNET92b282b12c,
DATARMNET57cad43bb7,DATARMNET42a992465f,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);DATARMNET7bea4a06a6=&DATARMNET6cdd58e74c.
DATARMNET73464778dc[DATARMNET42a992465f];INIT_LIST_HEAD(&DATARMNET7bea4a06a6->
DATARMNETab5c1e9ad5);DATARMNET7bea4a06a6->DATARMNET42a992465f=
DATARMNET42a992465f;}}void DATARMNET4063c95208(void){int cpu;DATARMNETaf95716235
++;if(DATARMNETf141197982&&DATARMNET9dc7755be5)cancel_delayed_work_sync(&
DATARMNET9dc7755be5->DATARMNET1150269da2);for(cpu=(0xd2d+202-0xdf7);cpu<
DATARMNETc6782fed88;cpu++)DATARMNET2d482e7d9f[cpu]=(0xd2d+202-0xdf7);
rcu_read_lock();if((DATARMNETecc0627c70.DATARMNET7d667e828e!=DATARMNETf81c265415
)&&(((0xd26+209-0xdf6)<<DATARMNETf81c265415)&~DATARMNETf55430ea0a)){
DATARMNETecc0627c70.DATARMNET5c24e1df05=DATARMNETf81c265415;DATARMNET8f9da46b14(
);DATARMNETb7ddf3c5dd[DATARMNET6ea8a58f4e]++;}rcu_read_unlock();}void
DATARMNET7b6c061b06(void){DATARMNET7e039054c6++;if(DATARMNETf141197982&&
DATARMNET9dc7755be5)queue_delayed_work(DATARMNETf141197982,&DATARMNET9dc7755be5
->DATARMNET1150269da2,(0xd2d+202-0xdf7));}void DATARMNET3ae0d614d6(void){if(
DATARMNETf141197982)return;DATARMNETf5f83b943f();trace_rmnet_shs_wq_high(
DATARMNET9104d544fa,DATARMNET1b421b0381,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);DATARMNETf141197982=
alloc_workqueue("\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x5f\x77\x71",WQ_UNBOUND,
(0xd26+209-0xdf6));if(!DATARMNETf141197982){DATARMNET68d84e7b98[
DATARMNETecdf13872c]++;return;}DATARMNET9dc7755be5=kmalloc(sizeof(struct
DATARMNETa144e2bd00),GFP_ATOMIC);if(!DATARMNET9dc7755be5){DATARMNET68d84e7b98[
DATARMNETd1687e0776]++;DATARMNETf7dcab9a9e();return;}DATARMNETf5b8fce55d(
DATARMNET138a989ecb,(0xd26+209-0xdf6));DATARMNETd3d1d13f44();INIT_DELAYED_WORK(&
DATARMNET9dc7755be5->DATARMNET1150269da2,DATARMNETb4b5fc9686);
trace_rmnet_shs_wq_high(DATARMNET9104d544fa,DATARMNETb196f64ee0,
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL
,NULL);}void DATARMNETd3d1d13f44(void){u8 DATARMNET42a992465f;struct
DATARMNET228056d4b7*DATARMNET7bea4a06a6;for(DATARMNET42a992465f=
(0xd2d+202-0xdf7);DATARMNET42a992465f<DATARMNETc6782fed88;DATARMNET42a992465f++)
{trace_rmnet_shs_wq_high(DATARMNET92b282b12c,DATARMNET57cad43bb7,
DATARMNET42a992465f,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
NULL,NULL);DATARMNET7bea4a06a6=&DATARMNET6cdd58e74c.DATARMNET73464778dc[
DATARMNET42a992465f];INIT_LIST_HEAD(&DATARMNET7bea4a06a6->DATARMNETab5c1e9ad5);
DATARMNET7bea4a06a6->DATARMNET42a992465f=DATARMNET42a992465f;}}void
DATARMNET4063c95208(void){int cpu;struct DATARMNET177911299b msg;
DATARMNETaf95716235++;if(DATARMNETf141197982&&DATARMNET9dc7755be5)
cancel_delayed_work_sync(&DATARMNET9dc7755be5->DATARMNET1150269da2);for(cpu=
(0xd2d+202-0xdf7);cpu<DATARMNETc6782fed88;cpu++)DATARMNET2d482e7d9f[cpu]=
(0xd2d+202-0xdf7);rcu_read_lock();if((DATARMNETecc0627c70.DATARMNET7d667e828e!=
DATARMNETf81c265415)&&(((0xd26+209-0xdf6)<<DATARMNETf81c265415)&~
DATARMNETf55430ea0a)){DATARMNETecc0627c70.DATARMNET5c24e1df05=
DATARMNETf81c265415;DATARMNET8f9da46b14();DATARMNETb7ddf3c5dd[
DATARMNET6ea8a58f4e]++;}rcu_read_unlock();DATARMNET88ef60041c((0xd2d+202-0xdf7),
&msg);DATARMNETb5d58adbe7(&msg);}void DATARMNET7b6c061b06(void){
DATARMNET7e039054c6++;if(DATARMNETf141197982&&DATARMNET9dc7755be5)
queue_delayed_work(DATARMNETf141197982,&DATARMNET9dc7755be5->DATARMNET1150269da2
,(0xd2d+202-0xdf7));}void DATARMNET3ae0d614d6(void){if(DATARMNETf141197982)
return;DATARMNETf5f83b943f();trace_rmnet_shs_wq_high(DATARMNET9104d544fa,
DATARMNET1b421b0381,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);DATARMNETf141197982=alloc_workqueue(
"\x72\x6d\x6e\x65\x74\x5f\x73\x68\x73\x5f\x77\x71",WQ_UNBOUND,(0xd26+209-0xdf6))
;if(!DATARMNETf141197982){DATARMNET68d84e7b98[DATARMNETecdf13872c]++;return;}
DATARMNET9dc7755be5=kmalloc(sizeof(struct DATARMNETa144e2bd00),GFP_ATOMIC);if(!
DATARMNET9dc7755be5){DATARMNET68d84e7b98[DATARMNETd1687e0776]++;
DATARMNETf7dcab9a9e();return;}DATARMNETf5b8fce55d(DATARMNET138a989ecb,
(0xd26+209-0xdf6));DATARMNETd3d1d13f44();INIT_DELAYED_WORK(&DATARMNET9dc7755be5
->DATARMNET1150269da2,DATARMNETb4b5fc9686);trace_rmnet_shs_wq_high(
DATARMNET9104d544fa,DATARMNETb196f64ee0,(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),(0x16e8+787-0xc0c),NULL,NULL);}int DATARMNETb709a938b5(u16
cpu){int flows=-(0xd26+209-0xdf6);if(cpu>=DATARMNETc6782fed88){
DATARMNET68d84e7b98[DATARMNET709b59b0e6]++;return flows;}flows=
DATARMNET6cdd58e74c.DATARMNET73464778dc[cpu].flows;trace_rmnet_shs_wq_low(
,NULL);}int DATARMNETb709a938b5(u16 cpu){int flows=-(0xd26+209-0xdf6);if(cpu>=
DATARMNETc6782fed88){DATARMNET68d84e7b98[DATARMNET709b59b0e6]++;return flows;}
flows=DATARMNET6cdd58e74c.DATARMNET73464778dc[cpu].flows;trace_rmnet_shs_wq_low(
DATARMNET39a68a0eba,DATARMNET0e287157de,cpu,flows,(0x16e8+787-0xc0c),
(0x16e8+787-0xc0c),NULL,NULL);return flows;}int DATARMNET392890a12b(void){u16
cpu;int DATARMNET59bd820724=-(0xd26+209-0xdf6);int DATARMNET847bd62811;for(cpu=
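
The workqueue hunk above reshuffles the teardown path, but the lifecycle it manipulates is the standard one: an unbound workqueue created with alloc_workqueue("rmnet_shs_wq", WQ_UNBOUND, 1), a delayed work item queued onto it, and a teardown that runs cancel_delayed_work_sync(), drain_workqueue(), destroy_workqueue() and kfree() in that order. A minimal sketch with illustrative context-struct and function names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct shs_wq_ctx {
	struct delayed_work update_work;
};

static struct workqueue_struct *shs_wq;
static struct shs_wq_ctx *shs_ctx;

static void shs_update_fn(struct work_struct *work)
{
	/* periodic per-CPU flow statistics refresh would go here */
}

static int shs_wq_start(void)
{
	shs_wq = alloc_workqueue("rmnet_shs_wq", WQ_UNBOUND, 1);
	if (!shs_wq)
		return -ENOMEM;

	shs_ctx = kzalloc(sizeof(*shs_ctx), GFP_KERNEL);
	if (!shs_ctx) {
		destroy_workqueue(shs_wq);
		shs_wq = NULL;
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&shs_ctx->update_work, shs_update_fn);
	queue_delayed_work(shs_wq, &shs_ctx->update_work, 0);
	return 0;
}

static void shs_wq_stop(void)
{
	cancel_delayed_work_sync(&shs_ctx->update_work);
	drain_workqueue(shs_wq);
	destroy_workqueue(shs_wq);
	kfree(shs_ctx);
	shs_ctx = NULL;
	shs_wq = NULL;
}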

View File

@@ -323,16 +323,6 @@ DATARMNETaf3d356342=DATARMNETaf3d356342;DATARMNET7baa284dc5.DATARMNET43a8300dfd=
DATARMNETdf2dbc641f),&DATARMNET7baa284dc5,sizeof(DATARMNET7baa284dc5));
DATARMNET60b6e12cfd->list[(0xd2d+202-0xdf7)].msg_type=DATARMNETfce267cbe9;
DATARMNET60b6e12cfd->valid=(0xd26+209-0xdf6);DATARMNET60b6e12cfd->list_len=
(0xd26+209-0xdf6);}void DATARMNET88ef60041c(uint8_t seq,struct
DATARMNET177911299b*DATARMNET60b6e12cfd){struct DATARMNET1564093dbc
DATARMNET28ce320ddf;if(DATARMNET60b6e12cfd==NULL){rm_err("\x25\x73",
"\x53\x48\x53\x5f\x4d\x53\x47\x5f\x47\x4e\x4c\x20\x2d\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x70\x75\x74"
);return;}memset(DATARMNET60b6e12cfd,(0xd2d+202-0xdf7),sizeof(struct
DATARMNET177911299b));memset(&DATARMNET28ce320ddf,(0xd2d+202-0xdf7),sizeof(
DATARMNET28ce320ddf));memcpy(&(DATARMNET60b6e12cfd->list[(0xd2d+202-0xdf7)].
DATARMNETdf2dbc641f),&DATARMNET28ce320ddf,sizeof(DATARMNET28ce320ddf));
DATARMNET60b6e12cfd->list[(0xd2d+202-0xdf7)].msg_type=DATARMNET890e50739c;
DATARMNET60b6e12cfd->valid=(0xd26+209-0xdf6);DATARMNET60b6e12cfd->list_len=
(0xd26+209-0xdf6);}void DATARMNET1d4b1eff85(struct DATARMNET177911299b*
DATARMNET60b6e12cfd,uint8_t DATARMNET907a90c6af,uint8_t DATARMNET9a4544e068){
struct DATARMNET80e227e008 DATARMNETc909849dcb;struct timespec64 time;if(

View File

@@ -56,9 +56,8 @@ DATARMNET5f0371060e,DATARMNETc08daf87d4,DATARMNET8070cc0bdc,DATARMNETc2be398ed4,
#define DATARMNET3b631aeccb ((0xeb7+712-0x111d))
#define DATARMNET8a917ef593 ((0xd26+209-0xdf6))
struct DATARMNETe5f1cf1a69{uint32_t DATARMNETaf3d356342;uint8_t
DATARMNET43a8300dfd;};struct DATARMNET1564093dbc{uint8_t seq;};enum{
DATARMNET68b3f1699c=(0xd2d+202-0xdf7),DATARMNETfce267cbe9=(0xd26+209-0xdf6),
DATARMNETf41c724abf=(0xd1f+216-0xdf5),DATARMNET890e50739c=(0xd18+223-0xdf4),};
DATARMNET43a8300dfd;};enum{DATARMNET68b3f1699c=(0xd2d+202-0xdf7),
DATARMNETfce267cbe9=(0xd26+209-0xdf6),DATARMNETf41c724abf=(0xd1f+216-0xdf5),};
struct DATARMNET4a3b3209dd{char DATARMNETdf2dbc641f[DATARMNET3b631aeccb];
uint16_t msg_type;};struct DATARMNET25187800fe{int valid;};struct
DATARMNET177911299b{struct DATARMNET4a3b3209dd list[DATARMNET8a917ef593];
@@ -83,9 +82,8 @@ DATARMNET2b7c02fa2c(struct sk_buff*DATARMNETaafc1d9519,struct genl_info*
DATARMNET54338da2ff);int DATARMNET5d4ca1da1c(struct genl_info*
DATARMNET54338da2ff,int val);int DATARMNET5945236cd3(int val);int
DATARMNETa9a7fa898c(void);void DATARMNET8d0d510d45(uint32_t DATARMNETaf3d356342,
struct DATARMNET177911299b*DATARMNET60b6e12cfd);void DATARMNET88ef60041c(uint8_t
seq,struct DATARMNET177911299b*DATARMNET60b6e12cfd);int DATARMNETb5d58adbe7(
struct DATARMNET177911299b*msg_ptr);int DATARMNETd65d1351b9(struct sk_buff*
struct DATARMNET177911299b*DATARMNET60b6e12cfd);int DATARMNETb5d58adbe7(struct
DATARMNET177911299b*msg_ptr);int DATARMNETd65d1351b9(struct sk_buff*
DATARMNETaafc1d9519,struct genl_info*DATARMNET54338da2ff);void
DATARMNET1d4b1eff85(struct DATARMNET177911299b*DATARMNET60b6e12cfd,uint8_t
DATARMNET907a90c6af,uint8_t DATARMNET9a4544e068);int DATARMNET0dbc627e8f(void);

View File

@@ -1,87 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_APS_H_
#define _RMNET_APS_H_
#include <linux/skbuff.h>
#include <net/genetlink.h>
#ifdef RMNET_APS_DEBUG
#define aps_log(...) pr_err(__VA_ARGS__)
#else
#define aps_log(...)
#endif
struct rmnet_aps_flow_req {
u32 cmd;
u32 label;
u32 duration;
u32 ifindex;
u8 aps_prio;
u8 use_llc;
u8 use_llb;
u8 reserved;
};
struct rmnet_aps_flow_resp {
u32 cmd;
u32 cmd_data;
u32 label;
};
#define FILTER_MASK_SADDR 1
#define FILTER_MASK_DADDR 2
struct rmnet_aps_filter_req {
u32 cmd;
u32 label;
u32 ifindex;
s32 ip_type;
__be32 saddr[4];
__be32 daddr[4];
u16 sport;
u16 dport;
u32 flow_label;
u8 tos;
u8 tos_mask;
u8 l4_proto;
u8 filter_masks;
u8 reserved[68];
};
struct rmnet_aps_filter_resp {
u32 cmd;
u32 cmd_data;
u32 label;
};
struct rmnet_aps_pdn_config_req {
u32 ifindex;
u64 apn_mask;
u32 expire_ms;
u32 reserved[8];
};
struct rmnet_aps_pdn_config_resp {
u32 ifindex;
u32 reserved[7];
};
struct rmnet_aps_data_report {
u8 mux_id;
u8 type;
u8 sum_all_bearers;
u8 len;
u32 value[8];
};
int rmnet_aps_genl_flow_hdlr(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_aps_genl_pdn_config_hdlr(struct sk_buff *skb_2,
struct genl_info *info);
int rmnet_aps_genl_filter_hdlr(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_aps_genl_data_report_hdlr(struct sk_buff *skb_2,
struct genl_info *info);
#endif /* _RMNET_APS_H_ */

View File

@@ -1,63 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_aps_genl.h"
#include "rmnet_aps.h"
/* Static Functions and Definitions */
static struct nla_policy rmnet_aps_genl_attr_policy[RMNET_APS_GENL_ATTR_MAX +
1] = {
[RMNET_APS_GENL_ATTR_FLOW_REQ] =
NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_aps_flow_req)),
[RMNET_APS_GENL_ATTR_FLOW_RESP] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_flow_resp)),
[RMNET_APS_GENL_ATTR_PDN_CONFIG_REQ] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_pdn_config_req)),
[RMNET_APS_GENL_ATTR_PDN_CONFIG_RESP] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_pdn_config_resp)),
[RMNET_APS_GENL_ATTR_FILTER_REQ] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_filter_req)),
[RMNET_APS_GENL_ATTR_FILTER_RESP] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_filter_resp)),
[RMNET_APS_GENL_ATTR_DATA_REPORT] = NLA_POLICY_EXACT_LEN(
sizeof(struct rmnet_aps_data_report)),
};
#define RMNET_APS_GENL_OP(_cmd, _func) \
{ \
.cmd = _cmd, .doit = _func, .dumpit = NULL, .flags = 0, \
}
static const struct genl_ops rmnet_aps_genl_ops[] = {
RMNET_APS_GENL_OP(RMNET_APS_GENL_CMD_FLOW, rmnet_aps_genl_flow_hdlr),
RMNET_APS_GENL_OP(RMNET_APS_GENL_CMD_PDN_CONFIG,
rmnet_aps_genl_pdn_config_hdlr),
RMNET_APS_GENL_OP(RMNET_APS_GENL_CMD_FILTER,
rmnet_aps_genl_filter_hdlr),
RMNET_APS_GENL_OP(RMNET_APS_GENL_CMD_DATA_REPORT,
rmnet_aps_genl_data_report_hdlr),
};
struct genl_family rmnet_aps_genl_family = {
.hdrsize = 0,
.name = RMNET_APS_GENL_FAMILY_NAME,
.version = RMNET_APS_GENL_VERSION,
.maxattr = RMNET_APS_GENL_ATTR_MAX,
.policy = rmnet_aps_genl_attr_policy,
.ops = rmnet_aps_genl_ops,
.n_ops = ARRAY_SIZE(rmnet_aps_genl_ops),
};
/* register new generic netlink family */
int rmnet_aps_genl_init(void)
{
return genl_register_family(&rmnet_aps_genl_family);
}
/* Unregister the generic netlink family */
void rmnet_aps_genl_deinit(void)
{
genl_unregister_family(&rmnet_aps_genl_family);
}

View File

@@ -1,39 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_APS_GENL_H_
#define _RMNET_APS_GENL_H_
#include <net/genetlink.h>
/* Generic Netlink Definitions */
#define RMNET_APS_GENL_VERSION 1
#define RMNET_APS_GENL_FAMILY_NAME "RMNET_APS"
enum { RMNET_APS_GENL_CMD_UNSPEC,
RMNET_APS_GENL_CMD_FLOW,
RMNET_APS_GENL_CMD_PDN_CONFIG,
RMNET_APS_GENL_CMD_FILTER,
RMNET_APS_GENL_CMD_DATA_REPORT,
__RMNET_APS_GENL_CMD_MAX,
};
enum { RMNET_APS_GENL_ATTR_UNSPEC,
RMNET_APS_GENL_ATTR_FLOW_REQ,
RMNET_APS_GENL_ATTR_FLOW_RESP,
RMNET_APS_GENL_ATTR_PDN_CONFIG_REQ,
RMNET_APS_GENL_ATTR_PDN_CONFIG_RESP,
RMNET_APS_GENL_ATTR_FILTER_REQ,
RMNET_APS_GENL_ATTR_FILTER_RESP,
RMNET_APS_GENL_ATTR_DATA_REPORT,
__RMNET_APS_GENL_ATTR_MAX,
};
#define RMNET_APS_GENL_ATTR_MAX (__RMNET_APS_GENL_ATTR_MAX - 1)
int rmnet_aps_genl_init(void);
void rmnet_aps_genl_deinit(void);
#endif /*_RMNET_APS_GENL_H_*/

File diff suppressed because it is too large

View File

@@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_MEM_H_
#define _RMNET_MEM_H_
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#define IPA_ID 1
#define RMNET_CORE_ID 2
#define POOL_NOTIF 3
#define RMNET_MEM_SUCCESS 0
#define RMNET_MEM_FAIL -1
#define RMNET_MEM_DOWNGRADE -2
#define RMNET_MEM_UPGRADE -3
#define NS_IN_MS 1000000
int rmnet_mem_unregister_notifier(struct notifier_block *nb);
int rmnet_mem_register_notifier(struct notifier_block *nb);
void rmnet_mem_pb_ind(void);
int rmnet_mem_get_pool_size(unsigned order);
extern struct rmnet_mem_notif_s rmnet_mem_notifier;
void rmnet_mem_put_page_entry(struct page *page);
void rmnet_mem_page_ref_inc_entry(struct page *page, unsigned id);
struct page* rmnet_mem_get_pages_entry(gfp_t gfp_mask, unsigned int order, int *code, int *pageorder, unsigned id);
#endif

View File

@@ -1,444 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include "rmnet_mem_nl.h"
#include "rmnet_mem.h"
#include "rmnet_mem_priv.h"
MODULE_LICENSE("GPL v2");
DEFINE_SPINLOCK(rmnet_mem_lock);
int rmnet_mem_id_gaveup[POOL_LEN];
module_param_array(rmnet_mem_id_gaveup, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_id_gaveup, "gaveup per id");
int max_pool_size[POOL_LEN] = { 0, 0, MAX_POOL_O2, MAX_POOL_O3};
module_param_array(max_pool_size, int, NULL, 0644);
MODULE_PARM_DESC(max_pool_size, "Max Pool size per order");
int static_pool_size[POOL_LEN];
module_param_array(static_pool_size, int, NULL, 0444);
MODULE_PARM_DESC(static_pool_size, "Pool size per order");
int pool_unbound_feature[POOL_LEN] = { 0, 0, 1, 1};
module_param_array(pool_unbound_feature, int, NULL, 0644);
MODULE_PARM_DESC(pool_unbound_feature, "Pool bound gate");
int rmnet_mem_order_requests[POOL_LEN];
module_param_array(rmnet_mem_order_requests, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_order_requests, "Request per order");
int rmnet_mem_id_req[POOL_LEN];
module_param_array(rmnet_mem_id_req, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_id_req, "Request per id");
int rmnet_mem_id_recycled[POOL_LEN];
module_param_array(rmnet_mem_id_recycled, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_id_recycled, "Recycled per id");
int rmnet_mem_stats[RMNET_MEM_STAT_MAX];
module_param_array(rmnet_mem_stats, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_stats, "Rmnet mem stats for modules");
int rmnet_mem_err[ERR_MAX];
module_param_array(rmnet_mem_err, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_mem_err, "Error counting");
unsigned int rmnet_mem_pb_ind_max[POOL_LEN];
module_param_array(rmnet_mem_pb_ind_max, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_mem_pb_ind_max, "Pool size vote that is active on PB ind");
unsigned target_pool_size[POOL_LEN] = { 0, 0, MID_POOL_O2, MID_POOL_O3};
module_param_array(target_pool_size, uint, NULL, 0444);
MODULE_PARM_DESC(target_pool_size, "Pool size wq will adjust to on run");
static char *verinfo[] = {"2003bae3"};
module_param_array(verinfo, charp, NULL, 0444);
MODULE_PARM_DESC(verinfo, "Version of the driver");
struct workqueue_struct *mem_wq;
struct delayed_work pool_adjust_work;
int pb_ind_pending;
struct hrtimer pb_timer;
struct list_head rmnet_mem_pool[POOL_LEN];
struct mem_info {
struct page *addr;
struct list_head mem_head;
u8 order;
};
void rmnet_mem_page_ref_inc_entry(struct page *page, unsigned id)
{
page_ref_inc(page);
}
EXPORT_SYMBOL_GPL(rmnet_mem_page_ref_inc_entry);
struct rmnet_mem_notif_s {
struct raw_notifier_head chain;
spinlock_t lock;
};
struct rmnet_mem_notif_s rmnet_mem_notifier = {
.chain = RAW_NOTIFIER_INIT(rmnet_mem_notifier.chain),
.lock = __SPIN_LOCK_UNLOCKED(rmnet_mem_notifier.lock),
};
EXPORT_SYMBOL_GPL(rmnet_mem_notifier);
int rmnet_mem_get_pool_size(unsigned order)
{
if (order >= POOL_LEN) {
rmnet_mem_err[ERR_GET_ORDER_ERR]++;
return 0;
}
/* Return actual size or configured amount if not grown yet.*/
return (static_pool_size[order]) ? static_pool_size[order]: target_pool_size[order];
}
EXPORT_SYMBOL_GPL(rmnet_mem_get_pool_size);
int rmnet_mem_mode_notify(unsigned pool_size)
{
unsigned long flags;
spin_lock_irqsave(&rmnet_mem_notifier.lock, flags);
raw_notifier_call_chain(&rmnet_mem_notifier.chain, pool_size, NULL);
spin_unlock_irqrestore(&rmnet_mem_notifier.lock, flags);
return NOTIFY_OK;
}
int rmnet_mem_register_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&rmnet_mem_notifier.lock, flags);
ret = raw_notifier_chain_register(&rmnet_mem_notifier.chain, nb);
spin_unlock_irqrestore(&rmnet_mem_notifier.lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(rmnet_mem_register_notifier);
int rmnet_mem_unregister_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&rmnet_mem_notifier.lock, flags);
ret = raw_notifier_chain_unregister(&rmnet_mem_notifier.chain, nb);
spin_unlock_irqrestore(&rmnet_mem_notifier.lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(rmnet_mem_unregister_notifier);
/* Track a newly allocated page by adding it to the static pool */
struct mem_info* rmnet_mem_add_page(struct page *page, u8 pageorder)
{
struct mem_info *mem_slot;
mem_slot = kzalloc(sizeof(*mem_slot), GFP_ATOMIC);
if (!mem_slot) {
rmnet_mem_err[ERR_MALLOC_FAIL1]++;
return NULL;
}
static_pool_size[pageorder]++;
mem_slot->order = pageorder;
mem_slot->addr = (void*)page;
INIT_LIST_HEAD(&mem_slot->mem_head);
if (pageorder < POOL_LEN) {
list_add_rcu(&mem_slot->mem_head, &(rmnet_mem_pool[pageorder]));
}
return mem_slot;
}
/* Release every page still held in the static pools */
void rmnet_mem_free_all(void)
{
unsigned long flags;
struct mem_info *mem_slot;
struct list_head *ptr = NULL, *next = NULL;
int i;
spin_lock_irqsave(&rmnet_mem_lock, flags);
for (i = 0; i < POOL_LEN; i++) {
list_for_each_safe(ptr, next, &rmnet_mem_pool[i]) {
mem_slot = list_entry(ptr, struct mem_info, mem_head);
list_del(&mem_slot->mem_head);
put_page(mem_slot->addr);
static_pool_size[mem_slot->order]--;
kfree(mem_slot);
}
}
spin_unlock_irqrestore(&rmnet_mem_lock, flags);
}
/* Hand out a pool page, recycling an existing one when possible and growing the pool if allowed */
struct page* rmnet_mem_get_pages_entry(gfp_t gfp_mask, unsigned int order, int *code, int *pageorder, unsigned id)
{
unsigned long flags;
struct mem_info *mem_page;
struct page *page = NULL;
int i = 0;
int j = 0;
int adding = 0;
spin_lock_irqsave(&rmnet_mem_lock, flags);
if (order < POOL_LEN) {
rmnet_mem_id_req[id]++;
rmnet_mem_order_requests[order]++;
/* Start at the requested order; lower-order (IPA) requests may be upgraded to a higher order if no page of the matching order is free */
for (j = order; j > 0 && j < POOL_LEN; j++) {
do {
mem_page = list_first_entry_or_null(&rmnet_mem_pool[j], struct mem_info, mem_head);
if (!mem_page) {
break;
}
if (page_ref_count(mem_page->addr) == 1) {
rmnet_mem_id_recycled[j]++;
page = mem_page->addr;
page_ref_inc(mem_page->addr);
list_rotate_left(&rmnet_mem_pool[j]);
break;
}
list_rotate_left(&rmnet_mem_pool[j]);
i++;
} while (i <= 5);
if (page && pageorder) {
*pageorder = j;
break;
}
i = 0;
}
}
if (static_pool_size[order] < max_pool_size[order] &&
pool_unbound_feature[order]) {
adding = 1;
} else
spin_unlock_irqrestore(&rmnet_mem_lock, flags);
if (!page) {
rmnet_mem_id_gaveup[id]++;
/* IPA doesn't want retry logic, but the pool will be empty for lower orders and those
* allocations will fail too, which is effectively a retry. So just hardcode no retry for order-3 page requests.
*/
if (order < 3) {
page = __dev_alloc_pages((adding)? GFP_ATOMIC : gfp_mask, order);
if (page) {
/* If below unbound limit then add page to static pool*/
if (adding) {
rmnet_mem_add_page(page, order);
page_ref_inc(page);
}
if (pageorder) {
*pageorder = order;
}
}
} else {
/* Only call get page if we will add page to static pool*/
if (adding) {
page = __dev_alloc_pages((adding)? GFP_ATOMIC : gfp_mask, order);
if (page) {
rmnet_mem_add_page(page, order);
page_ref_inc(page);
}
if (pageorder) {
*pageorder = order;
}
}
}
}
/* If we had potential to add, this won't occur after we fill up to limit */
if (adding)
spin_unlock_irqrestore(&rmnet_mem_lock, flags);
if (pageorder && code && page) {
if (*pageorder == order)
*code = RMNET_MEM_SUCCESS;
else if (*pageorder > order)
*code = RMNET_MEM_UPGRADE;
else if (*pageorder < order)
*code = RMNET_MEM_DOWNGRADE;
} else if (pageorder && code) {
*code = RMNET_MEM_FAIL;
*pageorder = 0;
}
return page;
}
EXPORT_SYMBOL_GPL(rmnet_mem_get_pages_entry);
/* Freed by client so added back to pool */
void rmnet_mem_put_page_entry(struct page *page)
{
put_page(page);
}
EXPORT_SYMBOL_GPL(rmnet_mem_put_page_entry);
static void mem_update_pool_work(struct work_struct *work)
{
int i;
int new_size;
local_bh_disable();
for (i = 0; i < POOL_LEN; i++) {
/* If PB ind is active and max pool has been configured
* new pool size is max of the two.
*/
new_size = (pb_ind_pending && rmnet_mem_pb_ind_max[i]) ?
MAX_VOTE(rmnet_mem_pb_ind_max[i],target_pool_size[i]):
target_pool_size[i];
rmnet_mem_adjust(new_size, i);
}
local_bh_enable();
}
/* Grow or shrink the static pool of the given page order to perm_size entries */
void rmnet_mem_adjust(unsigned perm_size, u8 pageorder)
{
struct list_head *entry, *next;
struct mem_info *mem_slot;
int i;
struct page *newpage = NULL;
int adjustment;
unsigned long flags;
if (pageorder >= POOL_LEN || perm_size > MAX_STATIC_POOL) {
rmnet_mem_err[ERR_INV_ARGS]++;
return;
}
adjustment = perm_size - static_pool_size[pageorder];
if (perm_size == static_pool_size[pageorder])
return;
spin_lock_irqsave(&rmnet_mem_lock, flags);
if (perm_size > static_pool_size[pageorder]) {
for (i = 0; i < (adjustment); i++) {
newpage = __dev_alloc_pages(GFP_ATOMIC, pageorder);
if (!newpage) {
continue;
}
rmnet_mem_add_page(newpage, pageorder);
}
} else {
/*TODO what if shrink comes in when we have allocated all pages, can't shrink currently */
/* Shrink static pool */
list_for_each_safe(entry, next, &(rmnet_mem_pool[pageorder])) {
mem_slot = list_entry(entry, struct mem_info, mem_head);
/* Shrinking the static pool: remove the entry from the pool list, release the page, and kfree the slot */
list_del(&mem_slot->mem_head);
put_page(mem_slot->addr);
kfree(mem_slot);
static_pool_size[pageorder]--;
if (static_pool_size[pageorder] == perm_size)
break;
}
}
spin_unlock_irqrestore(&rmnet_mem_lock, flags);
if (pageorder == POOL_NOTIF) {
rmnet_mem_mode_notify(perm_size);
}
}
enum hrtimer_restart rmnet_mem_pb_timer_cb(struct hrtimer *t)
{
unsigned jiffies;
pb_ind_pending = 0;
rmnet_mem_stats[RMNET_MEM_PB_TIMEOUT]++;
jiffies = msecs_to_jiffies(RAMP_DOWN_DELAY);
/* Ramping down can be done with a delay. Less urgent.*/
queue_delayed_work(mem_wq, &pool_adjust_work, jiffies);
return HRTIMER_NORESTART;
}
void rmnet_mem_pb_ind(void)
{
/* Only honor the PB ind vote if a peak pool size has been configured */
if (!rmnet_mem_pb_ind_max[POOL_NOTIF]) {
rmnet_mem_stats[RMNET_MEM_PB_IND_CONFIG_FAIL]++;
return;
}
pb_ind_pending = 1;
/* Trigger update to change pool size */
if (hrtimer_active(&pb_timer)) {
hrtimer_cancel(&pb_timer);
} else {
cancel_delayed_work(&pool_adjust_work);
queue_delayed_work(mem_wq, &pool_adjust_work, 0);
}
rmnet_mem_stats[RMNET_MEM_PB_IND]++;
hrtimer_start(&pb_timer, ns_to_ktime(PB_IND_DUR* NS_IN_MS),
HRTIMER_MODE_REL| HRTIMER_MODE_PINNED);
}
EXPORT_SYMBOL_GPL(rmnet_mem_pb_ind);
int __init rmnet_mem_module_init(void)
{
int rc, i = 0;
pr_info("%s(): Starting rmnet mem module\n", __func__);
for (i = 0; i < POOL_LEN; i++) {
INIT_LIST_HEAD(&(rmnet_mem_pool[i]));
}
mem_wq = alloc_workqueue("mem_wq", WQ_HIGHPRI, 0);
if (!mem_wq) {
pr_err("%s(): Failed to alloc workqueue \n", __func__);
return -ENOMEM;
}
INIT_DELAYED_WORK(&pool_adjust_work, mem_update_pool_work);
rc = rmnet_mem_nl_register();
if (rc) {
pr_err("%s(): Failed to register generic netlink family\n", __func__);
destroy_workqueue(mem_wq);
mem_wq = NULL;
return -ENOMEM;
}
hrtimer_init(&pb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pb_timer.function = rmnet_mem_pb_timer_cb;
return 0;
}
void __exit rmnet_mem_module_exit(void)
{
rmnet_mem_nl_unregister();
if (mem_wq) {
cancel_delayed_work_sync(&pool_adjust_work);
drain_workqueue(mem_wq);
destroy_workqueue(mem_wq);
mem_wq = NULL;
}
rmnet_mem_free_all();
}
module_init(rmnet_mem_module_init);
module_exit(rmnet_mem_module_exit);
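
The (now removed) page-pool API above is easiest to read from the consumer side. The sketch below is illustrative only and not part of the driver: the demo_* function names are invented, and it assumes nothing beyond the helpers exported by rmnet_mem.h (rmnet_mem_get_pages_entry, rmnet_mem_put_page_entry) and the IPA_ID / RMNET_MEM_* constants defined there.

/* Hypothetical client-side usage of the removed rmnet_mem pool API. */
#include "rmnet_mem.h"

static struct page *demo_alloc_rx_page(void)
{
        int code = RMNET_MEM_FAIL;
        int pageorder = 0;
        struct page *page;

        /* Ask the shared pool for an order-2 page on behalf of IPA. */
        page = rmnet_mem_get_pages_entry(GFP_ATOMIC, 2, &code, &pageorder, IPA_ID);
        if (!page)
                return NULL;

        /* code reports whether the request was met exactly or upgraded to a
         * larger order when only a higher-order page was recyclable;
         * pageorder is the order actually handed out.
         */
        if (code == RMNET_MEM_UPGRADE)
                pr_debug("rmnet_mem returned order-%d for an order-2 request\n", pageorder);

        return page;
}

static void demo_free_rx_page(struct page *page)
{
        /* Drop the reference; pages tracked in the static pool stay resident
         * and become recyclable again once their refcount falls back to one.
         */
        rmnet_mem_put_page_entry(page);
}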

View File

@@ -1,105 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_mem_nl.h"
#include "rmnet_mem_priv.h"
#define RMNET_MEM_GENL_FAMILY_NAME "RMNET_MEM"
#define RMNET_MEM_GENL_VERSION 1
enum {
RMNET_MEM_CMD_UNSPEC,
RMNET_MEM_CMD_UPDATE_MODE,
RMNET_MEM_CMD_UPDATE_POOL_SIZE,
RMNET_MEM_CMD_UPDATE_PEAK_POOL_SIZE,
__RMNET_MEM_GENL_CMD_MAX,
};
#define RMNET_MEM_ATTR_MAX (__RMNET_MEM_ATTR_MAX - 1)
uint32_t rmnet_shs_genl_seqnum;
static struct nla_policy rmnet_mem_nl_policy[RMNET_MEM_ATTR_MAX + 1] = {
[RMNET_MEM_ATTR_MODE] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_memzone_req)),
[RMNET_MEM_ATTR_POOL_SIZE] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_pool_update_req)),
};
static const struct genl_ops rmnet_mem_nl_ops[] = {
{
/* Deprecated, not used*/
.cmd = RMNET_MEM_CMD_UPDATE_MODE,
.doit = rmnet_mem_nl_cmd_update_mode,
},
{
/* Adjust static pool size on the fly, set target_pool_size & start wq */
.cmd = RMNET_MEM_CMD_UPDATE_POOL_SIZE,
.doit = rmnet_mem_nl_cmd_update_pool_size,
},
{
/* Set PB ind vote for what pool size will be adjusted to
* during active PB IND. Max(target_pool_size, pb_ind_max)
*/
.cmd = RMNET_MEM_CMD_UPDATE_PEAK_POOL_SIZE,
.doit = rmnet_mem_nl_cmd_peak_pool_size,
},
};
struct genl_family rmnet_aps_nl_family __ro_after_init = {
.hdrsize = 0,
.name = RMNET_MEM_GENL_FAMILY_NAME,
.version = RMNET_MEM_GENL_VERSION,
.maxattr = RMNET_MEM_ATTR_MAX,
.policy = rmnet_mem_nl_policy,
.ops = rmnet_mem_nl_ops,
.n_ops = ARRAY_SIZE(rmnet_mem_nl_ops),
};
int rmnet_mem_genl_send_int_to_userspace_no_info(int val, struct genl_info *info)
{
struct sk_buff *skb;
void *msg_head;
int rc;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
goto out;
msg_head = genlmsg_put(skb, 0, 0, &rmnet_aps_nl_family,
0, RMNET_MEM_CMD_UPDATE_MODE);
if (msg_head == NULL) {
rc = -ENOMEM;
rm_err("MEM_GNL: FAILED to msg_head %d\n", rc);
kfree(skb);
goto out;
}
rc = nla_put_u32(skb, RMNET_MEM_ATTR_INT, val);
if (rc != 0) {
rm_err("MEM_GNL: FAILED nla_put %d\n", rc);
kfree(skb);
goto out;
}
genlmsg_end(skb, msg_head);
rc = genlmsg_reply(skb, info);
if (rc != 0)
goto out;
rm_err("MEM_GNL: Successfully sent int %d\n", val);
return 0;
out:
rm_err("MEM_GNL: FAILED to send int %d\n", val);
return -1;
}
int rmnet_mem_nl_register(void)
{
return genl_register_family(&rmnet_aps_nl_family);
}
void rmnet_mem_nl_unregister(void)
{
genl_unregister_family(&rmnet_aps_nl_family);
}

View File

@@ -1,35 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_MEM_NL_H_
#define _RMNET_MEM_NL_H_
#include <net/genetlink.h>
enum {
RMNET_MEM_ATTR_UNSPEC,
RMNET_MEM_ATTR_MODE,
RMNET_MEM_ATTR_POOL_SIZE,
RMNET_MEM_ATTR_INT,
__RMNET_MEM_ATTR_MAX,
};
struct rmnet_memzone_req {
int zone;
int valid;
};
struct rmnet_pool_update_req {
unsigned poolsize[4];
unsigned valid_mask;
};
int rmnet_mem_nl_register(void);
void rmnet_mem_nl_unregister(void);
int rmnet_mem_nl_cmd_update_mode(struct sk_buff *skb, struct genl_info *info);
int rmnet_mem_nl_cmd_update_pool_size(struct sk_buff *skb, struct genl_info *info);
int rmnet_mem_nl_cmd_peak_pool_size(struct sk_buff *skb, struct genl_info *info);
int rmnet_mem_genl_send_int_to_userspace_no_info(int val, struct genl_info *info);
#endif /* _RMNET_MEM_GENL_H_ */

View File

@@ -1,118 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_mem_nl.h"
#include "rmnet_mem_priv.h"
#define MAX_POOL 500
#define DEF_PAGEO 3
#define RMNET_MEM_NL_SUCCESS 400
#define RMNET_MEM_NL_FAIL 401
extern struct delayed_work pool_adjust_work;
extern struct workqueue_struct *mem_wq;
int rmnet_mem_nl_cmd_update_mode(struct sk_buff *skb, struct genl_info *info)
{
u8 mode = 0;
struct rmnet_memzone_req mem_info;
struct nlattr *na;
if (info->attrs[RMNET_MEM_ATTR_MODE]) {
na = info->attrs[RMNET_MEM_ATTR_MODE];
if (nla_memcpy(&mem_info, na, sizeof(mem_info)) > 0) {
rm_err("%s(): modeinfo %u\n", __func__, mem_info.zone);
}
rm_err("%s(): mode %u\n", __func__, mode);
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_SUCCESS, info);
} else {
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_FAIL, info);
}
return 0;
}
int rmnet_mem_nl_cmd_update_pool_size(struct sk_buff *skb, struct genl_info *info)
{
struct rmnet_pool_update_req mem_info;
struct nlattr *na;
int i;
unsigned long jiffies;
u8 update_flag = 0;
u8 increase = 0;
rmnet_mem_stats[RMNET_MEM_POOL_NL]++;
if (info->attrs[RMNET_MEM_ATTR_POOL_SIZE]) {
na = info->attrs[RMNET_MEM_ATTR_POOL_SIZE];
if (nla_memcpy(&mem_info, na, sizeof(mem_info)) > 0) {
rm_err("%s(): modeinfo %u\n", __func__, mem_info.valid_mask);
}
for (i = 0; i < POOL_LEN; i++) {
if (mem_info.valid_mask & 1 << i &&
mem_info.poolsize[i] > 0 &&
mem_info.poolsize[i] <= MAX_STATIC_POOL) {
/* Set the target the next pool-adjust work run will allocate toward, and
* update the grow cap (max_pool_size) used when adding pages on demand.
*/
target_pool_size[i] = mem_info.poolsize[i];
max_pool_size[i] = mem_info.poolsize[i];
update_flag = 1;
/* If greater mem demands grab mem immediately */
if (!increase && mem_info.poolsize[i] > static_pool_size[i]) {
increase = 1;
}
}
}
rm_err(" poolsize %d %d\n", mem_info.poolsize[2], mem_info.poolsize[3]);
if (update_flag && mem_wq) {
jiffies = msecs_to_jiffies(RAMP_DOWN_DELAY);
cancel_delayed_work_sync(&pool_adjust_work);
queue_delayed_work(mem_wq, &pool_adjust_work, (increase)? 0: jiffies);
}
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_SUCCESS, info);
} else {
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_FAIL, info);
}
return 0;
}
/* Update the peak mem pool size applied while a PB ind is active */
int rmnet_mem_nl_cmd_peak_pool_size(struct sk_buff *skb, struct genl_info *info)
{
struct rmnet_pool_update_req mem_info;
struct nlattr *na;
int i;
rmnet_mem_stats[RMNET_MEM_PEAK_POOL_NL]++;
if (info->attrs[RMNET_MEM_ATTR_POOL_SIZE]) {
na = info->attrs[RMNET_MEM_ATTR_POOL_SIZE];
if (nla_memcpy(&mem_info, na, sizeof(mem_info)) > 0) {
rm_err("%s(): modeinfo %u\n", __func__, mem_info.valid_mask);
}
rm_err("%s(): pbind pool_size %u\n", __func__, mem_info.poolsize[3]);
for (i = 0; i < POOL_LEN; i++) {
if (mem_info.valid_mask & 1 << i) {
if (mem_info.poolsize[i] > 0 && mem_info.poolsize[i] <= MAX_STATIC_POOL)
rmnet_mem_pb_ind_max[i] = mem_info.poolsize[i];
}
}
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_SUCCESS, info);
} else {
rmnet_mem_genl_send_int_to_userspace_no_info(RMNET_MEM_NL_FAIL, info);
}
return 0;
}
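
How the handlers above consume RMNET_MEM_ATTR_POOL_SIZE is clearest from the payload side. The snippet below is illustrative only: it assumes the struct rmnet_pool_update_req layout from rmnet_mem_nl.h and shows which fields the per-order loop reads; the generic-netlink plumbing that carries it from userspace is omitted.

/* Hypothetical RMNET_MEM_CMD_UPDATE_POOL_SIZE payload (illustration only). */
#include "rmnet_mem_nl.h"

static const struct rmnet_pool_update_req demo_req = {
        /* Per-order targets; orders 0 and 1 are left untouched. */
        .poolsize   = { 0, 0, 190, 600 },
        /* Only orders 2 and 3 are flagged valid, so only those entries are
         * copied into target_pool_size[] / max_pool_size[] by the handler.
         */
        .valid_mask = (1 << 2) | (1 << 3),
};

With a mask like this the handler also compares each requested size against the current static_pool_size[] to decide whether the pool-adjust work runs immediately (growth) or only after RAMP_DOWN_DELAY (shrink).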

View File

@@ -1,62 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_MEM_PRIV_H_
#define _RMNET_MEM_PRIV_H_
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#define IPA_ID 1
#define RMNET_CORE_ID 2
#define POOL_LEN 4
#define MAX_STATIC_POOL 700
#define MAX_POOL_O3 675
#define MAX_POOL_O2 224
#define MID_POOL_O3 600
#define MID_POOL_O2 190
#define RAMP_DOWN_DELAY 3000
#define PB_IND_DUR 105
#define MAX_VOTE(a,b) ((a) > (b) ? (a) : (b))
enum {
RMNET_MEM_PB_IND,
RMNET_MEM_PB_TIMEOUT,
RMNET_MEM_POOL_NL,
RMNET_MEM_PEAK_POOL_NL,
RMNET_MEM_PB_IND_CONFIG_FAIL,
RMNET_MEM_STAT_MAX,
};
enum {
ERR_MALLOC_FAIL1,
ERR_GET_ORDER_ERR,
ERR_INV_ARGS,
ERR_TIMEOUT,
ERR_MAX,
};
void rmnet_mem_adjust(unsigned perm_size, u8 order);
#define rm_err(fmt, ...) \
do { if (0) pr_err(fmt, __VA_ARGS__); } while (0)
extern int max_pool_size[POOL_LEN];
extern int static_pool_size[POOL_LEN];
extern int pool_unbound_feature[POOL_LEN];
extern int rmnet_mem_order_requests[POOL_LEN];
extern int rmnet_mem_id_req[POOL_LEN];
extern int rmnet_mem_id_recycled[POOL_LEN];
extern unsigned int target_pool_size[POOL_LEN];
extern unsigned int rmnet_mem_pb_ind_max[POOL_LEN];
extern int rmnet_mem_stats[RMNET_MEM_STAT_MAX];
#endif

View File

@@ -1,438 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload core optimization engine */
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/hashtable.h>
#include "rmnet_descriptor.h"
#include "rmnet_module.h"
#include "rmnet_offload_state.h"
#include "rmnet_offload_engine.h"
#include "rmnet_offload_main.h"
#include "rmnet_offload_tcp.h"
#include "rmnet_offload_udp.h"
#include "rmnet_offload_stats.h"
#include "rmnet_offload_knob.h"
#define RMNET_OFFLOAD_ENGINE_HASH_TABLE_BITS \
(const_ilog2(RMNET_OFFLOAD_ENGINE_NUM_FLOWS))
static DEFINE_HASHTABLE(rmnet_offload_flow_table,
RMNET_OFFLOAD_ENGINE_HASH_TABLE_BITS);
/* Flushes all active flows of a certain transport protocol */
static u32 rmnet_offload_engine_flush_by_protocol(u8 l4_proto,
struct list_head *flush_list)
{
struct rmnet_offload_flow *flow_cursor;
int bkt_cursor;
u32 flushed = 0;
hash_for_each(rmnet_offload_flow_table, bkt_cursor, flow_cursor,
rof_flow_list) {
if (flow_cursor->rof_pkts_held &&
flow_cursor->rof_hdrs.roh_trans_proto == l4_proto) {
flushed++;
rmnet_offload_engine_flush_flow(flow_cursor, flush_list);
}
}
return flushed;
}
/* Check if a specific protocol should be optimized */
static bool rmnet_offload_engine_optimize_protocol(u8 l4_proto)
{
u64 engine_mode;
engine_mode = rmnet_offload_knob_get(RMNET_OFFLOAD_KNOB_ENGINE_MODE);
if (engine_mode == RMNET_OFFLOAD_ENGINE_MODE_ALL)
return true;
if (engine_mode == RMNET_OFFLOAD_ENGINE_MODE_TCP &&
l4_proto == RMNET_OFFLOAD_PROTO_TCP)
return true;
if (engine_mode == RMNET_OFFLOAD_ENGINE_MODE_UDP &&
l4_proto == RMNET_OFFLOAD_PROTO_UDP)
return true;
return false;
}
/* Compare the ip flags of a flow and the incoming packet */
static bool rmnet_offload_engine_ip_mismatch(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
/* Can't mismatch if the flow is empty */
if (!flow->rof_pkts_held)
return false;
if (pkt->roi_hdrs.roh_ip_proto == 0x4) {
struct rmnet_offload_header_info *flow_hdr, *pkt_hdr;
flow_hdr = &flow->rof_hdrs;
pkt_hdr = &pkt->roi_hdrs;
if (flow_hdr->roh_ip_ttl ^ pkt_hdr->roh_ip_ttl ||
flow_hdr->roh_ip_tos ^ pkt_hdr->roh_ip_tos ||
flow_hdr->roh_ip_frag_off ^ pkt_hdr->roh_ip_frag_off ||
flow_hdr->roh_ip_len ^ pkt_hdr->roh_ip_len)
return true;
} else if (pkt->roi_hdrs.roh_ip_proto == 0x6) {
__be32 flow_word, pkt_word;
__be32 word_mismatch;
flow_word = flow->rof_hdrs.roh_flag_word;
pkt_word = pkt->roi_hdrs.roh_flag_word;
word_mismatch = flow_word ^ pkt_word;
if (word_mismatch & htonl(0x0FF00000))
return true;
}
return false;
}
/* Match an incoming packet against a flow in our table */
static bool rmnet_offload_engine_flow_match(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
struct rmnet_offload_header_info *flow_hdr, *pkt_hdr;
flow_hdr = &flow->rof_hdrs;
pkt_hdr = &pkt->roi_hdrs;
/* If the flow is empty, it has no header information. We rely on
* the hash key to tell us what was there.
*/
if (!flow->rof_pkts_held)
return flow->rof_hash_key == pkt->roi_hash_key;
/* Transport protocol must match */
if (flow_hdr->roh_trans_proto != pkt_hdr->roh_trans_proto)
return false;
/* Grab the ports from the L4 header. Fortunately, both TCP and UDP
* store these in the same location in the header.
*/
if (flow_hdr->roh_sport ^ pkt_hdr->roh_sport ||
flow_hdr->roh_dport ^ pkt_hdr->roh_dport)
return false;
/* Compare the addresses */
if (pkt_hdr->roh_ip_proto == 0x4) {
if (flow_hdr->roh_saddr4 ^ pkt_hdr->roh_saddr4 ||
flow_hdr->roh_daddr4 ^ pkt_hdr->roh_daddr4)
return false;
} else if (pkt_hdr->roh_ip_proto == 0x6) {
if (memcmp(flow_hdr->roh_saddr6, pkt_hdr->roh_saddr6,
sizeof(pkt_hdr->roh_saddr6)) ||
memcmp(flow_hdr->roh_daddr6, pkt_hdr->roh_daddr6,
sizeof(pkt_hdr->roh_daddr6)))
return false;
} else {
/* This shouldn't ever be hit. But returning false here beats
* returning true below and storing the packet somewhere.
*/
return false;
}
return true;
}
/* Select a flow node to use for a new flow we're going to store */
static struct rmnet_offload_flow *rmnet_offload_engine_recycle(void)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
struct rmnet_offload_engine_state *state;
struct rmnet_offload_flow *new_flow;
LIST_HEAD(flush_list);
state = &rmnet_offload->engine_state;
if (state->roe_nodes_used < RMNET_OFFLOAD_ENGINE_NUM_FLOWS) {
/* Still have a few that we've never used, so we can fast
* path this flow.
*/
new_flow = &state->roe_flow_pool[state->roe_nodes_used];
state->roe_nodes_used++;
return new_flow;
}
/* Recycle one of the already used flows.
* Traditionally, we've just used a modular counter here to
* choose which one we replace. Could potentially add a little
* more intelligence and check for empty nodes or do a LRU scheme
* if we wanted to avoid potentially prematurely flushing an
* active flow.
*/
new_flow = &state->roe_flow_pool[state->roe_recycle_idx];
state->roe_recycle_idx++;
state->roe_recycle_idx %= RMNET_OFFLOAD_ENGINE_NUM_FLOWS;
hash_del(&new_flow->rof_flow_list);
if (new_flow->rof_pkts_held) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_FLOW_EVICT);
rmnet_offload_engine_flush_flow(new_flow, &flush_list);
}
rmnet_offload_deliver_descs(&flush_list);
return new_flow;
}
/* Flush all flows at the end of the packet chain */
static void rmnet_offload_engine_chain_flush(void)
{
LIST_HEAD(flush_list);
rmnet_offload_lock();
if (rmnet_offload_engine_flush_all_flows(&flush_list))
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_CHAIN_FLUSH);
rmnet_offload_unlock();
rmnet_offload_deliver_descs(&flush_list);
}
static const struct rmnet_module_hook_register_info
rmnet_offload_engine_hook = {
.hooknum = RMNET_MODULE_HOOK_OFFLOAD_CHAIN_END,
.func = rmnet_offload_engine_chain_flush,
};
/* Set hook for flushing on end of chain notifications */
void rmnet_offload_engine_enable_chain_flush(void)
{
rmnet_module_hook_register(&rmnet_offload_engine_hook, 1);
}
/* Unset hook for flushing on end of chain notifications */
void rmnet_offload_engine_disable_chain_flush(void)
{
rmnet_module_hook_unregister_no_sync(&rmnet_offload_engine_hook, 1);
}
/* Handle engine mode change notifications */
int rmnet_offload_engine_mode_change(u64 old_mode, u64 new_mode)
{
LIST_HEAD(flush_list);
u32 flushed = 0;
/* If all we did was add a protocol or two to optimize, then nothing
* needs to be flushed. O frabjous day!
*/
if (old_mode == RMNET_OFFLOAD_ENGINE_MODE_NONE ||
new_mode == RMNET_OFFLOAD_ENGINE_MODE_ALL)
return 0;
/* Flush any flows belonging to the protocol(s) we're not optimizing */
switch (new_mode) {
case RMNET_OFFLOAD_ENGINE_MODE_TCP:
flushed =
rmnet_offload_engine_flush_by_protocol(RMNET_OFFLOAD_PROTO_UDP,
&flush_list);
break;
case RMNET_OFFLOAD_ENGINE_MODE_UDP:
flushed =
rmnet_offload_engine_flush_by_protocol(RMNET_OFFLOAD_PROTO_TCP,
&flush_list);
break;
case RMNET_OFFLOAD_ENGINE_MODE_NONE:
flushed = rmnet_offload_engine_flush_all_flows(&flush_list);
break;
}
__rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_PROTO_FLUSH, flushed);
rmnet_offload_deliver_descs(&flush_list);
return 0;
}
/* Combines packets in a given flow and returns them to the core driver */
void rmnet_offload_engine_flush_flow(struct rmnet_offload_flow *flow,
struct list_head *flush_list)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
struct rmnet_frag_descriptor *head_frag, *frag_iter, *tmp;
struct rmnet_offload_header_info *flow_hdr = &flow->rof_hdrs;
u32 hlen = flow_hdr->roh_ip_len + flow_hdr->roh_trans_len;
if (!flow->rof_pkts_held)
return;
head_frag = list_first_entry(&flow->rof_pkts,
struct rmnet_frag_descriptor, list);
/* Set GSO segs if it hasn't been initialized yet, e.g. checksum
* offload packets.
*/
if (!head_frag->gso_segs)
head_frag->gso_segs = 1;
head_frag->gso_size = flow->rof_gso_len;
/* Add subsequent packets to the main one, updating the GSO information
* and pulling any unneeded headers along the way.
*/
frag_iter = head_frag;
list_for_each_entry_safe_continue(frag_iter, tmp, &flow->rof_pkts,
list) {
u32 dlen = frag_iter->len - hlen;
if (!rmnet_frag_descriptor_add_frags_from(head_frag, frag_iter,
hlen, dlen)) {
head_frag->gso_segs += (frag_iter->gso_segs) ?: 1;
head_frag->coal_bytes += frag_iter->coal_bytes;
head_frag->coal_bufsize += frag_iter->coal_bufsize;
}
rmnet_recycle_frag_descriptor(frag_iter,
rmnet_offload->core_port);
}
/* Set the hash value and fire it off */
head_frag->hash = flow->rof_hash_key;
list_del_init(&head_frag->list);
list_add_tail(&head_frag->list, flush_list);
flow->rof_pkts_held = 0;
flow->rof_len = 0;
}
/* Flush any active flows that match a given hash value */
void rmnet_offload_engine_flush_by_hash(u32 hash_val,
struct list_head *flush_list)
{
struct rmnet_offload_flow *flow;
hash_for_each_possible(rmnet_offload_flow_table, flow, rof_flow_list,
hash_val) {
if (flow->rof_hash_key == hash_val && flow->rof_pkts_held)
rmnet_offload_engine_flush_flow(flow, flush_list);
}
}
/* Flush all active flows. Returns the number flushed */
u32 rmnet_offload_engine_flush_all_flows(struct list_head *flush_list)
{
struct rmnet_offload_flow *flow;
int bkt_cursor;
u32 flushed = 0;
hash_for_each(rmnet_offload_flow_table, bkt_cursor, flow,
rof_flow_list) {
if (flow->rof_pkts_held) {
flushed++;
rmnet_offload_engine_flush_flow(flow, flush_list);
}
}
return flushed;
}
/* Add a packet to a flow node */
void rmnet_offload_engine_add_flow_pkt(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
if (pkt->roi_first_pkt) {
/* Copy over the flow information */
memcpy(&flow->rof_hdrs, &pkt->roi_hdrs,
sizeof(flow->rof_hdrs));
flow->rof_hash_key = pkt->roi_hash_key;
flow->rof_gso_len = (pkt->roi_frag_desc->gso_size) ?:
pkt->roi_payload_len;
}
/* Set the next sequence number for tcp flows */
if (pkt->roi_hdrs.roh_trans_proto == RMNET_OFFLOAD_PROTO_TCP)
flow->rof_hdrs.roh_tcp_seq += pkt->roi_payload_len;
/* Hold the packet */
list_add_tail(&pkt->roi_frag_desc->list, &flow->rof_pkts);
flow->rof_pkts_held++;
flow->rof_len += pkt->roi_payload_len;
}
/* Main entry point into the core engine framework */
bool rmnet_offload_engine_ingress(struct rmnet_offload_info *pkt,
struct list_head *flush_list)
{
struct rmnet_offload_flow *flow;
bool flow_node_found = false;
u8 pkt_proto = pkt->roi_hdrs.roh_trans_proto;
if (!rmnet_offload_engine_optimize_protocol(pkt_proto)) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_PROTO_SKIPPED);
return false;
}
hash_for_each_possible(rmnet_offload_flow_table, flow, rof_flow_list,
pkt->roi_hash_key) {
bool ip_flush;
if (!rmnet_offload_engine_flow_match(flow, pkt))
continue;
node_found:
ip_flush = rmnet_offload_engine_ip_mismatch(flow, pkt);
/* Set to true by default. Protocol handlers will handle
* adjusting this if needed.
*/
pkt->roi_first_pkt = true;
flow_node_found = true;
switch (pkt_proto) {
case RMNET_OFFLOAD_PROTO_TCP:
return rmnet_offload_engine_tcp_ingress(flow, pkt,
ip_flush,
flush_list);
case RMNET_OFFLOAD_PROTO_UDP:
return rmnet_offload_engine_udp_ingress(flow, pkt,
ip_flush,
flush_list);
default:
/* Should never be hit */
return false;
}
}
if (!flow_node_found) {
/* This is a new flow. Get a node and retry */
flow = rmnet_offload_engine_recycle();
flow->rof_hash_key = pkt->roi_hash_key;
hash_add(rmnet_offload_flow_table, &flow->rof_flow_list,
flow->rof_hash_key);
goto node_found;
}
/* This is never hit, but to keep gcc happy... */
return false;
}
/* Tears down the internal engine state */
void rmnet_offload_engine_exit(void)
{
struct rmnet_offload_flow *flow;
struct hlist_node *tmp;
int bkt_cursor;
/* Avoid holding any pointers to memory that will be freed */
hash_for_each_safe(rmnet_offload_flow_table, bkt_cursor, tmp, flow,
rof_flow_list)
hash_del(&flow->rof_flow_list);
}
/* Initializes the internal engine state */
int rmnet_offload_engine_init(void)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
u8 i;
/* Initialize the flow nodes */
for (i = 0; i < RMNET_OFFLOAD_ENGINE_NUM_FLOWS; i++) {
struct rmnet_offload_flow *flow;
flow = &rmnet_offload->engine_state.roe_flow_pool[i];
INIT_LIST_HEAD(&flow->rof_pkts);
INIT_HLIST_NODE(&flow->rof_flow_list);
}
return RMNET_OFFLOAD_MGMT_SUCCESS;
}

View File

@@ -1,75 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_ENGINE_H__
#define __RMNET_OFFLOAD_ENGINE_H__
#include <linux/types.h>
#include "rmnet_offload_main.h"
#define RMNET_OFFLOAD_ENGINE_NUM_FLOWS 50
enum {
RMNET_OFFLOAD_ENGINE_FLUSH_ALL,
RMNET_OFFLOAD_ENGINE_FLUSH_SOME,
RMNET_OFFLOAD_ENGINE_FLUSH_NONE,
};
enum {
RMNET_OFFLOAD_ENGINE_MODE_MIN,
RMNET_OFFLOAD_ENGINE_MODE_ALL = RMNET_OFFLOAD_ENGINE_MODE_MIN,
RMNET_OFFLOAD_ENGINE_MODE_TCP,
RMNET_OFFLOAD_ENGINE_MODE_UDP,
RMNET_OFFLOAD_ENGINE_MODE_NONE,
RMNET_OFFLOAD_ENGINE_MODE_MAX = RMNET_OFFLOAD_ENGINE_MODE_NONE,
};
struct rmnet_offload_flow {
/* Lists */
struct hlist_node rof_flow_list;
struct list_head rof_pkts;
/* Flow header information */
struct rmnet_offload_header_info rof_hdrs;
/* 5 tuple hash key */
u32 rof_hash_key;
/* Total data length */
u16 rof_len;
/* TCP sequence number */
u32 rof_tcp_seq;
/* GSO segment size */
u16 rof_gso_len;
/* Number of packets in the flow */
u8 rof_pkts_held;
};
struct rmnet_offload_engine_state {
struct rmnet_offload_flow roe_flow_pool[RMNET_OFFLOAD_ENGINE_NUM_FLOWS];
u8 roe_nodes_used;
u8 roe_recycle_idx;
};
void rmnet_offload_engine_enable_chain_flush(void);
void rmnet_offload_engine_disable_chain_flush(void);
int rmnet_offload_engine_mode_change(u64 old_mode, u64 new_mode);
void rmnet_offload_engine_flush_flow(struct rmnet_offload_flow *flow,
struct list_head *flush_list);
void rmnet_offload_engine_flush_by_hash(u32 hash_val,
struct list_head *flush_list);
u32 rmnet_offload_engine_flush_all_flows(struct list_head *flush_list);
void rmnet_offload_engine_add_flow_pkt(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt);
bool rmnet_offload_engine_ingress(struct rmnet_offload_info *pkt,
struct list_head *flush_list);
void rmnet_offload_engine_exit(void);
int rmnet_offload_engine_init(void);
#endif

View File

@@ -1,178 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/moduleparam.h>
#include "rmnet_offload_knob.h"
#include "rmnet_offload_main.h"
#include "rmnet_offload_engine.h"
/* OK, this whole song and dance requires some explanation.
*
* There are several things that I needed to satisfy when creating this
* framework:
* 1) The setting of any parameter NEEDS to be synchronized with the rest
* of the module. We have to take the lock BEFORE the value is changed,
* allow the module to react to the new value as necessary, set the
* new value, then unlock. This requires custom SET operations for each
* parameter.
* 2) Each parameter has a different range of acceptable values, and a
* different starting value. The handler for each knob must be aware of
* these values and enforce them.
* 3) The external parameter name should be purposely vague (knob0, knob1)
* and different than the internal stat name.
*
* (1) and (2) taken together results in the core of the knob framework. Since
* much of the handling of setting a knob value is the same, having a common
* handler is desirable. Handlers for each knob value should tell this main
* handler what knob they are, and what their acceptable value range is. As the
* arguments to the SET handlers for kernel params lack any information about
* knob is being set, each knob requires its own handler that passes a hard-
* coded value to the main handler to look up the appropriate value ranges.
* This means that each knob must be exported individually; we cannot use an
* array of module parameters like we can with the statistics as there's no way
* of specifying a different set of kernel_param_ops for each element.
*
* Unfortunately, this API requirement makes (3) more difficult for the
* programmer because of the C preprocessor. You can't simply
* make macros for each configurable knob and append them to some
* vague stem name with ##. The C preprocessor will not resolve arithmetic, and
* the resulting name, say rmnet_offload_knob(x-y+z), is definitely not a valid
* identifier name. GCC rejects this as an invalid symbol when concatenating
* with ## and terminates the compilation. As such, you have to name each knob
* by hand. Sorry about that...
*
* Thus, the final workflow is this:
* 1) Call RMNET_OFFLOAD_KNOB_HANDLER() with the knob's enum value to set up
* the custom SET function
* 2) Add RMNET_OFFLOAD_KNOB_DECLARE() to the main rmnet_offload_knobs[] with
* the range of acceptable values, the starting value, and any callback
* needed for the module to take any appropriate action before the value
* is changed.
* 3) Call RMNET_OFFLOAD_KNOB_INIT() with the external name for your new knob
* to register the final module param with the kernel.
*/
#define RMNET_OFFLOAD_KNOB_HANDLER(knob) \
static int __ ## knob(const char *val, const struct kernel_param *kp) \
{ \
return __rmnet_offload_knob_set(val, kp, knob); \
}
#define RMNET_OFFLOAD_KNOB_DECLARE(knob, def_val, min_val, max_val, cb) \
(struct __rmnet_offload_knob) { \
.knob_val = def_val, \
.knob_min = min_val, \
.knob_max = max_val, \
.knob_cb = cb, \
.knob_ops = { \
.set = __ ## knob, \
.get = param_get_ullong, \
}, \
}
#define RMNET_OFFLOAD_KNOB_INIT(knob_name, knob) \
module_param_cb(knob_name, &rmnet_offload_knobs[knob].knob_ops, \
&rmnet_offload_knobs[knob].knob_val, 0644)
struct __rmnet_offload_knob {
u64 knob_val;
u64 knob_min;
u64 knob_max;
int (*knob_cb)(u64 old_val, u64 new_val);
struct kernel_param_ops knob_ops;
};
/* Forward declaration of our main value setting handler */
static int __rmnet_offload_knob_set(const char *val,
const struct kernel_param *kp, u32 knob);
/* Initialize the handlers for each knob */
RMNET_OFFLOAD_KNOB_HANDLER(RMNET_OFFLOAD_KNOB_TCP_BYTE_LIMIT);
RMNET_OFFLOAD_KNOB_HANDLER(RMNET_OFFLOAD_KNOB_UDP_BYTE_LIMIT);
RMNET_OFFLOAD_KNOB_HANDLER(RMNET_OFFLOAD_KNOB_ENGINE_MODE);
/* Our knob array. This stores the knob metadata (range of values, get and set
* operations, callback, initial value), and the current value of the knob.
*/
static struct __rmnet_offload_knob
rmnet_offload_knobs[RMNET_OFFLOAD_KNOB_MAX] = {
RMNET_OFFLOAD_KNOB_DECLARE(RMNET_OFFLOAD_KNOB_TCP_BYTE_LIMIT, 65000,
0, 65000, NULL),
RMNET_OFFLOAD_KNOB_DECLARE(RMNET_OFFLOAD_KNOB_UDP_BYTE_LIMIT, 65000,
0, 65000, NULL),
RMNET_OFFLOAD_KNOB_DECLARE(RMNET_OFFLOAD_KNOB_ENGINE_MODE,
RMNET_OFFLOAD_ENGINE_MODE_ALL,
RMNET_OFFLOAD_ENGINE_MODE_MIN,
RMNET_OFFLOAD_ENGINE_MODE_MAX,
rmnet_offload_engine_mode_change),
};
/* Handle changing the knob value. Checks to make sure the value given is in
* range, and informs the rest of the module of the change if needed.
*/
static int __rmnet_offload_knob_set(const char *val,
const struct kernel_param *kp, u32 knob)
{
struct __rmnet_offload_knob *knob_def;
unsigned long long new_val;
u64 old_val;
int rc;
/* Protect us from ourselves */
if (knob >= RMNET_OFFLOAD_KNOB_MAX)
return -EINVAL;
/* Extract the value from the string. Very similar to param_set_ullong,
* but I don't want to trash the old value immediately.
*/
rc = kstrtoull(val, 0, &new_val);
if (rc < 0)
return rc;
/* Ensure value is within bounds */
knob_def = &rmnet_offload_knobs[knob];
if ((u64)new_val < knob_def->knob_min ||
(u64)new_val > knob_def->knob_max)
return -ERANGE;
/* Lock ourselves down for synchronization with packet processing */
rmnet_offload_lock();
old_val = *(u64 *)kp->arg;
if ((u64)new_val == old_val) {
/* Nothing to change. Let's bail early */
rmnet_offload_unlock();
return 0;
}
if (knob_def->knob_cb) {
rc = knob_def->knob_cb(old_val, (u64)new_val);
if (rc < 0) {
rmnet_offload_unlock();
return rc;
}
}
/* Set the new value */
*(u64 *)kp->arg = (u64)new_val;
rmnet_offload_unlock();
return 0;
}
/* Create the module parameters. */
RMNET_OFFLOAD_KNOB_INIT(rmnet_offload_knob0, RMNET_OFFLOAD_KNOB_TCP_BYTE_LIMIT);
RMNET_OFFLOAD_KNOB_INIT(rmnet_offload_knob1, RMNET_OFFLOAD_KNOB_UDP_BYTE_LIMIT);
RMNET_OFFLOAD_KNOB_INIT(rmnet_offload_knob2, RMNET_OFFLOAD_KNOB_ENGINE_MODE);
/* Retrieve the value of a knob */
u64 rmnet_offload_knob_get(u32 knob)
{
struct __rmnet_offload_knob *knob_def;
if (knob >= RMNET_OFFLOAD_KNOB_MAX)
return (u64)~0;
knob_def = &rmnet_offload_knobs[knob];
return knob_def->knob_val;
}
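
To make the three-step workflow described in the long comment above concrete, here is a purely hypothetical fourth knob. RMNET_OFFLOAD_KNOB_FOO does not exist in the driver: it would have to be added to the enum in rmnet_offload_knob.h ahead of RMNET_OFFLOAD_KNOB_MAX, the HANDLER line must appear before the rmnet_offload_knobs[] definition, and the DECLARE line belongs inside that existing array initializer.

/* Hypothetical example only: wiring up a new RMNET_OFFLOAD_KNOB_FOO. */

/* Step 1: generate its custom SET handler (placed before rmnet_offload_knobs[]). */
RMNET_OFFLOAD_KNOB_HANDLER(RMNET_OFFLOAD_KNOB_FOO);

/* Step 2: describe it inside rmnet_offload_knobs[] -- default 0, valid
 * range 0..10, no callback needed when the value changes:
 *
 *      RMNET_OFFLOAD_KNOB_DECLARE(RMNET_OFFLOAD_KNOB_FOO, 0, 0, 10, NULL),
 */

/* Step 3: expose it under an intentionally vague external name. */
RMNET_OFFLOAD_KNOB_INIT(rmnet_offload_knob3, RMNET_OFFLOAD_KNOB_FOO);

Registered this way, the knob would typically appear alongside knob0..knob2 under /sys/module/rmnet_offload/parameters/, and every write to it funnels through __rmnet_offload_knob_set with the declared bounds enforced under the module lock.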

View File

@@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_KNOB_H__
#define __RMNET_OFFLOAD_KNOB_H__
#include <linux/types.h>
enum {
RMNET_OFFLOAD_KNOB_TCP_BYTE_LIMIT,
RMNET_OFFLOAD_KNOB_UDP_BYTE_LIMIT,
RMNET_OFFLOAD_KNOB_ENGINE_MODE,
RMNET_OFFLOAD_KNOB_MAX,
};
u64 rmnet_offload_knob_get(u32 knob);
#endif

View File

@@ -1,585 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload main handlers and helpers */
#include <linux/compiler.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/spinlock.h>
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_map.h"
#include "rmnet_module.h"
#include "rmnet_offload_main.h"
#include "rmnet_offload_state.h"
#include "rmnet_offload_engine.h"
#include "rmnet_offload_stats.h"
/* Insert newest first, last 4 bytes of the change id */
static char *verinfo[] = {
"7972254c",
"36f0d8b1",
"58aa9bee",
"c8acaf85",
"e218f451",
"2a44f6be",
"7415921c"};
module_param_array(verinfo, charp, NULL, 0444);
MODULE_PARM_DESC(verinfo, "Version of the driver");
/* Lock around our operations for synchronization with flushing and mode
* changes.
*/
static DEFINE_SPINLOCK(rmnet_offload_main_lock);
/* Computes the flow hash over the packet's 5 tuple */
static u32 rmnet_offload_compute_flow_hash(struct rmnet_offload_info *pkt_info)
{
struct rmnet_offload_header_info *pkt_hdr = &pkt_info->roi_hdrs;
__be32 pkt_five_tuple[11];
u32 flow_hash_key_len;
__be16 sport = 0, dport = 0;
if (pkt_hdr->roh_trans_proto == RMNET_OFFLOAD_PROTO_UDP ||
pkt_hdr->roh_trans_proto == RMNET_OFFLOAD_PROTO_TCP) {
sport = pkt_hdr->roh_sport;
dport = pkt_hdr->roh_dport;
}
if (pkt_hdr->roh_ip_proto == 0x4) {
pkt_five_tuple[0] = pkt_hdr->roh_daddr4;
pkt_five_tuple[1] = pkt_hdr->roh_saddr4;
pkt_five_tuple[2] = pkt_hdr->roh_trans_proto;
pkt_five_tuple[3] = dport;
pkt_five_tuple[4] = sport;
flow_hash_key_len = 5;
} else {
memcpy(&pkt_five_tuple[0], &pkt_hdr->roh_daddr6[0],
sizeof(pkt_hdr->roh_daddr6));
memcpy(&pkt_five_tuple[5], &pkt_hdr->roh_saddr6[0],
sizeof(pkt_hdr->roh_daddr6));
pkt_five_tuple[8] = pkt_hdr->roh_trans_proto;
pkt_five_tuple[9] = dport;
pkt_five_tuple[10] = sport;
flow_hash_key_len = 11;
}
return jhash2(pkt_five_tuple, flow_hash_key_len, 0);
}
static void rmnet_offload_update_pkt_size_stats(u32 pkt_len)
{
u32 size_stat;
if (pkt_len > 50000)
size_stat = RMNET_OFFLOAD_STAT_SIZE_50000_PLUS;
else if (pkt_len > 30000)
size_stat = RMNET_OFFLOAD_STAT_SIZE_30000_PLUS;
else if (pkt_len > 23000)
size_stat = RMNET_OFFLOAD_STAT_SIZE_23000_PLUS;
else if (pkt_len > 14500)
size_stat = RMNET_OFFLOAD_STAT_SIZE_14500_PLUS;
else if (pkt_len > 7000)
size_stat = RMNET_OFFLOAD_STAT_SIZE_7000_PLUS;
else if (pkt_len > 1400)
size_stat = RMNET_OFFLOAD_STAT_SIZE_1400_PLUS;
else
size_stat = RMNET_OFFLOAD_STAT_SIZE_0_PLUS;
rmnet_offload_stats_update(size_stat);
}
static bool rmnet_offload_dissect_pkt(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_offload_info *pkt_info)
{
struct rmnet_offload_header_info *pkt_hdr = &pkt_info->roi_hdrs;
u8 *payload;
u16 pkt_len;
u16 ip_pkt_len;
pkt_len = frag_desc->len;
/* Guilty until proven innocent */
pkt_info->roi_skip_hash = true;
if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
/* Sorry, coalescing only makes sense if RX checksum offload
* is enabled.
*/
goto done;
}
/* Check if the core driver already did work for us during RSB/RSC
* packet processing. This allows us to skip many sanity checks as well
* as some computation.
*/
if (frag_desc->hdrs_valid) {
struct rmnet_offload_udphdr *up, __up;
struct rmnet_offload_tcphdr *tp, __tp;
/* Grab header lengths and protocols */
pkt_hdr->roh_ip_proto = frag_desc->ip_proto;
pkt_hdr->roh_ip_len = frag_desc->ip_len;
pkt_hdr->roh_trans_proto = frag_desc->trans_proto;
pkt_hdr->roh_trans_len = frag_desc->trans_len;
pkt_len = frag_desc->len;
/* Grab the IP flags from the header */
if (pkt_hdr->roh_ip_proto == 0x4) {
struct rmnet_offload_iphdr *iph, __iph;
iph = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*iph),
&__iph);
if (!iph)
goto done;
pkt_hdr->roh_saddr4 = iph->roip_saddr;
pkt_hdr->roh_daddr4 = iph->roip_daddr;
pkt_hdr->roh_ip_ttl = iph->roip_ttl;
pkt_hdr->roh_ip_tos = iph->roip_tos;
pkt_hdr->roh_ip_frag_off = iph->roip_frag_off;
} else {
struct rmnet_offload_ipv6hdr *ip6h, __ip6h;
ip6h = rmnet_frag_header_ptr(frag_desc, 0,
sizeof(*ip6h), &__ip6h);
if (!ip6h)
goto done;
memcpy(&pkt_hdr->roh_saddr6[0], &ip6h->roipv6_saddr[0],
sizeof(ip6h->roipv6_saddr));
memcpy(&pkt_hdr->roh_daddr6[0], &ip6h->roipv6_daddr[0],
sizeof(ip6h->roipv6_daddr));
pkt_hdr->roh_flag_word = ip6h->roipv6_flow_lbl;
if (pkt_hdr->roh_ip_len > sizeof(*ip6h)) {
int dummy_len;
__be16 roi_frag_off;
bool roi_frag;
u8 roi_proto = ip6h->roipv6_nexthdr;
/* Extension headers are present. And that
* means an empty fragment header could ALSO
* be present, as IPA can coalesce those.
* Kernel can't handle those if we're able to
* coalesce past MAX_SKB_FRAGS and move to the
* fraglist; defragmentation will trash the
* skb.
*
* Unfortunately, there's not really a good
* way of avoiding the reparse.
*/
dummy_len =
rmnet_frag_ipv6_skip_exthdr(frag_desc,
sizeof(*ip6h),
&roi_proto,
&roi_frag_off,
&roi_frag);
if (dummy_len < 0 || roi_frag_off || roi_frag) {
/* Frag detected */
if (roi_proto == RMNET_OFFLOAD_PROTO_FRAGMENT)
pkt_hdr->roh_ip_len += 8;
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_FRAG_FLUSH);
goto done;
}
}
}
/* Grab ports and tcp sequence number if needed */
up = rmnet_frag_header_ptr(frag_desc, pkt_hdr->roh_ip_len,
sizeof(*up), &__up);
if (!up)
goto done;
pkt_hdr->roh_sport = up->roudp_source;
pkt_hdr->roh_dport = up->roudp_dest;
if (pkt_hdr->roh_trans_proto == RMNET_OFFLOAD_PROTO_TCP) {
__be32 seq_no;
tp = rmnet_frag_header_ptr(frag_desc,
pkt_hdr->roh_ip_len,
sizeof(*tp), &__tp);
if (!tp)
goto done;
if (frag_desc->tcp_seq_set)
seq_no = frag_desc->tcp_seq;
else
seq_no = tp->rotcp_seq;
pkt_hdr->roh_tcp_seq = ntohl(seq_no);
}
/* Compute the flow hash since this is guaranteed to be a
* valid TCP/UDP non-fragmented packet.
*/
pkt_info->roi_hash_key =
rmnet_offload_compute_flow_hash(pkt_info);
/* Compute the data length of the packet */
pkt_info->roi_payload_len = frag_desc->len -
frag_desc->ip_len -
frag_desc->trans_len;
/* Store the frag_descriptor and we're in business */
pkt_info->roi_frag_desc = frag_desc;
pkt_info->roi_skip_hash = false;
return false;
}
/* This isn't an RSB/RSC packet, so all bets are off. Make sure
* everything is valid before we continue.
*
* We need to go deeper. Grab your totem and let's go!
*/
payload = rmnet_frag_data_ptr(frag_desc);
if (unlikely(!payload))
return true;
pkt_hdr->roh_ip_proto = (payload[0] & 0xF0) >> 4;
if (pkt_hdr->roh_ip_proto == 0x4) {
struct rmnet_offload_iphdr *iph, __iph;
iph = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*iph),
&__iph);
if (!iph)
goto done;
pkt_hdr->roh_ip_len = iph->roip_ihl * 4;
pkt_hdr->roh_trans_proto = iph->roip_protocol;
pkt_hdr->roh_saddr4 = iph->roip_saddr;
pkt_hdr->roh_daddr4 = iph->roip_daddr;
pkt_hdr->roh_ip_ttl = iph->roip_ttl;
pkt_hdr->roh_ip_tos = iph->roip_tos;
pkt_hdr->roh_ip_frag_off = iph->roip_frag_off;
/* Flush out any fragment packets immediately.
* Mask value is equivalent to IP_MF (0x2000) OR'd
* with IP_OFFSET (0x1FFF).
*/
if (iph->roip_frag_off & htons(0x3FFF)) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_FRAG_FLUSH);
goto done;
}
/* Check for length mismatch */
ip_pkt_len = ntohs(iph->roip_tot_len);
pkt_info->roi_len_mismatch = ip_pkt_len != pkt_len;
} else if (pkt_hdr->roh_ip_proto == 0x6) {
struct rmnet_offload_ipv6hdr *ip6h, __ip6h;
int roi_v6_len;
__be16 roi_frag_off;
bool roi_frag;
u8 roi_v6_proto;
ip6h = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*ip6h),
&__ip6h);
if (!ip6h)
goto done;
roi_v6_proto = ip6h->roipv6_nexthdr;
/* Dive down the ipv6 header chain */
roi_v6_len = rmnet_frag_ipv6_skip_exthdr(frag_desc,
sizeof(*ip6h),
&roi_v6_proto,
&roi_frag_off,
&roi_frag);
if (roi_v6_len < 0) {
/* Something somewhere has gone horribly wrong. Let
* the stack deal with it.
*/
goto done;
}
pkt_hdr->roh_ip_len = (u16)roi_v6_len;
pkt_hdr->roh_trans_proto = roi_v6_proto;
memcpy(&pkt_hdr->roh_saddr6[0], &ip6h->roipv6_saddr[0],
sizeof(ip6h->roipv6_saddr));
memcpy(&pkt_hdr->roh_daddr6[0], &ip6h->roipv6_daddr[0],
sizeof(ip6h->roipv6_daddr));
pkt_hdr->roh_flag_word = ip6h->roipv6_flow_lbl;
/* Flush out any fragment packets immediately */
if (roi_frag_off || roi_frag) {
/* Add in the fragment header length to any non-first
* fragment packets.
*/
if (pkt_hdr->roh_trans_proto ==
RMNET_OFFLOAD_PROTO_FRAGMENT)
pkt_hdr->roh_ip_len += 8;
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_FRAG_FLUSH);
goto done;
}
/* Check for length mismatch */
ip_pkt_len = ntohs(ip6h->roipv6_payload_len) + sizeof(*ip6h);
pkt_info->roi_len_mismatch = ip_pkt_len != pkt_len;
} else {
/* Not a valid IP packet */
return true;
}
/* Down another level. Leo's gotta be around here somewhere... */
if (pkt_hdr->roh_trans_proto == RMNET_OFFLOAD_PROTO_TCP) {
struct rmnet_offload_tcphdr *tp, __tp;
tp = rmnet_frag_header_ptr(frag_desc, pkt_hdr->roh_ip_len,
sizeof(*tp), &__tp);
if (!tp)
goto done;
pkt_hdr->roh_trans_len = tp->rotcp_doff * 4;
pkt_hdr->roh_sport = tp->rotcp_source;
pkt_hdr->roh_dport = tp->rotcp_dest;
pkt_hdr->roh_tcp_seq = ntohl(tp->rotcp_seq);
} else if (pkt_hdr->roh_trans_proto == RMNET_OFFLOAD_PROTO_UDP) {
struct rmnet_offload_udphdr *up, __up;
up = rmnet_frag_header_ptr(frag_desc, pkt_hdr->roh_ip_len,
sizeof(*up), &__up);
if (!up)
goto done;
pkt_hdr->roh_trans_len = sizeof(*up);
pkt_hdr->roh_sport = up->roudp_source;
pkt_hdr->roh_dport = up->roudp_dest;
} else {
/* Not a protocol we can optimize */
goto done;
}
/* Everything seems fine. Go ahead and compute the hash */
pkt_info->roi_skip_hash = false;
pkt_info->roi_hash_key = rmnet_offload_compute_flow_hash(pkt_info);
if (!pkt_info->roi_len_mismatch) {
/* Copy the header info into the frag descriptor for the core
* driver to use later since everything is kosher.
*/
frag_desc->ip_proto = pkt_hdr->roh_ip_proto;
frag_desc->ip_len = pkt_hdr->roh_ip_len;
frag_desc->trans_proto = pkt_hdr->roh_trans_proto;
frag_desc->trans_len = pkt_hdr->roh_trans_len;
/* Now, and ONLY now, do we dare touch this bit */
frag_desc->hdrs_valid = 1;
}
done:
/* Set payload length based on the headers we found */
pkt_info->roi_payload_len = pkt_len - pkt_hdr->roh_ip_len -
pkt_hdr->roh_trans_len;
if (pkt_info->roi_len_mismatch)
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_LEN_MISMATCH);
/* Hold on to the descriptor for later */
pkt_info->roi_frag_desc = frag_desc;
return false;
}
/* The main entry point into the module from the core driver */
static void __rmnet_offload_ingress(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
struct rmnet_offload_info pkt_info;
LIST_HEAD(flush_list);
memset(&pkt_info, 0, sizeof(pkt_info));
rmnet_offload_lock();
rmnet_offload->core_port = port;
if (rmnet_offload_dissect_pkt(frag_desc, &pkt_info)) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_NON_IP_COUNT);
rmnet_recycle_frag_descriptor(frag_desc, port);
goto out;
}
/* We know the packet is an IP packet now */
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_PRE_IP_COUNT);
if (pkt_info.roi_skip_hash) {
/* We're not optimizing this packet */
goto flush;
} else if (pkt_info.roi_len_mismatch) {
/* Can't optimize this, but we are potentially holding other
* packets in this flow. Flush the flow with this hash value
* to avoid OOO packets.
*/
rmnet_offload_engine_flush_by_hash(pkt_info.roi_hash_key,
&flush_list);
goto flush;
}
/* Skip any bad checksum packets.
* We wait to do this until now to allow any packets that won't be
* checksummed by hardware (i.e. non-TCP/UDP, fragments, padding) to
* be caught by the above checks. This ensures we report stats
* correctly and don't increment the "bad checksum" field for otherwise
* valid packets.
*/
if (!frag_desc->csum_valid) {
/* Possible behavior change here. We know that the checksum is
* incorrect, so we flush the packet immediately; we do not
* flush anything internally. This can potentially make the bad
* packet show up in tcpdump as a TCP OOO packet. If we want to
* avoid that (even though it doesn't really hurt anything), we
* could flush by the hash. Worst case, one of the 5 tuple
* components was corrupted and the hash happens to match another
* flow we're holding, in which case that flow gets flushed prematurely.
*/
goto flush;
}
if (!rmnet_offload_engine_ingress(&pkt_info, &flush_list))
goto flush;
goto out;
flush:
rmnet_offload_flush_current_pkt(&pkt_info, &flush_list);
out:
rmnet_offload_unlock();
rmnet_offload_deliver_descs(&flush_list);
}
static void rmnet_offload_ingress(struct list_head *desc_list,
struct rmnet_port *port)
{
struct rmnet_frag_descriptor *frag, *tmp;
list_for_each_entry_safe(frag, tmp, desc_list, list) {
list_del_init(&frag->list);
__rmnet_offload_ingress(frag, port);
}
}
void rmnet_offload_lock(void)
{
spin_lock_bh(&rmnet_offload_main_lock);
}
void rmnet_offload_unlock(void)
{
spin_unlock_bh(&rmnet_offload_main_lock);
}
static const struct rmnet_module_hook_register_info
rmnet_offload_main_hook = {
.hooknum = RMNET_MODULE_HOOK_OFFLOAD_INGRESS,
.func = rmnet_offload_ingress,
};
void rmnet_offload_set_hooks(void)
{
rmnet_module_hook_register(&rmnet_offload_main_hook, 1);
}
void rmnet_offload_unset_hooks(void)
{
rmnet_module_hook_unregister_no_sync(&rmnet_offload_main_hook, 1);
}
/* Deliver the final descriptors to the core driver */
void rmnet_offload_deliver_descs(struct list_head *desc_list)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
struct rmnet_frag_descriptor *frag_desc, *tmp;
list_for_each_entry_safe(frag_desc, tmp, desc_list, list) {
/* Log the outgoing size */
rmnet_offload_update_pkt_size_stats(frag_desc->len);
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_POST_IP_COUNT);
list_del_init(&frag_desc->list);
rmnet_frag_deliver(frag_desc, rmnet_offload->core_port);
}
}
/* Flush the packet that we're currently handling back to the core driver */
void rmnet_offload_flush_current_pkt(struct rmnet_offload_info *pkt_info,
struct list_head *flush_list)
{
struct rmnet_frag_descriptor *frag_desc = pkt_info->roi_frag_desc;
u32 pkt_len = pkt_info->roi_payload_len +
pkt_info->roi_hdrs.roh_ip_len +
pkt_info->roi_hdrs.roh_trans_len;
/* Sanity check. Make sure the data will fit in the IP header */
if (pkt_len > 65536)
return;
/* Only set the hash key if we actually calculated it */
if (!pkt_info->roi_skip_hash)
frag_desc->hash = pkt_info->roi_hash_key;
list_add_tail(&frag_desc->list, flush_list);
}
/* Handles entering powersave mode. DL markers are turned off now.
* This is a no-op for us currently, as we don't need to change anything
* about our operation.
*/
void rmnet_offload_handle_powersave_on(void *port)
{
}
/* Handles exiting powersave mode. DL markers are turned on again.
* This is also a no-op for us currently, since we didn't change anything
* when powersave was enabled.
*/
void rmnet_offload_handle_powersave_off(void *port)
{
}
/* Handles DL marker start notifications from the core driver */
void
rmnet_offload_handle_dl_header(struct rmnet_map_dl_ind_hdr *dlhdr,
struct rmnet_map_control_command_header *cmd)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
LIST_HEAD(flush_list);
(void)cmd;
rmnet_offload_lock();
/* If we get multiple starts in a row, assume the end was lost and
* flush everything out.
*/
if (rmnet_offload->dl_marker_state.dl_marker_start &&
rmnet_offload_engine_flush_all_flows(&flush_list))
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_DL_START_FLUSH);
/* Store away the meta information */
rmnet_offload->dl_marker_state.dl_marker_start = true;
rmnet_offload->dl_marker_state.dl_marker_seq = dlhdr->le.seq;
rmnet_offload->dl_marker_state.dl_marker_pkts = dlhdr->le.pkts;
rmnet_offload_unlock();
rmnet_offload_deliver_descs(&flush_list);
}
/* Handles DL marker end notifications from the core driver */
void
rmnet_offload_handle_dl_trailer(struct rmnet_map_dl_ind_trl *dltrl,
struct rmnet_map_control_command_header *cmd)
{
struct rmnet_offload_state *rmnet_offload = rmnet_offload_state_get();
LIST_HEAD(flush_list);
(void)cmd;
rmnet_offload_lock();
/* Check on the sequence number. If they don't match, a marker was lost
* somewhere. Log it, but it doesn't change our behavior.
*/
if (rmnet_offload->dl_marker_state.dl_marker_seq != dltrl->seq_le)
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_DL_SEQ_MISMATCH);
/* Flush everything we've got */
if (rmnet_offload_engine_flush_all_flows(&flush_list))
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_DL_END_FLUSH);
/* Reset state information */
rmnet_offload->dl_marker_state.dl_marker_start = false;
rmnet_offload->dl_marker_state.dl_marker_seq = 0;
rmnet_offload->dl_marker_state.dl_marker_pkts = 0;
rmnet_offload_unlock();
rmnet_offload_deliver_descs(&flush_list);
}

View File

@@ -1,152 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_H__
#define __RMNET_OFFLOAD_H__
#include <linux/types.h>
#include <asm/byteorder.h>
#include "rmnet_descriptor.h"
#include "rmnet_map.h"
#define RMNET_OFFLOAD_PROTO_TCP 6
#define RMNET_OFFLOAD_PROTO_UDP 17
#define RMNET_OFFLOAD_PROTO_FRAGMENT 44
struct rmnet_offload_iphdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 roip_ihl:4;
u8 roip_version:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
u8 roip_version:4;
u8 roip_ihl:4;
#else
#error "<asm/byteorder.h> error"
#endif
u8 roip_tos;
__be16 roip_tot_len;
__be16 roip_id;
__be16 roip_frag_off;
u8 roip_ttl;
u8 roip_protocol;
__be16 roip_check;
__be32 roip_saddr;
__be32 roip_daddr;
};
struct rmnet_offload_ipv6hdr {
/* rmnet_offload doesn't care about the version field. So honestly,
* it's easier to just take the whole 32 bits as the flow label
*/
__be32 roipv6_flow_lbl;
__be16 roipv6_payload_len;
u8 roipv6_nexthdr;
u8 roipv6_hop_limit;
__be32 roipv6_saddr[4];
__be32 roipv6_daddr[4];
};
struct rmnet_offload_tcphdr {
__be16 rotcp_source;
__be16 rotcp_dest;
__be32 rotcp_seq;
__be32 rotcp_ack;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 rotcp_res:4;
u8 rotcp_doff:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
u8 rotcp_doff:4;
u8 rotcp_res:4;
#else
#error "<asm/byteorder.h> error"
#endif
u8 rotcp_flags;
__be16 rotcp_window;
__be16 rotcp_check;
__be16 rotcp_urg;
};
struct rmnet_offload_udphdr {
__be16 roudp_source;
__be16 roudp_dest;
__be16 roudp_len;
__be16 roudp_check;
};
struct rmnet_offload_header_info {
/* Port information */
__be16 roh_sport;
__be16 roh_dport;
/* Address information */
union {
__be32 roh_saddr4;
__be32 roh_saddr6[4];
};
union {
__be32 roh_daddr4;
__be32 roh_daddr6[4];
};
/* Header flags */
union {
struct {
u8 roh_ip_ttl;
u8 roh_ip_tos;
__be16 roh_ip_frag_off;
};
__be32 roh_flag_word;
};
/* TCP sequence number. Both the flow and the pkt info structs need
* this value at various times, so it makes sense to put it in this
* shared struct.
*/
u32 roh_tcp_seq;
/* Header lengths and protocols */
u16 roh_ip_len;
u16 roh_trans_len;
u8 roh_ip_proto;
u8 roh_trans_proto;
};
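/*
 * Editor's note, not in the original source: the union above overlays the
 * IPv4 TTL/TOS/fragment-offset trio with the raw 32-bit IPv6 flow-label word,
 * which appears intended to let a single 32-bit compare of roh_flag_word
 * detect "IP header changed" for either address family.
 */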
struct rmnet_offload_info {
struct rmnet_frag_descriptor *roi_frag_desc;
/* Packet headers */
struct rmnet_offload_header_info roi_hdrs;
/* 5 tuple hash key */
u32 roi_hash_key;
/* Payload length */
u16 roi_payload_len;
/* Packet meta information */
bool roi_first_pkt;
bool roi_skip_hash;
bool roi_len_mismatch;
};
void rmnet_offload_lock(void);
void rmnet_offload_unlock(void);
void rmnet_offload_set_hooks(void);
void rmnet_offload_unset_hooks(void);
void rmnet_offload_deliver_descs(struct list_head *desc_list);
void rmnet_offload_flush_current_pkt(struct rmnet_offload_info *pkt_info,
struct list_head *flush_list);
void rmnet_offload_handle_powersave_on(void *port);
void rmnet_offload_handle_powersave_off(void *port);
void
rmnet_offload_handle_dl_header(struct rmnet_map_dl_ind_hdr *dlhdr,
struct rmnet_map_control_command_header *cmd);
void
rmnet_offload_handle_dl_trailer(struct rmnet_map_dl_ind_trl *dltrl,
struct rmnet_map_control_command_header *cmd);
#endif

View File

@@ -1,222 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload configuration handlers */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "qmi_rmnet.h"
#include "rmnet_offload_state.h"
#include "rmnet_offload_engine.h"
MODULE_LICENSE("GPL v2");
#define DL_PRIO_RMNET_OFFLOAD 0
/* Our internal state */
static struct rmnet_offload_state *rmnet_offload;
static void rmnet_offload_state_deregister_cb(void)
{
struct rmnet_port *port = rmnet_offload->core_port;
struct rmnet_offload_dl_marker_state *dl_state;
qmi_rmnet_ps_ind_deregister(port, &rmnet_offload->powersave_ind);
dl_state = &rmnet_offload->dl_marker_state;
if (dl_state->dl_marker_cb_registered)
rmnet_map_dl_ind_deregister(port, &dl_state->dl_ind);
}
static void rmnet_offload_state_free(void)
{
LIST_HEAD(flush_list);
/* Nothing to free if it was never allocated */
if (!rmnet_offload)
return;
/* Unhook ourselves from the other drivers */
rmnet_offload_state_deregister_cb();
rmnet_offload_unset_hooks();
rmnet_offload_engine_disable_chain_flush();
synchronize_rcu();
/* Flush out before we destroy everything */
rmnet_offload_engine_flush_all_flows(&flush_list);
rmnet_offload_engine_exit();
kfree(rmnet_offload);
rmnet_offload = NULL;
}
/* Conditionally register for DL marker callbacks and powersave notifications
* from the core and DFC drivers.
*/
static int rmnet_offload_state_register_cb(void)
{
struct rmnet_port *port = rmnet_offload->core_port;
struct qmi_rmnet_ps_ind *ps_ind;
struct rmnet_offload_dl_marker_state *dl_state;
struct rmnet_map_dl_ind *dl_ind;
int rc = RMNET_OFFLOAD_MGMT_SUCCESS;
/* Register for powersave indications */
ps_ind = &rmnet_offload->powersave_ind;
ps_ind->ps_on_handler = rmnet_offload_handle_powersave_on;
ps_ind->ps_off_handler = rmnet_offload_handle_powersave_off;
if (qmi_rmnet_ps_ind_register(port, ps_ind)) {
/* These callbacks are a no-op currently, but we might as well
* warn about the failure, since that points to a problem in
* rmnet_core.ko.
*/
rc = RMNET_OFFLOAD_MGMT_PARTIAL;
pr_warn("%s(): PS CB registration failed\n", __func__);
}
dl_state = &rmnet_offload->dl_marker_state;
dl_ind = &dl_state->dl_ind;
dl_ind->priority = DL_PRIO_RMNET_OFFLOAD;
dl_ind->dl_hdr_handler_v2 =
rmnet_offload_handle_dl_header;
dl_ind->dl_trl_handler_v2 =
rmnet_offload_handle_dl_trailer;
if (rmnet_map_dl_ind_register(port, dl_ind)) {
rc = RMNET_OFFLOAD_MGMT_PARTIAL;
pr_warn("%s(): DL CB registratation failed\n",
__func__);
} else {
dl_state->dl_marker_cb_registered = true;
}
/* Flush on the end of SKB chains as a backup for DL markers */
rmnet_offload_engine_enable_chain_flush();
return rc;
}
static int rmnet_offload_state_init(struct rmnet_port *port)
{
int rc;
rmnet_offload = kzalloc(sizeof(*rmnet_offload), GFP_KERNEL);
if (!rmnet_offload) {
pr_err("%s(): Resource allocation failed\n", __func__);
return RMNET_OFFLOAD_MGMT_FAILURE;
}
/* Store the port struct for processing */
rmnet_offload->core_port = port;
/* Let the engine core initialize itself */
rc = rmnet_offload_engine_init();
if (rc < 0)
goto fail;
/* Register for callbacks */
rc = rmnet_offload_state_register_cb();
if (rc != RMNET_OFFLOAD_MGMT_SUCCESS) {
/* Traditionally this has not been an error. We just
* warned about it.
*/
pr_warn("%s(): Callback registration failed\n", __func__);
}
rmnet_offload->rmnet_offload_vnd_count = 1;
/* Everything is ready. Say hello to the core driver */
rmnet_offload_set_hooks();
return rc;
fail:
kfree(rmnet_offload);
rmnet_offload = NULL;
return rc;
}
static int rmnet_offload_state_notifier(struct notifier_block *nb,
unsigned long notify_event,
void *notify_data)
{
struct net_device *device = netdev_notifier_info_to_dev(notify_data);
struct rmnet_port *port;
struct rmnet_priv *priv;
int rc;
(void)nb;
/* We only care about rmnet devices */
if (!device || strncmp(device->name, "rmnet_data", 10))
goto done;
switch (notify_event) {
case NETDEV_REGISTER:
/* Don't initialize if we've already done so */
if (rmnet_offload) {
/* Increment the device count and we're done */
rmnet_offload->rmnet_offload_vnd_count++;
goto done;
}
priv = netdev_priv(device);
port = rmnet_get_port(priv->real_dev);
if (!port) {
pr_err("%s(): Invalid rmnet configuration on %s\n",
__func__, device->name);
goto done;
}
pr_info("%s(): Initializing on device %s\n", __func__,
device->name);
rc = rmnet_offload_state_init(port);
if (rc == RMNET_OFFLOAD_MGMT_FAILURE) {
pr_err("%s(): Initialization failed\n", __func__);
goto done;
}
break;
case NETDEV_UNREGISTER:
/* Don't uninitialize if we never initialized */
if (!rmnet_offload)
goto done;
/* Decrement vnd count and free if no more devices */
if (--rmnet_offload->rmnet_offload_vnd_count)
goto done;
pr_info("%s(): Uninitializing on device %s\n", __func__,
device->name);
rmnet_offload_state_free();
break;
}
done:
return NOTIFY_DONE;
}
static struct notifier_block rmnet_offload_state_notifier_block = {
.notifier_call = rmnet_offload_state_notifier,
.priority = 1,
};
static int __init rmnet_offload_init(void)
{
pr_info("%s(): rmnet_offload initializing\n", __func__);
return register_netdevice_notifier(&rmnet_offload_state_notifier_block);
}
static void __exit rmnet_offload_exit(void)
{
pr_info("%s(): rmnet_offload exiting\n", __func__);
unregister_netdevice_notifier(&rmnet_offload_state_notifier_block);
}
/* Internal state accessor */
struct rmnet_offload_state *rmnet_offload_state_get(void)
{
return rmnet_offload;
}
module_init(rmnet_offload_init);
module_exit(rmnet_offload_exit);

View File

@@ -1,39 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2014, 2016-2017, 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_STATE_H__
#define __RMNET_OFFLOAD_STATE_H__
#include "rmnet_offload_engine.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
#include "qmi_rmnet.h"
enum {
RMNET_OFFLOAD_MGMT_SUCCESS,
RMNET_OFFLOAD_MGMT_PARTIAL,
RMNET_OFFLOAD_MGMT_FAILURE,
};
struct rmnet_offload_dl_marker_state {
struct rmnet_map_dl_ind dl_ind;
u32 dl_marker_seq;
u32 dl_marker_pkts;
bool dl_marker_cb_registered;
bool dl_marker_start;
};
struct rmnet_offload_state {
struct rmnet_port *core_port;
struct rmnet_offload_dl_marker_state dl_marker_state;
struct qmi_rmnet_ps_ind powersave_ind;
struct rmnet_offload_engine_state engine_state;
u8 rmnet_offload_vnd_count;
};
struct rmnet_offload_state *rmnet_offload_state_get(void);
#endif

View File

@@ -1,24 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload statistics interface */
#include <linux/moduleparam.h>
#include "rmnet_offload_stats.h"
static u64 rmnet_offload_stats[RMNET_OFFLOAD_STAT_MAX];
module_param_array_named(rmnet_offload_stat, rmnet_offload_stats, ullong,
NULL, 0444);
void __rmnet_offload_stats_update(u32 stat, u64 inc)
{
if (stat < RMNET_OFFLOAD_STAT_MAX)
rmnet_offload_stats[stat] += inc;
}
void rmnet_offload_stats_update(u32 stat)
{
__rmnet_offload_stats_update(stat, 1);
}

View File

@@ -1,69 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_STATS_H__
#define __RMNET_OFFLOAD_STATS_H__
#include <linux/types.h>
enum {
/* Number of inbound IP packets */
RMNET_OFFLOAD_STAT_PRE_IP_COUNT,
/* Number of outbound IP packets */
RMNET_OFFLOAD_STAT_POST_IP_COUNT,
/* Number of non-IP packets dropped */
RMNET_OFFLOAD_STAT_NON_IP_COUNT,
/* Number of flushes caused by 2 start markers in a row */
RMNET_OFFLOAD_STAT_DL_START_FLUSH,
/* Number of flushes caused by start-end marker sequence mismatch */
RMNET_OFFLOAD_STAT_DL_SEQ_MISMATCH,
/* Number of flushes caused by end markers */
RMNET_OFFLOAD_STAT_DL_END_FLUSH,
/* Number of IP fragments received */
RMNET_OFFLOAD_STAT_FRAG_FLUSH,
/* Number of QMAP-IP packet length mismatches */
RMNET_OFFLOAD_STAT_LEN_MISMATCH,
/* Number of flows evicted to make room for another */
RMNET_OFFLOAD_STAT_FLOW_EVICT,
/* Number of flushes caused by end of skb chain */
RMNET_OFFLOAD_STAT_CHAIN_FLUSH,
/* Number of outbound TCP/UDP packets not coalesced because of protocol mode */
RMNET_OFFLOAD_STAT_PROTO_SKIPPED,
/* Number of outbound TCP/UDP packets flushed because of mode changes */
RMNET_OFFLOAD_STAT_PROTO_FLUSH,
/* Number of outbound TCP packets flushed because of IP header changes */
RMNET_OFFLOAD_STAT_TCP_FORCE_FLUSH,
/* Number of outbound TCP packets flushed because of TCP flags */
RMNET_OFFLOAD_STAT_TCP_FLAG_FLUSH,
/* Number of outbound TCP packets flushed because of TCP option changes */
RMNET_OFFLOAD_STAT_TCP_OPTION_FLUSH,
/* Number of outbound TCP packets flushed because of out-of-order sequencing */
RMNET_OFFLOAD_STAT_TCP_OOO_FLUSH,
/* Number of outbound TCP packets flushed because of changing data length */
RMNET_OFFLOAD_STAT_TCP_LEN_FLUSH,
/* Number of outbound TCP packets flushed because of hitting max byte limit */
RMNET_OFFLOAD_STAT_TCP_BYTE_FLUSH,
/* Number of outbound UDP packets flushed because of IP header changes */
RMNET_OFFLOAD_STAT_UDP_FORCE_FLUSH,
/* Number of outbound UDP packets flushed because of changing data length */
RMNET_OFFLOAD_STAT_UDP_LEN_FLUSH,
/* Number of outbound UDP packets flushed because of hitting max byte limit */
RMNET_OFFLOAD_STAT_UDP_BYTE_FLUSH,
/* Outbound packet size distribution */
RMNET_OFFLOAD_STAT_SIZE_0_PLUS,
RMNET_OFFLOAD_STAT_SIZE_1400_PLUS,
RMNET_OFFLOAD_STAT_SIZE_7000_PLUS,
RMNET_OFFLOAD_STAT_SIZE_14500_PLUS,
RMNET_OFFLOAD_STAT_SIZE_23000_PLUS,
RMNET_OFFLOAD_STAT_SIZE_30000_PLUS,
RMNET_OFFLOAD_STAT_SIZE_50000_PLUS,
RMNET_OFFLOAD_STAT_MAX,
};
void __rmnet_offload_stats_update(u32 stat, u64 inc);
void rmnet_offload_stats_update(u32 stat);
#endif
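/*
 * Editor's illustrative sketch, not part of the original sources: the counter
 * array indexed by this enum is exported read-only via
 * module_param_array_named() in rmnet_offload_stats.c, so it shows up under
 * /sys/module/<module name>/parameters/rmnet_offload_stat as a single
 * comma-separated line. A minimal standalone userspace reader (the module
 * name "rmnet_offload" is an assumption):
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/rmnet_offload/parameters/rmnet_offload_stat", "r");
	char line[4096];

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		/* index 0 is PRE_IP_COUNT, 1 is POST_IP_COUNT, and so on */
		printf("%s", line);
	fclose(f);
	return 0;
}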

View File

@@ -1,180 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload TCP optimization engine */
#include <linux/list.h>
#include "rmnet_descriptor.h"
#include "rmnet_offload_main.h"
#include "rmnet_offload_engine.h"
#include "rmnet_offload_stats.h"
#include "rmnet_offload_knob.h"
union rmnet_offload_tcp_opts {
struct rmnet_offload_tcphdr rotopt_tcp;
/* Maximum TCP header size. (doff * 4) */
u8 rotopt_buf[60];
};
/* Check if the TCP flags prevent us from coalescing the packet */
static bool rmnet_offload_tcp_flag_flush(struct rmnet_offload_info *pkt)
{
struct rmnet_offload_tcphdr *tp, __tp;
__be32 flush_mask;
u8 flags;
tp = rmnet_frag_header_ptr(pkt->roi_frag_desc, pkt->roi_hdrs.roh_ip_len,
sizeof(*tp), &__tp);
if (!tp)
/* How did you even get this far? Panic and flush everything */
return true;
/* OK, being kinda cheeky here to hide this a bit more than it would
* be otherwise, but it also cuts down on the number of conditions in
* the if statement, so you can check the flags in a single AND.
*
* TCP flags are as follows:
* | C | E | U | A | P | R | S | F |
*   ^       ^       ^   ^   ^   ^
*/
flush_mask = 0xAF;
flags = tp->rotcp_flags;
if (pkt->roi_frag_desc->tcp_flags_set)
flags = (u8)ntohs(pkt->roi_frag_desc->tcp_flags);
/* Pure ACKs or any special flags indicated above cause us to flush */
if ((!pkt->roi_payload_len && (flags & 0x10)) || (flags & flush_mask))
return true;
return false;
}
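/*
 * Editor's sketch, not part of the original driver: decomposed into the
 * TCPHDR_* constants (assuming <net/tcp.h> is pulled in), the 0xAF mask above
 * covers CWR, URG, PSH, RST, SYN and FIN; only ECE (0x40) and ACK (0x10) are
 * left clear, and a pure ACK is caught separately by the (flags & 0x10) test.
 */
#define RMNET_OFFLOAD_EXAMPLE_FLUSH_MASK \
	(TCPHDR_CWR | TCPHDR_URG | TCPHDR_PSH | \
	 TCPHDR_RST | TCPHDR_SYN | TCPHDR_FIN)	/* == 0xAF */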
/* Compare the TCP options fields */
static bool rmnet_offload_tcp_option_mismatch(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
union rmnet_offload_tcp_opts *flow_hdr, __flow_hdr;
union rmnet_offload_tcp_opts *pkt_hdr, __pkt_hdr;
struct rmnet_frag_descriptor *flow_desc;
u32 opt_len, i;
/* Grab TCP header including options */
flow_desc = list_first_entry(&flow->rof_pkts,
struct rmnet_frag_descriptor, list);
flow_hdr = rmnet_frag_header_ptr(flow_desc, flow->rof_hdrs.roh_ip_len,
flow->rof_hdrs.roh_trans_len,
&__flow_hdr);
if (!flow_hdr)
/* Uhh, lolwat? Reality is collapsing, so let's flush... */
return true;
pkt_hdr = rmnet_frag_header_ptr(pkt->roi_frag_desc,
pkt->roi_hdrs.roh_ip_len,
pkt->roi_hdrs.roh_trans_len,
&__pkt_hdr);
if (!pkt_hdr)
return true;
opt_len = flow_hdr->rotopt_tcp.rotcp_doff * 4;
/* Obviously, if the lengths are different, something has changed */
if (pkt_hdr->rotopt_tcp.rotcp_doff * 4 != opt_len)
return true;
/* Compare the words. Memcmp is too easy ;). Also, this is how the
* kernel does it, so hey.
*/
for (i = sizeof(flow_hdr->rotopt_tcp); i < opt_len; i += 4) {
if (*(u32 *)(flow_hdr->rotopt_buf + i) ^
*(u32 *)(pkt_hdr->rotopt_buf + i))
return true;
}
return false;
}
/* Check if we can merge the packet with the flow */
static int rmnet_offload_tcp_merge_check(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
u64 tcp_byte_limit;
u32 gso_len;
/* 1: check the TCP flags to see if this packet can be coalesced */
if (rmnet_offload_tcp_flag_flush(pkt)) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_FLAG_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_ALL;
}
/* 2: check the number of packets held. If we don't have anything
* stored right now, we can stop here.
*/
if (!flow->rof_pkts_held)
return RMNET_OFFLOAD_ENGINE_FLUSH_NONE;
/* 3: Compare the TCP options between the flow header and the new
* packet.
*/
if (rmnet_offload_tcp_option_mismatch(flow, pkt)) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_OPTION_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_SOME;
}
/* 4: Check packet ordering */
if (pkt->roi_hdrs.roh_tcp_seq ^ flow->rof_hdrs.roh_tcp_seq) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_OOO_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_ALL;
}
/* 5: Check packet size */
gso_len = (pkt->roi_frag_desc->gso_size) ?: pkt->roi_payload_len;
if (gso_len != flow->rof_gso_len) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_LEN_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_SOME;
}
/* 6: Check the byte limit */
tcp_byte_limit =
rmnet_offload_knob_get(RMNET_OFFLOAD_KNOB_TCP_BYTE_LIMIT);
if (pkt->roi_payload_len + flow->rof_len >= tcp_byte_limit) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_BYTE_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_SOME;
}
/* Other packets exist in the flow state */
pkt->roi_first_pkt = false;
return RMNET_OFFLOAD_ENGINE_FLUSH_NONE;
}
/* Handle a TCP packet */
bool rmnet_offload_engine_tcp_ingress(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt,
bool force_flush,
struct list_head *flush_list)
{
int rc;
if (force_flush) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_TCP_FORCE_FLUSH);
rmnet_offload_engine_flush_flow(flow, flush_list);
rmnet_offload_flush_current_pkt(pkt, flush_list);
return true;
}
rc = rmnet_offload_tcp_merge_check(flow, pkt);
if (rc == RMNET_OFFLOAD_ENGINE_FLUSH_NONE) {
/* Coalesce */
rmnet_offload_engine_add_flow_pkt(flow, pkt);
} else if (rc == RMNET_OFFLOAD_ENGINE_FLUSH_SOME) {
/* Flush flow and insert packet */
rmnet_offload_engine_flush_flow(flow, flush_list);
rmnet_offload_engine_add_flow_pkt(flow, pkt);
} else {
/* Flush everything */
rmnet_offload_engine_flush_flow(flow, flush_list);
rmnet_offload_flush_current_pkt(pkt, flush_list);
}
return true;
}

View File

@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_TCP_H__
#define __RMNET_OFFLOAD_TCP_H__
#include "rmnet_offload_main.h"
#include "rmnet_offload_engine.h"
bool rmnet_offload_engine_tcp_ingress(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt,
bool force_flush,
struct list_head *flush_list);
#endif

View File

@@ -1,73 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* rmnet_offload UDP optimization engine */
#include "rmnet_descriptor.h"
#include "rmnet_offload_main.h"
#include "rmnet_offload_engine.h"
#include "rmnet_offload_stats.h"
#include "rmnet_offload_knob.h"
/* Check if we can merge the packet with the flow */
static int rmnet_offload_udp_merge_check(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt)
{
u64 udp_byte_limit;
u16 gso_len;
/* 1: If we're not holding anything, the packet can be merged
* trivially.
*/
if (!flow->rof_pkts_held)
return RMNET_OFFLOAD_ENGINE_FLUSH_NONE;
/* 2: Check packet size */
gso_len = (pkt->roi_frag_desc->gso_size) ?: pkt->roi_payload_len;
if (gso_len != flow->rof_gso_len) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_UDP_LEN_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_SOME;
}
/* 3: Check byte limit */
udp_byte_limit =
rmnet_offload_knob_get(RMNET_OFFLOAD_KNOB_UDP_BYTE_LIMIT);
if (pkt->roi_payload_len + flow->rof_len >= udp_byte_limit) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_UDP_BYTE_FLUSH);
return RMNET_OFFLOAD_ENGINE_FLUSH_SOME;
}
/* Other packets exist in the flow state */
pkt->roi_first_pkt = false;
return RMNET_OFFLOAD_ENGINE_FLUSH_NONE;
}
/* Handle a UDP packet */
bool rmnet_offload_engine_udp_ingress(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt,
bool force_flush,
struct list_head *flush_list)
{
int rc;
if (force_flush) {
rmnet_offload_stats_update(RMNET_OFFLOAD_STAT_UDP_FORCE_FLUSH);
rmnet_offload_engine_flush_flow(flow, flush_list);
rmnet_offload_flush_current_pkt(pkt, flush_list);
return true;
}
rc = rmnet_offload_udp_merge_check(flow, pkt);
if (rc == RMNET_OFFLOAD_ENGINE_FLUSH_NONE) {
/* Coalesce */
rmnet_offload_engine_add_flow_pkt(flow, pkt);
} else if (rc == RMNET_OFFLOAD_ENGINE_FLUSH_SOME) {
/* Flush flow and insert packet */
rmnet_offload_engine_flush_flow(flow, flush_list);
rmnet_offload_engine_add_flow_pkt(flow, pkt);
}
return true;
}

View File

@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __RMNET_OFFLOAD_UDP_H__
#define __RMNET_OFFLOAD_UDP_H__
#include "rmnet_offload_main.h"
#include "rmnet_offload_engine.h"
bool rmnet_offload_engine_udp_ingress(struct rmnet_offload_flow *flow,
struct rmnet_offload_info *pkt,
bool force_flush,
struct list_head *flush_list);
#endif

View File

@@ -1,899 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF framework */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "rmnet_module.h"
#include <net/ipv6.h>
#include <net/ip.h>
#include "rmnet_perf_tcp.h"
#include "rmnet_perf_udp.h"
#include "rmnet_descriptor.h"
#include "rmnet_map.h"
#include "rmnet_qmap.h"
#include <net/genetlink.h>
MODULE_LICENSE("GPL v2");
/* Insert newest first, last 4 bytes of the change id */
static char *verinfo[] = {
"71b2019d",
"1a5fa493",
"58aa9bee",
"8ab0a8ee",
"f22bace0",
"cc98f08a",
"ce79321c",
"5dcdd4c0",
"4c9b5337",
"a3babd40",
"7f078f96"
};
#define RMNET_PERF_GENL_FAMILY_NAME "RMNET_PERF"
#define RMNET_PERF_GENL_MULTICAST_NAME_0 "RMNET_PERF_MC_0"
#define RMNET_PERF_GENL_MULTICAST_NAME_1 "RMNET_PERF_MC_1"
#define RMNET_PERF_GENL_MULTICAST_NAME_2 "RMNET_PERF_MC_2"
#define RMNET_PERF_GENL_MULTICAST_NAME_3 "RMNET_PERF_MC_3"
#define RMNET_PERF_GENL_VERSION 1
enum {
RMNET_PERF_CMD_UNSPEC,
RMNET_PERF_CMD_GET_STATS,
RMNET_PERF_CMD_MAP_CMD,
__RMNET_PERF_GENL_CMD_MAX,
};
enum {
RMNET_PERF_ATTR_UNSPEC,
RMNET_PERF_ATTR_STATS_REQ,
RMNET_PERF_ATTR_STATS_RESP,
RMNET_PERF_ATTR_MAP_CMD_REQ,
RMNET_PERF_ATTR_MAP_CMD_RESP,
RMNET_PERF_ATTR_MAP_CMD_IND,
__RMNET_PERF_ATTR_MAX,
};
enum {
RMNET_PERF_MULTICAST_GROUP_0,
RMNET_PERF_MULTICAST_GROUP_1,
RMNET_PERF_MULTICAST_GROUP_2,
RMNET_PERF_MULTICAST_GROUP_3,
__RMNET_PERF_MULTICAST_GROUP_MAX,
};
#define RMNET_PERF_ATTR_MAX (__RMNET_PERF_ATTR_MAX - 1)
struct rmnet_perf_stats_req {
u8 mux_id;
} __aligned(1);
struct rmnet_perf_proto_stats {
u64 tcpv4_pkts;
u64 tcpv4_bytes;
u64 udpv4_pkts;
u64 udpv4_bytes;
u64 tcpv6_pkts;
u64 tcpv6_bytes;
u64 udpv6_pkts;
u64 udpv6_bytes;
} __aligned(1);
struct rmnet_perf_coal_common_stats {
u64 csum_error;
u64 pkt_recons;
u64 close_non_coal;
u64 l3_mismatch;
u64 l4_mismatch;
u64 nlo_limit;
u64 pkt_limit;
u64 byte_limit;
u64 time_limit;
u64 eviction;
u64 close_coal;
} __aligned(1);
struct downlink_stats {
struct rmnet_perf_coal_common_stats coal_common_stats;
struct rmnet_perf_proto_stats coal_veid_stats[16];
u64 non_coal_pkts;
u64 non_coal_bytes;
} __aligned(1);
struct uplink_stats {
struct rmnet_perf_proto_stats seg_proto_stats;
} __aligned(1);
struct rmnet_perf_stats_store {
struct downlink_stats dl_stats;
struct uplink_stats ul_stats;
} __aligned(1);
struct rmnet_perf_stats_resp {
u16 error_code;
struct rmnet_perf_stats_store stats;
} __aligned(1);
struct rmnet_perf_map_cmd_req {
u16 cmd_len;
u8 cmd_name;
u8 ack;
u8 cmd_content[16384];
} __aligned(1);
struct rmnet_perf_map_cmd_resp {
u8 cmd_name;
u16 error_code;
} __aligned(1);
struct rmnet_perf_map_cmd_ind {
u16 cmd_len;
u8 cmd_name;
u8 ack;
u8 cmd_content[4096];
} __aligned(1);
static struct nla_policy rmnet_perf_nl_policy[RMNET_PERF_ATTR_MAX + 1] = {
[RMNET_PERF_ATTR_STATS_REQ] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_perf_stats_req)),
[RMNET_PERF_ATTR_STATS_RESP] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_perf_stats_resp)),
[RMNET_PERF_ATTR_MAP_CMD_REQ] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_perf_map_cmd_req)),
[RMNET_PERF_ATTR_MAP_CMD_RESP] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_perf_map_cmd_resp)),
[RMNET_PERF_ATTR_MAP_CMD_IND] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_perf_map_cmd_ind)),
};
static const struct genl_multicast_group rmnet_perf_nl_mcgrps[] = {
[RMNET_PERF_MULTICAST_GROUP_0] = { .name = RMNET_PERF_GENL_MULTICAST_NAME_0, },
[RMNET_PERF_MULTICAST_GROUP_1] = { .name = RMNET_PERF_GENL_MULTICAST_NAME_1, },
[RMNET_PERF_MULTICAST_GROUP_2] = { .name = RMNET_PERF_GENL_MULTICAST_NAME_2, },
[RMNET_PERF_MULTICAST_GROUP_3] = { .name = RMNET_PERF_GENL_MULTICAST_NAME_3, },
};
int rmnet_perf_netlink_seq = 0;
module_param_array(verinfo, charp, NULL, 0444);
MODULE_PARM_DESC(verinfo, "Version of the driver");
bool enable_tcp = true;
module_param_named(rmnet_perf_knob0, enable_tcp, bool, 0644);
static bool enable_udp = true;
module_param_named(rmnet_perf_knob1, enable_udp, bool, 0644);
#define RMNET_INGRESS_QUIC_PORT 443
struct rmnet_perf_stats_store stats_store[17];
static inline bool rmnet_perf_is_quic_packet(struct udphdr *uh)
{
return be16_to_cpu(uh->source) == RMNET_INGRESS_QUIC_PORT ||
be16_to_cpu(uh->dest) == RMNET_INGRESS_QUIC_PORT;
}
static bool rmnet_perf_is_quic_initial_packet(struct sk_buff *skb, int ip_len)
{
u8 *first_byte, __first_byte;
struct udphdr *uh, __uh;
uh = skb_header_pointer(skb, ip_len, sizeof(*uh), &__uh);
if (!uh || !rmnet_perf_is_quic_packet(uh))
return false;
/* Length sanity check. Could check for the full QUIC header length if
* need be, but since all we really care about is the first byte, just
* make sure there is one.
*/
if (be16_to_cpu(uh->len) < sizeof(struct udphdr) + 1)
return false;
/* I am a very paranoid accessor of data at this point... */
first_byte = skb_header_pointer(skb, ip_len + sizeof(struct udphdr),
1, &__first_byte);
if (!first_byte)
return false;
return ((*first_byte) & 0xC0) == 0xC0;
}
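/*
 * Editor's note, not in the original source: per RFC 9000, bit 0x80 of the
 * first byte is the Header Form bit (long header) and bit 0x40 is the Fixed
 * Bit, so the 0xC0 test above matches any QUIC long-header packet (Initial,
 * 0-RTT, Handshake, Retry). Long headers only appear while a connection is
 * being established, which is all this check needs to spot.
 */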
static int rmnet_perf_ingress_handle_quic(struct sk_buff *skb, int ip_len)
{
if (rmnet_perf_is_quic_initial_packet(skb, ip_len)) {
skb->hash = 0;
skb->sw_hash = 1;
return 0;
}
return -EINVAL;
}
int rmnet_perf_ingress_handle(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph || ip_is_fragment(iph))
return -EINVAL;
if (iph->protocol == IPPROTO_UDP) {
if (enable_udp)
rmnet_perf_ingress_handle_udp(skb);
return rmnet_perf_ingress_handle_quic(skb,
iph->ihl * 4);
}
if (iph->protocol == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_ingress_handle_tcp(skb);
/* Don't skip SHS processing for TCP */
return -EINVAL;
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
int ip_len;
__be16 frag_off;
u8 proto;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
return -EINVAL;
proto = ip6h->nexthdr;
ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &proto,
&frag_off);
if (ip_len < 0 || frag_off)
return -EINVAL;
if (proto == IPPROTO_UDP) {
if (enable_udp)
rmnet_perf_ingress_handle_udp(skb);
return rmnet_perf_ingress_handle_quic(skb, ip_len);
}
if (proto == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_ingress_handle_tcp(skb);
return -EINVAL;
}
}
return -EINVAL;
}
void rmnet_perf_ingress_rx_handler(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph || ip_is_fragment(iph))
return;
if (iph->protocol == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_ingress_rx_handler_tcp(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
int ip_len;
__be16 frag_off;
u8 proto;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
return;
proto = ip6h->nexthdr;
ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &proto,
&frag_off);
if (ip_len < 0 || frag_off)
return;
if (proto == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_ingress_rx_handler_tcp(skb);
}
}
}
static void rmnet_perf_egress_handle_quic(struct sk_buff *skb, int ip_len)
{
if (rmnet_perf_is_quic_initial_packet(skb, ip_len))
skb->priority = 0xDA001A;
}
void rmnet_perf_egress_handle(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
/* Potentially problematic, but the problem is secondary
* fragments have no transport header.
*/
if (!iph || ip_is_fragment(iph))
return;
if (iph->protocol == IPPROTO_UDP) {
if (enable_udp)
rmnet_perf_egress_handle_udp(skb);
rmnet_perf_egress_handle_quic(skb, iph->ihl * 4);
return;
}
if (iph->protocol == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_egress_handle_tcp(skb);
return;
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
int ip_len;
__be16 frag_off;
u8 proto;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
return;
proto = ip6h->nexthdr;
ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &proto,
&frag_off);
if (ip_len < 0 || frag_off)
return;
if (proto == IPPROTO_UDP) {
if (enable_udp)
rmnet_perf_egress_handle_udp(skb);
rmnet_perf_egress_handle_quic(skb, ip_len);
return;
}
if (proto == IPPROTO_TCP) {
if (enable_tcp)
rmnet_perf_egress_handle_tcp(skb);
return;
}
}
}
void rmnet_perf_nl_map_cmd_multicast(struct sk_buff *skb);
/* skb will be freed by rmnet_qmap_cmd_handler() */
void rmnet_perf_cmd_ingress_handler(struct sk_buff *skb)
{
if (skb_linearize(skb)) {
pr_err("%s(): Linearization error\n", __func__);
return;
}
rmnet_perf_nl_map_cmd_multicast(skb);
}
void rmnet_perf_coal_common_stat(uint8_t mux_id, uint32_t type)
{
if (!mux_id || mux_id > 16)
goto err0;
switch (type) {
case 0:
stats_store[mux_id].dl_stats.coal_common_stats.csum_error++;
break;
case 1:
stats_store[mux_id].dl_stats.coal_common_stats.pkt_recons++;
break;
case 2:
stats_store[mux_id].dl_stats.coal_common_stats.close_non_coal++;
break;
case 3:
stats_store[mux_id].dl_stats.coal_common_stats.l3_mismatch++;
break;
case 4:
stats_store[mux_id].dl_stats.coal_common_stats.l4_mismatch++;
break;
case 5:
stats_store[mux_id].dl_stats.coal_common_stats.nlo_limit++;
break;
case 6:
stats_store[mux_id].dl_stats.coal_common_stats.pkt_limit++;
break;
case 7:
stats_store[mux_id].dl_stats.coal_common_stats.byte_limit++;
break;
case 8:
stats_store[mux_id].dl_stats.coal_common_stats.time_limit++;
break;
case 9:
stats_store[mux_id].dl_stats.coal_common_stats.eviction++;
break;
case 10:
stats_store[mux_id].dl_stats.coal_common_stats.close_coal++;
break;
default:
break;
}
err0:
return;
}
void rmnet_perf_coal_stat(uint8_t mux_id, uint8_t veid, uint64_t len, uint32_t type)
{
if (!mux_id || mux_id > 16)
goto err0;
if (veid >= 16)
goto err0;
switch (type) {
case 0:
stats_store[mux_id].dl_stats.coal_veid_stats[veid].tcpv4_pkts++;
stats_store[mux_id].dl_stats.coal_veid_stats[veid].tcpv4_bytes += len;
break;
case 1:
stats_store[mux_id].dl_stats.coal_veid_stats[veid].udpv4_pkts++;
stats_store[mux_id].dl_stats.coal_veid_stats[veid].udpv4_bytes += len;
break;
case 2:
stats_store[mux_id].dl_stats.coal_veid_stats[veid].tcpv6_pkts++;
stats_store[mux_id].dl_stats.coal_veid_stats[veid].tcpv6_bytes += len;
break;
case 3:
stats_store[mux_id].dl_stats.coal_veid_stats[veid].udpv6_pkts++;
stats_store[mux_id].dl_stats.coal_veid_stats[veid].udpv6_bytes += len;
break;
}
err0:
return;
}
void rmnet_perf_seg_stat(uint8_t mux_id, struct sk_buff *skb)
{
if (!mux_id || mux_id > 16)
goto err0;
if (skb->protocol == htons(ETH_P_IP)) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
stats_store[mux_id].ul_stats.seg_proto_stats.tcpv4_pkts++;
stats_store[mux_id].ul_stats.seg_proto_stats.tcpv4_bytes += skb->len;
} else if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
stats_store[mux_id].ul_stats.seg_proto_stats.udpv4_pkts++;
stats_store[mux_id].ul_stats.seg_proto_stats.udpv4_bytes += skb->len;
}
}
if (skb->protocol == htons(ETH_P_IPV6)) {
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) {
stats_store[mux_id].ul_stats.seg_proto_stats.tcpv6_pkts++;
stats_store[mux_id].ul_stats.seg_proto_stats.tcpv6_bytes += skb->len;
} else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) {
stats_store[mux_id].ul_stats.seg_proto_stats.udpv6_pkts++;
stats_store[mux_id].ul_stats.seg_proto_stats.udpv6_bytes += skb->len;
}
}
err0:
return;
}
void rmnet_perf_non_coal_stat(uint8_t mux_id, uint64_t len)
{
if (!mux_id || mux_id > 16)
goto err0;
stats_store[mux_id].dl_stats.non_coal_pkts++;
stats_store[mux_id].dl_stats.non_coal_bytes += len;
err0:
return;
}
static const struct rmnet_module_hook_register_info
rmnet_perf_module_hooks[] = {
{
.hooknum = RMNET_MODULE_HOOK_PERF_INGRESS,
.func = rmnet_perf_ingress_handle,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_EGRESS,
.func = rmnet_perf_egress_handle,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_SET_THRESH,
.func = rmnet_perf_tcp_update_quickack_thresh,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_INGRESS_RX_HANDLER,
.func = rmnet_perf_ingress_rx_handler,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_CMD_INGRESS,
.func = rmnet_perf_cmd_ingress_handler,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_COAL_COMMON_STAT,
.func = rmnet_perf_coal_common_stat,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_COAL_STAT,
.func = rmnet_perf_coal_stat,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_SEG_STAT,
.func = rmnet_perf_seg_stat,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_NON_COAL_STAT,
.func = rmnet_perf_non_coal_stat,
},
};
void rmnet_perf_set_hooks(void)
{
rmnet_module_hook_register(rmnet_perf_module_hooks,
ARRAY_SIZE(rmnet_perf_module_hooks));
}
void rmnet_perf_unset_hooks(void)
{
rmnet_module_hook_unregister(rmnet_perf_module_hooks,
ARRAY_SIZE(rmnet_perf_module_hooks));
}
int rmnet_perf_nl_cmd_get_stats(struct sk_buff *skb, struct genl_info *info);
int rmnet_perf_nl_cmd_map_cmd_req(struct sk_buff *skb, struct genl_info *info);
static const struct genl_ops rmnet_perf_nl_ops[] = {
{
.cmd = RMNET_PERF_CMD_GET_STATS,
.doit = rmnet_perf_nl_cmd_get_stats,
},
{
.cmd = RMNET_PERF_CMD_MAP_CMD,
.doit = rmnet_perf_nl_cmd_map_cmd_req,
},
};
struct genl_family rmnet_perf_nl_family __ro_after_init = {
.hdrsize = 0,
.name = RMNET_PERF_GENL_FAMILY_NAME,
.version = RMNET_PERF_GENL_VERSION,
.maxattr = RMNET_PERF_ATTR_MAX,
.policy = rmnet_perf_nl_policy,
.ops = rmnet_perf_nl_ops,
.n_ops = ARRAY_SIZE(rmnet_perf_nl_ops),
.mcgrps = rmnet_perf_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(rmnet_perf_nl_mcgrps),
};
int rmnet_perf_nl_cmd_get_stats(struct sk_buff *skb, struct genl_info *info)
{
struct rmnet_perf_stats_resp *resp = NULL;
struct rmnet_perf_stats_req req;
int bytes = -1, ret = -ENOMEM;
struct sk_buff *rskb = NULL;
struct nlattr *na = NULL;
void *hdrp = NULL;
rskb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!rskb) {
pr_err("%s(): Failed to allocate response skb\n", __func__);
goto err0;
}
hdrp = genlmsg_put(rskb, 0, rmnet_perf_netlink_seq++,
&rmnet_perf_nl_family, 0,
RMNET_PERF_CMD_GET_STATS);
if (!hdrp) {
pr_err("%s(): Failed to set header pointer\n", __func__);
goto err1;
}
resp = kzalloc(sizeof(struct rmnet_perf_stats_resp), GFP_ATOMIC);
if (!resp) {
pr_err("%s(): Failed to allocate response cmd\n", __func__);
goto err1;
}
memset(&req, 0, sizeof(struct rmnet_perf_stats_req));
ret = -EINVAL;
na = info->attrs[RMNET_PERF_ATTR_STATS_REQ];
if (!na) {
pr_err("%s(): Failed to get cmd request attribute\n", __func__);
goto err2;
}
bytes = nla_memcpy(&req, na, sizeof(struct rmnet_perf_stats_req));
if (bytes <= 0) {
pr_err("%s(): Failed to copy cmd request attribute\n", __func__);
goto err2;
}
if (req.mux_id > 16) {
pr_err("%s(): Unsupported mux id %u\n", __func__, req.mux_id);
goto err2;
}
ret = 0;
memcpy(&resp->stats, &stats_store[req.mux_id],
sizeof(struct rmnet_perf_stats_store));
err2:
resp->error_code = abs(ret);
if (!nla_put(rskb, RMNET_PERF_ATTR_STATS_RESP,
sizeof(struct rmnet_perf_stats_resp), resp)) {
kfree(resp);
genlmsg_end(rskb, hdrp);
return genlmsg_reply(rskb, info);
} else {
pr_err("%s(): Failed to copy cmd response attribute\n", __func__);
}
kfree(resp);
err1:
nlmsg_free(rskb);
err0:
return ret;
}
void rmnet_perf_nl_map_cmd_multicast(struct sk_buff *skb)
{
uint8_t offset = sizeof(struct qmap_cmd_hdr);
struct rmnet_perf_map_cmd_ind *ind = NULL;
struct qmap_cmd_hdr *cmd_hdr = NULL;
struct sk_buff *iskb = NULL;
void *hdrp = NULL;
int rc = -EINVAL;
iskb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!iskb) {
pr_err("%s(): Failed to indication skb\n", __func__);
goto err0;
}
hdrp = genlmsg_put(iskb, 0, rmnet_perf_netlink_seq++,
&rmnet_perf_nl_family, 0,
RMNET_PERF_CMD_MAP_CMD);
if (!hdrp) {
pr_err("%s(): Failed to set header pointer\n", __func__);
goto err1;
}
ind = kzalloc(sizeof(struct rmnet_perf_map_cmd_ind), GFP_ATOMIC);
if (!ind) {
pr_err("%s(): Failed to allocate indication cmd\n", __func__);
goto err1;
}
if (skb->len <= offset) {
pr_err("%s(): Incoming cmd size is invalid\n", __func__);
goto err2;
}
cmd_hdr = (struct qmap_cmd_hdr *)skb->data;
ind->cmd_len = skb->len - offset;
ind->cmd_name = cmd_hdr->cmd_name;
ind->ack = cmd_hdr->cmd_type;
memcpy(ind->cmd_content, skb->data + offset, ind->cmd_len);
if (nla_put(iskb, RMNET_PERF_ATTR_MAP_CMD_IND,
sizeof(struct rmnet_perf_map_cmd_ind), ind)) {
pr_err("%s(): Failed to copy cmd indication attribute\n", __func__);
goto err2;
}
genlmsg_end(iskb, hdrp);
kfree(ind);
/* -EINVAL is the only error for which the skb is not freed */
rc = genlmsg_multicast(&rmnet_perf_nl_family, iskb, 0,
RMNET_PERF_MULTICAST_GROUP_0, GFP_ATOMIC);
if (rc == -EINVAL) {
pr_err("%s(): Invalid group for multicast\n", __func__);
goto err1;
}
return;
err2:
kfree(ind);
err1:
nlmsg_free(iskb);
err0:
return;
}
int rmnet_perf_cmd_xmit(struct rmnet_perf_map_cmd_req *cmd)
{
struct net_device *dev = dev_get_by_name(&init_net, "rmnet_ipa0");
int cmd_len = sizeof(struct qmap_cmd_hdr) + cmd->cmd_len;
struct qmap_cmd_hdr *cmd_hdr = NULL;
struct sk_buff *skb = NULL;
char *cmd_content = NULL;
int ret = -ENODEV;
if (!dev) {
pr_err("%s(): Unable to get reference to device\n", __func__);
goto err0;
}
skb = alloc_skb(cmd_len, GFP_ATOMIC);
if (!skb) {
pr_err("%s(): Unable to allocate memory for cmd\n", __func__);
ret = -ENOMEM;
goto err1;
}
skb_put(skb, cmd_len);
memset(skb->data, 0, cmd_len);
cmd_hdr = (struct qmap_cmd_hdr *)skb->data;
cmd_hdr->cd_bit = 1;
cmd_hdr->mux_id = 0;
cmd_hdr->pkt_len = htons(sizeof(struct rmnet_map_control_command_header) +
cmd->cmd_len);
cmd_hdr->cmd_name = cmd->cmd_name;
cmd_hdr->cmd_type = cmd->ack;
cmd_content = (char *)(skb->data + sizeof(struct qmap_cmd_hdr));
memcpy(cmd_content, cmd->cmd_content, cmd->cmd_len);
skb->dev = dev;
skb->protocol = htons(ETH_P_MAP);
ret = rmnet_qmap_send(skb, RMNET_CH_CTL, false);
err1:
dev_put(dev);
err0:
return ret;
}
int rmnet_perf_nl_cmd_map_cmd_req(struct sk_buff *skb, struct genl_info *info)
{
struct rmnet_perf_map_cmd_req *req = NULL;
struct rmnet_perf_map_cmd_resp resp;
int bytes = -1, ret = -ENOMEM;
struct sk_buff *rskb = NULL;
struct nlattr *na = NULL;
void *hdrp = NULL;
rskb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!rskb) {
pr_err("%s(): Failed to allocate response skb\n", __func__);
goto err0;
}
hdrp = genlmsg_put(rskb, 0, rmnet_perf_netlink_seq++,
&rmnet_perf_nl_family, 0,
RMNET_PERF_CMD_MAP_CMD);
if (!hdrp) {
pr_err("%s(): Failed to set header pointer\n", __func__);
goto err1;
}
memset(&resp, 0, sizeof(struct rmnet_perf_map_cmd_resp));
req = kzalloc(sizeof(struct rmnet_perf_map_cmd_req), GFP_ATOMIC);
if (!req) {
pr_err("%s(): Failed to allocate request cmd\n", __func__);
goto err2;
}
ret = -EINVAL;
na = info->attrs[RMNET_PERF_ATTR_MAP_CMD_REQ];
if (!na) {
pr_err("%s(): Failed to get cmd request attribute\n", __func__);
goto err3;
}
bytes = nla_memcpy(req, na, sizeof(struct rmnet_perf_map_cmd_req));
if (bytes <= 0) {
pr_err("%s(): Failed to copy cmd request attribute\n", __func__);
goto err3;
}
switch (req->cmd_name) {
case QMAP_CMD_31:
case QMAP_CMD_32:
case QMAP_CMD_40:
case QMAP_CMD_42:
break;
default:
pr_err("%s(): Unsupported command %u\n", __func__, req->cmd_name);
goto err3;
}
if (!req->cmd_len || (req->cmd_len > 16000)) {
pr_err("%s(): Unsupported length %u\n", __func__, req->cmd_len);
goto err3;
}
resp.cmd_name = req->cmd_name;
ret = rmnet_perf_cmd_xmit(req);
err3:
kfree(req);
err2:
resp.error_code = abs(ret);
if (!nla_put(rskb, RMNET_PERF_ATTR_MAP_CMD_RESP,
sizeof(struct rmnet_perf_map_cmd_resp), &resp)) {
genlmsg_end(rskb, hdrp);
return genlmsg_reply(rskb, info);
} else {
pr_err("%s(): Failed to copy cmd response attribute\n", __func__);
}
err1:
nlmsg_free(rskb);
err0:
return ret;
}
int rmnet_perf_nl_register(void)
{
return genl_register_family(&rmnet_perf_nl_family);
}
void rmnet_perf_nl_unregister(void)
{
genl_unregister_family(&rmnet_perf_nl_family);
}
static int __init rmnet_perf_init(void)
{
int rc;
pr_info("%s(): Loading\n", __func__);
rc = rmnet_perf_tcp_init();
if (rc)
goto err0;
rc = rmnet_perf_udp_init();
if (rc)
goto err1;
rc = rmnet_perf_nl_register();
if (rc) {
pr_err("%s(): Failed to register generic netlink family\n", __func__);
goto err2;
}
rmnet_perf_set_hooks();
return 0;
err2:
rmnet_perf_udp_exit();
err1:
rmnet_perf_tcp_exit();
err0:
return rc;
}
static void __exit rmnet_perf_exit(void)
{
rmnet_perf_unset_hooks();
rmnet_perf_nl_unregister();
rmnet_perf_udp_exit();
rmnet_perf_tcp_exit();
pr_info("%s(): exiting\n", __func__);
}
module_init(rmnet_perf_init);
module_exit(rmnet_perf_exit);
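/*
 * Editor's illustrative sketch, not part of the driver: a minimal standalone
 * userspace client (built against libnl-3/libnl-genl-3, compiled separately
 * from this kernel module) that resolves the RMNET_PERF generic netlink
 * family defined above and requests the stats for mux_id 1. The request
 * struct layout and the command/attribute values mirror this file; parsing
 * the RMNET_PERF_ATTR_STATS_RESP attribute out of the reply is omitted for
 * brevity.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define EXAMPLE_CMD_GET_STATS	1	/* RMNET_PERF_CMD_GET_STATS above */
#define EXAMPLE_ATTR_STATS_REQ	1	/* RMNET_PERF_ATTR_STATS_REQ above */

struct example_stats_req {
	unsigned char mux_id;		/* mirrors struct rmnet_perf_stats_req */
} __attribute__((packed));

int main(void)
{
	struct example_stats_req req = { .mux_id = 1 };
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, "RMNET_PERF");
	if (family < 0)
		return 1;
	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	/* hdrsize is 0 in the family above, so no user header is added */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    EXAMPLE_CMD_GET_STATS, 1);
	nla_put(msg, EXAMPLE_ATTR_STATS_REQ, sizeof(req), &req);
	nl_send_auto(sk, msg);
	nl_recvmsgs_default(sk);	/* reply carries the stats_resp attribute */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}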

View File

@@ -1,648 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF TCP framework */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/hashtable.h>
#include <linux/log2.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inet_hashtables.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/tcp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_perf_tcp.h"
/* How long to hold a node, in millisecs */
#define RMNET_PERF_QUICKACK_TIMEOUT (2000)
/* How often to run the cleaning workqueue, in millisecs */
#define RMNET_PERF_QUICKACK_WQ_INTERVAL (500)
/* Default threshold is 192 KB of data. shsusrd can change this per flow */
#define RMNET_PERF_QUICKACK_THRESH (192000)
#define RMNET_PERF_QUICKACK_HASH_BKTS (16)
#define RMNET_PERF_QUICKACK_HASH_BITS \
(const_ilog2(RMNET_PERF_QUICKACK_HASH_BKTS))
enum {
RMNET_PERF_QUICKACK_STAT_NODE_ADD,
RMNET_PERF_QUICKACK_STAT_NODE_ADD_FAIL,
RMNET_PERF_QUICKACK_STAT_NODE_DEL,
RMNET_PERF_QUICKACK_STAT_NO_SK,
RMNET_PERF_QUICKACK_STAT_FORCE_RX,
RMNET_PERF_QUICKACK_STAT_FORCE_TX,
RMNET_PERF_QUICKACK_STAT_MAX,
};
struct rmnet_perf_quickack_tuple {
union {
__be32 v4_saddr;
struct in6_addr v6_saddr;
};
union {
__be32 v4_daddr;
struct in6_addr v6_daddr;
};
union {
struct {
__be16 sport;
__be16 dport;
};
u32 hash_key;
};
u8 ip_proto;
};
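/*
 * Editor's note, not in the original source: the sport/dport pair doubles as
 * the 32-bit hash_key through the union above, so hash buckets are keyed by
 * the port pair alone; exact flow identification is still done by
 * rmnet_perf_quickack_tuple_match() walking the bucket.
 */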
struct rmnet_perf_quickack_node {
struct hlist_node hash;
struct rcu_head rcu;
struct rmnet_perf_quickack_tuple info;
unsigned long ts;
u32 tcp_seq;
u32 tcp_ack;
u32 offload_hash;
u32 byte_threshold;
u32 quickack_count;
bool no_sock;
u8 dead;
};
struct rmnet_perf_quickack_work_struct {
struct delayed_work ws;
bool force_clean;
};
/* For quickack hash protection */
static DEFINE_SPINLOCK(rmnet_perf_quickack_lock);
static DEFINE_HASHTABLE(rmnet_perf_quickack_hash,
RMNET_PERF_QUICKACK_HASH_BITS);
static u32 rmnet_perf_quickack_hash_size;
/* Periodic cleaning work struct for the hashtable */
static struct rmnet_perf_quickack_work_struct rmnet_perf_quickack_work;
/* Maximum number of flows to support at a time */
static u32 rmnet_perf_quickack_hash_size_param = 100;
module_param_named(rmnet_perf_tcp_knob0, rmnet_perf_quickack_hash_size_param,
uint, 0644);
/* Stats Array */
static u64 rmnet_perf_quickack_stats[RMNET_PERF_QUICKACK_STAT_MAX];
module_param_array_named(rmnet_perf_tcp_stat, rmnet_perf_quickack_stats,
ullong, NULL, 0444);
static void rmnet_perf_quickack_stats_update(u32 stat)
{
if (stat < RMNET_PERF_QUICKACK_STAT_MAX)
rmnet_perf_quickack_stats[stat] += 1;
}
static bool
rmnet_perf_quickack_node_expired(struct rmnet_perf_quickack_node *node,
unsigned long ts)
{
unsigned long timeout;
timeout = msecs_to_jiffies(RMNET_PERF_QUICKACK_TIMEOUT);
if (ts - node->ts > timeout)
return true;
return false;
}
static void rmnet_perf_quickack_node_free(struct rcu_head *head)
{
struct rmnet_perf_quickack_node *node;
node = container_of(head, struct rmnet_perf_quickack_node, rcu);
kfree(node);
}
static bool rmnet_perf_quickack_hash_clean(bool force)
{
struct rmnet_perf_quickack_node *node;
struct hlist_node *tmp;
unsigned long ts;
int bkt;
ts = jiffies;
hash_for_each_safe(rmnet_perf_quickack_hash, bkt, tmp, node, hash) {
if (node->dead)
/* Node already marked as removed, but not yet
* purged after a grace period. Skip it.
*/
continue;
if (force || rmnet_perf_quickack_node_expired(node, ts)) {
node->dead = true;
hash_del_rcu(&node->hash);
call_rcu(&node->rcu, rmnet_perf_quickack_node_free);
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_NODE_DEL);
rmnet_perf_quickack_hash_size--;
}
}
return !!rmnet_perf_quickack_hash_size;
}
static void rmnet_perf_quickack_work_process(struct work_struct *ws)
{
struct rmnet_perf_quickack_work_struct *quickack_work;
unsigned long flags;
bool should_resched;
quickack_work = container_of(to_delayed_work(ws),
struct rmnet_perf_quickack_work_struct,
ws);
spin_lock_irqsave(&rmnet_perf_quickack_lock, flags);
should_resched =
rmnet_perf_quickack_hash_clean(quickack_work->force_clean);
if (should_resched) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_PERF_QUICKACK_WQ_INTERVAL);
schedule_delayed_work(&quickack_work->ws, delay);
}
spin_unlock_irqrestore(&rmnet_perf_quickack_lock, flags);
}
static bool
rmnet_perf_quickack_tuple_match(struct rmnet_perf_quickack_tuple *t1,
struct rmnet_perf_quickack_tuple *t2)
{
if (t1->ip_proto != t2->ip_proto ||
t1->sport != t2->sport ||
t1->dport != t2->dport)
return false;
if (t1->ip_proto == 4)
return t1->v4_saddr == t2->v4_saddr &&
t1->v4_daddr == t2->v4_daddr;
return !ipv6_addr_cmp(&t1->v6_saddr, &t2->v6_saddr) &&
!ipv6_addr_cmp(&t1->v6_daddr, &t2->v6_daddr);
}
static struct rmnet_perf_quickack_node *
rmnet_perf_quickack_node_add(struct rmnet_perf_quickack_tuple *tuple)
__must_hold(&rmnet_perf_quickack_lock)
{
struct rmnet_perf_quickack_node *node;
if (rmnet_perf_quickack_hash_size >= rmnet_perf_quickack_hash_size_param)
/* Max flows. Ignore */
return NULL;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node) {
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_NODE_ADD_FAIL);
return NULL;
}
INIT_HLIST_NODE(&node->hash);
memcpy(&node->info, tuple, sizeof(*tuple));
node->byte_threshold = RMNET_PERF_QUICKACK_THRESH;
node->ts = jiffies;
hash_add_rcu(rmnet_perf_quickack_hash, &node->hash, tuple->hash_key);
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_NODE_ADD);
if (!rmnet_perf_quickack_hash_size) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_PERF_QUICKACK_WQ_INTERVAL);
schedule_delayed_work(&rmnet_perf_quickack_work.ws, delay);
}
rmnet_perf_quickack_hash_size++;
return node;
}
static void rmnet_perf_quickack_node_init(struct rmnet_perf_quickack_node *node,
struct sk_buff *skb, bool is_tx)
__must_hold(RCU)
{
struct tcphdr *th = tcp_hdr(skb);
struct rmnet_skb_cb *rmnet_cb = RMNET_SKB_CB(skb);
node->offload_hash = skb->hash;
if (is_tx) {
u32 tcp_ack = ntohl(th->ack_seq);
WRITE_ONCE(node->tcp_ack, tcp_ack);
/* If we're initializing on an ACK, assume no data has flowed
* yet, as this is very likely the ACK finishing the handshake.
* SEQ will be equal to the ACK in this case.
*/
WRITE_ONCE(node->tcp_seq, tcp_ack);
} else {
u32 tcp_seq = ntohl(th->seq);
WRITE_ONCE(node->tcp_seq, tcp_seq);
/* If we're initializing on DATA, assume this is the first
* data packet in the flow. The ACK number is 1 less than the
* sequence number, as only the handshake is complete.
*/
WRITE_ONCE(node->tcp_ack, tcp_seq - 1);
rmnet_cb->bif = 0;
rmnet_cb->ack_thresh = READ_ONCE(node->byte_threshold);
}
}
static struct rmnet_perf_quickack_node *
rmnet_perf_quickack_tuple_find(struct rmnet_perf_quickack_tuple *tuple,
struct sk_buff *skb, bool is_tx)
__must_hold(RCU)
{
struct rmnet_perf_quickack_node *node;
unsigned long flags;
spin_lock_irqsave(&rmnet_perf_quickack_lock, flags);
hash_for_each_possible_rcu(rmnet_perf_quickack_hash, node, hash,
tuple->hash_key) {
if (node->dead)
continue;
if (rmnet_perf_quickack_tuple_match(&node->info, tuple)) {
spin_unlock_irqrestore(&rmnet_perf_quickack_lock,
flags);
return node;
}
}
/* Make a new one */
node = rmnet_perf_quickack_node_add(tuple);
spin_unlock_irqrestore(&rmnet_perf_quickack_lock, flags);
if (node)
rmnet_perf_quickack_node_init(node, skb, is_tx);
return node;
}
static struct sock *
rmnet_perf_sk_lookup(struct rmnet_perf_quickack_tuple *tuple,
struct net_device *skb_dev)
{
struct net *net = dev_net(skb_dev);
if (tuple->ip_proto == 4)
return inet_lookup_established(net, &tcp_hashinfo,
tuple->v4_saddr,
tuple->sport, tuple->v4_daddr,
tuple->dport, skb_dev->ifindex);
/* Interestingly, this one doesn't have a nice wrapper.
*
* And yes, the ntohs on dport here is intentional. The v4 wrapper
* actually handles doing that for us. The lookup code REALLY does want
* dport in host order ;)
*/
return __inet6_lookup_established(net, &tcp_hashinfo, &tuple->v6_saddr,
tuple->sport, &tuple->v6_daddr,
ntohs(tuple->dport), skb_dev->ifindex,
0);
}
static void rmnet_perf_quickack_force(struct rmnet_perf_quickack_node *node,
struct sk_buff *skb)
__must_hold(RCU)
{
struct sock *sk;
if (skb->sk) {
/* Packet has one! Only possible on the TX path */
sk = skb->sk;
if (sk_fullsock(sk)) {
if (sk->sk_state == TCP_ESTABLISHED &&
!sock_flag(sk, SOCK_DEAD) &&
!sk_unhashed(sk) &&
sk->sk_shutdown != SHUTDOWN_MASK) {
inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
node->quickack_count++;
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_FORCE_TX);
}
}
return;
}
sk = rmnet_perf_sk_lookup(&node->info, skb->dev);
/* Note that this will take a reference to the socket. */
if (!sk) {
struct rmnet_skb_cb *rmnet_cb = RMNET_SKB_CB(skb);
/* There's no established socket on the host.
* Flow is tethered, or something weird happened. Log, mark,
* and avoid touching this flow anymore.
*/
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_NO_SK);
node->no_sock = true;
rmnet_cb->tethered = true;
return;
}
if (sk_fullsock(sk)) {
bh_lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED &&
!sock_flag(sk, SOCK_DEAD) &&
!sk_unhashed(sk) &&
sk->sk_shutdown != SHUTDOWN_MASK) {
inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
inet_csk_schedule_ack(sk);
node->quickack_count++;
rmnet_perf_quickack_stats_update(RMNET_PERF_QUICKACK_STAT_FORCE_RX);
}
bh_unlock_sock(sk);
}
sock_gen_put(sk);
}
/* Quick and dirty payload length calculation. Note that this requires
* tcp_hdr(skb) to be valid, so make sure it is ;)
*/
static u32 rmnet_perf_tcp_payload_len(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
return skb->len - ((u8 *)th - skb->data) - th->doff * 4;
}
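/* Track SEQ/ACK progress for the flow and force a quickack when the
 * outstanding byte count crosses the node's byte_threshold.
 * TX (UL ACK): if the ACK number has advanced and it runs more than
 * byte_threshold bytes past the last DL SEQ we recorded, force an ACK.
 * RX (DL data): if the SEQ has advanced and the data outstanding beyond the
 * last UL ACK (including this packet's payload) exceeds byte_threshold,
 * force an ACK.
 */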
static void
rmnet_perf_quickack_node_update(struct rmnet_perf_quickack_node *node,
struct sk_buff *skb, bool is_tx)
__must_hold(RCU)
{
struct tcphdr *th = tcp_hdr(skb);
u32 curr_seq = ntohl(th->seq);
u32 curr_ack = ntohl(th->ack_seq);
u32 node_seq = READ_ONCE(node->tcp_seq);
u32 node_ack = READ_ONCE(node->tcp_ack);
u32 byte_thresh = READ_ONCE(node->byte_threshold);
/* First off, poke the timestamp. The flow is still active. */
node->ts = jiffies;
if (node->no_sock)
/* Don't bother, we have nothing to update */
return;
if (is_tx) {
/* Care about the ACK */
if (after(curr_ack, node_ack)) {
u32 unacked = 0;
if (curr_ack > node_seq)
unacked = curr_ack - node_seq;
// trace_printk("%s(): curr_ack %lu node_ack %lu node_seq %lu unacked %lu TX %lu\n",
// __func__, curr_ack, node_ack, node_seq, unacked,
// rmnet_perf_quickack_stats[RMNET_PERF_QUICKACK_STAT_FORCE_TX]);
if (unacked > byte_thresh)
rmnet_perf_quickack_force(node, skb);
WRITE_ONCE(node->tcp_ack, curr_ack);
}
} else {
u32 unacked;
/* Care about the SEQ */
if (after(curr_seq, node_seq)) {
unacked = curr_seq - node_ack;
unacked += rmnet_perf_tcp_payload_len(skb);
// trace_printk("%s(): curr_seq %lu node_seq %lu node_ack %lu unacked %lu unacked' %lu RX %lu\n",
// __func__, curr_seq, node_seq, node_ack, unacked,
// unacked - rmnet_perf_tcp_payload_len(skb),
// rmnet_perf_quickack_stats[RMNET_PERF_QUICKACK_STAT_FORCE_RX]);
if (unacked > byte_thresh)
rmnet_perf_quickack_force(node, skb);
WRITE_ONCE(node->tcp_seq, curr_seq);
}
}
}
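/* Shared ingress parsing for both TCP hooks: fills in the DL tuple and
 * returns false for packets that should not be tracked (no transport
 * header, unverified checksum, SYN/SYN-ACK, or pure DL ACKs).
 */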
static bool
rmnet_perf_ingress_handle_tcp_common(struct sk_buff *skb,
struct rmnet_perf_quickack_tuple *tuple)
{
struct tcphdr *th;
u32 payload_len;
/* At this point, both RSC and rmnet_offload have looked at this packet.
* If they haven't been able to process this thing successfully, then
* there's no use in trying on our end either ;)
*
* BUT WHAT IF BOTH RSC AND OFFLOAD ARE DISABLED????
* Then the socket is only ever getting a stream of 1500 byte packets.
* If the kernel can't handle THAT, then we have a bigger problem than
* this driver could ever hope to fix.
*/
if (!skb_transport_header_was_set(skb) ||
skb->ip_summed == CHECKSUM_NONE)
return false;
th = tcp_hdr(skb);
if (th->syn)
/* SYNs and SYN-ACKs are skipped, as we don't know if there's
* even a socket to check yet (and even if there is, how much
* data can these packets have~? helllloooo Fast Open that
* somehow resulted in 64KB coalescing! ;)
*/
return false;
payload_len = rmnet_perf_tcp_payload_len(skb);
if (!payload_len && th->ack)
/* DL ACKs aren't counted. We only care about DL data. */
return false;
tuple->sport = th->source;
tuple->dport = th->dest;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
tuple->v4_saddr = iph->saddr;
tuple->v4_daddr = iph->daddr;
tuple->ip_proto = 4;
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
memcpy(&tuple->v6_saddr, &ip6h->saddr, sizeof(ip6h->saddr));
memcpy(&tuple->v6_daddr, &ip6h->daddr, sizeof(ip6h->daddr));
tuple->ip_proto = 6;
}
/* We will process this packet */
return true;
}
/* Process a TCP packet on the RMNET core */
void rmnet_perf_ingress_handle_tcp(struct sk_buff *skb)
{
struct rmnet_skb_cb *rmnet_cb = RMNET_SKB_CB(skb);
struct rmnet_perf_quickack_tuple tuple = {};
struct rmnet_perf_quickack_node *node;
if (!rmnet_perf_ingress_handle_tcp_common(skb, &tuple))
return;
rcu_read_lock();
node = rmnet_perf_quickack_tuple_find(&tuple, skb, false);
if (!node)
goto out;
if (unlikely(rmnet_perf_quickack_node_expired(node, jiffies)))
goto out;
/* Our one and only job here is to report statistics to shs via the
* rmnet_cb struct in the skb. All actual tracking happens on the
* network stack core, where the calculations will be far more
* accurate as RPS has finished.
*/
if (node->no_sock) {
rmnet_cb->tethered = true;
} else {
u32 unacked = READ_ONCE(node->tcp_seq) -
READ_ONCE(node->tcp_ack);
/* A "good enough" estimate of the bytes in flight:
* How much outstanding data is there, using only values
* for packets the stack has seen.
* (i.e. not counting the current data we have yet to
* queue to RPS)
*/
rmnet_cb->bif = unacked;
rmnet_cb->ack_thresh = READ_ONCE(node->byte_threshold);
rmnet_cb->ack_forced = node->quickack_count;
}
out:
rcu_read_unlock();
}
/* Process a TCP packet on the Network stack core */
void rmnet_perf_ingress_rx_handler_tcp(struct sk_buff *skb)
{
struct rmnet_perf_quickack_tuple tuple = {};
struct rmnet_perf_quickack_node *node;
if (!rmnet_perf_ingress_handle_tcp_common(skb, &tuple))
return;
rcu_read_lock();
node = rmnet_perf_quickack_tuple_find(&tuple, skb, false);
if (node) {
if (likely(!rmnet_perf_quickack_node_expired(node, jiffies)))
rmnet_perf_quickack_node_update(node, skb, false);
}
rcu_read_unlock();
}
void rmnet_perf_egress_handle_tcp(struct sk_buff *skb)
{
struct rmnet_perf_quickack_tuple tuple = {};
struct rmnet_perf_quickack_node *node;
struct tcphdr *th;
u32 payload_len;
/* The only case I can see where this would be the case is for
* forwarded packets. In which case, we don't even have a socket
* to force quickack on, so just skip everything.
*/
if (!skb_transport_header_was_set(skb))
return;
th = tcp_hdr(skb);
if (th->syn)
/* SYNs and SYN-ACKs are skipped for the same reason as the
* ingress hook: no data at the socket yet.
*/
return;
payload_len = rmnet_perf_tcp_payload_len(skb);
if (payload_len || !th->ack)
/* We don't care about UL data, only UL ACKs */
return;
/* Node tuples are formatted in the DL direction. Swap SRC and DST */
tuple.sport = th->dest;
tuple.dport = th->source;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
tuple.v4_saddr = iph->daddr;
tuple.v4_daddr = iph->saddr;
tuple.ip_proto = 4;
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
memcpy(&tuple.v6_saddr, &ip6h->daddr, sizeof(ip6h->daddr));
memcpy(&tuple.v6_daddr, &ip6h->saddr, sizeof(ip6h->saddr));
tuple.ip_proto = 6;
}
rcu_read_lock();
node = rmnet_perf_quickack_tuple_find(&tuple, skb, true);
if (node) {
if (likely(!rmnet_perf_quickack_node_expired(node, jiffies)))
rmnet_perf_quickack_node_update(node, skb, true);
}
rcu_read_unlock();
}
void rmnet_perf_tcp_update_quickack_thresh(u32 hash_key, u32 byte_thresh)
{
struct rmnet_perf_quickack_node *node;
int bkt;
if (hash_key == 0x0) {
if (byte_thresh == 0) {
enable_tcp = false;
} else if (byte_thresh == 1) {
enable_tcp = true;
}
return;
}
rcu_read_lock();
hash_for_each_rcu(rmnet_perf_quickack_hash, bkt, node, hash) {
if (node->offload_hash == hash_key)
WRITE_ONCE(node->byte_threshold, byte_thresh);
}
rcu_read_unlock();
}
int rmnet_perf_tcp_init(void)
{
INIT_DELAYED_WORK(&rmnet_perf_quickack_work.ws,
rmnet_perf_quickack_work_process);
return 0;
}
void rmnet_perf_tcp_exit(void)
{
/* Force the current work struct to finish deleting anything old
* enough...
*/
cancel_delayed_work_sync(&rmnet_perf_quickack_work.ws);
rmnet_perf_quickack_work.force_clean = true;
schedule_delayed_work(&rmnet_perf_quickack_work.ws, 0);
/* ...and force remove all the rest of the nodes */
cancel_delayed_work_sync(&rmnet_perf_quickack_work.ws);
}

View File

@@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF TCP framework */
#ifndef __RMNET_PERF_TCP_H__
#define __RMNET_PERF_TCP_H__
#include <linux/skbuff.h>
extern bool enable_tcp;
void rmnet_perf_ingress_handle_tcp(struct sk_buff *skb);
void rmnet_perf_ingress_rx_handler_tcp(struct sk_buff *skb);
void rmnet_perf_egress_handle_tcp(struct sk_buff *skb);
void rmnet_perf_tcp_update_quickack_thresh(u32 hash, u32 byte_thresh);
int rmnet_perf_tcp_init(void);
void rmnet_perf_tcp_exit(void);
#endif

View File

@@ -1,390 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF UDP framework */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/hashtable.h>
#include <linux/log2.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_perf_udp.h"
/* How long to keep a node, in millisecs */
#define RMNET_PERF_UDP_TRACK_TIMEOUT (2000)
/* How often to run the cleaning workqueue, in millisecs */
#define RMNET_PERF_UDP_TRACK_WQ_INTERVAL (500)
#define RMNET_PERF_UDP_TRACK_HASH_BKTS (16)
#define RMNET_PERF_UDP_TRACK_HASH_BITS \
(const_ilog2(RMNET_PERF_UDP_TRACK_HASH_BKTS))
enum {
RMNET_PERF_UDP_TRACK_STAT_NODE_ADD,
RMNET_PERF_UDP_TRACK_STAT_NODE_ADD_FAIL,
RMNET_PERF_UDP_TRACK_STAT_NODE_DEL,
RMNET_PERF_UDP_TRACK_STAT_NO_SK,
RMNET_PERF_UDP_TRACK_STAT_MAX,
};
struct rmnet_perf_udp_track_tuple {
union {
__be32 v4_saddr;
struct in6_addr v6_saddr;
};
union {
__be32 v4_daddr;
struct in6_addr v6_daddr;
};
union {
struct {
__be16 sport;
__be16 dport;
};
u32 hash_key;
};
u8 ip_proto;
};
struct rmnet_perf_udp_track_node {
struct hlist_node hash;
struct rcu_head rcu;
struct rmnet_perf_udp_track_tuple info;
struct sock *node_sk;
unsigned long ts;
u8 dead;
};
struct rmnet_perf_udp_work_struct {
struct delayed_work ws;
bool force_clean;
};
/* For tracking hash protection */
static DEFINE_SPINLOCK(rmnet_perf_udp_track_lock);
static DEFINE_HASHTABLE(rmnet_perf_udp_track_hash,
RMNET_PERF_UDP_TRACK_HASH_BITS);
static u32 rmnet_perf_udp_track_hash_size;
/* Periodic cleaning work struct for the hashtable */
static struct rmnet_perf_udp_work_struct rmnet_perf_udp_work;
/* Stats Array */
static u64 rmnet_perf_udp_track_stats[RMNET_PERF_UDP_TRACK_STAT_MAX];
module_param_array_named(rmnet_perf_udp_stat, rmnet_perf_udp_track_stats,
ullong, NULL, 0444);
static void rmnet_perf_udp_track_stats_update(u32 stat)
{
if (stat < RMNET_PERF_UDP_TRACK_STAT_MAX)
rmnet_perf_udp_track_stats[stat] += 1;
}
static bool
rmnet_perf_udp_track_node_expired(struct rmnet_perf_udp_track_node *node,
unsigned long ts)
{
unsigned long timeout;
timeout = msecs_to_jiffies(RMNET_PERF_UDP_TRACK_TIMEOUT);
if (ts - node->ts > timeout)
return true;
return false;
}
static void rmnet_perf_udp_track_node_free(struct rcu_head *head)
{
struct rmnet_perf_udp_track_node *node;
node = container_of(head, struct rmnet_perf_udp_track_node, rcu);
if (!IS_ERR_OR_NULL(node->node_sk))
sock_put(node->node_sk);
kfree(node);
}
static bool rmnet_perf_udp_track_hash_clean(bool force)
{
struct rmnet_perf_udp_track_node *node;
struct hlist_node *tmp;
unsigned long ts;
int bkt;
ts = jiffies;
hash_for_each_safe(rmnet_perf_udp_track_hash, bkt, tmp, node, hash) {
if (node->dead)
/* Node already marked as removed, but not yet
* purged after a grace period. Skip it.
*/
continue;
if (force || rmnet_perf_udp_track_node_expired(node, ts)) {
node->dead = 1;
hash_del_rcu(&node->hash);
call_rcu(&node->rcu, rmnet_perf_udp_track_node_free);
rmnet_perf_udp_track_stats_update(RMNET_PERF_UDP_TRACK_STAT_NODE_DEL);
rmnet_perf_udp_track_hash_size--;
}
}
return !!rmnet_perf_udp_track_hash_size;
}
static void rmnet_perf_udp_work_process(struct work_struct *ws)
{
struct rmnet_perf_udp_work_struct *udp_work;
unsigned long flags;
bool should_resched;
udp_work = container_of(to_delayed_work(ws),
struct rmnet_perf_udp_work_struct, ws);
spin_lock_irqsave(&rmnet_perf_udp_track_lock, flags);
should_resched = rmnet_perf_udp_track_hash_clean(udp_work->force_clean);
if (should_resched) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_PERF_UDP_TRACK_WQ_INTERVAL);
schedule_delayed_work(&udp_work->ws, delay);
}
spin_unlock_irqrestore(&rmnet_perf_udp_track_lock, flags);
}
static bool
rmnet_perf_udp_track_tuple_match(struct rmnet_perf_udp_track_tuple *t1,
struct rmnet_perf_udp_track_tuple *t2)
{
if (t1->ip_proto != t2->ip_proto ||
t1->sport != t2->sport ||
t1->dport != t2->dport)
return false;
if (t1->ip_proto == 4)
return t1->v4_saddr == t2->v4_saddr &&
t1->v4_daddr == t2->v4_daddr;
return !ipv6_addr_cmp(&t1->v6_saddr, &t2->v6_saddr) &&
!ipv6_addr_cmp(&t1->v6_daddr, &t2->v6_daddr);
}
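/* Allocate and insert a new UDP tracking node under the hash lock. The
 * periodic cleanup work is kicked off when the table goes from empty to
 * non-empty.
 */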
static struct rmnet_perf_udp_track_node *
rmnet_perf_udp_track_node_add(struct rmnet_perf_udp_track_tuple *tuple)
__must_hold(&rmnet_perf_udp_track_lock)
{
struct rmnet_perf_udp_track_node *node;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node) {
rmnet_perf_udp_track_stats_update(RMNET_PERF_UDP_TRACK_STAT_NODE_ADD_FAIL);
return NULL;
}
INIT_HLIST_NODE(&node->hash);
memcpy(&node->info, tuple, sizeof(*tuple));
node->ts = jiffies;
hash_add_rcu(rmnet_perf_udp_track_hash, &node->hash, tuple->hash_key);
rmnet_perf_udp_track_stats_update(RMNET_PERF_UDP_TRACK_STAT_NODE_ADD);
if (!rmnet_perf_udp_track_hash_size) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_PERF_UDP_TRACK_WQ_INTERVAL);
schedule_delayed_work(&rmnet_perf_udp_work.ws, delay);
}
rmnet_perf_udp_track_hash_size++;
return node;
}
static struct rmnet_perf_udp_track_node *
rmnet_perf_udp_track_tuple_find(struct rmnet_perf_udp_track_tuple *tuple)
__must_hold(RCU)
{
struct rmnet_perf_udp_track_node *node;
unsigned long flags;
spin_lock_irqsave(&rmnet_perf_udp_track_lock, flags);
hash_for_each_possible_rcu(rmnet_perf_udp_track_hash, node, hash,
tuple->hash_key) {
if (node->dead)
continue;
if (rmnet_perf_udp_track_tuple_match(&node->info, tuple))
goto out;
}
/* Make a new one */
node = rmnet_perf_udp_track_node_add(tuple);
out:
spin_unlock_irqrestore(&rmnet_perf_udp_track_lock, flags);
return node;
}
static struct sock *
rmnet_perf_udp_track_sk_lookup(struct rmnet_perf_udp_track_tuple *tuple,
struct sk_buff *skb)
{
struct sock *udp_sock;
/* UDP socket lookup is surprisingly involved. Fortunately, the
* kernel does at least export these helpers. They HAVE nice wrappers,
* but those aren't exported, naturally.
*/
if (tuple->ip_proto == 4)
udp_sock = __udp4_lib_lookup(dev_net(skb->dev), tuple->v4_saddr,
tuple->sport, tuple->v4_daddr,
tuple->dport, inet_iif(skb), 0,
&udp_table, NULL);
else
udp_sock = __udp6_lib_lookup(dev_net(skb->dev),
&tuple->v6_saddr, tuple->sport,
&tuple->v6_daddr, tuple->dport,
inet6_iif(skb), 0, &udp_table,
NULL);
/* Also, neither of these helpers handle bumping the socket refcount!
* We have to do that, in the manner of udp4/6_lib_lookup().
*/
if (udp_sock && !refcount_inc_not_zero(&udp_sock->sk_refcnt))
udp_sock = NULL;
return udp_sock;
}
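/* Refresh the node timestamp and, on first use, look up and cache the DL
 * socket. A failed lookup is cached as an ERR_PTR so the flow is treated as
 * tethered from then on; a successful lookup holds a reference that is
 * released by sock_put() in rmnet_perf_udp_track_node_free().
 */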
static void
rmnet_perf_udp_track_node_update(struct rmnet_perf_udp_track_node *node,
struct sk_buff *skb)
__must_hold(RCU)
{
struct rmnet_skb_cb *rmnet_cb = RMNET_SKB_CB(skb);
/* Poke the timestamp since the flow is still active */
node->ts = jiffies;
if (IS_ERR(node->node_sk)) {
/* No socket found */
rmnet_cb->tethered = true;
return;
}
if (!node->node_sk) {
/* Perform first-time socket lookup */
node->node_sk = rmnet_perf_udp_track_sk_lookup(&node->info,
skb);
if (!node->node_sk) {
rmnet_perf_udp_track_stats_update(RMNET_PERF_UDP_TRACK_STAT_NO_SK);
node->node_sk = ERR_PTR(-EINVAL);
rmnet_cb->tethered = true;
return;
}
}
/* Graft in the socket since we have it? */
}
void rmnet_perf_ingress_handle_udp(struct sk_buff *skb)
{
struct rmnet_perf_udp_track_tuple tuple = {};
struct rmnet_perf_udp_track_node *node;
struct udphdr *uh;
if (!skb_transport_header_was_set(skb) ||
skb->ip_summed == CHECKSUM_NONE)
return;
uh = udp_hdr(skb);
tuple.sport = uh->source;
tuple.dport = uh->dest;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
tuple.v4_saddr = iph->saddr;
tuple.v4_daddr = iph->daddr;
tuple.ip_proto = 4;
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
memcpy(&tuple.v6_saddr, &ip6h->saddr, sizeof(ip6h->saddr));
memcpy(&tuple.v6_daddr, &ip6h->daddr, sizeof(ip6h->daddr));
tuple.ip_proto = 6;
}
rcu_read_lock();
node = rmnet_perf_udp_track_tuple_find(&tuple);
if (node) {
if (likely(!rmnet_perf_udp_track_node_expired(node, jiffies)))
rmnet_perf_udp_track_node_update(node, skb);
}
rcu_read_unlock();
}
void rmnet_perf_egress_handle_udp(struct sk_buff *skb)
{
struct rmnet_perf_udp_track_tuple tuple = {};
struct rmnet_perf_udp_track_node *node;
struct udphdr *uh;
if (!skb_transport_header_was_set(skb))
return;
uh = udp_hdr(skb);
/* Node tuples are formatted in the DL direction. Swap SRC and DST */
tuple.sport = uh->dest;
tuple.dport = uh->source;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
tuple.v4_saddr = iph->daddr;
tuple.v4_daddr = iph->saddr;
tuple.ip_proto = 4;
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
memcpy(&tuple.v6_saddr, &ip6h->daddr, sizeof(ip6h->daddr));
memcpy(&tuple.v6_daddr, &ip6h->saddr, sizeof(ip6h->saddr));
tuple.ip_proto = 6;
}
rcu_read_lock();
node = rmnet_perf_udp_track_tuple_find(&tuple);
if (node) {
if (likely(!rmnet_perf_udp_track_node_expired(node, jiffies)))
rmnet_perf_udp_track_node_update(node, skb);
}
rcu_read_unlock();
}
int rmnet_perf_udp_init(void)
{
INIT_DELAYED_WORK(&rmnet_perf_udp_work.ws,
rmnet_perf_udp_work_process);
return 0;
}
void rmnet_perf_udp_exit(void)
{
/* Force the current work struct to finish deleting anything old
* enough...
*/
cancel_delayed_work_sync(&rmnet_perf_udp_work.ws);
rmnet_perf_udp_work.force_clean = true;
schedule_delayed_work(&rmnet_perf_udp_work.ws, 0);
/* ...and force remove all the rest of the nodes */
cancel_delayed_work_sync(&rmnet_perf_udp_work.ws);
}

View File

@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF UDP framework */
#ifndef __RMNET_PERF_UDP_H__
#define __RMNET_PERF_UDP_H__
#include <linux/skbuff.h>
void rmnet_perf_ingress_handle_udp(struct sk_buff *skb);
void rmnet_perf_egress_handle_udp(struct sk_buff *skb);
int rmnet_perf_udp_init(void);
void rmnet_perf_udp_exit(void);
#endif

View File

@@ -1,549 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET PERF TETHER framework */
#include <linux/module.h>
#include <net/tcp.h>
#include "rmnet_descriptor.h"
#include "rmnet_map.h"
#include "rmnet_qmap.h"
#include "rmnet_module.h"
MODULE_LICENSE("GPL v2");
/* Insert newest first, last 4 bytes of the change id */
static char *verinfo[] = {
"58aa9bee",
"e218f451",
"648b7095",
"7415921c",
"49af9bd4"
};
module_param_array(verinfo, charp, NULL, 0444);
MODULE_PARM_DESC(verinfo, "Version of the driver");
struct rmnet_perf_tether_state {
u8 rmnet_perf_tether_vnd_count;
};
static struct rmnet_perf_tether_state *rmnet_perf_tether;
unsigned int configure_knob1 __read_mostly = 0;
module_param(configure_knob1, uint, 0644);
unsigned int knob1 __read_mostly = 0;
module_param(knob1, uint, 0644);
unsigned int configure_knob2 __read_mostly = 0;
module_param(configure_knob2, uint, 0644);
unsigned int knob2 __read_mostly = 0;
module_param(knob2, uint, 0644);
static DEFINE_SPINLOCK(rmnet_perf_tether_lock);
#define RMNET_PERF_TETHER_NUM_FLOWS (50)
#define RMNET_PERF_TETHER_HASH_TABLE_BITS \
(const_ilog2(RMNET_PERF_TETHER_NUM_FLOWS))
static DEFINE_HASHTABLE(rmnet_perf_tether_flow_table,
RMNET_PERF_TETHER_HASH_TABLE_BITS);
struct rmnet_perf_tether_node {
struct list_head list;
struct hlist_node hlist;
u32 hash;
/* instead of using headers, the values are stored in __be32 in the
* layout used by jhash2 below.
*/
__be32 pkt_five_tuple[11];
u32 tuple_len;
};
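/* The pkt_five_tuple array above is filled from the outgoing SYN as
 * { daddr, saddr, protocol, dport, sport } for v4 (tuple_len 5) or
 * { daddr[4], saddr[4], nexthdr, dport, sport } for v6 (tuple_len 11),
 * which lines up field-for-field with the incoming SYN-ACK's
 * { saddr, daddr, protocol, sport, dport } in rmnet_perf_compare_node().
 */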
struct list_head rmnet_perf_tether_free_list = \
LIST_HEAD_INIT(rmnet_perf_tether_free_list);
#define RMNET_PERF_TYPE_TETHER_MESSAGE (1)
#define RMNET_PERF_TYPE_TETHER_LEN (12)
#define RMNET_PERF_TYPE_TETHER_CMD_NAME (27)
#define RMNET_PERF_TYPE_TETHER_CMD_MODE (1)
struct rmnet_map_tether_cmd_header
{
u8 mode;
u8 endp_count;
u8 config;
u8 reserved;
};
static u32 rmnet_perf_tether_get_hash_from_skb(struct sk_buff *skb, int *valid,
int syn_ack, int egress)
{
__be32 pkt_five_tuple[11];
u32 flow_hash_key_len;
if (skb->protocol == htons(ETH_P_IP)) {
/* We know that this is a TCP packet because of the core
* hook checks
*/
if (!tcp_hdr(skb)->syn)
goto fail;
if (syn_ack) {
if (!tcp_hdr(skb)->ack)
goto fail;
} else {
if (tcp_hdr(skb)->ack)
goto fail;
}
pkt_five_tuple[0] = egress ? ip_hdr(skb)->daddr : ip_hdr(skb)->saddr;
pkt_five_tuple[1] = egress ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
pkt_five_tuple[2] = ip_hdr(skb)->protocol;
pkt_five_tuple[3] = egress ? tcp_hdr(skb)->dest : tcp_hdr(skb)->source;
pkt_five_tuple[4] = egress ? tcp_hdr(skb)->source : tcp_hdr(skb)->dest;
flow_hash_key_len = 5;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
/* We know that this is a TCP packet because of the core
* hook checks
*/
if (!tcp_hdr(skb)->syn)
goto fail;
if (syn_ack) {
if (!tcp_hdr(skb)->ack)
goto fail;
} else {
if (tcp_hdr(skb)->ack)
goto fail;
}
memcpy(&pkt_five_tuple[0], egress ? ipv6_hdr(skb)->daddr.s6_addr :
ipv6_hdr(skb)->saddr.s6_addr, sizeof(struct in6_addr));
memcpy(&pkt_five_tuple[5], egress ? ipv6_hdr(skb)->saddr.s6_addr :
ipv6_hdr(skb)->daddr.s6_addr, sizeof(struct in6_addr));
pkt_five_tuple[8] = ipv6_hdr(skb)->nexthdr;
pkt_five_tuple[9] = tcp_hdr(skb)->dest;
pkt_five_tuple[10] = tcp_hdr(skb)->source;
flow_hash_key_len = 11;
} else {
goto fail;
}
*valid = 1;
return jhash2(pkt_five_tuple, flow_hash_key_len, 0);
fail:
*valid = 0;
return 0;
}
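/* Rewrite the tethered SYN-ACK in place: knob1 (clamped to 65535) overrides
 * the advertised receive window, and knob2 (clamped to TCP_MAX_WSCALE)
 * overrides the value of the window-scale option, found by walking the TCP
 * option list.
 */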
static void rmnet_perf_mangle_syn_ack(struct tcphdr *tp)
{
if (tp->syn && tp->ack) {
if (configure_knob1) {
if (knob1 > 65535)
knob1 = 65535;
tp->window = cpu_to_be16(knob1);
}
if (configure_knob2) {
unsigned char *ptr;
u32 length;
if (knob2 > TCP_MAX_WSCALE)
knob2 = TCP_MAX_WSCALE;
length = tp->doff * 4 - sizeof(struct tcphdr);
ptr = (unsigned char *)(tp + 1);
while (length > 0) {
int opcode = *ptr++;
int opsize;
switch(opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP:
length--;
continue;
default:
if (length < 2)
return;
opsize = *ptr++;
if (opsize < 2)
return;
if (opsize > length)
return;
if (opcode == TCPOPT_WINDOW)
*ptr = knob2;
ptr += opsize-2;
length -= opsize;
}
}
}
}
}
static int
rmnet_perf_compare_node(struct rmnet_perf_tether_node *node,
struct sk_buff *skb)
{
/* already checked for tcp syn earlier */
if (skb->protocol == htons(ETH_P_IP)) {
if ((node->pkt_five_tuple[0] == ip_hdr(skb)->saddr) &&
(node->pkt_five_tuple[1] == ip_hdr(skb)->daddr) &&
(node->pkt_five_tuple[2] == ip_hdr(skb)->protocol) &&
(node->pkt_five_tuple[3] == tcp_hdr(skb)->source) &&
(node->pkt_five_tuple[4] == tcp_hdr(skb)->dest) &&
(node->tuple_len == 5))
return 0;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
if ((!memcmp(&node->pkt_five_tuple[0], ipv6_hdr(skb)->saddr.s6_addr,
sizeof(struct in6_addr))) &&
(!memcmp(&node->pkt_five_tuple[5], ipv6_hdr(skb)->daddr.s6_addr,
sizeof(struct in6_addr))) &&
(node->pkt_five_tuple[8] == ipv6_hdr(skb)->nexthdr) &&
(node->pkt_five_tuple[9] == tcp_hdr(skb)->source) &&
(node->pkt_five_tuple[10] == tcp_hdr(skb)->dest) &&
(node->tuple_len == 11))
return 0;
}
return 1;
}
void rmnet_perf_tether_ingress(struct tcphdr *tp, struct sk_buff *skb)
{
int valid = 0;
u32 hash;
unsigned long flags;
struct rmnet_perf_tether_node *node, *tmp = NULL;
if (!configure_knob1 && !configure_knob2)
return;
hash = rmnet_perf_tether_get_hash_from_skb(skb, &valid, 1, 0);
if (!valid)
return;
spin_lock_irqsave(&rmnet_perf_tether_lock, flags);
hash_for_each_possible(rmnet_perf_tether_flow_table, node, hlist, hash) {
if (!rmnet_perf_compare_node(node, skb)) {
tmp = node;
break;
}
tmp = NULL;
}
if (!tmp) {
spin_unlock_irqrestore(&rmnet_perf_tether_lock, flags);
return;
}
if (node) {
/* Remove from hashlist and add to free list in case
* of a match
*/
hash_del(&node->hlist);
list_add_tail(&node->list, &rmnet_perf_tether_free_list);
}
spin_unlock_irqrestore(&rmnet_perf_tether_lock, flags);
rmnet_perf_mangle_syn_ack(tp);
}
static void
rmnet_perf_populate_node(struct rmnet_perf_tether_node *node,
struct sk_buff *skb)
{
/* already checked for tcp syn earlier */
if (skb->protocol == htons(ETH_P_IP)) {
node->pkt_five_tuple[0] = ip_hdr(skb)->daddr;
node->pkt_five_tuple[1] = ip_hdr(skb)->saddr;
node->pkt_five_tuple[2] = ip_hdr(skb)->protocol;
node->pkt_five_tuple[3] = tcp_hdr(skb)->dest;
node->pkt_five_tuple[4] = tcp_hdr(skb)->source;
node->tuple_len = 5;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
memcpy(&node->pkt_five_tuple[0], ipv6_hdr(skb)->daddr.s6_addr,
sizeof(struct in6_addr));
memcpy(&node->pkt_five_tuple[5], ipv6_hdr(skb)->saddr.s6_addr,
sizeof(struct in6_addr));
node->pkt_five_tuple[8] = ipv6_hdr(skb)->nexthdr;
node->pkt_five_tuple[9] = tcp_hdr(skb)->dest;
node->pkt_five_tuple[10] = tcp_hdr(skb)->source;
node->tuple_len = 11;
}
}
void rmnet_perf_tether_egress(struct sk_buff *skb)
{
int valid = 0;
u32 hash;
unsigned long flags;
struct rmnet_perf_tether_node *node;
struct hlist_node *tmp;
int bkt_cursor;
/* Check for forwarded skbs */
struct net_device *dev;
if (!configure_knob1 && !configure_knob2)
return;
if (!skb->skb_iif)
return;
dev = __dev_get_by_index(&init_net, skb->skb_iif);
if (!dev)
return;
hash = rmnet_perf_tether_get_hash_from_skb(skb, &valid, 0, 1);
if (!valid)
return;
spin_lock_irqsave(&rmnet_perf_tether_lock, flags);
/* Find a free node from the freelist and add to the hash list */
node = list_first_entry_or_null(&rmnet_perf_tether_free_list,
struct rmnet_perf_tether_node, list);
if (node) {
list_del(&node->list);
node->hash = hash;
rmnet_perf_populate_node(node, skb);
hash_add(rmnet_perf_tether_flow_table, &node->hlist,
node->hash);
} else {
hash_for_each_safe(rmnet_perf_tether_flow_table, bkt_cursor, tmp,
node, hlist) {
/* Reuse the first node; ideally this should be FIFO */
hash_del(&node->hlist);
node->hash = hash;
rmnet_perf_populate_node(node, skb);
hash_add(rmnet_perf_tether_flow_table, &node->hlist,
node->hash);
break;
}
}
spin_unlock_irqrestore(&rmnet_perf_tether_lock, flags);
}
void rmnet_perf_tether_cmd(u8 message, u64 val)
{
struct net_device *dev = dev_get_by_name(&init_net, "rmnet_ipa0");
struct sk_buff *skb;
if (!dev)
return;
if (message == RMNET_PERF_TYPE_TETHER_MESSAGE)
{
struct rmnet_map_control_command_header *cmdh;
struct rmnet_map_tether_cmd_header *teth;
struct rmnet_map_header *maph;
skb = alloc_skb(16, GFP_ATOMIC);
if (!skb)
goto done;
skb_put(skb, 16);
memset(skb->data, 0, 16);
maph = (struct rmnet_map_header *)skb->data;
maph->cd_bit = 1;
maph->pkt_len = htons(RMNET_PERF_TYPE_TETHER_LEN);
cmdh = (struct rmnet_map_control_command_header *)(skb->data + sizeof(*maph));
cmdh->command_name = RMNET_PERF_TYPE_TETHER_CMD_NAME;
teth = (struct rmnet_map_tether_cmd_header *)(skb->data + sizeof(*maph) + sizeof(*cmdh));
teth->mode = RMNET_PERF_TYPE_TETHER_CMD_MODE;
teth->config = !val;
skb->dev = dev;
skb->protocol = htons(ETH_P_MAP);
rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
done:
dev_put(dev);
}
static const struct rmnet_module_hook_register_info
rmnet_perf_tether_module_hooks[] = {
{
.hooknum = RMNET_MODULE_HOOK_PERF_TETHER_INGRESS,
.func = rmnet_perf_tether_ingress,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_TETHER_EGRESS,
.func = rmnet_perf_tether_egress,
},
{
.hooknum = RMNET_MODULE_HOOK_PERF_TETHER_CMD,
.func = rmnet_perf_tether_cmd,
},
};
void rmnet_perf_tether_set_hooks(void)
{
rmnet_module_hook_register(rmnet_perf_tether_module_hooks,
ARRAY_SIZE(rmnet_perf_tether_module_hooks));
}
void rmnet_perf_tether_unset_hooks(void)
{
rmnet_module_hook_unregister(rmnet_perf_tether_module_hooks,
ARRAY_SIZE(rmnet_perf_tether_module_hooks));
}
static int rmnet_perf_tether_state_init(void)
{
int i;
rmnet_perf_tether = kzalloc(sizeof(*rmnet_perf_tether), GFP_KERNEL);
if (!rmnet_perf_tether) {
pr_err("%s(): Resource allocation failed\n", __func__);
return -1;
}
rmnet_perf_tether->rmnet_perf_tether_vnd_count++;
for (i = 0; i < RMNET_PERF_TETHER_NUM_FLOWS; i++) {
struct rmnet_perf_tether_node *node;
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
continue;
INIT_LIST_HEAD(&node->list);
INIT_HLIST_NODE(&node->hlist);
list_add_tail(&node->list, &rmnet_perf_tether_free_list);
}
/* Everything is ready. Say hello to the core driver */
rmnet_perf_tether_set_hooks();
return 0;
}
static void rmnet_perf_tether_clear_flow_table(void)
{
struct rmnet_perf_tether_node *node;
struct hlist_node *tmp;
int bkt_cursor;
hash_for_each_safe(rmnet_perf_tether_flow_table, bkt_cursor, tmp,
node, hlist) {
hash_del(&node->hlist);
kfree(node);
}
}
static void rmnet_perf_tether_clear_free_list(void)
{
struct rmnet_perf_tether_node *node, *idx;
list_for_each_entry_safe(node, idx, &rmnet_perf_tether_free_list,
list) {
list_del(&node->list);
kfree(node);
}
}
static void rmnet_perf_tether_state_free(void)
{
rmnet_perf_tether_unset_hooks();
rmnet_perf_tether_clear_free_list();
rmnet_perf_tether_clear_flow_table();
kfree(rmnet_perf_tether);
rmnet_perf_tether = NULL;
}
static int rmnet_perf_tether_state_notifier(struct notifier_block *nb,
unsigned long notify_event,
void *notify_data)
{
struct net_device *device = netdev_notifier_info_to_dev(notify_data);
int rc;
(void)nb;
/* We only care about rmnet devices */
if (!device || strncmp(device->name, "rmnet_data", 10))
goto done;
switch (notify_event) {
case NETDEV_REGISTER:
/* Don't initialize if we've already done so */
if (rmnet_perf_tether) {
/* Increment the device count and we're done */
rmnet_perf_tether->rmnet_perf_tether_vnd_count++;
goto done;
}
pr_info("%s(): Initializing on device %s\n", __func__,
device->name);
rc = rmnet_perf_tether_state_init();
if (rc) {
pr_err("%s(): Initialization failed\n", __func__);
goto done;
}
break;
case NETDEV_UNREGISTER:
/* Don't uninitialize if we never initialized */
if (!rmnet_perf_tether)
goto done;
/* Decrement vnd count and free if no more devices */
if (--rmnet_perf_tether->rmnet_perf_tether_vnd_count)
goto done;
pr_info("%s(): Uninitializing on device %s\n", __func__,
device->name);
rmnet_perf_tether_state_free();
break;
}
done:
return NOTIFY_DONE;
}
static struct notifier_block rmnet_perf_tether_state_notifier_block = {
.notifier_call = rmnet_perf_tether_state_notifier,
.priority = 3,
};
static int __init rmnet_perf_tether_init(void)
{
pr_info("%s(): Loading\n", __func__);
return register_netdevice_notifier(&rmnet_perf_tether_state_notifier_block);
}
static void __exit rmnet_perf_tether_exit(void)
{
pr_info("%s(): exiting\n", __func__);
unregister_netdevice_notifier(&rmnet_perf_tether_state_notifier_block);
}
module_init(rmnet_perf_tether_init);
module_exit(rmnet_perf_tether_exit);

View File

@@ -1,263 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
/* Insert newest first, last 4 bytes of the change id */
static char *verinfo[] = { "b10f2ea2", "e6371d40", "7415921c", "ae244a9d" };
module_param_array(verinfo, charp, NULL, 0444);
MODULE_PARM_DESC(verinfo, "Version of the driver");
static const char *rmnet_sch_version = "1.2";
/* queue 0 has highest priority */
#define RMNET_SCH_MAX_QUEUE 4
/* Linux priorities 6, 7, 8, 9 map to queues 0, 1, 2, 3.
 * All other priorities use queue 3 */
static const u8 prio2queue[TC_PRIO_MAX + 1] = { 3, 3, 3, 3, 3, 3, 0, 1,
2, 3, 3, 3, 3, 3, 3, 3 };
/* Bytes to dequeue before switching to lower priority queue */
static const int bytes_limit[RMNET_SCH_MAX_QUEUE] = { 256 * 1024, 128 * 1024,
64 * 1024, 32 * 1024 };
/* Packets to dequeue before switching to lower priority queue */
static const int pkts_limit[RMNET_SCH_MAX_QUEUE] = { 8, 6, 4, 2 };
/* Queue len ratio (total 10) for each queue */
static const int qlen_ratio[RMNET_SCH_MAX_QUEUE] = { 4, 3, 2, 1 };
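/* Scheduling model: four priority queues with per-queue packet and byte
 * quotas. Higher-priority queues are served first; a queue that has used up
 * its quota is passed over until every backlogged queue has done so, at
 * which point all quotas are reset. qlen_ratio splits the device
 * tx_queue_len across the queues for the drop thresholds below.
 */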
struct rmnet_sch_queue {
struct qdisc_skb_head q;
int pkts_quota;
int bytes_quota;
unsigned int qlen_thresh;
unsigned int qlen_thresh2;
};
struct rmnet_sch_priv {
struct rmnet_sch_queue queue[RMNET_SCH_MAX_QUEUE];
};
/*
 * Choose a queue that exceeds its qlen threshold to drop from.
 * Returns RMNET_SCH_MAX_QUEUE if there is no such queue.
 */
static int rmnet_sch_next_to_drop(struct rmnet_sch_priv *priv)
{
int candidate = RMNET_SCH_MAX_QUEUE;
int candidate2 = RMNET_SCH_MAX_QUEUE;
int qn, diff, max = -1;
/* candidate is the queue that exceeds thresh2 the most.
* candidate2 is the lowest priority queue that exceeds thresh.
*/
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++) {
if (priv->queue[qn].q.qlen > priv->queue[qn].qlen_thresh2) {
diff = priv->queue[qn].q.qlen -
priv->queue[qn].qlen_thresh2;
if (diff >= max) {
max = diff;
candidate = qn;
}
}
if (priv->queue[qn].q.qlen > priv->queue[qn].qlen_thresh)
candidate2 = qn;
}
if (candidate < RMNET_SCH_MAX_QUEUE)
return candidate;
return candidate2;
}
static inline void rmnet_sch_set_quota(struct rmnet_sch_priv *priv, int qn)
{
priv->queue[qn].pkts_quota = pkts_limit[qn];
priv->queue[qn].bytes_quota = bytes_limit[qn];
}
static inline void rmnet_sch_set_qlen(struct rmnet_sch_priv *priv, int qn,
unsigned int tx_qlen)
{
priv->queue[qn].qlen_thresh = tx_qlen / 10 * qlen_ratio[qn];
priv->queue[qn].qlen_thresh2 = priv->queue[qn].qlen_thresh << 1;
}
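/* For example, rmnet_sch_set_qlen() with a tx_queue_len of 1000 would set
 * the per-queue drop thresholds to 400/300/200/100 packets and the
 * corresponding thresh2 values to 800/600/400/200.
 */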
static int rmnet_sch_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
unsigned int pkt_len = qdisc_pkt_len(skb);
int qn_to_enq;
int qn_to_drop;
struct sk_buff *skb_to_drop;
qn_to_enq = prio2queue[skb->priority & TC_PRIO_MAX];
/* If qlen is full, try to drop one packet from the queue that
* exceeds qlen threshold
*/
if (unlikely(sch->q.qlen >= qdisc_dev(sch)->tx_queue_len)) {
qn_to_drop = rmnet_sch_next_to_drop(priv);
if (qn_to_drop < RMNET_SCH_MAX_QUEUE &&
qn_to_drop != qn_to_enq) {
skb_to_drop = __qdisc_dequeue_head(
&priv->queue[qn_to_drop].q);
if (likely(skb_to_drop)) {
sch->qstats.backlog -=
qdisc_pkt_len(skb_to_drop);
sch->q.qlen--;
qdisc_drop(skb_to_drop, sch, to_free);
}
} else {
return qdisc_drop(skb, sch, to_free);
}
}
__qdisc_enqueue_tail(skb, &priv->queue[qn_to_enq].q);
qdisc_update_stats_at_enqueue(sch, pkt_len);
return NET_XMIT_SUCCESS;
}
/*
 * Pick the next queue to dequeue from. Returns RMNET_SCH_MAX_QUEUE if no
 * data is available.
 */
static u8 rmnet_sch_next_to_dequeue(struct rmnet_sch_priv *priv)
{
int qn, candidate = RMNET_SCH_MAX_QUEUE;
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++) {
if (!priv->queue[qn].q.qlen)
continue;
if (priv->queue[qn].pkts_quota <= 0 ||
priv->queue[qn].bytes_quota <= 0) {
if (qn < candidate)
candidate = qn;
continue;
}
return qn;
}
/* Either no packets are queued, or every backlogged queue has consumed
 * its quota; reset the quotas */
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++)
rmnet_sch_set_quota(priv, qn);
return candidate;
}
static struct sk_buff *rmnet_sch_dequeue(struct Qdisc *sch)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
struct sk_buff *skb = NULL;
u8 qn;
qn = rmnet_sch_next_to_dequeue(priv);
if (qn < RMNET_SCH_MAX_QUEUE) {
skb = __qdisc_dequeue_head(&priv->queue[qn].q);
if (likely(skb)) {
priv->queue[qn].pkts_quota--;
priv->queue[qn].bytes_quota -= qdisc_pkt_len(skb);
qdisc_update_stats_at_dequeue(sch, skb);
}
}
return skb;
}
static struct sk_buff *rmnet_sch_peek(struct Qdisc *sch)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
struct sk_buff *skb = NULL;
u8 qn;
qn = rmnet_sch_next_to_dequeue(priv);
if (qn < RMNET_SCH_MAX_QUEUE)
skb = priv->queue[qn].q.head;
return skb;
}
static int rmnet_sch_init(struct Qdisc *sch, struct nlattr *arg,
struct netlink_ext_ack *extack)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
int qn;
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++) {
rmnet_sch_set_quota(priv, qn);
rmnet_sch_set_qlen(priv, qn, qdisc_dev(sch)->tx_queue_len);
}
sch->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
static void rmnet_sch_reset(struct Qdisc *sch)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
int qn;
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++) {
kfree_skb_list(priv->queue[qn].q.head);
priv->queue[qn].q.head = NULL;
priv->queue[qn].q.tail = NULL;
priv->queue[qn].q.qlen = 0;
rmnet_sch_set_quota(priv, qn);
rmnet_sch_set_qlen(priv, qn, qdisc_dev(sch)->tx_queue_len);
}
/* stats will be reset by qdisc_reset */
}
static int rmnet_sch_change_tx_queue_len(struct Qdisc *sch, unsigned int qlen)
{
struct rmnet_sch_priv *priv = qdisc_priv(sch);
int qn;
for (qn = 0; qn < RMNET_SCH_MAX_QUEUE; qn++)
rmnet_sch_set_qlen(priv, qn, qlen);
return 0;
}
static struct Qdisc_ops rmnet_sch_qdisc_ops __read_mostly = {
.id = "rmnet_sch",
.priv_size = sizeof(struct rmnet_sch_priv),
.enqueue = rmnet_sch_enqueue,
.dequeue = rmnet_sch_dequeue,
.peek = rmnet_sch_peek,
.init = rmnet_sch_init,
.reset = rmnet_sch_reset,
.change_tx_queue_len = rmnet_sch_change_tx_queue_len,
.owner = THIS_MODULE,
};
static int __init rmnet_sch_module_init(void)
{
pr_info("sch: init (%s)\n", rmnet_sch_version);
return register_qdisc(&rmnet_sch_qdisc_ops);
}
static void __exit rmnet_sch_module_exit(void)
{
unregister_qdisc(&rmnet_sch_qdisc_ops);
}
MODULE_LICENSE("GPL v2");
module_init(rmnet_sch_module_init);
module_exit(rmnet_sch_module_exit);

View File

@@ -1,513 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "rmnet_shs_wq.h"
#ifndef _RMNET_SHS_H_
#define _RMNET_SHS_H_
#include "rmnet_shs_freq.h"
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_shs_wq_genl.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#include "rmnet_trace.h"
#include "qmi_rmnet.h"
#define RMNET_SHS_HT rmnet_shs_ht
#define RMNET_SHS_HT_SIZE 9
#define RMNET_SHS_MAX_SKB_INACTIVE_TSEC 15
#define MAX_SILVER_CORES rmnet_shs_cfg.max_s_cores
#define MAX_CPUS 8
#define PERF_MASK rmnet_shs_cfg.perf_mask
#define NONPERF_MASK rmnet_shs_cfg.non_perf_mask
/* Duration to acquire perf lock for pings (in ms) */
#define PING_PERF_DURATION (200)
#define INVALID_CPU -1
/* Default core for flows orphaned by an RPS mask change */
#define MAIN_CORE 0
#define UPDATE_MASK 0xFF
#define MAX_FLOWS 700
#define DEF_LL_CORE 4
#define DEFAULT_PIN_HASH 0x00AAAAAA
/* Different max inactivity based on # of flows */
#define FLOW_LIMIT1 70
#define INACTIVE_TSEC1 8
#define FLOW_LIMIT2 140
#define INACTIVE_TSEC2 2
#define DEF_PHY_CPU 1
/* Bit field of feature flags */
#define TITANIUM_FEAT 1
#define INST_RX_SWTCH_FEAT 2
#define SILVER_BALANCE_FEAT 4
/* Moves Phy core to gold cluster when cpu 1 is unavailable */
#define PHY_GOLD_SWITCH_FEAT 8
#define SHS_TRACE_ERR(...) \
do { if (rmnet_shs_debug) trace_rmnet_shs_err(__VA_ARGS__); } while (0)
#define SHS_TRACE_HIGH(...) \
do { if (rmnet_shs_debug) trace_rmnet_shs_high(__VA_ARGS__); } while (0)
#define SHS_TRACE_LOW(...) \
do { if (rmnet_shs_debug) trace_rmnet_shs_low(__VA_ARGS__); } while (0)
#define RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY 204800
#define RMNET_SHS_TCP_COALESCING_RATIO 23 //Heuristic
#define RMNET_SHS_UDP_PPS_LPWR_CPU_UTHRESH 100000
#define RMNET_SHS_UDP_PPS_LPWR_CPU0_UTHRESH 100000
#define RMNET_SHS_TCP_PPS_LPWR_CPU_UTHRESH (80000*RMNET_SHS_TCP_COALESCING_RATIO)
#define RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH 210000
#define RMNET_SHS_TCP_PPS_PERF_CPU_UTHRESH (210000*RMNET_SHS_TCP_COALESCING_RATIO)
//50% of MAX SILVER THRESHOLD
#define RMNET_SHS_UDP_PPS_LPWR_CPU_LTHRESH 0
#define RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH 40000
#define RMNET_SHS_TCP_PPS_PERF_CPU_LTHRESH (40000*RMNET_SHS_TCP_COALESCING_RATIO)
#define RMNET_SHS_UDP_PPS_HEADROOM 20000
#define RMNET_SHS_GOLD_BALANCING_THRESH (RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH / 2)
struct core_flush_s {
struct hrtimer core_timer;
struct work_struct work;
struct timespec64 coretime;
int coresum;
u8 core;
};
struct rmnet_shs_cfg_s {
struct hrtimer hrtimer_shs;
struct hrtimer hrtimer_lpm;
struct hrtimer hrtimer_wake;
struct hrtimer hrtimer_disable_pb_boost;
struct rmnet_map_dl_ind dl_mrk_ind_cb;
struct rmnet_map_pb_ind pb_mrk_ind_cb;
struct qmi_rmnet_ps_ind rmnet_idl_ind_cb;
struct rmnet_port *port;
struct core_flush_s core_flush[MAX_CPUS];
long num_bytes_parked;
long num_pkts_parked;
atomic_long_t num_flows;
ktime_t lpm_ring;
struct wakeup_source *ws;
u16 max_phy_steer;
u16 feature_mask;
u8 num_filters;
u8 is_reg_dl_mrk_ind;
u8 is_pkt_parked;
u8 force_flush_state;
u8 rmnet_shs_init_complete;
u8 dl_ind_state;
u8 ban_mask;
u8 map_mask;
u8 map_len;
/*Target phy CPU*/
u8 phy_tcpu;
u8 phy_old_cpu;
/* Actual Phy CPU */
u8 phy_acpu;
u8 max_s_cores;
u8 perf_mask;
u8 non_perf_mask;
/* Prevents ht add/del ops while phy flushing */
u8 kfree_stop;
u32 cpu_freq_boost_val;
};
struct rmnet_shs_skb_list {
struct sk_buff *head;
struct sk_buff *tail;
u64 num_parked_bytes;
u32 num_parked_skbs;
u32 skb_load;
};
struct rmnet_shs_skbn_s {
union {
struct iphdr v4hdr;
struct ipv6hdr v6hdr;
} ip_hdr;
union {
struct tcphdr tp;
struct udphdr up;
} trans_hdr;
struct list_head node_id;
/*list head for per cpu flow table*/
struct net_device *dev;
struct rmnet_shs_wq_hstat_s *hstats;
/*stats meta data*/
struct rmnet_shs_skb_list skb_list;
/*list to park packets*/
struct hlist_node list;
/*list head for hash table*/
u64 num_skb;
/* num segments of skbs received*/
u64 num_coal_skb;
/* num coalesced skbs received*/
u64 num_skb_bytes;
/* num bytes received*/
u64 hw_coal_bytes;
/* bytes coalesced in HW */
u64 hw_coal_bufsize;
/* coalescing buffer size in HW */
u32 num_ll_skb;
u32 queue_head;
/* n/w stack CPU pkt processing queue head */
u32 hash;
/*incoming hash*/
u32 bif;
/*bytes in flight*/
u32 ack_thresh;
/*quickack threshold*/
u16 map_index;
/* rps map index assigned*/
u16 map_cpu;
/* rps cpu for this flow*/
u16 custom_map;
u16 custom_len;
u8 phy;
u16 qhead_offset;
u16 skb_tport_proto;
/* Transport protocol associated with this flow*/
u8 is_shs_enabled;
/*Is SHS enabled for this flow*/
u8 low_latency;
u8 ll_flag;
u8 mux_id;
};
enum rmnet_shs_ll_steer_state_e {
RMNET_SHS_LL_SAME_CORE_SILVER,
RMNET_SHS_LL_SILVER_GOLD_NW,
RMNET_SHS_LL_SAME_CORE_GOLD,
RMNET_SHS_LL_SPLIT_ALWAYS,
RMNET_SHS_LL_STEER_MAX
};
enum rmnet_shs_low_latency_state_e {
RMNET_SHS_NOT_LOW_LATENCY,
RMNET_SHS_LOW_LATENCY_MATCH,
RMNET_SHS_LOW_LATENCY_CHECK,
RMNET_SHS_LL_MAX_STATE
};
enum rmnet_shs_tmr_force_flush_state_e {
RMNET_SHS_FLUSH_OFF,
RMNET_SHS_FLUSH_ON,
RMNET_SHS_FLUSH_DONE
};
enum rmnet_shs_switch_reason_e {
RMNET_SHS_SWITCH_INSTANT_RATE,
RMNET_SHS_SWITCH_WQ_RATE,
RMNET_SHS_OOO_PACKET_SWITCH,
RMNET_SHS_OOO_PACKET_TOTAL,
RMNET_SHS_SWITCH_PACKET_BURST,
RMNET_SHS_SWITCH_CORE_BACKLOG,
RMNET_SHS_PHY_SWITCH_GOLD_TO_S,
RMNET_SHS_PHY_SWITCH_SILVER_TO_G,
RMNET_SHS_PHY_SWITCH_GOLD_TO_S_ACT,
RMNET_SHS_PHY_SWITCH_SILVER_TO_G_ACT,
RMNET_SHS_BANNED_CPU_SUGG,
RMNET_SHS_WQ_FAIL_PHY_DROP,
/* If no interleaved packets arrive after a phy switch, we stay in this state because node->mapcpu is never moved */
RMNET_SHS_PHY_NO_INTERL_QMAP_FF,
RMNET_SHS_RESERVED_CPU_SUGG,
RMNET_SHS_RESERVED_PHY_SUGG,
RMNET_SHS_DUP_SUGG_R2G,
RMNET_SHS_RESERVED_PHY_MOVE,
RMNET_SHS_SUGG_R2G_FAIL1,
RMNET_SHS_SUGG_R2S_FAIL1,
RMNET_SHS_RM2G_G2G_SWITCH,
RMNET_SHS_WALT_SWITCH1,
RMNET_SHS_WALT_SWITCH2,
RMNET_SHS_CPU_OFFLINE,
RMNET_SHS_HALT_PHY,
RMNET_SHS_HALT_MASK_CHANGE,
RMNET_SHS_SWITCH_MAX_REASON
};
enum rmnet_shs_dl_ind_state {
RMNET_SHS_HDR_PENDING,
RMNET_SHS_END_PENDING,
RMNET_SHS_IND_COMPLETE,
RMNET_SHS_DL_IND_MAX_STATE
};
enum rmnet_shs_mid_err_e {
RMNET_SHS_PING_UNOPTIMIZED,
RMNET_SHS_MALFORM_MOVE,
RMNET_SHS_SUGG_FAIL1,
RMNET_SHS_SUGG_FAIL2,
RMNET_SHS_MID_ERR_MAX
};
enum rmnet_shs_crit_err_e {
RMNET_SHS_NETDEV_ERR,
RMNET_SHS_INVALID_CPU_ERR,
RMNET_SHS_MAIN_SHS_NOT_REQD,
RMNET_SHS_MAIN_SHS_RPS_INIT_ERR,
RMNET_SHS_MAIN_MALLOC_ERR,
RMNET_SHS_MAIN_MAP_LEN_INVALID,
RMNET_SHS_MAX_FLOWS,
RMNET_SHS_WQ_ALLOC_WQ_ERR,
RMNET_SHS_WQ_ALLOC_DEL_WQ_ERR,
RMNET_SHS_WQ_ALLOC_HSTAT_ERR,
RMNET_SHS_WQ_ALLOC_EP_TBL_ERR,
RMNET_SHS_WQ_GET_RMNET_PORT_ERR,
RMNET_SHS_WQ_EP_ACCESS_ERR,
RMNET_SHS_WQ_COMSUME_PKTS,
RMNET_SHS_CPU_PKTLEN_ERR,
RMNET_SHS_NULL_SKB_HEAD,
RMNET_SHS_RPS_MASK_CHANGE,
RMNET_SHS_WQ_INVALID_CPU_ERR,
RMNET_SHS_WQ_INVALID_PTR_ERR,
RMNET_SHS_WQ_NODE_MALLOC_ERR,
RMNET_SHS_WQ_NL_SOCKET_ERR,
RMNET_SHS_CPU_FLOWS_BNDS_ERR,
RMNET_SHS_OUT_OF_MEM_ERR,
RMNET_SHS_UDP_SEGMENT,
RMNET_SHS_PHY_OOO_SWITCH,
RMNET_SHS_FAILED_RPS_CHANGE,
RMNET_SHS_PHY_ON_TCPU,
RMNET_SHS_PHY_LONG_STEER,
RMNET_SHS_PHY_INVALID_STATE2,
RMNET_SHS_PHY_INVALID_STATE3,
RMNET_SHS_PHY_INVALID_STATE4,
RMNET_SHS_DL_MKR_SEQ_OFO,
RMNET_SHS_MAX_LL_FILTERS,
RMNET_SHS_RESERVE_CPU,
RMNET_SHS_RESERVE_LIMIT,
RMNET_SHS_CRIT_ERR_MAX
};
enum rmnet_shs_ff_reason_e {
RMNET_SHS_REG_NOT_FORCE_FLUSH,
/* PHY specific FFs */
RMNET_SHS_FF_PHY_PKT_LIMIT_ETC,
RMNET_SHS_FF_PHY_INVALID,
RMNET_SHS_FF_PHY_REG,
/* GLOBAL specific FFs */
RMNET_SHS_FF_GLOBAL,
RMNET_SHS_FF_PKT_LIMIT,
RMNET_SHS_FF_BYTE_LIMIT,
RMNET_SHS_FF_CORE_FLUSH,
RMNET_SHS_FF_BAD_RPS,
RMNET_SHS_FF_MAX_REASON
};
enum rmnet_shs_flush_reason_e {
RMNET_SHS_FLUSH_PHY_PKT_LIMIT,
RMNET_SHS_FLUSH_PKT_LIMIT,
RMNET_SHS_FLUSH_BYTE_LIMIT,
RMNET_SHS_FLUSH_TIMER_EXPIRY,
RMNET_SHS_FLUSH_RX_DL_TRAILER,
RMNET_SHS_FLUSH_INV_DL_IND,
RMNET_SHS_FLUSH_WQ_FB_FLUSH,
RMNET_SHS_FLUSH_WQ_CORE_FLUSH,
RMNET_SHS_FLUSH_PSH_PKT_FLUSH,
RMNET_SHS_FLUSH_PHY_FLUSH,
RMNET_SHS_FLUSH_PHY_FF_FLUSH,
RMNET_SHS_FLUSH_PHY_WQ_FLUSH,
RMNET_SHS_FLUSH_Z_QUEUE_FLUSH,
RMNET_SHS_FLUSH_INV_DL_IND2,
RMNET_SHS_FLUSH_MAX_REASON
};
struct flow_buff {
struct sk_buff *skb;
struct flow_buff *next;
};
struct rmnet_shs_flush_work {
struct work_struct work;
struct rmnet_port *port;
};
struct rmnet_shs_cpu_node_s {
struct list_head node_list_id;
u32 qhead;
u32 qtail;
u32 qdiff;
u32 parkedlen;
u32 seg;
u8 prio;
u8 wqprio;
u8 async;
};
enum rmnet_shs_trace_func {
RMNET_SHS_MODULE,
RMNET_SHS_CPU_NODE,
RMNET_SHS_SKB_STAMPING,
RMNET_SHS_SKB_CAN_GRO,
RMNET_SHS_DELIVER_SKB,
RMNET_SHS_CORE_CFG,
RMNET_SHS_HASH_MAP,
RMNET_SHS_ASSIGN,
RMNET_SHS_FLUSH,
RMNET_SHS_DL_MRK,
RMNET_SHS_PB_BOOST_CPU,
RMNET_SHS_WALT,
};
enum rmnet_shs_flush_context {
RMNET_RX_CTXT,
RMNET_WQ_CTXT,
RMNET_MAX_CTXT
};
/* Trace events and functions */
enum rmnet_shs_trace_evt {
RMNET_SHS_MODULE_INIT,
RMNET_SHS_MODULE_INIT_WQ,
RMNET_SHS_MODULE_GOING_DOWN,
RMNET_SHS_MODULE_EXIT,
RMNET_SHS_CPU_NODE_FUNC_START,
RMNET_SHS_CPU_NODE_FUNC_ADD,
RMNET_SHS_CPU_NODE_FUNC_MOVE,
RMNET_SHS_CPU_NODE_FUNC_REMOVE,
RMNET_SHS_CPU_NODE_FUNC_END,
RMNET_SHS_SKB_STAMPING_START,
RMNET_SHS_SKB_STAMPING_END,
RMNET_SHS_SKB_CAN_GRO_START,
RMNET_SHS_SKB_CAN_GRO_END,
RMNET_SHS_DELIVER_SKB_START,
RMNET_SHS_DELIVER_SKB_END,
RMNET_SHS_CORE_CFG_START,
RMNET_SHS_CORE_CFG_NUM_LO_CORES,
RMNET_SHS_CORE_CFG_NUM_HI_CORES,
RMNET_SHS_CORE_CFG_CHK_HI_CPU,
RMNET_SHS_CORE_CFG_CHK_LO_CPU,
RMNET_SHS_CORE_CFG_GET_QHEAD,
RMNET_SHS_CORE_CFG_GET_QTAIL,
RMNET_SHS_CORE_CFG_GET_CPU_PROC_PARAMS,
RMNET_SHS_CORE_CFG_END,
RMNET_SHS_HASH_MAP_START,
RMNET_SHS_HASH_MAP_IDX_TO_STAMP,
RMNET_SHS_HASH_MAP_FORM_HASH,
RMNET_SHS_HASH_MAP_END,
RMNET_SHS_ASSIGN_START,
RMNET_SHS_ASSIGN_GET_NEW_FLOW_CPU,
RMNET_SHS_ASSIGN_MATCH_FLOW_NODE_START,
RMNET_SHS_ASSIGN_MATCH_FLOW_COMPLETE,
RMNET_SHS_ASSIGN_PARK_PKT_COMPLETE,
RMNET_SHS_ASSIGN_PARK_TMR_START,
RMNET_SHS_ASSIGN_PARK_TMR_CANCEL,
RMNET_SHS_ASSIGN_MASK_CHNG,
RMNET_SHS_ASSIGN_CRIT_ERROR_NO_MSK_SET,
RMNET_SHS_ASSIGN_CRIT_ERROR_NO_SHS_REQD,
RMNET_SHS_ASSIGN_END,
RMNET_SHS_FLUSH_START,
RMNET_SHS_FLUSH_PARK_TMR_EXPIRY,
RMNET_SHS_FLUSH_PARK_TMR_RESTART,
RMNET_SHS_FLUSH_DELAY_WQ_TRIGGER,
RMNET_SHS_FLUSH_DELAY_WQ_START,
RMNET_SHS_FLUSH_DELAY_WQ_END,
RMNET_SHS_FLUSH_FORCE_TRIGGER,
RMNET_SHS_FLUSH_BYTE_LIMIT_TRIGGER,
RMNET_SHS_FLUSH_PKT_LIMIT_TRIGGER,
RMNET_SHS_FLUSH_DL_MRK_TRLR_HDLR_START,
RMNET_SHS_FLUSH_DL_MRK_TRLR_HDLR_END,
RMNET_SHS_FLUSH_CHK_AND_FLUSH_NODE_START,
RMNET_SHS_FLUSH_NODE_START,
RMNET_SHS_FLUSH_CHK_NODE_CAN_FLUSH,
RMNET_SHS_FLUSH_NODE_CORE_SWITCH,
RMNET_SHS_FLUSH_NODE_END,
RMNET_SHS_FLUSH_CHK_AND_FLUSH_NODE_END,
RMNET_SHS_FLUSH_END,
RMNET_SHS_DL_MRK_START,
RMNET_SHS_DL_MRK_HDR_HDLR_START,
RMNET_SHS_DL_MRK_HDR_HDLR_END,
RMNET_SHS_DL_MRK_TRLR_START,
RMNET_SHS_DL_MRK_TRLR_HDLR_END,
RMNET_SHS_DL_MRK_TRLR_END,
RMNET_SHS_DL_MRK_END,
RMNET_SHS_PB_BOOST_CPU_ENTER,
RMNET_SHS_PB_BOOST_CPU_UPDATE,
RMNET_SHS_PB_BOOST_CPU_RESET,
RMNET_SHS_WALT_TRANSITION,
};
extern struct rmnet_shs_flush_work shs_delayed_work;
extern spinlock_t rmnet_shs_ll_ht_splock;
extern spinlock_t rmnet_shs_ht_splock;
extern spinlock_t rmnet_shs_ep_lock;
extern spinlock_t rmnet_shs_hstat_tbl_lock;
extern struct hlist_head RMNET_SHS_HT[1 << (RMNET_SHS_HT_SIZE)];
void rmnet_shs_skb_entry_disable(void);
void rmnet_shs_skb_entry_enable(void);
void rmnet_shs_switch_disable(void);
void rmnet_shs_switch_enable(void);
int rmnet_shs_is_lpwr_cpu(u16 cpu);
void rmnet_shs_cancel_table(void);
void rmnet_shs_rx_wq_init(void);
unsigned int rmnet_shs_rx_wq_exit(void);
int rmnet_shs_get_mask_len(u8 mask);
int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
u8 force_flush, u8 ctxt, struct sk_buff **phy_list);
void rmnet_shs_pb_hdr_handler(struct rmnet_map_pb_ind_hdr *pbhdr);
void rmnet_shs_dl_hdr_handler_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
struct rmnet_map_control_command_header *qcmd);
void rmnet_shs_dl_trl_handler_v2(struct rmnet_map_dl_ind_trl *dltrl,
struct rmnet_map_control_command_header *qcmd);
void rmnet_shs_dl_hdr_handler(struct rmnet_map_dl_ind_hdr *dlhdr);
void rmnet_shs_dl_trl_handler(struct rmnet_map_dl_ind_trl *dltrl);
int rmnet_shs_assign(struct sk_buff *skb, struct rmnet_shs_clnt_s *cfg);
void rmnet_shs_flush_table(u8 is_force_flush, u8 ctxt);
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node);
void rmnet_shs_init(struct net_device *dev, struct net_device *vnd);
void rmnet_shs_exit(unsigned int cpu_switch);
void rmnet_shs_ps_on_hdlr(void *port);
void rmnet_shs_ps_off_hdlr(void *port);
void rmnet_shs_update_cpu_proc_q_all_cpus(void);
void rmnet_shs_clear_node(struct rmnet_shs_skbn_s *node, u8 ctxt);
void rmnet_shs_change_cpu_num_flows(u16 map_cpu, bool inc);
void rmnet_shs_deliver_skb(struct sk_buff *skb);
u32 rmnet_shs_get_cpu_qhead(u8 cpu_num);
#endif /* _RMNET_SHS_H_ */

View File

@@ -1,650 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_shs.h"
#include "rmnet_shs_wq.h"
#include "rmnet_shs_modules.h"
#include <net/ip.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#define INCREMENT 1
#define DECREMENT 0
/* Helper functions to add and remove entries in the table
 * that maintains a list of all endpoints (vnd's) available on this device.
 */
void rmnet_shs_ep_tbl_add(struct rmnet_shs_wq_ep_s *ep)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_ADD,
0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
list_add(&ep->ep_list_id, &rmnet_shs_wq_ep_tbl);
}
void rmnet_shs_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_DEL,
0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
list_del_init(&ep->ep_list_id);
}
/* Helper functions to add and remove entries in the table
 * that maintains a list of all nodes that keep per-flow statistics
 */
void rmnet_shs_hstat_tbl_add(struct rmnet_shs_wq_hstat_s *hnode)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_HSTAT_TBL,
RMNET_SHS_WQ_HSTAT_TBL_ADD,
0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_add_rcu(&hnode->hstat_node_id, &rmnet_shs_wq_hstat_tbl);
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
}
void rmnet_shs_hstat_tbl_remove(struct rmnet_shs_wq_hstat_s *hnode)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_HSTAT_TBL,
RMNET_SHS_WQ_HSTAT_TBL_DEL,
0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_del_rcu(&hnode->hstat_node_id);
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
}
/* We maintain a list of all flow nodes processed by a cpu.
* Below helper functions are used to maintain flow<=>cpu
* association.*
*/
void rmnet_shs_cpu_list_remove(struct rmnet_shs_wq_hstat_s *hnode)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
RMNET_SHS_WQ_CPU_HSTAT_TBL_DEL,
0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_del_init(&hnode->cpu_node_id);
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
}
void rmnet_shs_cpu_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *head)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
RMNET_SHS_WQ_CPU_HSTAT_TBL_ADD,
0xDEF, 0xDEF, 0xDEF, 0xDEF, hnode, NULL);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_add(&hnode->cpu_node_id, head);
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
}
void rmnet_shs_cpu_list_move(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *head)
{
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_HSTAT_TBL,
RMNET_SHS_WQ_CPU_HSTAT_TBL_MOVE,
hnode->current_cpu,
0xDEF, 0xDEF, 0xDEF, hnode, NULL);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_move(&hnode->cpu_node_id, head);
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
}
void rmnet_shs_ep_lock_bh(void)
{
spin_lock_bh(&rmnet_shs_ep_lock);
}
void rmnet_shs_ep_unlock_bh(void)
{
spin_unlock_bh(&rmnet_shs_ep_lock);
}
void rmnet_shs_update_cfg_mask(void)
{
/* Start with the most available mask that all eps could share */
u8 mask = UPDATE_MASK;
u8 rps_enabled = 0;
struct rmnet_shs_wq_ep_s *ep;
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep->is_ep_active)
continue;
/* Bitwise AND to get the common mask from the non-null masks.
 * VNDs with different masks will have UNDEFINED behavior
 */
if (ep->rps_config_msk) {
mask &= ep->rps_config_msk;
rps_enabled = 1;
}
}
if (!rps_enabled) {
rmnet_shs_cfg.map_mask = 0;
rmnet_shs_cfg.map_len = 0;
return;
} else if (rmnet_shs_cfg.map_mask != mask) {
rmnet_shs_cfg.map_mask = mask;
rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
pr_info("rmnet_shs: mask: 0x%x maplen: %d\n", rmnet_shs_cfg.map_mask, rmnet_shs_cfg.map_len);
}
}
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node)
{
SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_REMOVE,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_del_init(&node->node_id);
rmnet_shs_change_cpu_num_flows(node->map_cpu, DECREMENT);
}
void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
struct list_head *hd)
{
SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_ADD,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_add(&node->node_id, hd);
rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
}
void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
struct list_head *hd, int oldcpu)
{
SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_MOVE,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
list_move(&node->node_id, hd);
rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
}
void rmnet_shs_cpu_ooo(u8 cpu, int count)
{
if (cpu < MAX_CPUS)
{
rmnet_shs_cpu_ooo_count[cpu]+=count;
}
}
u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu)
{
if (cpu >= MAX_CPUS) {
rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
return 0;
}
return rmnet_shs_cpu_rx_max_pps_thresh[cpu];
}
inline int rmnet_shs_is_lpwr_cpu(u16 cpu)
{
return !((1 << cpu) & PERF_MASK);
}
u32 rmnet_shs_get_cpu_qhead(u8 cpu_num)
{
u32 ret = 0;
if (cpu_num < MAX_CPUS)
ret = rmnet_shs_cpu_node_tbl[cpu_num].qhead;
SHS_TRACE_LOW(RMNET_SHS_CORE_CFG, RMNET_SHS_CORE_CFG_GET_QHEAD,
cpu_num, ret, 0xDEF, 0xDEF, NULL, NULL);
return ret;
}
u32 rmnet_shs_get_cpu_qtail(u8 cpu_num)
{
u32 ret = 0;
if (cpu_num < MAX_CPUS)
ret = rmnet_shs_cpu_node_tbl[cpu_num].qtail;
SHS_TRACE_LOW(RMNET_SHS_CORE_CFG, RMNET_SHS_CORE_CFG_GET_QTAIL,
cpu_num, ret, 0xDEF, 0xDEF, NULL, NULL);
return ret;
}
u32 rmnet_shs_get_cpu_qdiff(u8 cpu_num)
{
u32 ret = 0;
if (cpu_num < MAX_CPUS)
ret = rmnet_shs_cpu_node_tbl[cpu_num].qdiff;
SHS_TRACE_LOW(RMNET_SHS_CORE_CFG, RMNET_SHS_CORE_CFG_GET_QTAIL,
cpu_num, ret, 0xDEF, 0xDEF, NULL, NULL);
return ret;
}
/* Comparison function to sort ll flow loads - based on flow avg_pps
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_ll_flow_pps(void *priv, const struct list_head *a, const struct list_head *b)
{
struct rmnet_shs_wq_ll_flow_s *flow_a;
struct rmnet_shs_wq_ll_flow_s *flow_b;
if (!a || !b)
return 0;
flow_a = list_entry(a, struct rmnet_shs_wq_ll_flow_s, ll_flow_list);
flow_b = list_entry(b, struct rmnet_shs_wq_ll_flow_s, ll_flow_list);
if (flow_a->avg_pps > flow_b->avg_pps)
return -1;
else if (flow_a->avg_pps < flow_b->avg_pps)
return 1;
return 0;
}
/* Comparison function to sort filter flow loads - based on flow avg_pps
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_filter_flow_pps(void *priv, const struct list_head *a, const struct list_head *b)
{
struct rmnet_shs_wq_fflow_s *flow_a;
struct rmnet_shs_wq_fflow_s *flow_b;
if (!a || !b)
return 0;
flow_a = list_entry(a, struct rmnet_shs_wq_fflow_s, fflow_list);
flow_b = list_entry(b, struct rmnet_shs_wq_fflow_s, fflow_list);
if (flow_a->avg_pps > flow_b->avg_pps)
return -1;
else if (flow_a->avg_pps < flow_b->avg_pps)
return 1;
return 0;
}
/* Comparison function to sort gold flow loads - based on flow avg_pps
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_flow_pps(void *priv, const struct list_head *a, const struct list_head *b)
{
struct rmnet_shs_wq_gold_flow_s *flow_a;
struct rmnet_shs_wq_gold_flow_s *flow_b;
if (!a || !b)
return 0;
flow_a = list_entry(a, struct rmnet_shs_wq_gold_flow_s, gflow_list);
flow_b = list_entry(b, struct rmnet_shs_wq_gold_flow_s, gflow_list);
if (flow_a->avg_pps > flow_b->avg_pps)
return -1;
else if (flow_a->avg_pps < flow_b->avg_pps)
return 1;
return 0;
}
/* Comparison function to sort cpu capacities - based on cpu avg_pps capacity
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_cpu_pps(void *priv, const struct list_head *a, const struct list_head *b)
{
struct rmnet_shs_wq_cpu_cap_s *cpu_a;
struct rmnet_shs_wq_cpu_cap_s *cpu_b;
if (!a || !b)
return 0;
cpu_a = list_entry(a, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
cpu_b = list_entry(b, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
if (cpu_a->avg_pps_capacity > cpu_b->avg_pps_capacity)
return -1;
else if (cpu_a->avg_pps_capacity < cpu_b->avg_pps_capacity)
return 1;
return 0;
}
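/* The comparators above follow the list_sort() callback signature and order
* entries by descending pps. A hedged usage sketch, assuming
* <linux/list_sort.h> is available and a list head named cpu_caps (name
* illustrative only):
*
*   list_sort(NULL, &cpu_caps, cmp_fn_cpu_pps);
*/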
/* Returns an invalid core if only the primary core is available */
int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
{
u8 lo_idx;
u8 lo_max;
int cpu_assigned = -1;
u8 is_match_found = 0;
struct rmnet_shs_wq_ep_s *ep = NULL;
if (!dev) {
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
return cpu_assigned;
}
spin_lock_bh(&rmnet_shs_ep_lock);
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep->is_ep_active)
continue;
if (ep->ep == dev) {
is_match_found = 1;
break;
}
}
if (!is_match_found) {
rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
spin_unlock_bh(&rmnet_shs_ep_lock);
return cpu_assigned;
}
lo_idx = ep->new_lo_idx;
lo_max = ep->new_lo_max;
while (lo_idx < lo_max) {
if (ep->new_lo_core[lo_idx] >= 0) {
cpu_assigned = ep->new_lo_core[lo_idx];
break;
}
lo_idx++;
}
/* Increment CPU assignment idx to be ready for next flow assignment*/
if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
spin_unlock_bh(&rmnet_shs_ep_lock);
return cpu_assigned;
}
int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
{
struct rmnet_shs_wq_ep_s *ep = NULL;
int cpu_assigned = -1;
u8 hi_idx;
u8 hi_max;
u8 is_match_found = 0;
if (!dev) {
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
return cpu_assigned;
}
spin_lock_bh(&rmnet_shs_ep_lock);
list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep->is_ep_active)
continue;
if (ep->ep == dev) {
is_match_found = 1;
break;
}
}
if (!is_match_found) {
rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
spin_unlock_bh(&rmnet_shs_ep_lock);
return cpu_assigned;
}
hi_idx = ep->new_hi_idx;
hi_max = ep->new_hi_max;
while (hi_idx < hi_max) {
if (ep->new_hi_core[hi_idx] >= 0) {
cpu_assigned = ep->new_hi_core[hi_idx];
break;
}
hi_idx++;
}
/* Increment CPU assignment idx to be ready for next flow assignment*/
if (cpu_assigned >= 0)
ep->new_hi_idx = ((hi_idx + 1) % hi_max);
spin_unlock_bh(&rmnet_shs_ep_lock);
return cpu_assigned;
}
void rmnet_shs_ps_on_hdlr(void *port)
{
rmnet_shs_wq_pause();
}
void rmnet_shs_ps_off_hdlr(void *port)
{
rmnet_shs_wq_restart();
}
u8 rmnet_shs_mask_from_map(struct rps_map *map)
{
u8 mask = 0;
u8 i;
for (i = 0; i < map->len; i++)
mask |= 1 << map->cpus[i];
return mask;
}
int rmnet_shs_get_mask_len(u8 mask)
{
u8 i;
u8 sum = 0;
for (i = 0; i < MAX_CPUS; i++) {
if (mask & (1 << i))
sum++;
}
return sum;
}
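/* Note: for a u8 mask this is equivalent in effect to hweight8(mask) when
* MAX_CPUS is 8, which is assumed here for illustration.
*/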
/* Takes a CPU and a CPU mask and computes the index of that CPU among the
 * CPUs configured in the mask. Returns INVALID_CPU if the CPU is not
 * enabled in the mask.
 */
int rmnet_shs_idx_from_cpu(u8 cpu, u8 mask)
{
int ret = INVALID_CPU;
u8 idx = 0;
u8 i;
/* If the CPU is not in the mask, return invalid */
if (!(mask & 1 << cpu))
return ret;
/* Find idx by counting all other configured CPUs before it */
for (i = 0; i < MAX_CPUS; i++) {
if (i == cpu && (mask & (1 << i))) {
ret = idx;
break;
}
if (mask & (1 << i))
idx++;
}
return ret;
}
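/* Worked example (mask value assumed for illustration): with mask 0x2d
* (CPUs 0, 2, 3 and 5 enabled), rmnet_shs_idx_from_cpu(3, 0x2d) returns 2,
* while rmnet_shs_idx_from_cpu(1, 0x2d) returns INVALID_CPU since CPU 1 is
* not part of the mask.
*/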
/* Assigns a CPU to process packets for a new flow. A flow with a small
 * incoming burst is assigned to the low-power core currently handling the
 * fewest packets per second.
 *
 * A flow with a heavy incoming burst is assigned to the performance core
 * with the fewest packets processed per second.
 *
 * If two or more cores within a cluster handle the same number of packets
 * per second, the first match is assigned.
 */
int rmnet_shs_new_flow_cpu(u64 burst_size, struct net_device *dev)
{
int flow_cpu = INVALID_CPU;
if (burst_size < RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY)
flow_cpu = rmnet_shs_wq_get_lpwr_cpu_new_flow(dev);
if (flow_cpu == INVALID_CPU ||
burst_size >= RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY)
flow_cpu = rmnet_shs_wq_get_perf_cpu_new_flow(dev);
SHS_TRACE_HIGH(RMNET_SHS_ASSIGN,
RMNET_SHS_ASSIGN_GET_NEW_FLOW_CPU,
flow_cpu, burst_size, 0xDEF, 0xDEF, NULL, NULL);
return flow_cpu;
}
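/* rmnet_shs_header_ptr() resolves a header located at @offset spanning @hlen
* bytes: it returns a pointer into the linear data when the header fits
* there, a pointer into a single page fragment when the header is contiguous
* within one frag, and otherwise copies the bytes into the caller-supplied
* @buf. NULL is returned when the requested range exceeds skb->len.
*/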
void *rmnet_shs_header_ptr(struct sk_buff *skb, u32 offset, u32 hlen,
void *buf)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frag;
u32 offset_orig = offset;
int i;
if (offset > skb->len || hlen > skb->len || offset + hlen > skb->len)
return NULL;
/* Linear packets or packets with headers in linear portion */
if (skb_headlen(skb) >= offset + hlen)
return skb->data + offset;
offset -= skb_headlen(skb);
/* Return pointer to page if contiguous */
for (i = 0; i < shinfo->nr_frags; i++) {
u32 frag_size;
frag = &shinfo->frags[i];
frag_size = skb_frag_size(frag);
if (offset >= frag_size) {
/* Next frag */
offset -= frag_size;
continue;
}
if (frag_size >= offset + hlen)
return skb_frag_address(frag) + offset;
}
/* The header is split across pages; copy it into the caller-provided buffer */
if (skb_copy_bits(skb, (int)offset_orig, buf, (int)hlen))
return NULL;
return buf;
}
void rmnet_shs_get_update_skb_hdr_info(struct sk_buff *skb,
struct rmnet_shs_skbn_s *node_p)
{
struct iphdr *ip4h, __ip4h;
struct ipv6hdr *ip6h, __ip6h;
struct tcphdr *tp, __tp;
struct udphdr *up, __up;
int len = 0;
u16 ip_len = 0;
__be16 frag_off;
u8 protocol;
switch (skb->protocol) {
case htons(ETH_P_IP):
ip4h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip4h), &__ip4h);
if (!ip4h)
return;
node_p->skb_tport_proto = ip4h->protocol;
memcpy(&(node_p->ip_hdr.v4hdr), ip4h, sizeof(*ip4h));
ip_len = ip4h->ihl * 4;
break;
case htons(ETH_P_IPV6):
ip6h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
return;
node_p->skb_tport_proto = ip6h->nexthdr;
memcpy(&(node_p->ip_hdr.v6hdr), ip6h, sizeof(*ip6h));
protocol = ip6h->nexthdr;
len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &protocol,
&frag_off);
if (len < 0) {
/* Can't find the transport header */
return;
}
ip_len = (u16)len;
break;
default:
break;
}
if (node_p->skb_tport_proto == IPPROTO_TCP) {
tp = rmnet_shs_header_ptr(skb, ip_len, sizeof(*tp), &__tp);
if (!tp)
return;
memcpy(&(node_p->trans_hdr.tp),
tp,
sizeof(struct tcphdr));
} else if (node_p->skb_tport_proto == IPPROTO_UDP) {
up = rmnet_shs_header_ptr(skb, ip_len, sizeof(*up), &__up);
if (!up)
return;
memcpy(&(node_p->trans_hdr.up),
up,
sizeof(struct udphdr));
} else {
/* Not a TCP or UDP protocol, don't copy the transport header */
}
}
/* Forms a new hash from the incoming hash based on the number of cores
* available for processing. This new hash will be stamped by
* SHS module (for all the packets arriving with same incoming hash)
* before delivering them to next layer.
*/
u32 rmnet_shs_form_hash(u32 index, u32 maplen, u32 hash, u8 async)
{
int offsetmap[MAX_CPUS / 2] = {8, 4, 3, 2};
u32 ret = 0;
if (!maplen) {
rmnet_shs_crit_err[RMNET_SHS_MAIN_MAP_LEN_INVALID]++;
return ret;
}
/* Override the MSBs of the skb hash for steering. Preserve most of the
 * hash bits and leave some as 0 to allow for easy debugging.
 */
if (maplen < MAX_CPUS)
ret = ((((index + ((maplen % 2) ? 1 : 0))) << 28)
* offsetmap[(maplen - 1) >> 1]) | (hash & 0xFFFFFF);
/* Wipe the low hash bits and set them to the magic value when async is set */
if (async)
ret = (ret & ~0xFFFFF) | VH_MAGIC_HASH;
SHS_TRACE_LOW(RMNET_SHS_HASH_MAP, RMNET_SHS_HASH_MAP_FORM_HASH,
ret, hash, index, maplen, NULL, NULL);
return ret;
}
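/* Worked example (input values assumed for illustration): with index = 1,
* maplen = 4, hash = 0x12345678 and async = 0, the result is
* ((1 << 28) * offsetmap[1]) | 0x345678 = 0x40345678. With async set, the
* low 20 bits are replaced by VH_MAGIC_HASH, giving 0x40377777.
*/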

View File

@@ -1,128 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_SHS_COMMON_H_
#define _RMNET_SHS_COMMON_H_
#undef TRACE_INCLUDE_PATH
#include <trace/hooks/sched.h>
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
#if (KERNEL_VERSION(6, 9, 0) <= LINUX_VERSION_CODE)
#include <net/rps.h>
#endif
#define VH_MAGIC_HASH 0x77777
void rmnet_shs_ep_tbl_add(struct rmnet_shs_wq_ep_s *ep);
void rmnet_shs_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep);
/* Helper functions to add and remove entries to the table
* that maintains a list of all nodes that maintain statistics per flow
*/
void rmnet_shs_hstat_tbl_add(struct rmnet_shs_wq_hstat_s *hnode);
void rmnet_shs_hstat_tbl_remove(struct rmnet_shs_wq_hstat_s *hnode);
/* We maintain a list of all flow nodes processed by a cpu.
* Below helper functions are used to maintain flow<=>cpu
* association.*
*/
void rmnet_shs_cpu_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *head);
void rmnet_shs_cpu_list_move(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *head);
void rmnet_shs_ep_lock_bh(void);
void rmnet_shs_ep_unlock_bh(void);
void rmnet_shs_update_cfg_mask(void);
void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node);
void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
struct list_head *hd);
void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
struct list_head *hd, int oldcpu);
void rmnet_shs_cpu_ooo(u8 cpu, int count);
inline int rmnet_shs_is_lpwr_cpu(u16 cpu);
u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu);
u32 rmnet_shs_get_cpu_qtail(u8 cpu_num);
u32 rmnet_shs_get_cpu_qdiff(u8 cpu_num);
u8 rmnet_shs_mask_from_map(struct rps_map *map);
int cmp_fn_ll_flow_pps(void *priv, const struct list_head *a, const struct list_head *b);
/* Comparison function to sort filter flow loads - based on flow avg_pps
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_filter_flow_pps(void *priv, const struct list_head *a, const struct list_head *b);
/* Comparison function to sort gold flow loads - based on flow avg_pps
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_flow_pps(void *priv, const struct list_head *a, const struct list_head *b);
/* Comparison function to sort cpu capacities - based on cpu avg_pps capacity
* return -1 if a is before b, 1 if a is after b, 0 if equal
*/
int cmp_fn_cpu_pps(void *priv, const struct list_head *a, const struct list_head *b);
/* Returns an invalid core if only the primary core is available */
int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev);
int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev);
void rmnet_shs_ps_on_hdlr(void *port);
void rmnet_shs_ps_off_hdlr(void *port);
int rmnet_shs_get_mask_len(u8 mask);
void rmnet_shs_cpu_list_remove(struct rmnet_shs_wq_hstat_s *hnode);
int rmnet_shs_idx_from_cpu(u8 cpu, u8 mask);
void rmnet_shs_get_update_skb_hdr_info(struct sk_buff *skb,
struct rmnet_shs_skbn_s *node_p);
int rmnet_shs_new_flow_cpu(u64 burst_size, struct net_device *dev);
void *rmnet_shs_header_ptr(struct sk_buff *skb, u32 offset, u32 hlen,
void *buf);
u32 rmnet_shs_form_hash(u32 index, u32 maplen, u32 hash, u8 setasync);
extern struct list_head rmnet_shs_wq_hstat_tbl;
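/* Vendor hook: for TCP/UDP sockets whose rx hash carries the SHS async magic
* in its low 20 bits, the synchronous wakeup is suppressed and a non-sync
* poll wakeup is issued instead.
*/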
static inline void rmnet_vh_do_wake_up_sync(void *unused, struct wait_queue_head *wq_head, int *done, struct sock* sk)
{
if ((sk->sk_protocol == IPPROTO_TCP || sk->sk_protocol == IPPROTO_UDP) &&
(sk->sk_rxhash & 0xFFFFF) == VH_MAGIC_HASH) {
(*done) = 1;
/* Non sync poll is done here as above flag disables sync poll */
wake_up_interruptible_poll(wq_head, EPOLLIN | EPOLLPRI | EPOLLRDNORM | EPOLLRDBAND);
}
}
static inline int rmnet_shs_vh_set(void)
{
int rc = 0;
rc = register_trace_android_vh_do_wake_up_sync(rmnet_vh_do_wake_up_sync, NULL);
return rc;
}
static inline int rmnet_shs_vh_unset(void)
{
int rc = 0;
rc = unregister_trace_android_vh_do_wake_up_sync(rmnet_vh_do_wake_up_sync, NULL);
return rc;
}
#undef TRACE_INCLUDE_PATH
#endif

View File

@@ -1,226 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_shs_config.h"
#include "rmnet_shs.h"
#include "rmnet_shs_wq.h"
#include "rmnet_shs_ll.h"
#include "rmnet_shs_freq.h"
#include "rmnet_shs_wq_genl.h"
#include "rmnet_shs_common.h"
#include "rmnet_shs_modules.h"
#include "rmnet_module.h"
static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
unsigned long event, void *data);
static struct notifier_block rmnet_shs_dev_notifier __read_mostly = {
.notifier_call = rmnet_shs_dev_notify_cb,
.priority = 2,
};
/* Version array; new versions should be appended at the end */
static char *rmnet_shs_version[] = {"43a6b",
"a2ce6",
"d1ab1",
"d879b",
"ac626",
"5cff7",
"a586b"
};
module_param_array(rmnet_shs_version, charp, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_version, "Version of shs driver");
static const struct rmnet_module_hook_register_info
rmnet_shs_ll_entry_hook = {
.hooknum = RMNET_MODULE_HOOK_SHS_SKB_LL_ENTRY,
.func = rmnet_shs_ll_handler
};
static int rmnet_vnd_total;
/* Enable smart hashing capability upon call to initialize module*/
int __init rmnet_shs_module_init(void)
{
pr_info("%s(): Loaded rmnet SHS module \n", __func__);
trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_INIT,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
if (rmnet_shs_wq_genl_init()) {
rm_err("%s", "SHS_GNL: Failed to init generic netlink");
}
return register_netdevice_notifier(&rmnet_shs_dev_notifier);
}
/* Remove smart hashing capability upon call to initialize module */
void __exit rmnet_shs_module_exit(void)
{
trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_EXIT,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
unregister_netdevice_notifier(&rmnet_shs_dev_notifier);
rmnet_shs_wq_genl_deinit();
pr_info("%s(): Exiting rmnet SHS module\n", __func__);
}
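/* Netdev notifier for rmnet_data devices: NETDEV_REGISTER initializes SHS
* state when the first VND appears, NETDEV_UP registers the DL marker, PB
* marker and powersave indications and enables the skb entry hooks once,
* NETDEV_DOWN marks the endpoint inactive, and NETDEV_UNREGISTER tears
* everything down once the last VND is gone.
*/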
static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct net_device *dev = netdev_notifier_info_to_dev(data);
struct rmnet_priv *priv;
struct rmnet_port *port;
int ret = 0;
if (!dev) {
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
return NOTIFY_DONE;
}
if (strncmp(dev->name, "rmnet_data", 10) != 0)
return NOTIFY_DONE;
switch (event) {
case NETDEV_DOWN:
rmnet_shs_wq_reset_ep_active(dev);
break;
case NETDEV_UNREGISTER:
rmnet_vnd_total--;
/* Deinitialize if last vnd is going down or if
* phy_dev is going down.
*/
if (!rmnet_vnd_total && rmnet_shs_cfg.rmnet_shs_init_complete) {
unsigned int cpu_switch;
pr_info("rmnet_shs deinit %s going down\n", dev->name);
rmnet_shs_skb_entry_disable();
rmnet_shs_switch_disable();
rmnet_module_hook_unregister_no_sync(&rmnet_shs_ll_entry_hook, 1);
qmi_rmnet_ps_ind_deregister(rmnet_shs_cfg.port,
&rmnet_shs_cfg.rmnet_idl_ind_cb);
rmnet_map_dl_ind_deregister(rmnet_shs_cfg.port,
&rmnet_shs_cfg.dl_mrk_ind_cb);
rmnet_map_pb_ind_deregister(rmnet_shs_cfg.port,
&rmnet_shs_cfg.pb_mrk_ind_cb);
rmnet_shs_cancel_table();
rmnet_shs_ll_deinit();
cpu_switch = rmnet_shs_rx_wq_exit();
/* Only unhook the vendor hook if we registered it in the first place */
if (rmnet_shs_cfg.is_reg_dl_mrk_ind)
rmnet_shs_vh_unset();
rmnet_shs_wq_exit();
rmnet_shs_exit(cpu_switch);
trace_rmnet_shs_high(RMNET_SHS_MODULE,
RMNET_SHS_MODULE_INIT_WQ,
0xDEF, 0xDEF, 0xDEF,
0xDEF, NULL, NULL);
}
break;
case NETDEV_REGISTER:
rmnet_vnd_total++;
if (rmnet_vnd_total && !rmnet_shs_cfg.rmnet_shs_init_complete) {
pr_info("rmnet_shs initializing %s\n", dev->name);
priv = netdev_priv(dev);
port = rmnet_get_port(priv->real_dev);
if (!port) {
pr_err("rmnet_shs: invalid rmnet_port\n");
break;
}
rmnet_shs_init(priv->real_dev, dev);
rmnet_shs_wq_init();
rmnet_shs_rx_wq_init();
}
break;
case NETDEV_UP:
if (!rmnet_shs_cfg.is_reg_dl_mrk_ind &&
rmnet_shs_cfg.rmnet_shs_init_complete) {
port = rmnet_shs_cfg.port;
if (!port) {
pr_err("rmnet_shs: invalid rmnet_cfg_port\n");
break;
}
rmnet_shs_cfg.dl_mrk_ind_cb.priority = RMNET_SHS;
rmnet_shs_cfg.pb_mrk_ind_cb.priority = RMNET_SHS;
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler_v2 =
&rmnet_shs_dl_hdr_handler_v2;
rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler_v2 =
&rmnet_shs_dl_trl_handler_v2;
rmnet_shs_cfg.pb_mrk_ind_cb.pb_ind_handler =
&rmnet_shs_pb_hdr_handler;
}
rmnet_shs_cfg.rmnet_idl_ind_cb.ps_on_handler =
&rmnet_shs_ps_on_hdlr;
rmnet_shs_cfg.rmnet_idl_ind_cb.ps_off_handler =
&rmnet_shs_ps_off_hdlr;
ret = rmnet_map_dl_ind_register(port,
&rmnet_shs_cfg.dl_mrk_ind_cb);
if (ret)
pr_err("%s(): rmnet dl_ind registration fail\n",
__func__);
ret = rmnet_map_pb_ind_register(port,
&rmnet_shs_cfg.pb_mrk_ind_cb);
if (ret)
pr_err("%s(): rmnet pb_ind registration fail\n",
__func__);
ret = qmi_rmnet_ps_ind_register(port,
&rmnet_shs_cfg.rmnet_idl_ind_cb);
if (ret)
pr_err("%s(): rmnet ps_ind registration fail\n",
__func__);
rmnet_shs_wq_set_ep_active(dev);
rmnet_shs_wq_refresh_ep_masks();
rmnet_shs_wq_refresh_new_flow_list();
/* Mark active before RCU pointer */
rmnet_shs_update_cfg_mask();
trace_rmnet_shs_high(RMNET_SHS_MODULE,
RMNET_SHS_MODULE_INIT_WQ,
0xDEF, 0xDEF, 0xDEF,
0xDEF, NULL, NULL);
rmnet_shs_switch_disable();
rmnet_shs_skb_entry_enable();
rmnet_module_hook_register(&rmnet_shs_ll_entry_hook, 1);
rmnet_shs_vh_set();
rmnet_shs_cfg.is_reg_dl_mrk_ind = 1;
/* Needed so we don't mark active twice*/
break;
}
rmnet_shs_wq_set_ep_active(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
module_init(rmnet_shs_module_init);
module_exit(rmnet_shs_module_exit);

View File

@@ -1,24 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#ifndef _RMNET_SHS_CONFIG_H_
#define _RMNET_SHS_CONFIG_H_
#define RMNET_SHS_LOG_LEVEL_ERROR 1
#define RMNET_SHS_LOG_LEVEL_INFO 2
#define RMNET_SHS_LOG_LEVEL_DEBUG 3
extern struct rmnet_shs_cfg_s rmnet_shs_cfg;
extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
extern rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
int __init rmnet_shs_module_init(void);
void __exit rmnet_shs_module_exit(void);
#endif /* _RMNET_SHS_CONFIG_H_ */

View File

@@ -1,323 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include "rmnet_shs.h"
#include "rmnet_shs_freq.h"
#include "rmnet_shs_modules.h"
#include "rmnet_shs_config.h"
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/pm_qos.h>
#define MAX_FREQ INT_MAX
#define MIN_FREQ 0
#define BOOST_FREQ rmnet_shs_cfg.cpu_freq_boost_val
/* CPU 1 is doing important work; don't schedule freq boost work on it */
#define WORK_CPU 2
struct cpu_freq {
unsigned int freq_floor;
unsigned int freq_ceil;
};
unsigned int rmnet_shs_freq_enable __read_mostly = 1;
module_param(rmnet_shs_freq_enable, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_freq_enable, "Enable/disable freq boost feature");
/* Shared workqueue between existing boosting and pb marker boosting*/
struct workqueue_struct *shs_boost_wq;
struct rmnet_shs_cpu_boosts {
struct cpu_freq __percpu *cpu_boosts;
};
static struct rmnet_shs_cpu_boosts shs_cpu_boosts;
static struct work_struct boost_cpu;
static DEFINE_PER_CPU(struct freq_qos_request, boost_req);
/* PB burst marker has its own work struct and cpu_freq state */
struct rmnet_shs_pb_cpu_boosts {
struct cpu_freq __percpu *pb_cpu_boosts;
};
static struct rmnet_shs_pb_cpu_boosts shs_pb_cpu_boosts;
static struct work_struct pb_boost_worker;
static DEFINE_PER_CPU(struct freq_qos_request, pb_boost_req);
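/* Boosting is implemented with per-CPU freq_qos MIN requests: the boost
* paths raise freq_floor to BOOST_FREQ for the targeted CPUs and queue work
* on WORK_CPU, where the update handlers below push the floors into the
* cpufreq constraints; the reset paths lower the floors back to MIN_FREQ the
* same way.
*/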
static void shs_update_cpu_policy(struct work_struct *work)
{
struct cpu_freq *boost;
unsigned int i;
int ret;
struct freq_qos_request *req;
cpus_read_lock();
for_each_online_cpu(i) {
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
req = &per_cpu(boost_req, i);
ret = freq_qos_update_request(req, boost->freq_floor);
}
cpus_read_unlock();
}
static void shs_update_pb_cpu_policy(struct work_struct *work)
{
struct cpu_freq *boost;
unsigned int i;
int ret;
struct freq_qos_request *req;
cpus_read_lock();
for_each_online_cpu(i) {
boost = per_cpu_ptr(shs_pb_cpu_boosts.pb_cpu_boosts, i);
req = &per_cpu(pb_boost_req, i);
ret = freq_qos_update_request(req, boost->freq_floor);
SHS_TRACE_LOW(RMNET_SHS_PB_BOOST_CPU, RMNET_SHS_PB_BOOST_CPU_UPDATE,
boost->freq_floor, boost->freq_ceil, 0xDEF, 0xDEF, NULL,
NULL);
}
cpus_read_unlock();
}
void rmnet_shs_reset_freq(void)
{
struct cpu_freq *boost;
int i;
for_each_possible_cpu(i) {
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
boost->freq_floor = MIN_FREQ;
boost->freq_ceil = MAX_FREQ;
}
for_each_possible_cpu(i) {
boost = per_cpu_ptr(shs_pb_cpu_boosts.pb_cpu_boosts, i);
boost->freq_floor = MIN_FREQ;
boost->freq_ceil = MAX_FREQ;
}
}
/* Does not queue its own work; must be called before rmnet_shs_boost_cpus(),
 * and rmnet_shs_reset_cpus() must be called afterwards to ramp it down.
 */
void rmnet_shs_boost_gold_cpu(int cpu)
{
struct cpu_freq *boost;
int i = cpu;
if (cpu < 0 || cpu >= MAX_CPUS)
return;
if ((1 << i) & NONPERF_MASK)
return;
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
boost->freq_floor = BOOST_FREQ;
boost->freq_ceil = MAX_FREQ;
trace_rmnet_freq_boost(i, BOOST_FREQ);
}
void rmnet_shs_boost_cpus(void)
{
struct cpu_freq *boost;
int i;
for_each_possible_cpu(i) {
if ((1 << i) & PERF_MASK)
continue;
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
boost->freq_floor = BOOST_FREQ;
boost->freq_ceil = MAX_FREQ;
trace_rmnet_freq_boost(i, BOOST_FREQ);
}
if (work_pending(&boost_cpu))
return;
if (shs_boost_wq) {
queue_work_on(WORK_CPU, shs_boost_wq, &boost_cpu);
}
}
void rmnet_shs_pb_boost_cpus(void)
{
struct cpu_freq *boost;
int i;
for_each_possible_cpu(i) {
if ((1 << i) & PERF_MASK)
continue;
boost = per_cpu_ptr(shs_pb_cpu_boosts.pb_cpu_boosts, i);
boost->freq_floor = BOOST_FREQ;
boost->freq_ceil = MAX_FREQ;
trace_rmnet_freq_boost(i, BOOST_FREQ);
}
if (work_pending(&pb_boost_worker))
return;
if (shs_boost_wq) {
queue_work_on(WORK_CPU, shs_boost_wq, &pb_boost_worker);
}
}
void rmnet_shs_reset_cpus(void)
{
struct cpu_freq *boost;
int i;
for_each_possible_cpu(i) {
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
boost->freq_floor = MIN_FREQ;
boost->freq_ceil = MAX_FREQ;
trace_rmnet_freq_reset(i, MIN_FREQ);
}
if (work_pending(&boost_cpu))
return;
if (shs_boost_wq)
queue_work_on(WORK_CPU, shs_boost_wq, &boost_cpu);
}
void rmnet_shs_pb_reset_cpus(void)
{
struct cpu_freq *boost;
int i;
for_each_possible_cpu(i) {
boost = per_cpu_ptr(shs_pb_cpu_boosts.pb_cpu_boosts, i);
boost->freq_floor = MIN_FREQ;
boost->freq_ceil = MAX_FREQ;
trace_rmnet_freq_reset(i, MIN_FREQ);
}
if (work_pending(&pb_boost_worker))
return;
if (shs_boost_wq)
queue_work_on(WORK_CPU, shs_boost_wq, &pb_boost_worker);
}
static void rmnet_shs_remove_qos_reqs(void)
{
struct freq_qos_request *req;
int i;
for_each_possible_cpu(i) {
req = &per_cpu(boost_req, i);
if (req && freq_qos_request_active(req)) {
freq_qos_remove_request(req);
}
req = &per_cpu(pb_boost_req, i);
if (req && freq_qos_request_active(req)) {
freq_qos_remove_request(req);
}
}
}
int rmnet_shs_freq_init(void)
{
struct cpu_freq *boost;
int i;
int ret = 0;
struct freq_qos_request *req;
struct cpufreq_policy *policy;
shs_cpu_boosts.cpu_boosts = alloc_percpu(struct cpu_freq);
if (!shs_cpu_boosts.cpu_boosts)
return -ENOMEM;
shs_pb_cpu_boosts.pb_cpu_boosts = alloc_percpu(struct cpu_freq);
if (!shs_pb_cpu_boosts.pb_cpu_boosts) {
free_percpu(shs_cpu_boosts.cpu_boosts);
return -ENOMEM;
}
if (!shs_boost_wq)
shs_boost_wq = alloc_workqueue("shs_boost_wq", WQ_HIGHPRI, 0);
if (!shs_boost_wq) {
ret = -ENOMEM;
goto err;
}
for_each_possible_cpu(i) {
boost = per_cpu_ptr(shs_cpu_boosts.cpu_boosts, i);
req = &per_cpu(boost_req, i);
policy = cpufreq_cpu_get(i);
if (!policy) {
pr_err("%s: cpufreq policy not found for cpu%d\n",
__func__, i);
return -ESRCH;
}
ret = freq_qos_add_request(&policy->constraints, req,
FREQ_QOS_MIN, MIN_FREQ);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n",
__func__, ret);
return ret;
}
req = &per_cpu(pb_boost_req, i);
policy = cpufreq_cpu_get(i);
if (!policy) {
pr_err("%s: cpufreq policy not found for pb cpu%d\n",
__func__, i);
return -ESRCH;
}
ret = freq_qos_add_request(&policy->constraints, req,
FREQ_QOS_MIN, MIN_FREQ);
if (ret < 0) {
pr_err("%s: Failed to add pb freq constraint (%d)\n",
__func__, ret);
return ret;
}
}
INIT_WORK(&boost_cpu, shs_update_cpu_policy);
INIT_WORK(&pb_boost_worker, shs_update_pb_cpu_policy);
rmnet_shs_reset_freq();
return 0;
err:
/* This resetting of frequencies is redundant while cpu_boosts is allocated
 * dynamically, but it is kept in case we switch back to static allocation.
 */
rmnet_shs_reset_freq();
free_percpu(shs_cpu_boosts.cpu_boosts);
free_percpu(shs_pb_cpu_boosts.pb_cpu_boosts);
if (shs_boost_wq) {
destroy_workqueue(shs_boost_wq);
shs_boost_wq = NULL;
}
return ret;
}
int rmnet_shs_freq_exit(void)
{
/* No need to cancel work as it will be drained and not re-queued */
/* No need to call reset_freq as removing qos freqs will do that for us */
rmnet_shs_remove_qos_reqs();
if (shs_boost_wq) {
destroy_workqueue(shs_boost_wq);
shs_boost_wq = NULL;
}
free_percpu(shs_cpu_boosts.cpu_boosts);
free_percpu(shs_pb_cpu_boosts.pb_cpu_boosts);
return 0;
}

View File

@@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_SHS_FREQ_H_
#define _RMNET_SHS_FREQ_H_
int rmnet_shs_freq_init(void);
int rmnet_shs_freq_exit(void);
void rmnet_shs_boost_cpus(void);
void rmnet_shs_reset_cpus(void);
void rmnet_shs_pb_boost_cpus(void);
void rmnet_shs_pb_reset_cpus(void);
void rmnet_shs_boost_gold_cpu(int cpu);
#endif

View File

@@ -1,714 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/ip.h>
#include <linux/cpu.h>
#include <net/ip.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/percpu-defs.h>
#include "rmnet_shs.h"
#include "rmnet_shs_wq_genl.h"
#include "rmnet_shs_config.h"
#include "rmnet_shs_wq.h"
#include "rmnet_shs_modules.h"
#include "rmnet_shs_common.h"
#include "rmnet_trace.h"
#include <linux/icmp.h>
#include <linux/inet.h>
DEFINE_HASHTABLE(rmnet_shs_ll_ht, RMNET_SHS_HT_SIZE);
DEFINE_HASHTABLE(rmnet_shs_ll_filter_ht, RMNET_SHS_HT_SIZE);
DEFINE_SPINLOCK(rmnet_shs_ll_ht_splock);
struct rmnet_shs_cpu_node_s rmnet_shs_ll_cpu_node_tbl[MAX_CPUS];
#define MAX_LL_FILTERS 100
#define GET_IQUEUE(CPU) (per_cpu(softnet_data, CPU).input_pkt_queue)
#define GET_PQUEUE(CPU) (per_cpu(softnet_data, CPU).process_queue)
#define GET_QLEN(CPU) (GET_IQUEUE(CPU).qlen + GET_PQUEUE(CPU).qlen)
#define GET_QTAIL(SD, CPU) (per_cpu(SD, CPU).input_queue_tail)
#define GET_QHEAD(SD, CPU) (per_cpu(SD, CPU).input_queue_head)
#define GET_QHEADS(CPU) (per_cpu(softnet_data, CPU).input_queue_head)
#define GET_QTAILS(CPU) (per_cpu(softnet_data, CPU).input_queue_tail)
#define MAX_LL_CORE_BACKLOG 20
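/* These macros read the per-CPU softnet_data backlog; GET_QLEN() sums the
* input and process queue lengths and is used to judge whether a core is
* lightly loaded enough to keep an LL flow local.
*/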
unsigned int rmnet_shs_ll_pkts = 0;
module_param(rmnet_shs_ll_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_ll_pkts, "LL packets seen in ll rmnet_shs");
unsigned int rmnet_shs_filter_count = 0;
module_param(rmnet_shs_filter_count, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_filter_count, "LL filter count seen in ll rmnet_shs");
/* Evaluates the incoming transport protocol of the incoming skb. Determines
* if the skb transport protocol will be supported by SHS module
*/
int rmnet_shs_is_ll_skb_stamping_reqd(struct sk_buff *skb)
{
int ret_val = 0;
struct iphdr *ip4h, __ip4h;
struct ipv6hdr *ip6h, __ip6h;
const struct ipv6_opt_hdr *ptr;
struct ipv6_opt_hdr v6hdr;
/* SHS will ignore ICMP and frag pkts completely */
switch (skb->protocol) {
case htons(ETH_P_IP):
ip4h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip4h), &__ip4h);
if (!ip4h)
break;
if (!ip_is_fragment(ip4h) &&
(ip4h->protocol == IPPROTO_TCP ||
ip4h->protocol == IPPROTO_UDP)) {
ret_val = 1;
break;
}
/* RPS logic is skipped when the RPS hash is 0 and sw_hash is set,
 * so the packet is processed on the same CPU as the initial caller.
 *
 * ICMP is no longer pinned to the phy core when moving to a perf core.
 */
if (ip4h->protocol == IPPROTO_ICMP) {
skb->hash = 0;
skb->sw_hash = 1;
if (trace_print_icmp_rx_enabled()) {
char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
u16 ip_proto = 0;
__be16 sequence = 0;
u8 type = 0;
struct icmphdr *icmphdr, __icmphdr;
memset(saddr, 0, INET6_ADDRSTRLEN);
memset(daddr, 0, INET6_ADDRSTRLEN);
icmphdr = rmnet_shs_header_ptr(skb, ip4h->ihl * 4,
sizeof(*icmphdr), &__icmphdr);
if (!icmphdr)
goto skip_trace_print_icmp4_rx;
if (icmphdr->type != ICMP_ECHOREPLY &&
icmphdr->type != ICMP_ECHO)
goto skip_trace_print_icmp4_rx;
ip_proto = htons(ETH_P_IP);
type = icmphdr->type;
sequence = icmphdr->un.echo.sequence;
snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip4h->saddr);
snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip4h->daddr);
trace_print_icmp_rx(skb, ip_proto, type, sequence, saddr, daddr);
}
} else if (ip4h->protocol == IPPROTO_ESP) {
/* Pin to core 0 if ESP protocol */
skb->hash = DEFAULT_PIN_HASH;
skb->sw_hash = 1;
}
skip_trace_print_icmp4_rx:
break;
case htons(ETH_P_IPV6):
ip6h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
break;
if (!(ip6h->nexthdr == NEXTHDR_FRAGMENT) &&
(ip6h->nexthdr == IPPROTO_TCP ||
ip6h->nexthdr == IPPROTO_UDP)) {
ret_val = 1;
break;
}
/* RPS logic is skipped when the RPS hash is 0 and sw_hash is set,
 * so the packet is processed on the same CPU as the initial caller.
 *
 * ICMP is no longer pinned to the phy core when moving to a perf core.
 */
if (ip6h->nexthdr == NEXTHDR_ICMP) {
skb->hash = 0;
skb->sw_hash = 1;
if (trace_print_icmp_rx_enabled()) {
char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
u16 ip_proto = 0;
__be16 sequence = 0;
u8 type = 0;
struct icmp6hdr *icmp6hdr, __icmp6hdr;
memset(saddr, 0, INET6_ADDRSTRLEN);
memset(daddr, 0, INET6_ADDRSTRLEN);
icmp6hdr = rmnet_shs_header_ptr(skb, sizeof(*ip6h),
sizeof(*icmp6hdr), &__icmp6hdr);
if (!icmp6hdr)
goto skip_trace_print_icmp6_rx;
if (icmp6hdr->icmp6_type != ICMPV6_ECHO_REQUEST &&
icmp6hdr->icmp6_type != ICMPV6_ECHO_REPLY)
goto skip_trace_print_icmp6_rx;
ip_proto = htons(ETH_P_IPV6);
type = icmp6hdr->icmp6_type;
sequence = icmp6hdr->icmp6_sequence;
snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ip6h->saddr);
snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ip6h->daddr);
trace_print_icmp_rx(skb, ip_proto, type, sequence, saddr, daddr);
}
} else if (ip6h->nexthdr == NEXTHDR_ESP) {
skb->hash = DEFAULT_PIN_HASH;
skb->sw_hash = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
if (skb->len - sizeof(struct ipv6hdr) < (int)sizeof(struct ipv6_opt_hdr)) {
/* skb too small to contain another header */
break;
}
/* Check if frag header has ESP nextheader. If packet has more headers before
* ESP header this would fail. Ex Frag, Encap, ESP Chain */
ptr = skb_header_pointer(skb, sizeof(struct ipv6hdr), sizeof(v6hdr), &v6hdr);
if (ptr && ptr->nexthdr == NEXTHDR_ESP) {
skb->hash = DEFAULT_PIN_HASH;
skb->sw_hash = 1;
}
}
skip_trace_print_icmp6_rx:
break;
default:
break;
}
SHS_TRACE_LOW(RMNET_SHS_SKB_STAMPING, RMNET_SHS_SKB_STAMPING_END,
ret_val, 0xDEF, 0xDEF, 0xDEF, skb, NULL);
return ret_val;
}
/* Evaluates if a v6 skb matches the v6 filter passed in *info */
int ipv6_packet_match(struct sk_buff *skb, struct ipv6hdr *skb_ip6h, struct rmnet_shs_wq_flow_info *info)
{
struct tcphdr *tp, __tp;
struct udphdr *up, __up;
int saddmatch, daddmatch, protomatch, src_port_match, dest_port_match = false;
int ret = false;
int v6len = 0;
u8 protocol;
__be16 frag_off;
saddmatch = !info->src_addr_valid || ipv6_addr_equal(&skb_ip6h->saddr, &info->src_ip_addr.v6_saddr);
daddmatch = !info->dest_addr_valid || ipv6_addr_equal(&skb_ip6h->daddr, &info->dest_ip_addr.v6_daddr);
protomatch = !info->proto_valid || info->proto == skb_ip6h->nexthdr;
src_port_match = !info->src_port_valid ;
dest_port_match = !info->dest_port_valid;
protocol = skb_ip6h->nexthdr;
if (info->src_port_valid || info->dest_port_valid) {
v6len = ipv6_skip_exthdr(skb, sizeof(*skb_ip6h), &protocol, &frag_off);
if (v6len < 0) {
/* Can't find the transport header */
return false;
}
if (skb_ip6h->nexthdr == IPPROTO_TCP) {
tp = rmnet_shs_header_ptr(skb, v6len, sizeof(*tp), &__tp);
if (!tp) {
src_port_match = false;
dest_port_match = false;
} else {
src_port_match = !info->src_port_valid || tp->source == (info->src_port);
dest_port_match = !info->dest_port_valid || tp->dest == (info->dest_port);
}
} else if (skb_ip6h->nexthdr == IPPROTO_UDP) {
up = rmnet_shs_header_ptr(skb, v6len, sizeof(*up), &__up);
if (!up) {
src_port_match = false;
dest_port_match = false;
} else {
src_port_match = !info->src_port_valid || up->source == (info->src_port);
dest_port_match = !info->dest_port_valid || up->dest == (info->dest_port);
}
}
}
if ((saddmatch) && (daddmatch) && (protomatch) && (src_port_match) && (dest_port_match))
ret = true;
return ret;
}
/* Evaluates if a v4 skb matches the v4 filter passed in *info */
int ipv4_packet_match(struct sk_buff *skb, struct iphdr *skb_ip4h, struct rmnet_shs_wq_flow_info *info)
{
int ret = false;
struct tcphdr *tp, __tp;
struct udphdr *up, __up;
u16 v4ip_len = skb_ip4h->ihl * 4;
int saddmatch = !info->src_addr_valid || skb_ip4h->saddr == info->src_ip_addr.saddr;
int daddmatch = !info->dest_addr_valid || skb_ip4h->daddr == (info->dest_ip_addr.daddr);
int protomatch = !info->proto_valid || skb_ip4h->protocol == info->proto;
int src_port_match = !info->src_port_valid ;
int dest_port_match = !info->dest_port_valid;
if (info->src_port_valid || info->dest_port_valid) {
if (skb_ip4h->protocol == IPPROTO_TCP) {
tp = rmnet_shs_header_ptr(skb, v4ip_len, sizeof(*tp), &__tp);
if (!tp) {
src_port_match = false;
dest_port_match = false;
} else {
src_port_match = !info->src_port_valid || tp->source == (info->src_port);
dest_port_match = !info->dest_port_valid || tp->dest == (info->dest_port);
}
} else if (skb_ip4h->protocol == IPPROTO_UDP) {
up = rmnet_shs_header_ptr(skb, v4ip_len, sizeof(*up), &__up);
if (!up) {
src_port_match = false;
dest_port_match = false;
} else {
src_port_match = !info->src_port_valid || up->source == (info->src_port);
dest_port_match = !info->dest_port_valid || up->dest == (info->dest_port);
}
}
}
if ((saddmatch) && (daddmatch) && (protomatch) && (src_port_match) && (dest_port_match))
ret = true;
rm_err("SHS_LL: V4 saddr match %u daddr match %u, proto match %u, src port %u, dest port match %u proto %u\n",
saddmatch, daddmatch, protomatch, src_port_match, dest_port_match, skb_ip4h->protocol);
return ret;
}
/* Evaluates if two filters match identical things, to prevent dup filter additions.*/
int rmnet_shs_is_identical_filter(struct rmnet_shs_wq_flow_node *node, struct rmnet_shs_wq_flow_node *node2)
{
struct rmnet_shs_wq_flow_info *info = &node->info;
struct rmnet_shs_wq_flow_info *info2 = &node2->info;
int versionmatch = info->ip_version == info2->ip_version;
int saddmatch, daddmatch, protomatch, src_port_match, dest_port_match = false;
/* A sequence match supersedes the filter contents */
if (info->seq && info2->seq && (info->seq == info2->seq)) {
return true;
}
/* Source/destination addresses match between filters if neither filter
 * matches on that address, or if both do and the addresses are equal.
 */
saddmatch = (!info->src_addr_valid && !info2->src_addr_valid) ||
(info->src_addr_valid && info2->src_addr_valid &&
((info->ip_version == 4) ? (info->src_ip_addr.saddr == info2->src_ip_addr.saddr) :
ipv6_addr_equal(&info2->src_ip_addr.v6_saddr, &info->src_ip_addr.v6_saddr)));
daddmatch = (!info->dest_addr_valid && !info2->dest_addr_valid) ||
(info->dest_addr_valid && info2->dest_addr_valid &&
((info->ip_version == 4) ? (info->dest_ip_addr.daddr == info2->dest_ip_addr.daddr) :
ipv6_addr_equal(&info2->dest_ip_addr.v6_daddr, &info->dest_ip_addr.v6_daddr)));
protomatch = (!info->proto_valid && !info2->proto_valid) ||
(info->proto_valid && info2->proto_valid && info->proto == info2->proto);
src_port_match = (!info->src_port_valid && !info2->src_port_valid) ||
(info->src_port_valid && info2->src_port_valid &&
info->src_port == info2->src_port);
dest_port_match = (!info->dest_port_valid && !info2->dest_port_valid) ||
(info->dest_port_valid && info2->dest_port_valid &&
info->dest_port == info2->dest_port);
rm_err("SHS_LL: match result sadr match %u daddr match %u, proto match %u, src port %u, dest port match %u versionmatch %u\n",
saddmatch, daddmatch, protomatch,src_port_match, dest_port_match, versionmatch);
return (versionmatch && saddmatch && daddmatch && protomatch && src_port_match && dest_port_match);
}
/* Evaluates the incoming skb against all installed filters
* if the filter matches with the SKB then true is returned.
*/
int rmnet_shs_is_filter_match(struct sk_buff *skb)
{
struct iphdr *ip4h, __ip4h;
struct ipv6hdr *ip6h, __ip6h;
struct rmnet_shs_wq_flow_node *node_p;
struct hlist_node *tmp;
int ret = false;
spin_lock_bh(&rmnet_shs_ll_ht_splock);
/* Look up installed filters keyed by the packet's source address */
switch (skb->protocol) {
case htons(ETH_P_IP):
ip4h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip4h), &__ip4h);
if (!ip4h) {
break;
}
hash_for_each_possible_safe(rmnet_shs_ll_filter_ht, node_p, tmp, list, ip4h->saddr) {
if (ipv4_packet_match(skb, ip4h, &node_p->info)) {
ret = true;
break;
}
}
break;
case htons(ETH_P_IPV6):
ip6h = rmnet_shs_header_ptr(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h) {
break;
}
hash_for_each_possible_safe(rmnet_shs_ll_filter_ht, node_p, tmp, list, ip6h->saddr.in6_u.u6_addr32[0]) {
if (ipv6_packet_match(skb, ip6h, &node_p->info)) {
ret = true;
break;
}
}
break;
default:
break;
}
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
rm_err("SHS_LL: Packet Filter checked analyzed ret: %d", ret);
return ret;
}
/* Uninstalls a LL flow filter contained by node */
void rmnet_shs_remove_llflow(struct rmnet_shs_wq_flow_node *node)
{
struct rmnet_shs_wq_flow_node *temp_node;
struct hlist_node *tmp;
struct rmnet_shs_wq_hstat_s *hnode = NULL;
unsigned long bkt;
int i = 0;
spin_lock_bh(&rmnet_shs_ll_ht_splock);
hash_for_each_safe(rmnet_shs_ll_filter_ht, bkt, tmp, temp_node, list)
{
i++;
if (rmnet_shs_is_identical_filter(temp_node, node)) {
rm_err("SHS_LL: %s\n", "Filter already installed, Dup Filter");
hash_del_rcu(&temp_node->list);
kfree(temp_node);
break;
}
}
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
if (hnode->node && !hnode->node->low_latency) {
hnode->node->low_latency= RMNET_SHS_LOW_LATENCY_CHECK;
}
}
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
/* Free the genl node allocated at genl recv */
kfree(node);
rmnet_shs_cfg.num_filters--;
rmnet_shs_filter_count--;
rm_err("SHS_LL: %s", " Uninstalled LL filter");
}
void rmnet_shs_print_llflow(struct rmnet_shs_wq_flow_node *node)
{
struct rmnet_shs_wq_flow_info *info = &node->info;
pr_info("SHS_LL: proto valid %u src addr valid %u, dest addr valid %u, dest port valid %u, srcport valid %u, ip version %u seq %u\n",
info->proto_valid, info->src_addr_valid, info->dest_addr_valid,
info->dest_port_valid, info->src_port_valid, info->ip_version, info->seq);
pr_info("SHS_LL: info->ip_version %u\n", info->ip_version );
pr_info("SHS_LL: info->proto %u\n", info->proto );
pr_info("SHS_LL: info->dest_port %u\n", info->dest_port );
pr_info("SHS_LL: info->src_port %u\n", info->src_port );
pr_info("SHS_LL: info->dest_addr_valid %u\n", info->dest_addr_valid);
pr_info("SHS_LL: info->src_addr_valid %u\n", info->src_addr_valid);
pr_info("SHS_LL: info->seq %u\n", info->seq);
if (info->ip_version == 4 && (info->dest_addr_valid) && (info->src_addr_valid)) {
pr_info("New flow dest addr %u\n", info->dest_ip_addr.daddr);
pr_info("New flow src addr %u\n", info->src_ip_addr.saddr);
}
if (info->ip_version == 6 && (info->dest_addr_valid) && (info->src_addr_valid)) {
pr_info("New flow dest addr %u %u %u %u\n",
node->info.dest_ip_addr.v6_daddr.in6_u.u6_addr32[3],
node->info.dest_ip_addr.v6_daddr.in6_u.u6_addr32[2],
node->info.dest_ip_addr.v6_daddr.in6_u.u6_addr32[1],
node->info.dest_ip_addr.v6_daddr.in6_u.u6_addr32[0]);
pr_info("New flow src addr %u %u %u %u\n",
node->info.src_ip_addr.v6_saddr.in6_u.u6_addr32[3],
node->info.src_ip_addr.v6_saddr.in6_u.u6_addr32[2],
node->info.src_ip_addr.v6_saddr.in6_u.u6_addr32[1],
node->info.src_ip_addr.v6_saddr.in6_u.u6_addr32[0]);
}
}
/* Installs a LL flow filter contained by node */
void rmnet_shs_add_llflow(struct rmnet_shs_wq_flow_node *node)
{
struct rmnet_shs_wq_flow_node *temp_node;
struct rmnet_shs_wq_hstat_s *hnode = NULL;
unsigned long bkt;
int i = 0;
spin_lock_bh(&rmnet_shs_ll_ht_splock);
hash_for_each(rmnet_shs_ll_filter_ht, bkt, temp_node, list)
{
i++;
if (rmnet_shs_is_identical_filter(temp_node, node)) {
kfree(node);
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
rm_err("SHS_LL: %s", " Dup filter seen match seen, no install");
return;
}
}
if (rmnet_shs_cfg.num_filters >= MAX_LL_FILTERS) {
kfree(node);
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
rmnet_shs_crit_err[RMNET_SHS_MAX_LL_FILTERS]++;
rm_err("SHS_LL: %s\n", "Installed LL filter failed: Max reached");
return;
}
rmnet_shs_cfg.num_filters++;
rmnet_shs_filter_count++;
if (rmnet_shs_debug)
rmnet_shs_print_llflow(node);
/* Room for improvement: the hash key is only the first word of the source address for v6 */
hash_add(rmnet_shs_ll_filter_ht, &node->list, node->info.src_ip_addr.saddr);
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
/* Mark all flows to check if filter is a match on next packet */
rm_err("SHS_LL: %s\n", "Setting low latency flow check for all flows");
spin_lock_bh(&rmnet_shs_hstat_tbl_lock);
list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
if (hnode->node && !hnode->node->low_latency) {
hnode->node->low_latency= RMNET_SHS_LOW_LATENCY_CHECK;
}
}
spin_unlock_bh(&rmnet_shs_hstat_tbl_lock);
rm_err("SHS_LL: %s\n", "Installed LL filter");
}
void rmnet_shs_ll_stamp(struct sk_buff *skb, struct rmnet_shs_skbn_s *node)
{
u32 hash2stamp = 0; /* the default value of skb->hash*/
u8 map = 0, maplen = 0;
u16 index;
if (!node->custom_map) {
map = rmnet_shs_cfg.map_mask;
maplen = rmnet_shs_cfg.map_len;
index = node->map_index;
} else {
map = node->custom_map;
maplen = node->custom_len;
index = node->map_index;
}
if (map) {
hash2stamp = rmnet_shs_form_hash(index,
maplen,
node->hash, 0);
skb->hash = hash2stamp;
}
}
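/* LL ingress handler: looks up (or allocates) the flow node for skb->hash,
* stamps the LL steering hash, may keep the flow on the current CPU while
* that CPU's backlog stays below MAX_LL_CORE_BACKLOG, updates per-flow
* accounting, and finally delivers the skb up the stack.
*/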
int rmnet_shs_ll_handler(struct sk_buff *skb, struct rmnet_shs_clnt_s *clnt_cfg)
{
struct rmnet_shs_skbn_s *node_p;
struct hlist_node *tmp;
int map = rmnet_shs_cfg.map_mask;
int ll_cpu = rmnet_shs_ll_flow_cpu;
int map_cpu;
u32 hash;
u8 is_match_found = 0;
struct rmnet_shs_cpu_node_s *cpu_node_tbl_p;
struct rmnet_priv *priv;
rmnet_shs_ll_pkts++;
hash = skb_get_hash(skb);
/* Deliver non-TCP/UDP packets right away.
 * If the stamp-all config bit is set, skip the stamping-required check.
 */
if (!(clnt_cfg->config & RMNET_SHS_STMP_ALL) &&
!rmnet_shs_is_ll_skb_stamping_reqd(skb)) {
rmnet_shs_deliver_skb(skb);
return 0;
}
spin_lock_bh(&rmnet_shs_ll_ht_splock);
do {
hash_for_each_possible_safe(rmnet_shs_ll_ht, node_p, tmp, list,
hash) {
if (hash != node_p->hash)
continue;
is_match_found = 1;
node_p->map_cpu = rmnet_shs_ll_flow_cpu;
node_p->map_index = rmnet_shs_idx_from_cpu(node_p->map_cpu, map);
break;
}
if (is_match_found)
break;
if (ll_cpu < 0) {
rmnet_shs_crit_err[RMNET_SHS_RPS_MASK_CHANGE]++;
break;
}
if (atomic_long_read(&rmnet_shs_cfg.num_flows) > MAX_FLOWS) {
rmnet_shs_crit_err[RMNET_SHS_MAX_FLOWS]++;
break;
}
node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
if (!node_p) {
rmnet_shs_crit_err[RMNET_SHS_MAIN_MALLOC_ERR]++;
break;
}
atomic_long_inc(&rmnet_shs_cfg.num_flows);
node_p->custom_map = clnt_cfg->map_mask;
node_p->custom_len = rmnet_shs_cfg.map_mask;
node_p->dev = skb->dev;
node_p->hash = skb->hash;
node_p->map_cpu = ll_cpu;
node_p->low_latency = 1;
node_p->map_index = rmnet_shs_idx_from_cpu(node_p->map_cpu, map);
node_p->map_cpu = raw_smp_processor_id();
node_p->map_index = rmnet_shs_idx_from_cpu(node_p->map_cpu, map);
INIT_LIST_HEAD(&node_p->node_id);
/* Set ip header / transport header / transport proto */
rmnet_shs_get_update_skb_hdr_info(skb, node_p);
/* Workqueue utilizes some of the values from above
* initializations . Therefore, we need to request
* for memory (to workqueue) after the above initializations
*/
rmnet_shs_wq_create_new_flow(node_p);
map_cpu = node_p->map_cpu;
cpu_node_tbl_p = &rmnet_shs_ll_cpu_node_tbl[map_cpu];
/* Set mux id */
priv = netdev_priv(node_p->dev);
if (!priv) {
rm_err("SHS_LL: priv for netdev is null for hash 0x%x", node_p->hash);
rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
} else {
node_p->hstats->mux_id = priv->mux_id;
rm_err("SHS_LL: mux id for hash 0x%x is %d",
node_p->hash, node_p->hstats->mux_id);
}
rmnet_shs_cpu_node_add(node_p, &cpu_node_tbl_p->node_list_id);
hash_add_rcu(rmnet_shs_ll_ht, &node_p->list, skb->hash);
is_match_found = 1;
break;
} while (0);
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
if (is_match_found) {
/* Keep the flow on the same core while the backlog is very low; once it is
 * not, stop doing so unless we have moved to a gold core and the backlog is
 * again very small.
 * RMNET_SHS_LL_SAME_CORE_SILVER = same-core eligible on silver
 * RMNET_SHS_LL_SILVER_GOLD_NW   = no longer same-core eligible on silver, still ok on gold
 * RMNET_SHS_LL_SAME_CORE_GOLD   = same-core eligible on gold
 * RMNET_SHS_LL_SPLIT_ALWAY      = no longer same-core eligible on silver or gold
 */
rmnet_shs_ll_stamp(skb, node_p);
if (!node_p->ll_flag &&
rmnet_shs_is_lpwr_cpu(raw_smp_processor_id())) {
if (GET_QLEN(raw_smp_processor_id()) < MAX_LL_CORE_BACKLOG &&
rmnet_shs_cpu_psb_above_thresh(raw_smp_processor_id(), 4000)) {
skb->hash = 0;
skb->sw_hash = 1;
} else if (!node_p->ll_flag) {
node_p->ll_flag = RMNET_SHS_LL_SILVER_GOLD_NW;
node_p->map_cpu = ll_cpu;
node_p->map_index = rmnet_shs_idx_from_cpu(node_p->map_cpu, map);
}
} else if (node_p->ll_flag != RMNET_SHS_LL_SAME_CORE_GOLD){
if (!rmnet_shs_is_lpwr_cpu(raw_smp_processor_id())) {
if (GET_QLEN(raw_smp_processor_id()) < MAX_LL_CORE_BACKLOG &&
rmnet_shs_cpu_psb_above_thresh(raw_smp_processor_id(), 12000)) {
skb->hash = 0;
skb->sw_hash = 1;
} else {
node_p->ll_flag = RMNET_SHS_LL_SAME_CORE_GOLD;
node_p->map_cpu = ll_cpu;
node_p->map_index = rmnet_shs_idx_from_cpu(node_p->map_cpu, map);
}
}
}
if (skb_shinfo(skb)->gso_segs) {
node_p->num_skb += skb_shinfo(skb)->gso_segs;
rmnet_shs_cpu_node_tbl[node_p->map_cpu].parkedlen++;
node_p->skb_list.skb_load += skb_shinfo(skb)->gso_segs;
} else {
node_p->num_skb += 1;
rmnet_shs_cpu_node_tbl[node_p->map_cpu].parkedlen++;
node_p->skb_list.skb_load++;
}
node_p->num_coal_skb += 1;
node_p->hw_coal_bytes += RMNET_SKB_CB(skb)->coal_bytes;
node_p->hw_coal_bufsize += RMNET_SKB_CB(skb)->coal_bufsize;
if (skb->priority == 0xda1a)
node_p->num_ll_skb++;
node_p->num_skb_bytes += skb->len;
}
rmnet_shs_deliver_skb(skb);
return 0;
}
void rmnet_shs_ll_init(void)
{
u8 num_cpu;
for (num_cpu = 0; num_cpu < MAX_CPUS; num_cpu++)
INIT_LIST_HEAD(&rmnet_shs_ll_cpu_node_tbl[num_cpu].node_list_id);
}
void rmnet_shs_ll_deinit(void)
{
struct rmnet_shs_wq_flow_node *node;
struct hlist_node *tmp;
unsigned long bkt;
rm_err("%s", "SHS_LL: De-init LL book-keeping");
spin_lock_bh(&rmnet_shs_ll_ht_splock);
hash_for_each_safe(rmnet_shs_ll_ht, bkt, tmp, node, list)
{
hash_del_rcu(&node->list);
}
hash_for_each_safe(rmnet_shs_ll_filter_ht, bkt, tmp, node, list)
{
hash_del_rcu(&node->list);
kfree(node);
rmnet_shs_cfg.num_filters--;
rmnet_shs_filter_count--;
}
spin_unlock_bh(&rmnet_shs_ll_ht_splock);
rm_err("%s", "SHS_LL: De-init LL book-keeping exit");
}

View File

@@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_SHS_LL_H_
#define _RMNET_SHS_LL_H_
int rmnet_shs_ll_handler(struct sk_buff *skb, struct rmnet_shs_clnt_s *clnt_cfg);
void rmnet_shs_ll_init(void);
void rmnet_shs_ll_deinit(void);
void rmnet_shs_add_llflow(struct rmnet_shs_wq_flow_node *node);
void rmnet_shs_remove_llflow(struct rmnet_shs_wq_flow_node *node);
int rmnet_shs_is_filter_match(struct sk_buff *skb);
#endif /* _RMNET_SHS_LL_H_ */

File diff suppressed because it is too large

View File

@@ -1,272 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_shs.h"
unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
module_param(rmnet_shs_wq_interval_ms, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
unsigned long rmnet_shs_max_flow_inactivity_sec __read_mostly =
RMNET_SHS_MAX_SKB_INACTIVE_TSEC;
module_param(rmnet_shs_max_flow_inactivity_sec, ulong, 0644);
MODULE_PARM_DESC(rmnet_shs_max_flow_inactivity_sec,
"Max flow inactive time before clean up");
unsigned int rmnet_shs_wq_tuning __read_mostly = 80;
module_param(rmnet_shs_wq_tuning, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_tuning, "moving average weightage");
unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly = {
RMNET_SHS_UDP_PPS_LPWR_CPU0_UTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_UTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH
};
module_param_array(rmnet_shs_cpu_rx_max_pps_thresh, ullong, NULL , 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_max_pps_thresh, "Max pkts core can handle");
unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly = {
RMNET_SHS_UDP_PPS_LPWR_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_LPWR_CPU_LTHRESH,
RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH
};
module_param_array(rmnet_shs_cpu_rx_min_pps_thresh, ullong, NULL , 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_min_pps_thresh, "Min pkts core can handle");
unsigned int rmnet_shs_cpu_rx_flows[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_flows, uint, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_flows, "Num flows processed per core");
unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_filter_flows, "Num filtered flows per core");
unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_bytes, ullong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_bytes, "SHS stamp bytes per CPU");
unsigned long long rmnet_shs_cpu_rx_pkts[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_pkts, ullong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_pkts, "SHS stamp total pkts per CPU");
unsigned long long rmnet_shs_cpu_rx_bps[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_bps, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_bps, "SHS stamp enq rate per CPU");
unsigned long long rmnet_shs_cpu_rx_pps[MAX_CPUS];
module_param_array(rmnet_shs_cpu_rx_pps, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_pps, "SHS stamp pkt enq rate per CPU");
unsigned long long rmnet_shs_cpu_qhead_diff[MAX_CPUS];
module_param_array(rmnet_shs_cpu_qhead_diff, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_qhead_diff, "SHS nw stack queue processed diff");
unsigned long long rmnet_shs_cpu_qhead_total[MAX_CPUS];
module_param_array(rmnet_shs_cpu_qhead_total, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_qhead_total, "SHS nw queue processed total");
unsigned long rmnet_shs_flow_hash[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_hash, ulong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_hash, "SHS stamp hash flow");
unsigned long rmnet_shs_flow_proto[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_proto, ulong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_proto, "SHS stamp hash transport protocol");
unsigned long long rmnet_shs_flow_inactive_tsec[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_inactive_tsec, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_inactive_tsec, "SHS stamp inactive flow time");
int rmnet_shs_flow_cpu[MAX_SUPPORTED_FLOWS_DEBUG] = {
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1};
module_param_array(rmnet_shs_flow_cpu, int, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_cpu, "SHS stamp flow processing CPU");
int rmnet_shs_flow_cpu_recommended[MAX_SUPPORTED_FLOWS_DEBUG] = {
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1
};
module_param_array(rmnet_shs_flow_cpu_recommended, int, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_cpu_recommended, "SHS stamp flow proc CPU");
unsigned long long rmnet_shs_flow_rx_bytes[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_bytes, ullong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_bytes, "SHS stamp bytes per flow");
unsigned long long rmnet_shs_flow_rx_pkts[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pkts, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pkts, "SHS stamp total pkts per flow");
unsigned long long rmnet_shs_flow_rx_bps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_bps, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_bps, "SHS stamp enq rate per flow");
unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pps, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
/* Counters for suggestions made by wq */
unsigned long long rmnet_shs_flow_silver_to_gold[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_silver_to_gold, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_silver_to_gold, "SHS Suggest Silver to Gold");
unsigned long long rmnet_shs_flow_gold_to_silver[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_gold_to_silver, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_gold_to_silver, "SHS Suggest Gold to Silver");
unsigned long long rmnet_shs_flow_gold_balance[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_gold_balance, ullong, NULL , 0444);
MODULE_PARM_DESC(rmnet_shs_flow_gold_balance, "SHS Suggest Gold Balance");
unsigned long rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
module_param_array(rmnet_shs_switch_reason, ulong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_switch_reason, "rmnet shs skb core swtich type");
unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
module_param_array(rmnet_shs_flush_reason, ulong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
unsigned int rmnet_shs_byte_store_limit __read_mostly = 30144000;
module_param(rmnet_shs_byte_store_limit, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_byte_store_limit, "Maximum bytes the module will park");
unsigned int rmnet_shs_pkts_store_limit __read_mostly = 24000;
module_param(rmnet_shs_pkts_store_limit, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_pkts_store_limit, "Maximum pkts the module will park");
unsigned int rmnet_shs_max_core_wait __read_mostly = 55;
module_param(rmnet_shs_max_core_wait, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_max_core_wait,
"Max wait module will wait during move to perf core in ms");
unsigned int rmnet_shs_inst_rate_interval __read_mostly = 20;
module_param(rmnet_shs_inst_rate_interval, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_inst_rate_interval,
"Max interval we sample for instant burst prioritizing");
unsigned int rmnet_shs_inst_rate_switch __read_mostly = 0;
module_param(rmnet_shs_inst_rate_switch, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_inst_rate_switch,
"Configurable option to enable rx rate cpu switching");
unsigned int rmnet_shs_fall_back_timer __read_mostly = 1;
module_param(rmnet_shs_fall_back_timer, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
"Option to enable fall back limit for parking");
unsigned int rmnet_shs_cpu_backlog_max_pkts[MAX_CPUS] = {
900, 1100, 1100, 1100, 1100, 1100, 1100, 1100};
module_param_array(rmnet_shs_cpu_backlog_max_pkts, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_backlog_max_pkts,
"Max pkts in backlog prioritizing");
/*
unsigned int rmnet_shs_inst_rate_max_pkts __read_mostly = 2500;
module_param(rmnet_shs_inst_rate_max_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_inst_rate_max_pkts,
"Max pkts in a instant burst interval before prioritizing");
*/
unsigned int rmnet_shs_cpu_inst_rate_max_pkts[MAX_CPUS] = {
3100, 3100, 3100, 3100, 3100, 3100, 3100, 3100};
module_param_array(rmnet_shs_cpu_inst_rate_max_pkts, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_inst_rate_max_pkts, "Max pkts in a burst before prioritizing");
unsigned int rmnet_shs_timeout __read_mostly = 6;
module_param(rmnet_shs_timeout, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_timeout, "Option to configure fall back duration");
unsigned int rmnet_shs_switch_cores __read_mostly = 1;
module_param(rmnet_shs_switch_cores, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_switch_cores, "Switch core upon hitting threshold");
unsigned int rmnet_shs_cpu_max_qdiff[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_qdiff, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_qdiff, "Max queue length seen on each core");
unsigned int rmnet_shs_cpu_ooo_count[MAX_CPUS];
module_param_array(rmnet_shs_cpu_ooo_count, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_ooo_count, "OOO count for each cpu");
unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_coresum, uint, NULL, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen on each core");
unsigned int rmnet_shs_cpu_prio_dur __read_mostly = 3;
module_param(rmnet_shs_cpu_prio_dur, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Staying duration for netrx switch");
unsigned int rmnet_oom_pkt_limit __read_mostly = 5000;
module_param(rmnet_oom_pkt_limit, uint, 0644);
MODULE_PARM_DESC(rmnet_oom_pkt_limit, "Max rmnet pre-backlog");
/* Reserve mask now only indicates the audio reservations we are honoring */
unsigned int rmnet_shs_reserve_mask __read_mostly = 0;
module_param(rmnet_shs_reserve_mask, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_reserve_mask, "rmnet shs reserve cpu mask");
/* Halt mask is now a super set of reserve mask */
unsigned int rmnet_shs_halt_mask __read_mostly = 0;
module_param(rmnet_shs_halt_mask, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_halt_mask, "rmnet_shs_halt_mask");
unsigned int rmnet_shs_debug __read_mostly;
module_param(rmnet_shs_debug, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_debug, "rmnet_shs_debug");
unsigned int rmnet_shs_stats_enabled __read_mostly = 1;
module_param(rmnet_shs_stats_enabled, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_stats_enabled, "Enable Disable stats collection");
unsigned long rmnet_shs_mid_err[RMNET_SHS_MID_ERR_MAX];
module_param_array(rmnet_shs_mid_err, ulong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_mid_err, "rmnet shs mid error type");
unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
module_param_array(rmnet_shs_crit_err, ulong, NULL, 0444);
MODULE_PARM_DESC(rmnet_shs_crit_err, "rmnet shs critical error type");
unsigned int rmnet_shs_ll_flow_cpu = DEF_LL_CORE;
module_param(rmnet_shs_ll_flow_cpu, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_ll_flow_cpu, "Current LL flow cpu ");
unsigned int rmnet_shs_ll_phy_cpu = 2;
module_param(rmnet_shs_ll_phy_cpu, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_ll_phy_cpu, "Current LL phy cpu ");
unsigned int rmnet_shs_wq_tick = 0;
module_param(rmnet_shs_wq_tick, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_tick, "rmnet shs wq execution tick");
unsigned int rmnet_shs_pause_count = 0;
module_param(rmnet_shs_pause_count, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_pause_count, "rmnet shs wq pause count");
unsigned int rmnet_shs_restart_count = 0;
module_param(rmnet_shs_restart_count, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_restart_count, "rmnet shs wq restart count");
unsigned int rmnet_shs_reserve_on = 1;
module_param(rmnet_shs_reserve_on, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_reserve_on, "reserve mask feature flag");
unsigned int rmnet_shs_no_sync_off __read_mostly = 0;
module_param(rmnet_shs_no_sync_off, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_no_sync_off, "rmnet no sync feature toggle");
unsigned int rmnet_shs_no_sync_packets = 0;
module_param(rmnet_shs_no_sync_packets, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_no_sync_packets, "rmnet shs async packet count");
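/* A usage sketch (an assumption, not part of the original source): assuming
 * this object builds into a module named rmnet_shs, each parameter above is
 * expected to surface under /sys/module/rmnet_shs/parameters/<name>, readable
 * for the 0444 entries and writable for the 0644 ones, e.g.
 *   cat /sys/module/rmnet_shs/parameters/rmnet_shs_flush_reason
 *   echo 24000 > /sys/module/rmnet_shs/parameters/rmnet_shs_pkts_store_limit
 */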

View File

@@ -1,70 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_shs_wq.h"
#include "rmnet_shs_common.h"
#include "rmnet_shs.h"
#ifndef _RMNET_SHS_MODULES_H_
#define _RMNET_SHS_MODULES_H_
extern unsigned int rmnet_shs_wq_interval_ms;
extern unsigned long rmnet_shs_max_flow_inactivity_sec;
extern unsigned int rmnet_shs_wq_tuning __read_mostly;
extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_rx_flows[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_rx_pkts[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_rx_bps[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_rx_pps[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_qhead_diff[MAX_CPUS];
extern unsigned long long rmnet_shs_cpu_qhead_total[MAX_CPUS];
extern unsigned long rmnet_shs_flow_hash[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long rmnet_shs_flow_proto[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_inactive_tsec[MAX_SUPPORTED_FLOWS_DEBUG];
extern int rmnet_shs_flow_cpu[MAX_SUPPORTED_FLOWS_DEBUG];
extern int rmnet_shs_flow_cpu_recommended[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_rx_bytes[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_rx_pkts[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_rx_bps[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_silver_to_gold[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_gold_to_silver[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long long rmnet_shs_flow_gold_balance[MAX_SUPPORTED_FLOWS_DEBUG];
extern unsigned long rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
extern unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
extern unsigned int rmnet_shs_byte_store_limit;
extern unsigned int rmnet_shs_pkts_store_limit;
extern unsigned int rmnet_shs_max_core_wait;
extern unsigned int rmnet_shs_inst_rate_interval;
extern unsigned int rmnet_shs_inst_rate_switch;
extern unsigned int rmnet_shs_fall_back_timer;
extern unsigned int rmnet_shs_cpu_backlog_max_pkts[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_inst_rate_max_pkts[MAX_CPUS];
extern unsigned int rmnet_shs_timeout;
extern unsigned int rmnet_shs_switch_cores;
extern unsigned int rmnet_shs_cpu_max_qdiff[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_ooo_count[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
extern unsigned int rmnet_shs_cpu_prio_dur;
extern unsigned int rmnet_oom_pkt_limit;
extern unsigned int rmnet_shs_debug;
extern unsigned int rmnet_shs_stats_enabled __read_mostly;
extern unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
extern unsigned long rmnet_shs_mid_err[RMNET_SHS_MID_ERR_MAX];
extern unsigned int rmnet_shs_ll_flow_cpu;
extern unsigned int rmnet_shs_ll_phy_cpu;
extern unsigned int rmnet_shs_halt_mask;
extern unsigned int rmnet_shs_reserve_mask;
extern unsigned int rmnet_shs_wq_tick;
extern unsigned int rmnet_shs_pause_count;
extern unsigned int rmnet_shs_restart_count;
extern unsigned int rmnet_shs_no_sync_packets;
extern unsigned int rmnet_shs_no_sync_off;
extern unsigned int rmnet_shs_reserve_on;
#endif

File diff suppressed because it is too large

View File

@@ -1,375 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_SHS_WQ_H_
#define _RMNET_SHS_WQ_H_
#include "rmnet_shs_config.h"
#include "rmnet_shs.h"
#include <linux/ktime.h>
#define RMNET_SHS_DEBUG 0
#define RMNET_SHS_DEBUG1 0
#define rm_err(fmt, ...) \
do { if (RMNET_SHS_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
#define rm_err1(fmt, ...) \
do { if (RMNET_SHS_DEBUG1) pr_err(fmt, __VA_ARGS__); } while (0)
#define MAX_SUPPORTED_FLOWS_DEBUG 32
#define RMNET_SHS_RX_BPNSEC_TO_BPSEC(x) ((x)*1000000000)
#define RMNET_SHS_SEC_TO_NSEC(x) ((x)*1000000000)
#define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
#define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
#define RMNET_SHS_MSEC_TO_NSC(x) ((x)*1000000 )
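/* Illustrative combination of the helpers above (a sketch, not from the
 * original source): a byte count accumulated over a nanosecond-stamped window
 * of at least one full second can be turned into bits per second as
 *   u64 rx_bps = RMNET_SHS_BYTE_TO_BIT(bytes) / RMNET_SHS_NSEC_TO_SEC(delta_ns);
 */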
#define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
#define RMNET_SHS_WQ_INTERVAL_MS 100
extern struct list_head rmnet_shs_wq_ep_tbl;
/* stores wq and end point details */
struct rmnet_shs_wq_ep_s {
u64 tcp_rx_bps;
u64 udp_rx_bps;
struct list_head ep_list_id;
struct net_device *ep;
int new_lo_core[MAX_CPUS];
int new_hi_core[MAX_CPUS];
u16 default_core_msk;
u16 pri_core_msk;
u16 rps_config_msk;
u8 is_ep_active;
int new_lo_idx;
int new_hi_idx;
int new_lo_max;
int new_hi_max;
};
struct rmnet_shs_wq_ep_list_s {
struct list_head ep_id;
struct rmnet_shs_wq_ep_s ep;
};
/* Types of suggestions made by shs wq */
enum rmnet_shs_wq_suggestion_type {
RMNET_SHS_WQ_SUGG_NONE,
RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD,
RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER,
RMNET_SHS_WQ_SUGG_GOLD_BALANCE,
RMNET_SHS_WQ_SUGG_RMNET_TO_GOLD,
RMNET_SHS_WQ_SUGG_RMNET_TO_SILVER,
RMNET_SHS_WQ_SUGG_LL_FLOW_CORE,
RMNET_SHS_WQ_SUGG_LL_PHY_CORE,
RMNET_SHS_WQ_SUGG_MAX,
};
struct rmnet_shs_wq_hstat_s {
unsigned long int rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_MAX];
struct list_head cpu_node_id;
struct list_head hstat_node_id;
struct rmnet_shs_skbn_s *node; //back pointer to node
ktime_t c_epoch; /*current epoch*/
ktime_t l_epoch; /*last hash update epoch*/
ktime_t inactive_duration;
u64 rx_skb;
u64 rx_bytes;
u64 rx_coal_skb;
u64 rx_pps; /*pkts per second*/
u64 rx_bps; /*bits per second*/
u64 last_pps;
u64 avg_pps;
u64 avg_segs;
u64 hw_coal_bytes_diff; /* diff of coalescing bytes in HW */
u64 hw_coal_bufsize_diff; /* diff of coalescing buffer size from HW */
u64 last_hw_coal_bytes;
u64 last_hw_coal_bufsize;
u64 hw_coal_bytes;
u64 hw_coal_bufsize;
u64 last_rx_skb;
u64 last_rx_coal_skb;
u64 last_rx_bytes;
u32 last_rx_ll_skb;
u32 rx_ll_skb;
u32 rps_config_msk; /*configured rps mask for net device*/
u32 current_core_msk; /*mask where the current core's bit is set*/
u32 def_core_msk; /* (little cluster) available core mask */
u32 pri_core_msk; /* priority cores availability mask*/
u32 available_core_msk; /* other available cores for this flow*/
u32 hash; /*skb hash*/
u32 bif; /* Bytes in flight */
u32 ack_thresh; /* Quick ack threshold */
int stat_idx; /* internally used for datatop */
u16 suggested_cpu; /* recommended CPU to stamp pkts*/
u16 current_cpu; /* core where the flow is being processed*/
u16 skb_tport_proto;
u8 ll_diff;
u8 mux_id;
u8 in_use;
u8 is_perm;
u8 is_new_flow;
u8 segs_per_skb; /* segments per skb */
};
struct rmnet_shs_wq_cpu_rx_pkt_q_s {
struct list_head hstat_id;
ktime_t l_epoch; /*last epoch update for this structure*/
u64 last_rx_skbs;
u64 last_rx_bytes;
u64 last_rx_segs;
u64 rx_skbs;
u64 rx_bytes;
u64 rx_segs;
u64 rx_pps; /* pkts per second*/
u64 rx_bps; /*bits per second*/
u64 last_rx_pps; /* pkts per second*/
u64 last_rx_bps; /* bits per second*/
u64 avg_pps;
u64 rx_bps_est; /*estimated bits per second*/
u32 qhead; /* queue head */
u32 last_qhead; /* last queue head */
u32 qhead_diff; /* diff in pp in last tick*/
u32 qhead_start; /* start mark of total pp*/
u32 qhead_total; /* end mark of total pp*/
int flows;
u16 cpu_num;
};
struct rmnet_shs_wq_rx_flow_s {
struct rmnet_shs_wq_cpu_rx_pkt_q_s cpu_list[MAX_CPUS];
ktime_t l_epoch; /*last epoch update for this flow*/
u64 dl_mrk_last_rx_bytes;
u64 dl_mrk_last_rx_pkts;
u64 dl_mrk_rx_bytes; /*rx bytes as observed in DL marker*/
u64 dl_mrk_rx_pkts; /*rx pkts as observed in DL marker*/
u64 dl_mrk_rx_pps; /*rx pkts per sec as observed in DL marker*/
u64 dl_mrk_rx_bps; /*rx bits per sec as observed in DL marker*/
u64 last_rx_skbs;
u64 last_rx_bytes;
u64 last_rx_segs;
u64 last_rx_pps; /*rx pkts per sec*/
u64 last_rx_bps; /*rx bits per sec*/
u64 rx_skbs;
u64 rx_bytes;
u64 rx_segs;
u64 rx_pps; /*rx pkts per sec*/
u64 rx_bps; /*rx bits per sec*/
u64 hw_coal_bytes_diff; /* diff of coalescing bytes in HW */
u64 hw_coal_bufsize_diff; /* diff of coalescing buffer size from HW */
u32 rps_config_msk; /*configured rps mask for net device*/
u32 def_core_msk; /* (little cluster) available core mask */
u32 pri_core_msk; /* priority cores availability mask*/
u32 available_core_msk; /* other available cores for this flow*/
int new_lo_core[MAX_CPUS];
int new_hi_core[MAX_CPUS];
int new_lo_idx;
int new_hi_idx;
int new_lo_max;
int new_hi_max;
int flows;
u8 cpus;
};
struct rmnet_shs_delay_wq_s {
struct delayed_work wq;
};
/* Structures to be used for creating sorted versions of flow and cpu lists */
struct rmnet_shs_wq_cpu_cap_s {
struct list_head cpu_cap_list;
u64 pps_capacity;
u64 avg_pps_capacity;
u64 bps;
u16 cpu_num;
};
struct rmnet_shs_wq_gold_flow_s {
struct list_head gflow_list;
u64 rx_pps;
u64 avg_pps;
u32 hash;
u16 cpu_num;
};
struct rmnet_shs_wq_ll_flow_s {
struct list_head ll_flow_list;
union {
struct iphdr v4hdr;
struct ipv6hdr v6hdr;
} ip_hdr;
union {
struct tcphdr tp;
struct udphdr up;
} trans_hdr;
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u64 avg_segs;
u64 hw_coal_bytes_diff;
u64 hw_coal_bufsize_diff;
u32 hash;
u16 cpu_num;
u16 trans_proto;
u8 mux_id;
u8 ll_pipe;
};
struct rmnet_shs_wq_fflow_s {
struct list_head fflow_list;
union {
struct iphdr v4hdr;
struct ipv6hdr v6hdr;
} ip_hdr;
union {
struct tcphdr tp;
struct udphdr up;
} trans_hdr;
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u64 avg_segs;
u64 hw_coal_bytes_diff;
u64 hw_coal_bufsize_diff;
u32 hash;
u16 cpu_num;
u16 trans_proto;
u8 mux_id;
};
struct rmnet_shs_wq_ss_flow_s {
struct list_head ssflow_list;
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u32 hash;
u32 bif;
u32 ack_thresh;
u16 cpu_num;
};
/* Tracing Definitions */
enum rmnet_shs_wq_trace_func {
RMNET_SHS_WQ_INIT,
RMNET_SHS_WQ_PROCESS_WQ,
RMNET_SHS_WQ_EXIT,
RMNET_SHS_WQ_EP_TBL,
RMNET_SHS_WQ_HSTAT_TBL,
RMNET_SHS_WQ_CPU_HSTAT_TBL,
RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_CPU_STATS,
RMNET_SHS_WQ_TOTAL_STATS,
RMNET_SHS_WQ_SHSUSR,
};
enum rmnet_shs_wq_trace_evt {
RMNET_SHS_WQ_EP_TBL_START,
RMNET_SHS_WQ_EP_TBL_ADD,
RMNET_SHS_WQ_EP_TBL_DEL,
RMNET_SHS_WQ_EP_TBL_CLEANUP,
RMNET_SHS_WQ_EP_TBL_INIT,
RMNET_SHS_WQ_EP_TBL_END,
RMNET_SHS_WQ_HSTAT_TBL_START,
RMNET_SHS_WQ_HSTAT_TBL_ADD,
RMNET_SHS_WQ_HSTAT_TBL_DEL,
RMNET_SHS_WQ_HSTAT_TBL_NODE_RESET,
RMNET_SHS_WQ_HSTAT_TBL_NODE_NEW_REQ,
RMNET_SHS_WQ_HSTAT_TBL_NODE_REUSE,
RMNET_SHS_WQ_HSTAT_TBL_NODE_DYN_ALLOCATE,
RMNET_SHS_WQ_HSTAT_TBL_END,
RMNET_SHS_WQ_CPU_HSTAT_TBL_START,
RMNET_SHS_WQ_CPU_HSTAT_TBL_INIT,
RMNET_SHS_WQ_CPU_HSTAT_TBL_ADD,
RMNET_SHS_WQ_CPU_HSTAT_TBL_MOVE,
RMNET_SHS_WQ_CPU_HSTAT_TBL_DEL,
RMNET_SHS_WQ_CPU_HSTAT_TBL_END,
RMNET_SHS_WQ_FLOW_STATS_START,
RMNET_SHS_WQ_FLOW_STATS_UPDATE_MSK,
RMNET_SHS_WQ_FLOW_STATS_UPDATE_NEW_CPU,
RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
RMNET_SHS_WQ_FLOW_STATS_ERR,
RMNET_SHS_WQ_FLOW_STATS_FLOW_INACTIVE,
RMNET_SHS_WQ_FLOW_STATS_FLOW_INACTIVE_TIMEOUT,
RMNET_SHS_WQ_FLOW_STATS_END,
RMNET_SHS_WQ_CPU_STATS_START,
RMNET_SHS_WQ_CPU_STATS_CURRENT_UTIL,
RMNET_SHS_WQ_CPU_STATS_INC_CPU_FLOW,
RMNET_SHS_WQ_CPU_STATS_DEC_CPU_FLOW,
RMNET_SHS_WQ_CPU_STATS_GET_CPU_FLOW,
RMNET_SHS_WQ_CPU_STATS_GET_MAX_CPU_FLOW,
RMNET_SHS_WQ_CPU_STATS_MAX_FLOW_IN_CLUSTER,
RMNET_SHS_WQ_CPU_STATS_UPDATE,
RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_START,
RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_FIND,
RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_EVAL_CPU,
RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_END,
RMNET_SHS_WQ_CPU_STATS_NEW_FLOW_LIST_LO,
RMNET_SHS_WQ_CPU_STATS_NEW_FLOW_LIST_HI,
RMNET_SHS_WQ_CPU_STATS_END,
RMNET_SHS_WQ_TOTAL_STATS_START,
RMNET_SHS_WQ_TOTAL_STATS_UPDATE,
RMNET_SHS_WQ_TOTAL_STATS_END,
RMNET_SHS_WQ_PROCESS_WQ_START,
RMNET_SHS_WQ_PROCESS_WQ_END,
RMNET_SHS_WQ_PROCESS_WQ_ERR,
RMNET_SHS_WQ_INIT_START,
RMNET_SHS_WQ_INIT_END,
RMNET_SHS_WQ_EXIT_START,
RMNET_SHS_WQ_EXIT_END,
RMNET_SHS_WQ_TRY_PASS,
RMNET_SHS_WQ_TRY_FAIL,
RMNET_SHS_WQ_SHSUSR_SYNC_START,
RMNET_SHS_WQ_SHSUSR_SYNC_END,
RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
};
extern struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
extern struct list_head rmnet_shs_wq_hstat_tbl;
extern struct workqueue_struct *rmnet_shs_wq;
void rmnet_shs_wq_init(void);
void rmnet_shs_wq_exit(void);
void rmnet_shs_wq_restart(void);
void rmnet_shs_wq_pause(void);
void rmnet_shs_update_cfg_mask(void);
void rmnet_shs_wq_refresh_ep_masks(void);
u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk);
void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p);
int rmnet_shs_wq_get_least_utilized_core(u16 core_msk);
int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev);
int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev);
u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu);
void rmnet_shs_wq_inc_cpu_flow(u16 cpu);
void rmnet_shs_wq_dec_cpu_flow(u16 cpu);
void rmnet_shs_hstat_tbl_delete(void);
void rmnet_shs_wq_set_ep_active(struct net_device *dev);
void rmnet_shs_wq_reset_ep_active(struct net_device *dev);
void rmnet_shs_wq_refresh_new_flow_list(void);
int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
u32 sugg_type);
int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb);
int rmnet_shs_wq_set_quickack_thresh(u32 hash_to_set, u32 ack_thresh);
void rmnet_shs_ep_lock_bh(void);
void rmnet_shs_ep_unlock_bh(void);
void rmnet_shs_wq_update_stats(void);
int rmnet_shs_cpu_psb_above_thresh(unsigned cpu_num, unsigned thresh);
#endif /*_RMNET_SHS_WQ_H_*/

View File

@@ -1,831 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_shs_modules.h"
#include "rmnet_shs_common.h"
#include "rmnet_shs_ll.h"
#include "rmnet_shs_wq_genl.h"
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
MODULE_LICENSE("GPL v2");
static struct net *last_net;
static u32 last_snd_portid;
static struct net *msg_last_net;
static u32 msg_last_snd_portid;
uint32_t rmnet_shs_genl_seqnum;
uint32_t rmnet_shs_genl_msg_seqnum;
int rmnet_shs_userspace_connected;
#define RMNET_SHS_GENL_MAX_STR_LEN 255
#define RMNET_SHS_GENL_SEC_TO_NSEC(x) ((x) * 1000000000)
/* Static Functions and Definitions */
static struct nla_policy rmnet_shs_genl_attr_policy[RMNET_SHS_GENL_ATTR_MAX + 1] = {
[RMNET_SHS_GENL_ATTR_INT] = { .type = NLA_S32 },
[RMNET_SHS_GENL_ATTR_SUGG] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_wq_sugg_info)),
[RMNET_SHS_GENL_ATTR_SEG] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_wq_seg_info)),
[RMNET_SHS_GENL_ATTR_FLOW] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_wq_flow_info)),
[RMNET_SHS_GENL_ATTR_QUICKACK] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_wq_quickack_info)),
[RMNET_SHS_GENL_ATTR_STR] = { .type = NLA_NUL_STRING, .len = RMNET_SHS_GENL_MAX_STR_LEN},
[RMNET_SHS_GENL_ATTR_BOOTUP] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_bootup_info)),
};
#define RMNET_SHS_GENL_OP(_cmd, _func) \
{ \
.cmd = _cmd, \
.doit = _func, \
.dumpit = NULL, \
.flags = 0, \
}
static const struct genl_ops rmnet_shs_genl_ops[] = {
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_INIT_SHSUSRD,
rmnet_shs_genl_dma_init),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
rmnet_shs_genl_try_to_move_flow),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
rmnet_shs_genl_set_flow_segmentation),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_MEM_SYNC,
rmnet_shs_genl_mem_sync),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_LL_FLOW,
rmnet_shs_genl_set_flow_ll),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_QUICKACK,
rmnet_shs_genl_set_quickack_thresh),
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_BOOTUP,
rmnet_shs_genl_set_bootup_config),
};
/* Generic Netlink Message Channel policy and ops */
static struct nla_policy rmnet_shs_genl_msg_attr_policy[RMNET_SHS_GENL_ATTR_MAX + 1] = {
[RMNET_SHS_GENL_MSG_ATTR_REQ] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_msg_req)),
[RMNET_SHS_GENL_MSG_ATTR_RESP] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_shs_msg_resp)),
};
static const struct genl_ops rmnet_shs_genl_msg_ops[] = {
RMNET_SHS_GENL_OP(RMNET_SHS_GENL_MSG_WAIT_CMD,
rmnet_shs_genl_msg_req_hdlr),
};
struct genl_family rmnet_shs_genl_family = {
.hdrsize = 0,
.name = RMNET_SHS_GENL_FAMILY_NAME,
.version = RMNET_SHS_GENL_VERSION,
.maxattr = RMNET_SHS_GENL_ATTR_MAX,
.policy = rmnet_shs_genl_attr_policy,
.ops = rmnet_shs_genl_ops,
.n_ops = ARRAY_SIZE(rmnet_shs_genl_ops),
};
struct genl_family rmnet_shs_genl_msg_family = {
.hdrsize = 0,
.name = RMNET_SHS_GENL_MSG_FAMILY_NAME,
.version = RMNET_SHS_GENL_VERSION,
.maxattr = RMNET_SHS_GENL_ATTR_MAX,
.policy = rmnet_shs_genl_msg_attr_policy,
.ops = rmnet_shs_genl_msg_ops,
.n_ops = ARRAY_SIZE(rmnet_shs_genl_msg_ops),
};
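/* Minimal userspace sketch (an assumption; a libnl-genl-3 based client such as
 * shsusrd is presumed): the families registered above are resolved by name
 * before any RMNET_SHS_GENL_CMD_* / RMNET_SHS_GENL_MSG_WAIT_CMD request is
 * sent, e.g.
 *   struct nl_sock *sk = nl_socket_alloc();
 *   genl_connect(sk);
 *   int fam = genl_ctrl_resolve(sk, RMNET_SHS_GENL_FAMILY_NAME);
 */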
int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val)
{
struct sk_buff *skb;
void *msg_head;
int rc;
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
goto out;
msg_head = genlmsg_put(skb, 0, info->snd_seq+1, &rmnet_shs_genl_family,
0, RMNET_SHS_GENL_CMD_INIT_SHSUSRD);
if (msg_head == NULL) {
rc = -ENOMEM;
rm_err("SHS_GNL: FAILED to msg_head %d\n", rc);
nlmsg_free(skb);
goto out;
}
rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
if (rc != 0) {
rm_err("SHS_GNL: FAILED nla_put %d\n", rc);
nlmsg_free(skb);
goto out;
}
genlmsg_end(skb, msg_head);
rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
if (rc != 0)
goto out;
rm_err("SHS_GNL: Successfully sent int %d\n", val);
return 0;
out:
/* No extra free needed here: the earlier failure paths free the skb before
 * jumping to out, and genlmsg_unicast() consumes it even on error.
 */
rm_err("SHS_GNL: FAILED to send int %d\n", val);
return -1;
}
int rmnet_shs_genl_send_int_to_userspace_no_info(int val)
{
struct sk_buff *skb;
void *msg_head;
int rc;
if (last_net == NULL) {
rm_err("SHS_GNL: FAILED to send int %d - last_net is NULL\n",
val);
return -1;
}
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
goto out;
msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
0, RMNET_SHS_GENL_CMD_INIT_SHSUSRD);
if (msg_head == NULL) {
rc = -ENOMEM;
rm_err("SHS_GNL: FAILED to msg_head %d\n", rc);
nlmsg_free(skb);
goto out;
}
rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
if (rc != 0) {
rm_err("SHS_GNL: FAILED nla_put %d\n", rc);
nlmsg_free(skb);
goto out;
}
genlmsg_end(skb, msg_head);
rc = genlmsg_unicast(last_net, skb, last_snd_portid);
if (rc != 0)
goto out;
rm_err("SHS_GNL: Successfully sent int %d\n", val);
return 0;
out:
/* No extra free needed here: the earlier failure paths free the skb before
 * jumping to out, and genlmsg_unicast() consumes it even on error.
 */
rm_err("SHS_GNL: FAILED to send int %d\n", val);
rmnet_shs_userspace_connected = 0;
return -1;
}
int rmnet_shs_genl_send_msg_to_userspace(void)
{
struct sk_buff *skb;
void *msg_head;
int rc;
int val = rmnet_shs_genl_seqnum++;
rm_err("SHS_GNL: Trying to send msg %d\n", val);
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
goto out;
msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
0, RMNET_SHS_GENL_CMD_INIT_SHSUSRD);
if (msg_head == NULL) {
rc = -ENOMEM;
rm_err("SHS_GNL: FAILED to msg_head %d\n", rc);
nlmsg_free(skb);
goto out;
}
rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
if (rc != 0) {
rm_err("SHS_GNL: FAILED nla_put %d\n", rc);
nlmsg_free(skb);
goto out;
}
genlmsg_end(skb, msg_head);
genlmsg_multicast(&rmnet_shs_genl_family, skb, 0, 0, GFP_ATOMIC);
rm_err("SHS_GNL: Successfully sent int %d\n", val);
return 0;
out:
/* No extra free needed here: the earlier failure paths free the skb before
 * jumping to out, and genlmsg_multicast() consumes it on the send path.
 */
rm_err("SHS_GNL: FAILED to send int %d\n", val);
rmnet_shs_userspace_connected = 0;
return -1;
}
/* Currently unused - handles a message from userspace to initialize the shared
 * memory; the memory is initialized automatically by the kernel wq.
 */
int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info)
{
rm_err("%s", "SHS_GNL: rmnet_shs_genl_dma_init: Clear LL");
rmnet_shs_ll_deinit();
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
return 0;
}
int rmnet_shs_genl_set_flow_ll(struct sk_buff *skb_2, struct genl_info *info)
{
struct nlattr *na;
struct rmnet_shs_wq_flow_node *flow_info;
rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_ll");
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
na = info->attrs[RMNET_SHS_GENL_ATTR_FLOW];
if (na) {
/* Dynamically allocating filter/flow info which must be freed */
flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info) {
rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_ll flow info failure");
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
return 0;
}
if (nla_memcpy(&flow_info->info, na, sizeof(flow_info->info)) > 0) {
if (flow_info->info.opcode == RMNET_SHS_LL_OPCODE_ADD)
rmnet_shs_add_llflow(flow_info);
else if (flow_info->info.opcode == RMNET_SHS_LL_OPCODE_DEL)
rmnet_shs_remove_llflow(flow_info);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_SET_RESP_INT);
} else {
rm_err("SHS_GNL: nla_memcpy failed %d\n",
RMNET_SHS_GENL_ATTR_FLOW);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: no info->attrs %d\n",
RMNET_SHS_GENL_ATTR_FLOW);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
return 0;
}
return 0;
}
int rmnet_shs_genl_set_bootup_config(struct sk_buff *skb_2, struct genl_info *info)
{
struct nlattr *na;
struct rmnet_shs_bootup_info bootup_info;
int i;
rm_err("%s %s", "SHS_GNL: ", __func__);
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
na = info->attrs[RMNET_SHS_GENL_ATTR_BOOTUP];
if (na) {
if (nla_memcpy(&bootup_info, na, sizeof(bootup_info)) > 0) {
rmnet_shs_cfg.non_perf_mask = bootup_info.non_perf_mask;
rmnet_shs_cfg.max_s_cores = hweight_long(bootup_info.non_perf_mask);
rmnet_shs_cfg.perf_mask = ~rmnet_shs_cfg.non_perf_mask;
rmnet_shs_cfg.feature_mask = bootup_info.feature_mask;
rmnet_shs_cfg.cpu_freq_boost_val = bootup_info.cpu_freq_boost_val;
/* Requires shsusrd to enable */
if (rmnet_shs_cfg.feature_mask & INST_RX_SWTCH_FEAT) {
rmnet_shs_inst_rate_switch = 1;
}
rm_err("SHS_GNL: bootup req "
"feature_mask = 0x%x non_perfmaxk = 0x%x, perf_mask 0x%x",
bootup_info.feature_mask,
rmnet_shs_cfg.non_perf_mask,
rmnet_shs_cfg.perf_mask);
for (i = 0; i < MAX_CPUS; i++) {
rmnet_shs_cpu_rx_min_pps_thresh[i] = bootup_info.rx_min_pps_thresh[i];
rmnet_shs_cpu_rx_max_pps_thresh[i] = bootup_info.rx_max_pps_thresh[i];
rm_err("SHS_GNL: bootup %i req %llu %llu", i,
rmnet_shs_cpu_rx_min_pps_thresh[i],
rmnet_shs_cpu_rx_max_pps_thresh[i] );
}
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_BOOT_SET_RESP_INT);
} else {
rm_err("SHS_GNL: nla_memcpy failed %d\n",
RMNET_SHS_GENL_ATTR_BOOTUP);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_BOOT_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: no info->attrs %d\n",
RMNET_SHS_GENL_ATTR_BOOTUP);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_BOOT_FAIL_RESP_INT);
return 0;
}
return 0;
}
int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info)
{
struct nlattr *na;
struct rmnet_shs_wq_seg_info seg_info;
int rc = 0;
rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_segmentation");
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
na = info->attrs[RMNET_SHS_GENL_ATTR_SEG];
if (na) {
if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
rm_err("SHS_GNL: recv segmentation req "
"hash_to_set = 0x%x segs_per_skb = %u",
seg_info.hash_to_set,
seg_info.segs_per_skb);
rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
seg_info.segs_per_skb);
if (rc == 1) {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_SET_RESP_INT);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
} else {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
return 0;
}
} else {
rm_err("SHS_GNL: nla_memcpy failed %d\n",
RMNET_SHS_GENL_ATTR_SEG);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: no info->attrs %d\n",
RMNET_SHS_GENL_ATTR_SEG);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_SEG_FAIL_RESP_INT);
return 0;
}
return 0;
}
int rmnet_shs_genl_set_quickack_thresh(struct sk_buff *skb_2, struct genl_info *info)
{
struct nlattr *na;
struct rmnet_shs_wq_quickack_info quickack_info;
int rc = 0;
rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_quickack_thresh");
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
na = info->attrs[RMNET_SHS_GENL_ATTR_QUICKACK];
if (na) {
if (nla_memcpy(&quickack_info, na, sizeof(quickack_info)) > 0) {
rm_err("SHS_GNL: recv quickack req "
"hash_to_set = 0x%x thresh = %u",
quickack_info.hash_to_set,
quickack_info.ack_thresh);
rc = rmnet_shs_wq_set_quickack_thresh(quickack_info.hash_to_set,
quickack_info.ack_thresh);
if (rc == 1) {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_QUICKACK_SET_RESP_INT);
} else {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_QUICKACK_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: nla_memcpy failed %d\n",
RMNET_SHS_GENL_ATTR_QUICKACK);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_QUICKACK_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: no info->attrs %d\n",
RMNET_SHS_GENL_ATTR_QUICKACK);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_QUICKACK_FAIL_RESP_INT);
return 0;
}
return 0;
}
int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info)
{
struct nlattr *na;
struct rmnet_shs_wq_sugg_info sugg_info;
int rc = 0;
rm_err("%s", "SHS_GNL: rmnet_shs_genl_try_to_move_flow");
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
na = info->attrs[RMNET_SHS_GENL_ATTR_SUGG];
if (na) {
if (nla_memcpy(&sugg_info, na, sizeof(sugg_info)) > 0) {
rm_err("SHS_GNL: cur_cpu =%u dest_cpu = %u "
"hash_to_move = 0x%x sugg_type = %u",
sugg_info.cur_cpu,
sugg_info.dest_cpu,
sugg_info.hash_to_move,
sugg_info.sugg_type);
if (sugg_info.dest_cpu >= MAX_CPUS || sugg_info.cur_cpu >= MAX_CPUS) {
rmnet_shs_mid_err[RMNET_SHS_MALFORM_MOVE]++;
rmnet_shs_genl_send_int_to_userspace(info, RMNET_SHS_RMNET_MOVE_FAIL_RESP_INT);
return -1;
}
if (sugg_info.sugg_type == RMNET_SHS_WQ_SUGG_LL_FLOW_CORE) {
rmnet_shs_ll_flow_cpu = sugg_info.dest_cpu;
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
sugg_info.cur_cpu, sugg_info.dest_cpu,
sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
rmnet_shs_genl_send_int_to_userspace(info,RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return 0;
}
if (sugg_info.sugg_type == RMNET_SHS_WQ_SUGG_LL_PHY_CORE) {
rmnet_shs_ll_phy_cpu = sugg_info.dest_cpu;
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
sugg_info.cur_cpu, sugg_info.dest_cpu,
sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
rmnet_shs_genl_send_int_to_userspace(info,RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return 0;
}
if (sugg_info.sugg_type == RMNET_SHS_WQ_SUGG_RMNET_TO_SILVER) {
rmnet_shs_switch_reason[RMNET_SHS_PHY_SWITCH_GOLD_TO_S]++;
/* Only drop to silver if given a silver core and the phy core has not already dropped */
if (!((1 << sugg_info.dest_cpu) & NONPERF_MASK) ||
((1 << rmnet_shs_cfg.phy_tcpu) & NONPERF_MASK )) {
rmnet_shs_genl_send_int_to_userspace(info,RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return -1;
}
/* Don't move back down if the destination core is halted/reserved */
if (((1 << sugg_info.dest_cpu) & rmnet_shs_halt_mask)) {
rmnet_shs_switch_reason[RMNET_SHS_SUGG_R2S_FAIL1]++;
rmnet_shs_genl_send_int_to_userspace(info, RMNET_SHS_RMNET_MOVE_FAIL_RESP_INT);
return -1;
}
rmnet_shs_cfg.phy_tcpu = sugg_info.dest_cpu;
rcu_read_lock();
rmnet_shs_switch_enable();
rcu_read_unlock();
rmnet_shs_genl_send_int_to_userspace(info,RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return 0;
}
if (sugg_info.sugg_type == RMNET_SHS_WQ_SUGG_RMNET_TO_GOLD) {
rmnet_shs_switch_reason[RMNET_SHS_PHY_SWITCH_SILVER_TO_G]++;
/* Only ramp to gold if given a gold core and phy cpu is not already ramped*/
if (!((1 << sugg_info.dest_cpu) & PERF_MASK)) {
rmnet_shs_genl_send_int_to_userspace(info, RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return -1;
}
if (((1 << sugg_info.dest_cpu) & rmnet_shs_halt_mask)) {
rmnet_shs_switch_reason[RMNET_SHS_SUGG_R2G_FAIL1]++;
rmnet_shs_genl_send_int_to_userspace(info, RMNET_SHS_RMNET_MOVE_FAIL_RESP_INT);
return -1;
}
/* If dest is already current cpu exit */
if ((rmnet_shs_cfg.phy_acpu) == sugg_info.dest_cpu &&
(rmnet_shs_cfg.phy_tcpu) == sugg_info.dest_cpu) {
rmnet_shs_genl_send_int_to_userspace(info, RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return 0;
}
if (((1 << sugg_info.dest_cpu) & PERF_MASK) && ((1 << rmnet_shs_cfg.phy_acpu) & PERF_MASK)) {
rmnet_shs_switch_reason[RMNET_SHS_RM2G_G2G_SWITCH]++;
}
rmnet_shs_cfg.phy_tcpu = sugg_info.dest_cpu;
rcu_read_lock();
rmnet_shs_switch_enable();
rcu_read_unlock();
rmnet_shs_genl_send_int_to_userspace(info,RMNET_SHS_RMNET_MOVE_DONE_RESP_INT);
return 0;
}
rc = rmnet_shs_wq_try_to_move_flow(sugg_info.cur_cpu,
sugg_info.dest_cpu,
sugg_info.hash_to_move,
sugg_info.sugg_type);
if (rc == 1) {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_MOVE_PASS_RESP_INT);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
sugg_info.cur_cpu, sugg_info.dest_cpu,
sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
} else {
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_MOVE_FAIL_RESP_INT);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_FAIL,
sugg_info.cur_cpu, sugg_info.dest_cpu,
sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
return 0;
}
} else {
rm_err("SHS_GNL: nla_memcpy failed %d\n",
RMNET_SHS_GENL_ATTR_SUGG);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_MOVE_FAIL_RESP_INT);
return 0;
}
} else {
rm_err("SHS_GNL: no info->attrs %d\n",
RMNET_SHS_GENL_ATTR_SUGG);
rmnet_shs_genl_send_int_to_userspace(info,
RMNET_SHS_MOVE_FAIL_RESP_INT);
return 0;
}
return 0;
}
int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info)
{
rm_err("%s", "SHS_GNL: rmnet_shs_genl_mem_sync");
if (!rmnet_shs_userspace_connected)
rmnet_shs_userspace_connected = 1;
/* TODO: detect when userspace is disconnected. If we don't get
 * a sync message within the next 2 wq ticks, we have been disconnected.
 */
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
if (info == NULL) {
rm_err("%s", "SHS_GNL: an error occured - info is null");
return -1;
}
last_net = genl_info_net(info);
last_snd_portid = info->snd_portid;
rm_err("port_id = %u", last_snd_portid);
return 0;
}
void rmnet_shs_create_ping_boost_msg_resp(uint32_t perf_duration,
struct rmnet_shs_msg_resp *msg_resp)
{
struct rmnet_shs_ping_boost_payload ping_boost_msg;
struct timespec64 time;
if (msg_resp == NULL) {
rm_err("%s", "SHS_MSG_GNL - invalid input");
return;
}
memset(msg_resp, 0x0, sizeof(struct rmnet_shs_msg_resp));
memset(&ping_boost_msg, 0x0, sizeof(ping_boost_msg));
ktime_get_real_ts64(&time);
msg_resp->timestamp = (RMNET_SHS_GENL_SEC_TO_NSEC(time.tv_sec) + time.tv_nsec);
ping_boost_msg.perf_duration = perf_duration;
ping_boost_msg.perf_acq = 1;
/* Copy the ping boost info into the payload of the first msg */
memcpy(&(msg_resp->list[0].payload),
&ping_boost_msg, sizeof(ping_boost_msg));
msg_resp->list[0].msg_type = RMNET_SHS_GENL_PING_BOOST_MSG;
msg_resp->valid = 1;
msg_resp->list_len = 1;
}
void rmnet_shs_create_pause_msg_resp(uint8_t seq,
struct rmnet_shs_msg_resp *msg_resp)
{
struct rmnet_shs_pause_payload pause_msg;
if (msg_resp == NULL) {
rm_err("%s", "SHS_MSG_GNL - invalid input");
return;
}
memset(msg_resp, 0x0, sizeof(struct rmnet_shs_msg_resp));
memset(&pause_msg, 0x0, sizeof(pause_msg));
/* Copy the pause info into the payload of the first msg */
memcpy(&(msg_resp->list[0].payload),
&pause_msg, sizeof(pause_msg));
msg_resp->list[0].msg_type = RMNET_SHS_GENL_TRAFFIC_PAUSE_MSG;
msg_resp->valid = 1;
msg_resp->list_len = 1;
}
void rmnet_shs_create_phy_msg_resp(struct rmnet_shs_msg_resp *msg_resp,
uint8_t ocpu, uint8_t ncpu)
{
struct rmnet_shs_phy_change_payload phy_change_msg;
struct timespec64 time;
if (msg_resp == NULL) {
rm_err("%s", "SHS_MSG_GNL - invalid input");
return;
}
memset(msg_resp, 0x0, sizeof(struct rmnet_shs_msg_resp));
memset(&phy_change_msg, 0x0, sizeof(phy_change_msg));
ktime_get_real_ts64(&time);
msg_resp->timestamp = (RMNET_SHS_GENL_SEC_TO_NSEC(time.tv_sec) + time.tv_nsec);
phy_change_msg.old_cpu = ocpu;
phy_change_msg.new_cpu = ncpu;
/* Copy the phy change info into the payload of the first msg */
memcpy(&(msg_resp->list[0].payload),
&phy_change_msg, sizeof(phy_change_msg));
msg_resp->list[0].msg_type = RMNET_SHS_GENL_PHY_CHANGE_MSG;
msg_resp->valid = 1;
msg_resp->list_len = 1;
}
int rmnet_shs_genl_msg_direct_send_to_userspace(struct rmnet_shs_msg_resp *msg_ptr)
{
struct sk_buff *skb;
void *msg_head;
int rc;
if (msg_last_net == NULL) {
rm_err("%s", "SHS_GNL: FAILED to send msg_last_net is NULL\n");
return -1;
}
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
goto out;
msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_msg_seqnum++,
&rmnet_shs_genl_msg_family,
0, RMNET_SHS_GENL_MSG_WAIT_CMD);
if (msg_head == NULL) {
rc = -ENOMEM;
rm_err("SHS_GNL: FAILED to msg_head %d\n", rc);
nlmsg_free(skb);
goto out;
}
rc = nla_put(skb, RMNET_SHS_GENL_MSG_ATTR_RESP,
sizeof(struct rmnet_shs_msg_resp),
msg_ptr);
if (rc != 0) {
rm_err("SHS_GNL: FAILED nla_put %d\n", rc);
nlmsg_free(skb);
goto out;
}
genlmsg_end(skb, msg_head);
rc = genlmsg_unicast(msg_last_net, skb, msg_last_snd_portid);
if (rc != 0)
goto out;
rm_err("SHS_MSG_GNL: Successfully sent msg %d\n",
rmnet_shs_genl_msg_seqnum);
return 0;
out:
rm_err("%s", "SHS_GNL: FAILED to send to msg channel\n");
return -1;
}
/* Handler for message channel to shsusrd */
int rmnet_shs_genl_msg_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info)
{
rm_err("%s", "SHS_GNL_MSG: rmnet_shs_genl_msg_req");
if (!rmnet_shs_userspace_connected) {
rm_err("%s", "SHS_GNL_MSG: error: userspace not connected");
return -1;
}
if (info == NULL) {
rm_err("%s", "SHS_GNL_MSG: error: info is null");
return -1;
}
msg_last_net = genl_info_net(info);
msg_last_snd_portid = info->snd_portid;
rm_err("msg_port_id = %u", msg_last_snd_portid);
return 0;
}
/* register new generic netlink family */
int rmnet_shs_wq_genl_init(void)
{
int ret;
rmnet_shs_userspace_connected = 0;
ret = genl_register_family(&rmnet_shs_genl_family);
if (ret != 0) {
rm_err("SHS_GNL: register family failed: %i", ret);
genl_unregister_family(&rmnet_shs_genl_family);
return -1;
}
rm_err("SHS_GNL: successfully registered generic netlink family: %s",
RMNET_SHS_GENL_FAMILY_NAME);
ret = genl_register_family(&rmnet_shs_genl_msg_family);
if (ret != 0) {
rm_err("SHS_MSG_GNL: register family failed: %i", ret);
genl_unregister_family(&rmnet_shs_genl_msg_family);
} else {
rm_err("SHS_MSG_GNL: successfully registered generic netlink family: %s",
RMNET_SHS_GENL_MSG_FAMILY_NAME);
}
return 0;
}
/* Unregister the generic netlink family */
int rmnet_shs_wq_genl_deinit(void)
{
int ret;
rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
ret = genl_unregister_family(&rmnet_shs_genl_family);
if (ret != 0) {
rm_err("SHS_GNL: unregister family failed: %i\n",ret);
}
rmnet_shs_userspace_connected = 0;
ret = genl_unregister_family(&rmnet_shs_genl_msg_family);
if (ret != 0) {
rm_err("SHS_GNL: unregister family failed: %i\n", ret);
}
return 0;
}

View File

@@ -1,220 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "rmnet_shs.h"
#ifndef _RMNET_SHS_WQ_GENL_H_
#define _RMNET_SHS_WQ_GENL_H_
#include <net/genetlink.h>
/* Generic Netlink Definitions */
#define RMNET_SHS_GENL_VERSION 1
#define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
#define RMNET_SHS_MOVE_FAIL_RESP_INT 720
#define RMNET_SHS_MOVE_PASS_RESP_INT 727
#define RMNET_SHS_SYNC_RESP_INT 828
#define RMNET_SHS_SYNC_RESP_INT_LPM_DISABLE 829
#define RMNET_SHS_RMNET_MOVE_DONE_RESP_INT 300
#define RMNET_SHS_RMNET_MOVE_FAIL_RESP_INT 301
#define RMNET_SHS_RMNET_DMA_RESP_INT 400
#define RMNET_SHS_SEG_FAIL_RESP_INT 920
#define RMNET_SHS_SEG_SET_RESP_INT 929
#define RMNET_SHS_BOOT_FAIL_RESP_INT 530
#define RMNET_SHS_BOOT_SET_RESP_INT 539
#define RMNET_SHS_QUICKACK_FAIL_RESP_INT 930
#define RMNET_SHS_QUICKACK_SET_RESP_INT 931
#define RMNET_SHS_SYNC_WQ_EXIT 42
#define MAXCPU 8
extern int rmnet_shs_userspace_connected;
enum {
RMNET_SHS_GENL_CMD_UNSPEC,
RMNET_SHS_GENL_CMD_INIT_SHSUSRD,
RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
RMNET_SHS_GENL_CMD_MEM_SYNC,
RMNET_SHS_GENL_CMD_LL_FLOW,
RMNET_SHS_GENL_CMD_QUICKACK,
RMNET_SHS_GENL_CMD_BOOTUP,
__RMNET_SHS_GENL_CMD_MAX,
};
enum {
RMNET_SHS_GENL_ATTR_UNSPEC,
RMNET_SHS_GENL_ATTR_STR,
RMNET_SHS_GENL_ATTR_INT,
RMNET_SHS_GENL_ATTR_SUGG,
RMNET_SHS_GENL_ATTR_SEG,
RMNET_SHS_GENL_ATTR_FLOW,
RMNET_SHS_GENL_ATTR_QUICKACK,
RMNET_SHS_GENL_ATTR_BOOTUP,
__RMNET_SHS_GENL_ATTR_MAX,
};
#define RMNET_SHS_GENL_ATTR_MAX (__RMNET_SHS_GENL_ATTR_MAX - 1)
struct rmnet_shs_bootup_info {
uint32_t feature_mask;
uint8_t non_perf_mask;
/*Scaled by 1k on recv */
uint32_t rx_min_pps_thresh[MAXCPU];
uint32_t rx_max_pps_thresh[MAXCPU];
uint32_t cpu_freq_boost_val;
};
struct rmnet_shs_wq_sugg_info {
uint32_t hash_to_move;
uint32_t sugg_type;
uint16_t cur_cpu;
uint16_t dest_cpu;
};
struct rmnet_shs_wq_seg_info {
uint32_t hash_to_set;
uint32_t segs_per_skb;
};
struct rmnet_shs_wq_quickack_info {
uint32_t hash_to_set;
uint32_t ack_thresh;
};
struct rmnet_shs_phy_change_payload {
uint8_t old_cpu;
uint8_t new_cpu;
};
/* rmnet_shs to shsusrd message channel */
#define RMNET_SHS_GENL_MSG_FAMILY_NAME "RMNET_SHS_MSG"
/* Command Types */
enum {
RMNET_SHS_GENL_MSG_CMD_UNSPEC,
RMNET_SHS_GENL_MSG_WAIT_CMD,
__RMNET_SHS_GENL_MSG_CMD_MAX,
};
/* Attribute Types */
enum {
RMNET_SHS_GENL_MSG_ATTR_UNSPEC,
RMNET_SHS_GENL_MSG_ATTR_REQ,
RMNET_SHS_GENL_MSG_ATTR_RESP,
__RMNET_SHS_GENL_MSG_ATTR_MAX,
};
#define RMNET_SHS_MSG_PAYLOAD_SIZE (98)
#define RMNET_SHS_GENL_MSG_MAX (1)
struct rmnet_shs_ping_boost_payload {
uint32_t perf_duration; /* Duration to acquire perf lock */
uint8_t perf_acq; /* Set to 1 to acquire */
};
struct rmnet_shs_pause_payload {
uint8_t seq; /* Pause message sequence number */
};
enum {
RMNET_SHS_GENL_MSG_NOP = 0, /* 0 = No-operation */
RMNET_SHS_GENL_PING_BOOST_MSG = 1, /* 1 = Ping boost request */
RMNET_SHS_GENL_PHY_CHANGE_MSG = 2, /* 2 = Phy change request */
RMNET_SHS_GENL_TRAFFIC_PAUSE_MSG = 3, /* 3 = Traffic pause request */
};
struct rmnet_shs_msg_info {
char payload[RMNET_SHS_MSG_PAYLOAD_SIZE];
uint16_t msg_type;
};
struct rmnet_shs_msg_req {
int valid;
};
struct rmnet_shs_msg_resp {
struct rmnet_shs_msg_info list[RMNET_SHS_GENL_MSG_MAX];
uint64_t timestamp;
uint16_t list_len;
uint8_t valid;
};
struct rmnet_shs_wq_flow_info {
union {
__be32 daddr;
struct in6_addr v6_daddr;
} dest_ip_addr;
union {
__be32 saddr;
struct in6_addr v6_saddr;
} src_ip_addr;
u16 src_port;
u16 src_port_valid;
u16 dest_port;
u16 dest_port_valid;
u8 src_addr_valid;
u8 dest_addr_valid;
u8 proto;
u8 proto_valid;
u8 ip_version;
u8 timeout;
u8 seq;
u8 opcode;
};
/* Opcodes for low latency flow add/delete requests */
enum rmnet_shs_ll_flow_opcode {
RMNET_SHS_LL_OPCODE_DEL,
RMNET_SHS_LL_OPCODE_ADD,
RMNET_SHS_LL_OPCODE_MAX,
};
struct rmnet_shs_wq_flow_node {
struct list_head filter_head;
struct hlist_node list;
struct rmnet_shs_wq_flow_info info;
};
/* Function Prototypes */
int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_set_flow_ll(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_set_quickack_thresh(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_set_bootup_config(struct sk_buff *skb_2, struct genl_info *info);
int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val);
int rmnet_shs_genl_send_int_to_userspace_no_info(int val);
int rmnet_shs_genl_send_msg_to_userspace(void);
/* rmnet_shs to shsusrd messaging functionality */
void rmnet_shs_create_ping_boost_msg_resp(uint32_t perf_duration,
struct rmnet_shs_msg_resp *msg_resp);
void rmnet_shs_create_pause_msg_resp(uint8_t seq,
struct rmnet_shs_msg_resp *msg_resp);
int rmnet_shs_genl_msg_direct_send_to_userspace(struct rmnet_shs_msg_resp *msg_ptr);
/* Handler for message channel to shsusrd */
int rmnet_shs_genl_msg_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info);
void rmnet_shs_create_phy_msg_resp(struct rmnet_shs_msg_resp *msg_resp,
uint8_t ocpu, uint8_t ncpu);
int rmnet_shs_wq_genl_init(void);
int rmnet_shs_wq_genl_deinit(void);
#endif /*_RMNET_SHS_WQ_GENL_H_*/

File diff suppressed because it is too large

View File

@@ -1,180 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RMNET_SHS_WQ_MEM_H_
#define _RMNET_SHS_WQ_MEM_H_
#include "rmnet_shs.h"
#include "rmnet_shs_config.h"
/* Shared memory files */
#define RMNET_SHS_PROC_DIR "shs"
#define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
#define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
#define RMNET_SHS_PROC_FFLOWS "rmnet_shs_fflows"
#define RMNET_SHS_PROC_LL_FLOWS "rmnet_shs_ll_flows"
#define RMNET_SHS_PROC_NETDEV "rmnet_shs_netdev"
#define RMNET_SHS_NUM_TOP_FFLOWS (30)
#define RMNET_SHS_NUM_TOP_LL_FLOWS (RMNET_SHS_NUM_TOP_FFLOWS)
#define RMNET_SHS_MAX_USRFLOWS (100)
#define RMNET_SHS_MAX_NETDEVS (40)
#define RMNET_SHS_IFNAMSIZ (16)
#define RMNET_SHS_READ_VAL (0)
/* NOTE: Make sure these structs fit in one page */
/* 26 bytes * 8 max cpus = 208 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
u64 pps_capacity;
u64 avg_pps_capacity;
u64 bps;
u16 cpu_num;
u8 perf_mask;
};
struct __attribute__((__packed__)) rmnet_shs_wq_additional_stats_s {
/* Stats from include/net/netns/ipv4.h => struct netns_ipv4 */
int ipv4_tcp_rmem[3]; /* init_net.ipv4.sysctl_tcp_rmem[] */
int ipv4_tcp_wmem[3]; /* init_net.ipv4.sysctl_tcp_wmem[] */
};
/* 30 bytes * 128 max = 3840 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u32 hash;
u16 cpu_num;
};
/* 38 bytes * 100 max = 3800 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u32 hash;
u32 bif; /* Bytes in flight */
u32 ack_thresh;
u16 cpu_num;
};
/* 30 max < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_ll_flows_usr_s {
union {
struct iphdr v4hdr;
struct ipv6hdr v6hdr;
} ip_hdr;
union {
struct tcphdr tp;
struct udphdr up;
} trans_hdr;
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u64 avg_segs;
u64 hw_coal_bytes_diff;
u64 hw_coal_bufsize_diff;
u32 hash;
u16 cpu_num;
u16 trans_proto;
u8 mux_id;
u8 ll_pipe;
};
/* 30 max < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_fflows_usr_s {
union {
struct iphdr v4hdr;
struct ipv6hdr v6hdr;
} ip_hdr;
union {
struct tcphdr tp;
struct udphdr up;
} trans_hdr;
u64 rx_pps;
u64 avg_pps;
u64 rx_bps;
u64 avg_segs;
u64 hw_coal_bytes_diff;
u64 hw_coal_bufsize_diff;
u32 hash;
u16 cpu_num;
u16 trans_proto;
u8 mux_id;
};
/* 16 + 8*10 + 1 = 97 bytes, 97*40 netdev = 3880 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
char name[RMNET_SHS_IFNAMSIZ];
u64 coal_ip_miss;
u64 hw_evict;
u64 coal_rx_pkts;
u64 coal_tcp;
u64 coal_tcp_bytes;
u64 coal_udp;
u64 coal_udp_bytes;
u64 udp_rx_bps;
u64 tcp_rx_bps;
u64 pb_marker_seq;
u8 mux_id;
};
extern struct list_head gflows;
extern struct list_head ssflows;
extern struct list_head fflows;
extern struct list_head ll_flows;
extern struct list_head cpu_caps;
/* Buffer size for read and write syscalls */
enum {RMNET_SHS_BUFFER_SIZE = 4096};
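/* As the per-struct size notes above indicate, each cached table (cpu caps,
 * gold/ss/filter/LL flows, netdevs) is kept within a single
 * RMNET_SHS_BUFFER_SIZE (4096 byte) page, so one read of the corresponding
 * RMNET_SHS_PROC_* entry can presumably return a complete snapshot.
 */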
struct rmnet_shs_mmap_info {
char *data;
refcount_t refcnt;
};
/* Function Definitions */
void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *ss_flows);
void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *gold_flows);
void rmnet_shs_wq_fflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *fflows);
void rmnet_shs_wq_ll_flow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
struct list_head *ll_flows);
void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows);
void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows);
void rmnet_shs_wq_cleanup_fflow_list(struct list_head *fflows);
void rmnet_shs_wq_cleanup_ll_flow_list(struct list_head *ll_flows);
void rmnet_shs_wq_cpu_caps_list_add(
struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
struct list_head *cpu_caps);
void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps);
void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
void rmnet_shs_wq_mem_update_cached_sorted_fflows(struct list_head *fflows);
void rmnet_shs_wq_mem_update_cached_sorted_ll_flows(struct list_head *ll_flows);
void rmnet_shs_wq_mem_update_cached_netdevs(void);
void rmnet_shs_wq_mem_init(void);
void rmnet_shs_wq_mem_deinit(void);
#endif /*_RMNET_SHS_WQ_MEM_H_*/

View File

@@ -1,129 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN handler framework */
#ifndef __RMNET_WLAN_H__
#define __RMNET_WLAN_H__
#include <linux/types.h>
#include <net/genetlink.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
struct rmnet_wlan_tuple {
union {
__be16 port;
__be32 spi_val;
};
u8 ip_proto;
u8 trans_proto;
};
struct rmnet_wlan_fwd_info {
struct net_device *fwd_dev;
union {
__be32 v4_addr;
struct in6_addr v6_addr;
};
u8 ip_proto;
u8 net_type;
};
struct rmnet_wlan_fwd_info_node {
struct hlist_node hash;
struct rcu_head rcu;
struct notifier_block nb;
char dev_name[IFNAMSIZ];
struct rmnet_wlan_fwd_info fwd;
};
/* Low Latency Address Info Structure */
struct rmnet_wlan_ll_tuple {
union {
__be32 v4_saddr;
struct in6_addr v6_saddr;
};
union {
__be32 v4_daddr;
struct in6_addr v6_daddr;
};
__be16 sport;
__be16 dport;
u8 ip_proto;
};
enum {
DATA_PATH_PROXY_NET_WLAN,
DATA_PATH_PROXY_NET_WWAN,
DATA_PATH_PROXY_NET_LBO,
__DATA_PATH_PROXY_NET_MAX,
};
rx_handler_result_t rmnet_wlan_rx_handler(struct sk_buff **pskb);
/* TCP clamping API. Clamps the MSS to 1140 for packets matching the TCP flags */
void rmnet_wlan_tcp_mss_clamp(struct sk_buff *skb, u32 tcp_flags);
/* Pass on an SKB to a FWD device */
int rmnet_wlan_deliver_skb(struct sk_buff *skb,
struct rmnet_wlan_fwd_info *fwd_info);
/* Is this tuple present in our list? */
bool rmnet_wlan_tuple_present(struct rmnet_wlan_tuple *tuple);
/* Tuple add/delete interface */
int rmnet_wlan_add_tuples(struct rmnet_wlan_tuple *tuples, u32 tuple_count,
struct genl_info *info);
int rmnet_wlan_del_tuples(struct rmnet_wlan_tuple *tuples, u32 tuple_count,
struct genl_info *info);
int rmnet_wlan_get_tuples(struct sk_buff **pskb, struct genl_family *fam,
struct genl_info *info);
/* Device interface */
int rmnet_wlan_set_device(char *dev_name, struct genl_info *info);
int rmnet_wlan_unset_device(char *dev_name, struct genl_info *info);
int rmnet_wwan_set_device(char *dev_name, struct genl_info *info);
int rmnet_wwan_unset_device(char *dev_name, struct genl_info *info);
/* Forwarding information interface */
int rmnet_wlan_add_fwd_info(struct rmnet_wlan_fwd_info *fwd_info,
struct genl_info *info);
int rmnet_wlan_del_fwd_info(struct rmnet_wlan_fwd_info *fwd_info,
struct genl_info *info);
/* UDP Encap interface */
int rmnet_wlan_set_encap_port(__be16 port, struct genl_info *info);
int rmnet_wlan_unset_encap_port(__be16 port, struct genl_info *info);
bool rmnet_wlan_udp_encap_check(struct sk_buff *skb,
struct rmnet_wlan_tuple *tuple,
int ip_len);
int rmnet_wlan_act_encap_port_pass_through(__be16 port, struct genl_info *info);
int rmnet_wlan_act_encap_port_drop(__be16 port, struct genl_info *info);
bool rmnet_wlan_udp_encap_drop_check(struct rmnet_wlan_tuple *tuple);
/* Pull the plug */
int rmnet_wlan_reset(void);
/* Module teardown */
void rmnet_wlan_deinit(void);
char *rmnet_wlan_get_dev(void);
char *rmnet_wwan_get_dev(void);
struct rmnet_wlan_fwd_info_node *rmnet_wlan_fwd_info_find(struct rmnet_wlan_fwd_info *info);
/* Low Latency Tuple Management */
int rmnet_wlan_add_ll_tuple(struct rmnet_wlan_ll_tuple *tuple);
int rmnet_wlan_del_ll_tuple(void);
extern struct rmnet_wlan_ll_tuple * rmnet_wlan_ll_tuple_cache;
int rmnet_wlan_strlcmp(const char *string1, const char *string2,
size_t limit_bytes);
#endif

View File

@@ -1,793 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN connection management framework */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/netfilter/nf_conntrack.h>
#include "rmnet_module.h"
#include "rmnet_wlan.h"
#include "rmnet_wlan_connection.h"
#include "rmnet_wlan_stats.h"
/* Time to hold connection entries, in ms. 2 seconds currently */
#define RMNET_WLAN_CONNECTION_TIMEOUT (2000)
/* How often to run the cleaning workqueue while connection info is present,
* in ms.
*/
#define RMNET_WLAN_CONNECTION_WQ_INTERVAL (500)
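/* Taken together (see the cleanup work below): while any entries exist, the
 * cleaning work reschedules itself every 500 ms and removes nodes whose
 * timestamp is more than 2000 ms old.
 */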
#define RMNET_WLAN_CONNECTION_BKTS (16)
#define RMNET_WLAN_CONNECTION_HASH_BITS \
(const_ilog2(RMNET_WLAN_CONNECTION_BKTS))
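/* With RMNET_WLAN_CONNECTION_BKTS = 16, const_ilog2(16) evaluates to 4, so the
 * hashtable below is declared with 4 hash bits, i.e. 16 buckets.
 */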
struct rmnet_wlan_connection_node {
struct hlist_node hash;
struct rcu_head rcu;
struct rmnet_wlan_connection_info info;
struct rmnet_wlan_fwd_info *fwd;
unsigned long ts;
bool dead;
};
struct rmnet_wlan_connection_work_struct {
struct delayed_work ws;
bool force_clean;
};
/* spinlock for connection hashtable protection */
static DEFINE_SPINLOCK(rmnet_wlan_connection_lock);
static DEFINE_HASHTABLE(rmnet_wlan_connection_hash,
RMNET_WLAN_CONNECTION_HASH_BITS);
/* The number of connection objects present in the hash table */
static u32 rmnet_wlan_connection_hash_size;
/* Periodic cleaning work struct for the hashtable */
static struct rmnet_wlan_connection_work_struct rmnet_wlan_connection_work;
static bool
rmnet_wlan_connection_info_match(struct rmnet_wlan_connection_info *i1,
struct rmnet_wlan_connection_info *i2)
{
if (i1->ip_proto != i2->ip_proto)
return false;
if (i1->ip_proto == 4)
return i1->v4_saddr == i2->v4_saddr &&
i1->v4_daddr == i2->v4_daddr;
return !ipv6_addr_cmp(&i1->v6_saddr, &i2->v6_saddr) &&
!ipv6_addr_cmp(&i1->v6_daddr, &i2->v6_daddr);
}
static bool
rmnet_wlan_connection_node_expired(struct rmnet_wlan_connection_node *node,
unsigned long ts)
{
unsigned long timeout;
timeout = msecs_to_jiffies(RMNET_WLAN_CONNECTION_TIMEOUT);
if (ts - node->ts > timeout)
return true;
return false;
}
static bool rmnet_wlan_connection_hash_clean(bool force)
{
struct rmnet_wlan_connection_node *node;
struct hlist_node *tmp;
unsigned long ts;
int bkt;
ts = jiffies;
hash_for_each_safe(rmnet_wlan_connection_hash, bkt, tmp, node, hash) {
if (node->dead)
/* Node is already removed, but RCU grace period has
* not yet expired.
*/
continue;
if (force || rmnet_wlan_connection_node_expired(node, ts)) {
node->dead = true;
hash_del_rcu(&node->hash);
kfree_rcu(node, rcu);
rmnet_wlan_connection_hash_size--;
}
}
return !!rmnet_wlan_connection_hash_size;
}
static void rmnet_wlan_connection_work_process(struct work_struct *ws)
{
struct rmnet_wlan_connection_work_struct *conn_work;
unsigned long flags;
bool should_resched;
conn_work = container_of(to_delayed_work(ws),
struct rmnet_wlan_connection_work_struct,
ws);
spin_lock_irqsave(&rmnet_wlan_connection_lock, flags);
should_resched =
rmnet_wlan_connection_hash_clean(conn_work->force_clean);
if (should_resched) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_WLAN_CONNECTION_WQ_INTERVAL);
schedule_delayed_work(&conn_work->ws, delay);
}
spin_unlock_irqrestore(&rmnet_wlan_connection_lock, flags);
}
static rx_handler_result_t rmnet_wlan_receive_skb(struct sk_buff *skb, uint8_t network_type)
{
/* Only reverse rmnet packets should arrive in this function and match the check */
if (skb_is_nonlinear(skb) && !skb_headlen(skb)) {
int header_size = 0;
if (skb->protocol == htons(ETH_P_IP)) {
header_size = sizeof(struct iphdr);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
header_size = sizeof(struct ipv6hdr);
} else {
rmnet_wlan_forward_stats_update(RMNET_F_S_PULL_PROTO_MISMATCH);
goto drop;
}
/* Headroom is already reserved in rmnet core */
if (!__pskb_pull_tail(skb, header_size)) {
rmnet_wlan_forward_stats_update(RMNET_F_S_PULL_FAILURE);
goto drop;
} else {
skb_reset_network_header(skb);
rmnet_wlan_forward_stats_update(RMNET_F_S_PULL_SUCCESS);
}
}
if (skb->dev && (skb->protocol == htons(ETH_P_IP)) &&
network_type == DATA_PATH_PROXY_NET_LBO) {
struct iphdr *iph, __iph;
struct net_device *wdev = NULL;
struct flowi4 fl4 = {};
struct rtable *rt;
struct neighbour *n;
int err = 0;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_HDRP_FAIL);
goto drop;
}
wdev = dev_get_by_name_rcu(&init_net, rmnet_wlan_get_dev());
if (!wdev) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_DEV_GET_FAIL);
goto drop;
}
skb->dev = wdev;
memcpy(&fl4.saddr, &iph->saddr, sizeof(__be32));
memcpy(&fl4.daddr, &iph->daddr, sizeof(__be32));
fl4.flowi4_oif = wdev->ifindex;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(&init_net, &fl4);
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_ROUTE_FAIL);
goto drop;
}
n = dst_neigh_lookup(&rt->dst, &fl4.daddr);
ip_rt_put(rt);
if (!n) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_NEIGH_LOOKUP_FAIL);
goto drop;
}
if (n->dev != skb->dev || !n->dev->header_ops) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_HARD_HEADER_FAIL);
neigh_release(n);
goto drop;
}
err = neigh_resolve_output(n, skb);
neigh_release(n);
if (likely(err == NET_XMIT_SUCCESS || err == NET_XMIT_CN)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_XMIT_SUCCESS);
} else {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_XMIT_DROP);
}
return RX_HANDLER_CONSUMED;
} else if (skb->dev && (skb->protocol == htons(ETH_P_IPV6)) &&
network_type == DATA_PATH_PROXY_NET_LBO) {
struct ipv6hdr *ip6h, __ip6h;
struct net_device *wdev = NULL;
struct flowi6 fl6 = {};
struct neighbour *n;
struct dst_entry *dst;
int err = 0;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_HDRP_FAIL);
goto drop;
}
wdev = dev_get_by_name_rcu(&init_net, rmnet_wlan_get_dev());
if (!wdev) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_DEV_GET_FAIL);
goto drop;
}
skb->dev = wdev;
memcpy(&fl6.saddr, &ip6h->saddr, sizeof(struct in6_addr));
memcpy(&fl6.daddr, &ip6h->daddr, sizeof(struct in6_addr));
fl6.flowi6_oif = wdev->ifindex;
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ipv6_stub->ipv6_dst_lookup_flow(&init_net, NULL, &fl6, NULL);
if (IS_ERR(dst)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_ROUTE_FAIL);
goto drop;
}
n = dst_neigh_lookup(dst, &fl6.daddr);
dst_release(dst);
if (!n) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_NEIGH_LOOKUP_FAIL);
goto drop;
}
if (n->dev != skb->dev || !n->dev->header_ops) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_HARD_HEADER_FAIL);
neigh_release(n);
goto drop;
}
err = neigh_resolve_output(n, skb);
neigh_release(n);
if (likely(err == NET_XMIT_SUCCESS || err == NET_XMIT_CN)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_XMIT_SUCCESS);
} else {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_XMIT_DROP);
}
return RX_HANDLER_CONSUMED;
} else if(skb->dev && (skb->protocol == htons(ETH_P_IP)) &&
network_type == DATA_PATH_PROXY_NET_WWAN) {
/* Use xfrm to route packet to rmnet data */
struct iphdr *iph, __iph;
struct net_device *wdev = NULL;
struct flowi4 fl4 = {};
struct dst_entry *dst_xfrm;
struct rtable *rt;
struct net_device *ddev = NULL;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_R0_IP_HDRP_FAIL);
goto drop;
}
wdev = dev_get_by_name_rcu(&init_net, rmnet_wwan_get_dev());
if (!wdev) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_R0_IP_DEV_GET_FAIL);
goto drop;
}
memcpy(&fl4.daddr, &iph->daddr, sizeof(__be32));
fl4.flowi4_oif = wdev->ifindex;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(&init_net, &fl4);
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_R0_IP_ROUTE_FAIL);
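/* Route lookup on the WWAN device failed; fall back to a route via the
* dummy0 device and retry, then let the xfrm lookup below run against
* that route instead.
*/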
ddev = dev_get_by_name_rcu(&init_net, "dummy0");
if (!ddev) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_R0_IP_DDEV_GET_FAIL);
goto drop;
}
fl4.flowi4_oif = ddev->ifindex;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(&init_net, &fl4);
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IP_DRT_LOOKUP_FAIL);
goto drop;
}
}
memcpy(&fl4.saddr, &iph->saddr, sizeof(__be32));
dst_xfrm = xfrm_lookup(&init_net, &rt->dst, flowi4_to_flowi(&fl4), NULL, 0);
rt = (struct rtable*) dst_xfrm;
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IP_XFRM_LOOKUP_FAIL);
goto drop;
}
skb_dst_set(skb, dst_xfrm);
dst_output(&init_net, NULL, skb);
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IP_DST_OUTPUT_SUCCESS);
return RX_HANDLER_CONSUMED;
} else if(skb->dev && (skb->protocol == htons(ETH_P_IPV6)) &&
network_type == DATA_PATH_PROXY_NET_WWAN) {
/* Use xfrm to route packet to rmnet data */
struct ipv6hdr *ip6h, __ip6h;
struct flowi6 fl6 = {};
struct dst_entry *dst = NULL, *dst_xfrm;
struct rtable *rt;
struct net_device *ddev = NULL;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IPV6_HDRP_FAIL);
goto drop;
}
memcpy(&fl6.saddr, &ip6h->saddr, sizeof(struct in6_addr));
memcpy(&fl6.daddr, &ip6h->daddr, sizeof(struct in6_addr));
dst = ipv6_stub->ipv6_dst_lookup_flow(&init_net, NULL, &fl6, NULL);
if (IS_ERR(dst)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IPV6_DST_LOOKUP_FAIL);
ddev = dev_get_by_name_rcu(&init_net, "dummy0");
if (!ddev) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_R0_IPV6_DDEV_GET_FAIL);
goto drop;
}
fl6.flowi6_oif = ddev->ifindex;
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ipv6_stub->ipv6_dst_lookup_flow(&init_net, NULL, &fl6, NULL);
if (IS_ERR(dst)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IPV6_DDST_LOOKUP_FAIL);
goto drop;
}
}
dst_xfrm = xfrm_lookup(&init_net, dst, flowi6_to_flowi(&fl6), NULL, 0);
rt = (struct rtable *)dst_xfrm;
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IPV6_XFRM_LOOKUP_FAIL);
goto drop;
}
skb_dst_set(skb, dst_xfrm);
dst_output(&init_net, NULL, skb);
rmnet_wlan_forward_stats_update(RMNET_WWAN_F_S_NON_R0_IPV6_DST_OUTPUT_SUCCESS);
return RX_HANDLER_CONSUMED;
} else if(skb->dev && (skb->protocol == htons(ETH_P_IP)) &&
network_type == DATA_PATH_PROXY_NET_WLAN) {
/* Use xfrm to route packet to rmnet data */
struct iphdr *iph, __iph;
struct flowi4 fl4 = {};
struct net_device *wdev = NULL;
struct dst_entry *dst_xfrm;
struct net_device *ddev = NULL;
struct rtable *rt;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_HDRP_FAIL);
goto drop;
}
wdev = dev_get_by_name_rcu(&init_net, rmnet_wlan_get_dev());
if (!wdev) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_DEV_GET_FAIL);
goto drop;
}
memcpy(&fl4.daddr, &iph->daddr, sizeof(__be32));
fl4.flowi4_oif = wdev->ifindex;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(&init_net, &fl4);
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_RT_LOOKUP_FAIL);
ddev = dev_get_by_name_rcu(&init_net, "dummy0");
if (!ddev) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IP_DDEV_GET_FAIL);
goto drop;
}
fl4.flowi4_oif = ddev->ifindex;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(&init_net, &fl4);
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_DRT_LOOKUP_FAIL);
goto drop;
}
}
memcpy(&fl4.saddr, &iph->saddr, sizeof(__be32));
dst_xfrm = xfrm_lookup(&init_net, &rt->dst, flowi4_to_flowi(&fl4), NULL, 0);
rt = (struct rtable*) dst_xfrm;
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_XFRM_LOOKUP_FAIL);
goto drop;
}
skb_dst_set(skb, dst_xfrm);
dst_output(&init_net, NULL, skb);
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IP_DST_OUTPUT_SUCCESS);
return RX_HANDLER_CONSUMED;
} else if(skb->dev && (skb->protocol == htons(ETH_P_IPV6)) &&
network_type == DATA_PATH_PROXY_NET_WLAN) {
/* Use xfrm to route packet to rmnet data */
struct ipv6hdr *ip6h, __ip6h;
struct flowi6 fl6 = {};
struct dst_entry *dst = NULL, *dst_xfrm;
struct rtable *rt;
struct net_device *ddev = NULL;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IPV6_HDRP_FAIL);
goto drop;
}
memcpy(&fl6.saddr, &ip6h->saddr, sizeof(struct in6_addr));
memcpy(&fl6.daddr, &ip6h->daddr, sizeof(struct in6_addr));
dst = ipv6_stub->ipv6_dst_lookup_flow(&init_net, NULL, &fl6, NULL);
if (IS_ERR(dst)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IPV6_DST_LOOKUP_FAIL);
ddev = dev_get_by_name_rcu(&init_net, "dummy0");
if (!ddev) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_R0_IPV6_DDEV_GET_FAIL);
goto drop;
}
fl6.flowi6_oif = ddev->ifindex;
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ipv6_stub->ipv6_dst_lookup_flow(&init_net, NULL, &fl6, NULL);
if (IS_ERR(dst)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IPV6_DDST_LOOKUP_FAIL);
goto drop;
}
}
dst_xfrm = xfrm_lookup(&init_net, dst, flowi6_to_flowi(&fl6), NULL, 0);
rt = (struct rtable*) dst_xfrm;
if (IS_ERR(rt)) {
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IPV6_XFRM_LOOKUP_FAIL);
goto drop;
}
skb_dst_set(skb, dst_xfrm);
dst_output(&init_net, NULL, skb);
rmnet_wlan_forward_stats_update(RMNET_WLAN_F_S_NON_R0_IPV6_DST_OUTPUT_SUCCESS);
return RX_HANDLER_CONSUMED;
}
drop:
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
static rx_handler_result_t rmnet_wlan_connection_handler(struct sk_buff **pskb)
{
struct rmnet_wlan_connection_info conn = {};
struct rmnet_wlan_connection_node *node;
struct sk_buff *skb = *pskb;
unsigned long flags;
struct rmnet_wlan_fwd_info fwd_info;
struct rmnet_wlan_fwd_info_node *fwd_info_node;
uint8_t network_type = __DATA_PATH_PROXY_NET_MAX;
if (!skb || skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
/* Get the source address and IP type */
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph)
goto out;
fwd_info.v4_addr = iph->saddr;
fwd_info.ip_proto = 4;
} else if(skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
goto out;
memcpy(&fwd_info.v6_addr, &ip6h->saddr, sizeof(fwd_info.v6_addr));
fwd_info.ip_proto = 6;
} else {
goto out;
}
/* Get the registered fwd_node to get the type */
rcu_read_lock();
fwd_info_node = rmnet_wlan_fwd_info_find(&fwd_info);
rcu_read_unlock();
if (!fwd_info_node)
goto out;
network_type = fwd_info_node->fwd.net_type;
/* Invalid network type given */
if(network_type == __DATA_PATH_PROXY_NET_MAX) goto out;
/* replaces raw_before_defrag */
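/* Mark the skb as untracked so conntrack and defrag leave it alone,
* much like a raw-table NOTRACK rule applied before defragmentation.
*/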
if (skb->dev)
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if (!iph)
goto out;
if (iph->protocol == IPPROTO_TCP)
goto clamp;
if (iph->protocol != IPPROTO_ICMP)
goto out;
conn.v4_saddr = iph->saddr;
conn.v4_daddr = iph->daddr;
conn.ip_proto = 4;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
__be16 frag_off;
u8 proto;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if (!ip6h)
goto out;
proto = ip6h->nexthdr;
if (ipv6_skip_exthdr(skb, sizeof(*ip6h), &proto, &frag_off) < 0)
goto out;
if (frag_off && proto == NEXTHDR_FRAGMENT)
/* Heckin' KIIIIILL meeeee... */
goto out;
if (proto == IPPROTO_TCP)
goto clamp;
if (proto != IPPROTO_ICMPV6)
goto out;
memcpy(&conn.v6_saddr, &ip6h->saddr, sizeof(conn.v6_saddr));
memcpy(&conn.v6_daddr, &ip6h->daddr, sizeof(conn.v6_daddr));
conn.ip_proto = 6;
} else {
goto out;
}
rcu_read_lock();
hash_for_each_possible_rcu(rmnet_wlan_connection_hash, node, hash,
conn.v4_daddr) {
if (node->dead)
continue;
if (!rmnet_wlan_connection_info_match(&node->info, &conn))
continue;
/* Match found. You still alive? *poke poke* */
/* Ah, ah, ah, ah~ Stayin' alive, stayin' alive! */
node->ts = jiffies;
rcu_read_unlock();
goto out;
}
rcu_read_unlock();
/* Make a new connection entry */
spin_lock_irqsave(&rmnet_wlan_connection_lock, flags);
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node) {
/* Well, that's unfortunate */
spin_unlock_irqrestore(&rmnet_wlan_connection_lock, flags);
goto out;
}
INIT_HLIST_NODE(&node->hash);
memcpy(&node->info, &conn, sizeof(conn));
node->fwd = &fwd_info_node->fwd;
hash_add_rcu(rmnet_wlan_connection_hash, &node->hash, conn.v4_daddr);
if (!rmnet_wlan_connection_hash_size) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_WLAN_CONNECTION_WQ_INTERVAL);
schedule_delayed_work(&rmnet_wlan_connection_work.ws, delay);
}
rmnet_wlan_connection_hash_size++;
spin_unlock_irqrestore(&rmnet_wlan_connection_lock, flags);
out:
return rmnet_wlan_receive_skb(skb, network_type);
clamp:
/* Clamp any received SYNs */
rmnet_wlan_tcp_mss_clamp(skb, TCP_FLAG_SYN);
return rmnet_wlan_receive_skb(skb, network_type);
}
struct rmnet_wlan_fwd_info *
rmnet_wlan_connection_find(struct rmnet_wlan_connection_info *info)
__must_hold(RCU)
{
struct rmnet_wlan_connection_node *node;
hash_for_each_possible_rcu(rmnet_wlan_connection_hash, node, hash,
info->v4_daddr) {
if (node->dead)
continue;
if (!rmnet_wlan_connection_info_match(&node->info, info))
continue;
return node->fwd;
}
return NULL;
}
void rmnet_wlan_connection_flush(void)
{
/* Purge anything old enough... */
cancel_delayed_work_sync(&rmnet_wlan_connection_work.ws);
rmnet_wlan_connection_work.force_clean = true;
schedule_delayed_work(&rmnet_wlan_connection_work.ws, 0);
/* ... and force remove all the rest. */
cancel_delayed_work_sync(&rmnet_wlan_connection_work.ws);
}
void rmnet_wlan_ll_tuple_match(struct sk_buff *skb)
{
int protocol = -1;
struct rmnet_wlan_ll_tuple * tuple =
rcu_dereference(rmnet_wlan_ll_tuple_cache);
/* Check if something valid is cached */
if (!tuple) return;
/* IPv4 */
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph, __iph;
iph = skb_header_pointer(skb, 0, sizeof(*iph), &__iph);
if(!iph) return;
if(iph->version != tuple->ip_proto &&
iph->saddr != tuple->v4_saddr &&
iph->daddr != tuple->v4_daddr)
return;
protocol = iph->protocol;
/* IPv6 */
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h, __ip6h;
ip6h = skb_header_pointer(skb, 0, sizeof(*ip6h), &__ip6h);
if(!ip6h) return;
if(ip6h->version != tuple->ip_proto &&
ipv6_addr_cmp(&ip6h->saddr, &tuple->v6_saddr) &&
ipv6_addr_cmp(&ip6h->daddr, &tuple->v6_daddr))
return;
protocol = ip6h->nexthdr;
}
/* Check that the protocol is UDP and the ports match */
if(protocol == IPPROTO_UDP) {
if(udp_hdr(skb)->source == tuple->sport &&
udp_hdr(skb)->dest == tuple->dport)
goto tx_priority;
}
return;
tx_priority:
skb->priority = 0x9B6D0100;
rmnet_wlan_stats_update(RMNET_WLAN_STAT_LL_TX);
}
static rx_handler_result_t rmnet_wlan_ingress_rx_handler(struct sk_buff **pskb)
{
struct net_device *device;
if (!pskb || !(*pskb) || !(*pskb)->dev)
return RX_HANDLER_PASS;
device = (*pskb)->dev;
/* Reverse devices this way please */
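/* The 12-byte limit equals strlen("r_rmnet_data"), so this presumably
* matches any r_rmnet_dataN device by name prefix.
*/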
if (!rmnet_wlan_strlcmp(device->name, "r_rmnet_data", 12))
return rmnet_wlan_connection_handler(pskb);
/* CIWLAN goes over here */
if (!rmnet_wlan_strlcmp(device->name, rmnet_wwan_get_dev(), IFNAMSIZ))
return rmnet_wlan_rx_handler(pskb);
/* OH, you're a wlan device you say? Well, what pranksterful prankster
* is naming devices on this logic-forsaken machine...
*/
if (!rmnet_wlan_strlcmp(device->name, rmnet_wlan_get_dev(), IFNAMSIZ))
return rmnet_wlan_rx_handler(pskb);
/* We have no interest in your devices here */
return RX_HANDLER_PASS;
}
static const struct rmnet_module_hook_register_info
rmnet_wlan_module_hooks[] = {
{
.hooknum = RMNET_MODULE_HOOK_WLAN_FLOW_MATCH,
.func = rmnet_wlan_ll_tuple_match,
},
{
.hooknum = RMNET_MODULE_HOOK_WLAN_INGRESS_RX_HANDLER,
.func = rmnet_wlan_ingress_rx_handler,
},
};
void rmnet_wlan_set_hooks(void)
{
rmnet_module_hook_register(rmnet_wlan_module_hooks,
ARRAY_SIZE(rmnet_wlan_module_hooks));
}
void rmnet_wlan_unset_hooks(void)
{
rmnet_module_hook_unregister(rmnet_wlan_module_hooks,
ARRAY_SIZE(rmnet_wlan_module_hooks));
}
int rmnet_wlan_connection_init(void)
{
INIT_DELAYED_WORK(&rmnet_wlan_connection_work.ws,
rmnet_wlan_connection_work_process);
return 0;
}
int rmnet_wlan_connection_deinit(void)
{
rmnet_wlan_connection_flush();
return 0;
}
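One detail worth noting from the handler and lookup above: the hashtable is keyed on conn.v4_daddr for both address families. Because rmnet_wlan_connection_info (declared in rmnet_wlan_connection.h below) stores the v4 and v6 destination addresses in a union, the IPv6 key is simply the leading 32 bits of the destination address. A minimal userspace sketch of that aliasing, using a hypothetical mirror of the union layout rather than the module's own header:

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Hypothetical mirror of the destination-address union in
 * rmnet_wlan_connection_info; not the module's real header.
 */
struct conn_key {
	union {
		uint32_t v4_daddr;
		struct in6_addr v6_daddr;
	};
};

int main(void)
{
	struct conn_key k;

	/* Store an IPv6 destination; the hash key is its leading 32 bits */
	inet_pton(AF_INET6, "2001:db8::1", &k.v6_daddr);
	printf("hash key = 0x%08x\n", ntohl(k.v4_daddr)); /* 0x20010db8 */
	return 0;
}

For IPv4 entries the key is the destination address itself, so both families land in the same buckets without a separate hash function.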

View File

@@ -1,43 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN connection management framework */
#ifndef __RMNET_WLAN_CONNECTION_H__
#define __RMNET_WLAN_CONNECTION_H__
#include <linux/types.h>
#include <linux/in6.h>
#include "rmnet_wlan.h"
struct rmnet_wlan_connection_info {
union {
__be32 v4_saddr;
struct in6_addr v6_saddr;
};
union {
__be32 v4_daddr;
struct in6_addr v6_daddr;
};
u8 ip_proto;
};
/* Lookup */
struct rmnet_wlan_fwd_info *
rmnet_wlan_connection_find(struct rmnet_wlan_connection_info *info);
/* Flush everything */
void rmnet_wlan_connection_flush(void);
/* External Hooks */
void rmnet_wlan_set_hooks(void);
void rmnet_wlan_unset_hooks(void);
/* Setup and teardown interface */
int rmnet_wlan_connection_init(void);
int rmnet_wlan_connection_deinit(void);
#endif

View File

@@ -1,577 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN fragment handler framework */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/hashtable.h>
#include <linux/workqueue.h>
#include "rmnet_wlan.h"
#include "rmnet_wlan_stats.h"
#include "rmnet_wlan_fragment.h"
#define RMNET_WLAN_FRAGMENT_BKTS (16)
#define RMNET_WLAN_FRAGMENT_HASH_BITS (const_ilog2(RMNET_WLAN_FRAGMENT_BKTS))
/* Period to wait after receiving fragmented packet before declaring no more
* fragments are coming. 100 ms, currently.
*/
#define RMNET_WLAN_FRAGMENT_TIMEOUT (100)
/* How often to run the cleaning workqueue while fragments are present, in ms. */
#define RMNET_WLAN_FRAGMENT_WQ_INTERVAL (50)
struct rmnet_wlan_fragment_info {
/* Need both addresses to check fragments */
union {
__be32 v4_saddr;
struct in6_addr v6_saddr;
};
union {
__be32 v4_daddr;
struct in6_addr v6_daddr;
};
__be32 id;
u16 ip_len;
u16 offset;
u8 ip_proto;
};
struct rmnet_wlan_fragment_node {
struct hlist_node hash;
/* Protects the list of queued fragments */
spinlock_t pkt_lock;
struct list_head pkts;
struct rcu_head rcu;
struct rmnet_wlan_fragment_info info;
struct rmnet_wlan_fwd_info *fwd;
unsigned long ts;
bool dead;
};
struct rmnet_wlan_fragment_work_struct {
struct delayed_work ws;
bool force_clean;
};
/* For fragment hashtable protection */
static DEFINE_SPINLOCK(rmnet_wlan_fragment_lock);
static DEFINE_HASHTABLE(rmnet_wlan_fragment_hash,
RMNET_WLAN_FRAGMENT_HASH_BITS);
/* Current size of the hashtable. This is purposely a u64 because some
* places seem to have ways of blasting ridiculous amounts of fragments into
* the XFRM tunnel at once. If overflow happens here (meaning UINT64_MAX logical
* packets that have been fragmented within a single RCU grace period), then
* boy-howdy do we need to have a talk...
*/
static u64 rmnet_wlan_fragment_hash_size;
/* Periodic cleaning work struct for the hashtable */
static struct rmnet_wlan_fragment_work_struct rmnet_wlan_fragment_work;
static int rmnet_wlan_ipv6_find_hdr(const struct sk_buff *skb,
unsigned int *offset, int target,
unsigned short *fragoff, int *flags)
{
unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
bool found;
if (fragoff)
*fragoff = 0;
if (*offset) {
struct ipv6hdr _ip6, *ip6;
ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
if (!ip6 || (ip6->version != 6))
return -EBADMSG;
start = *offset + sizeof(struct ipv6hdr);
nexthdr = ip6->nexthdr;
}
do {
struct ipv6_opt_hdr _hdr, *hp;
unsigned int hdrlen;
found = (nexthdr == target);
if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
if (target < 0 || found)
break;
return -ENOENT;
}
hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
if (!hp)
return -EBADMSG;
if (nexthdr == NEXTHDR_ROUTING) {
struct ipv6_rt_hdr _rh, *rh;
rh = skb_header_pointer(skb, start, sizeof(_rh),
&_rh);
if (!rh)
return -EBADMSG;
if (flags && (*flags & IP6_FH_F_SKIP_RH) &&
rh->segments_left == 0)
found = false;
}
if (nexthdr == NEXTHDR_FRAGMENT) {
unsigned short _frag_off;
__be16 *fp;
if (flags) /* Indicate that this is a fragment */
*flags |= IP6_FH_F_FRAG;
fp = skb_header_pointer(skb,
start+offsetof(struct frag_hdr,
frag_off),
sizeof(_frag_off),
&_frag_off);
if (!fp)
return -EBADMSG;
_frag_off = ntohs(*fp) & ~0x7;
if (_frag_off) {
if (target < 0 &&
((!ipv6_ext_hdr(hp->nexthdr)) ||
hp->nexthdr == NEXTHDR_NONE)) {
if (fragoff)
*fragoff = _frag_off;
return hp->nexthdr;
}
if (!found)
return -ENOENT;
if (fragoff)
*fragoff = _frag_off;
break;
}
hdrlen = 8;
} else if (nexthdr == NEXTHDR_AUTH) {
if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0))
break;
hdrlen = ipv6_authlen(hp);
} else
hdrlen = ipv6_optlen(hp);
if (!found) {
nexthdr = hp->nexthdr;
start += hdrlen;
}
} while (!found);
*offset = start;
return nexthdr;
}
static bool
rmnet_wlan_fragment_node_expired(struct rmnet_wlan_fragment_node *node,
unsigned long ts)
{
unsigned long timeout;
timeout = msecs_to_jiffies(RMNET_WLAN_FRAGMENT_TIMEOUT);
if (ts - node->ts > timeout)
return true;
return false;
}
static void
rmnet_wlan_flush_fragment_node(struct rmnet_wlan_fragment_node *node,
bool in_net_rx)
{
struct rmnet_wlan_fwd_info *info;
int (*rx_func)(struct sk_buff *skb);
struct sk_buff *skb, *tmp;
unsigned long flags;
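/* Flushes from the rx path (in_net_rx) deliver inline via
* netif_receive_skb; flushes from the cleanup workqueue use __netif_rx.
*/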
rx_func = (in_net_rx) ? netif_receive_skb : __netif_rx;
info = node->fwd;
spin_lock_irqsave(&node->pkt_lock, flags);
list_for_each_entry_safe(skb, tmp, &node->pkts, list) {
u32 stat;
list_del(&skb->list);
skb->next = NULL;
skb->prev = NULL;
if (IS_ERR_OR_NULL(info)) {
rx_func(skb);
continue;
}
/* Forward fragment */
if (rmnet_wlan_deliver_skb(skb, info)) {
stat = RMNET_WLAN_STAT_FRAG_FWD_NO_DEV;
rx_func(skb);
} else {
stat = RMNET_WLAN_STAT_FRAG_FWD;
}
rmnet_wlan_stats_update(stat);
}
spin_unlock_irqrestore(&node->pkt_lock, flags);
}
static bool rmnet_wlan_fragment_hash_clean(bool force)
{
struct rmnet_wlan_fragment_node *node;
struct hlist_node *tmp;
unsigned long ts;
int bkt;
ts = jiffies;
hash_for_each_safe(rmnet_wlan_fragment_hash, bkt, tmp, node, hash) {
if (node->dead)
/* Node already marked as removed, but not yet
* purged after an RCU grace period. Skip it.
*/
continue;
if (force || rmnet_wlan_fragment_node_expired(node, ts)) {
node->dead = true;
hash_del_rcu(&node->hash);
/* Flush out any fragments we're holding */
rmnet_wlan_flush_fragment_node(node, false);
kfree_rcu(node, rcu);
rmnet_wlan_stats_update(RMNET_WLAN_STAT_FRAG_EXP);
rmnet_wlan_fragment_hash_size--;
}
}
return !!rmnet_wlan_fragment_hash_size;
}
static void rmnet_wlan_fragment_work_process(struct work_struct *ws)
{
struct rmnet_wlan_fragment_work_struct *fragment_work;
unsigned long flags;
bool should_resched;
fragment_work = container_of(to_delayed_work(ws),
struct rmnet_wlan_fragment_work_struct,
ws);
spin_lock_irqsave(&rmnet_wlan_fragment_lock, flags);
should_resched =
rmnet_wlan_fragment_hash_clean(fragment_work->force_clean);
if (should_resched) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_WLAN_FRAGMENT_WQ_INTERVAL);
schedule_delayed_work(&fragment_work->ws, delay);
}
spin_unlock_irqrestore(&rmnet_wlan_fragment_lock, flags);
}
static bool rmnet_wlan_fragment_match(struct rmnet_wlan_fragment_info *i1,
struct rmnet_wlan_fragment_info *i2)
{
if (i1->ip_proto != i2->ip_proto || i1->id != i2->id)
return false;
if (i1->ip_proto == 4)
return i1->v4_saddr == i2->v4_saddr &&
i1->v4_daddr == i2->v4_daddr;
return !ipv6_addr_cmp(&i1->v6_saddr, &i2->v6_saddr) &&
!ipv6_addr_cmp(&i1->v6_daddr, &i2->v6_daddr);
}
static struct rmnet_wlan_fragment_node *
rmnet_wlan_fragment_find(struct rmnet_wlan_fragment_info *info)
{
struct rmnet_wlan_fragment_node *node;
unsigned long flags;
spin_lock_irqsave(&rmnet_wlan_fragment_lock, flags);
hash_for_each_possible_rcu(rmnet_wlan_fragment_hash, node, hash,
info->id) {
if (node->dead)
continue;
if (rmnet_wlan_fragment_match(info, &node->info))
goto out;
}
/* Time to make one */
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
goto out;
spin_lock_init(&node->pkt_lock);
INIT_LIST_HEAD(&node->pkts);
memcpy(&node->info, info, sizeof(*info));
INIT_HLIST_NODE(&node->hash);
hash_add_rcu(rmnet_wlan_fragment_hash, &node->hash, info->id);
if (!rmnet_wlan_fragment_hash_size) {
unsigned long delay;
delay = msecs_to_jiffies(RMNET_WLAN_FRAGMENT_WQ_INTERVAL);
schedule_delayed_work(&rmnet_wlan_fragment_work.ws, delay);
}
rmnet_wlan_fragment_hash_size++;
out:
spin_unlock_irqrestore(&rmnet_wlan_fragment_lock, flags);
return node;
}
static int rmnet_wlan_fragment_handle(struct sk_buff *skb,
struct rmnet_wlan_tuple *tuple,
struct rmnet_wlan_fragment_info *info,
struct rmnet_wlan_fwd_info *fwd_info)
__must_hold(RCU)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct rmnet_wlan_fragment_node *node;
int ret = 1; /* Pass on by default */
/* Avoid touching any fragments we've already seen when our rx_handler
* has been invoked again after flushing to the network stack.
*/
if (shinfo->tskey) {
/* This is basically unused by the kernel on the RX side, but
* we can play nice and reset it to the default value, now
* that it can't end up back here.
*/
shinfo->tskey = 0;
goto out;
}
rmnet_wlan_stats_update(RMNET_WLAN_STAT_FRAG_RX);
/* Mark this fragment as having been seen by the rx handler */
shinfo->tskey = 1;
/* Check our fragment table */
node = rmnet_wlan_fragment_find(info);
if (!node) {
/* Allocation error */
ret = (-1);
goto out;
}
/* Poke the timestamp, since there are still fragments happening */
node->ts = jiffies;
/* Have we seen the initial frag? */
if (node->fwd) {
if (IS_ERR(node->fwd))
/* We don't need to forward this tuple */
goto out;
/* Forward it to the device we used for the others */
if (!rmnet_wlan_deliver_skb(skb, node->fwd)) {
rmnet_wlan_stats_update(RMNET_WLAN_STAT_FRAG_FWD);
ret = 0;
goto out;
}
rmnet_wlan_stats_update(RMNET_WLAN_STAT_FRAG_FWD_NO_DEV);
goto out;
}
if (info->offset) {
unsigned long flags;
/* Ah, the worst case scenario. The fragments are arriving
* out of order, and we haven't seen the initial fragment to
* determine if we care about this packet or not. We have no
* choice but to hold it.
*/
spin_lock_irqsave(&node->pkt_lock, flags);
list_add_tail(&skb->list, &node->pkts);
spin_unlock_irqrestore(&node->pkt_lock, flags);
ret = 0;
rmnet_wlan_stats_update(RMNET_WLAN_STAT_FRAG_QUEUE);
goto out;
}
/* We have the first fragment. Time to figure out what to do */
if (tuple->trans_proto == IPPROTO_TCP ||
tuple->trans_proto == IPPROTO_UDP) {
struct udphdr *udph, __udph;
udph = skb_header_pointer(skb, info->ip_len, sizeof(*udph), &__udph);
if (!udph) {
rmnet_wlan_stats_update(RMNET_WLAN_STAT_ENCAP_HDRP_FAIL);
goto out;
}
tuple->port = udph->dest;
if (rmnet_wlan_udp_encap_check(skb, tuple, info->ip_len)) {
if (rmnet_wlan_udp_encap_drop_check(tuple)) {
kfree_skb(skb);
ret = 0;
rmnet_wlan_stats_update(RMNET_WLAN_STAT_ENCAP_DROP);
goto out;
}
/* Let the stack handle this packet */
rmnet_wlan_stats_update(RMNET_WLAN_STAT_ENCAP_DELIVER);
goto encap;
}
} else if (tuple->trans_proto == IPPROTO_ESP) {
struct ip_esp_hdr *esph, __esph;
esph = skb_header_pointer(skb, info->ip_len, sizeof(*esph), &__esph);
if (!esph) {
rmnet_wlan_stats_update(RMNET_WLAN_STAT_ENCAP_HDRP_FAIL);
goto out;
}
tuple->spi_val = esph->spi;
}
if (rmnet_wlan_tuple_present(tuple)) {
u32 stat;
/* Match found. Go ahead and pass it on, and store
* this decision for the later fragments.
*/
node->fwd = fwd_info;
if (!rmnet_wlan_deliver_skb(skb, fwd_info)) {
stat = RMNET_WLAN_STAT_FRAG_FWD;
ret = 0;
} else {
stat = RMNET_WLAN_STAT_FRAG_FWD_NO_DEV;
}
rmnet_wlan_stats_update(stat);
/* Now that we know where to forward, forward! */
rmnet_wlan_flush_fragment_node(node, true);
goto out;
}
encap:
/* Not a fragment we're interested in. Remember that */
node->fwd = ERR_PTR(-EINVAL);
/* Flush anything we held before we found this */
rmnet_wlan_flush_fragment_node(node, true);
out:
if (ret)
/* Make sure to reset as we are not requeuing the packet */
shinfo->tskey = 0;
return ret;
}
int rmnet_wlan_fragment_v4(struct sk_buff *skb, int ip_len,
struct rmnet_wlan_tuple *tuple,
struct rmnet_wlan_fwd_info *fwd_info)
__must_hold(RCU)
{
struct rmnet_wlan_fragment_info info = {};
struct iphdr *iph = ip_hdr(skb);
/* Only deal with this rigmarole if we can't escape it */
if (tuple->trans_proto != IPPROTO_TCP &&
tuple->trans_proto != IPPROTO_UDP &&
tuple->trans_proto != IPPROTO_ESP)
return -1;
info.ip_proto = 4;
info.v4_saddr = iph->saddr;
info.v4_daddr = iph->daddr;
/* Endian up-casting is messy, ain't it~? */
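/* The 16-bit network-order IPv4 ID is widened to the 32-bit network-order
* form the IPv6 fragment header already uses (wire ID 0xBEEF becomes key
* bytes 00 00 be ef), so both families share the same id field as a hash key.
*/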
info.id = htonl((u32)ntohs(iph->id));
info.offset = htons(iph->frag_off) & IP_OFFSET;
info.ip_len = (u16)ip_len;
return rmnet_wlan_fragment_handle(skb, tuple, &info, fwd_info);
}
int rmnet_wlan_fragment_v6(struct sk_buff *skb, int ip_len,
struct rmnet_wlan_tuple *tuple,
struct rmnet_wlan_fwd_info *fwd_info)
__must_hold(RCU)
{
struct rmnet_wlan_fragment_info info = {};
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct frag_hdr *fragh, __fragh;
unsigned int ptr;
/* V6 fragments are harder to deal with, since you won't know the
* actual transport protocol in any secondary fragments...
*/
if (tuple->trans_proto != IPPROTO_TCP &&
tuple->trans_proto != IPPROTO_UDP &&
tuple->trans_proto != IPPROTO_ESP &&
tuple->trans_proto != NEXTHDR_FRAGMENT)
return -1;
/* Grab that frag header! */
if (rmnet_wlan_ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL)
< 0)
/* ...or not, somehow? */
return -1;
fragh = skb_header_pointer(skb, ptr, sizeof(*fragh), &__fragh);
if (!fragh) {
rmnet_wlan_stats_update(RMNET_WLAN_STAT_ENCAP_HDRP_FAIL);
return -1;
}
info.ip_proto = 6;
memcpy(&info.v6_saddr, &ip6h->saddr, sizeof(ip6h->saddr));
memcpy(&info.v6_daddr, &ip6h->daddr, sizeof(ip6h->daddr));
info.id = fragh->identification;
info.offset = htons(fragh->frag_off) & IP6_OFFSET;
info.ip_len = (u16)ip_len;
/* Account for the fact that non-secondary fragments won't
* handle the fragment header length.
*/
if (tuple->trans_proto == NEXTHDR_FRAGMENT)
info.ip_len += sizeof(*fragh);
return rmnet_wlan_fragment_handle(skb, tuple, &info, fwd_info);
}
int rmnet_wlan_fragment_init(void)
{
INIT_DELAYED_WORK(&rmnet_wlan_fragment_work.ws,
rmnet_wlan_fragment_work_process);
return 0;
}
void rmnet_wlan_fragments_remove(void)
{
/* Force the current work struct to finish deleting anything old
* enough...
*/
cancel_delayed_work_sync(&rmnet_wlan_fragment_work.ws);
rmnet_wlan_fragment_work.force_clean = true;
schedule_delayed_work(&rmnet_wlan_fragment_work.ws, 0);
/* ...and force remove all the rest of the nodes */
cancel_delayed_work_sync(&rmnet_wlan_fragment_work.ws);
}
void rmnet_wlan_fragment_del_fwd_info(struct rmnet_wlan_fwd_info *info)
{
struct rmnet_wlan_fragment_node *node;
int bkt;
rcu_read_lock();
hash_for_each_rcu(rmnet_wlan_fragment_hash, bkt, node, hash) {
/* Poison anything that is using the info */
if (node->fwd == info)
node->fwd = ERR_PTR(-EINVAL);
}
rcu_read_unlock();
}

View File

@@ -1,31 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN fragment handler framework */
#ifndef __RMNET_WLAN_FRAGMENT_H__
#define __RMNET_WLAN_FRAGMENT_H__
#include <linux/skbuff.h>
#include "rmnet_wlan.h"
/* Fragment handling interface */
int rmnet_wlan_fragment_v4(struct sk_buff *skb, int ip_len,
struct rmnet_wlan_tuple *tuple,
struct rmnet_wlan_fwd_info *fwd_info);
int rmnet_wlan_fragment_v6(struct sk_buff *skb, int ip_len,
struct rmnet_wlan_tuple *tuple,
struct rmnet_wlan_fwd_info *fwd_info);
/* Initialize fragment handling */
int rmnet_wlan_fragment_init(void);
/* Purge all fragment information */
void rmnet_wlan_fragments_remove(void);
/* Handle FWD information removal */
void rmnet_wlan_fragment_del_fwd_info(struct rmnet_wlan_fwd_info *info);
#endif

View File

@@ -1,641 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN Generic Netlink */
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/module.h>
#include <linux/if.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include "rmnet_wlan_genl.h"
#include "rmnet_wlan.h"
#include "rmnet_wlan_connection.h"
#include "rmnet_wlan_fragment.h"
/* Use integer 58 instead of ':' to avoid issues with scripts */
#define RMNET_WLAN_CHAR_COLON 58
static struct nla_policy
rmnet_wlan_genl_tuple_policy[RMNET_WLAN_GENL_TUPLE_ATTR_MAX + 1] = {
[RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE] =
NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_wlan_tuple)),
};
static struct nla_policy
rmnet_wlan_genl_attr_policy[RMNET_WLAN_GENL_ATTR_MAX + 1] = {
[RMNET_WLAN_GENL_ATTR_TUPLES] =
NLA_POLICY_NESTED_ARRAY(rmnet_wlan_genl_tuple_policy),
[RMNET_WLAN_GENL_ATTR_DEV] = {
.type = NLA_NUL_STRING,
.len = IFNAMSIZ - 1, /* Max len excluding NULL */
},
[RMNET_WLAN_GENL_ATTR_FWD_ADDR] = {
.type = NLA_NUL_STRING,
.len = INET6_ADDRSTRLEN,
},
[RMNET_WLAN_GENL_ATTR_FWD_DEV] = {
.type = NLA_NUL_STRING,
.len = IFNAMSIZ - 1, /* Max len excluding NULL */
},
[RMNET_WLAN_GENL_ATTR_ENCAP_PORT] = {
.type = NLA_U16,
},
[RMNET_WLAN_GENL_ATTR_NET_TYPE] = {
.type = NLA_U8,
},
[RMNET_WLAN_GENL_ATTR_LL_SRC_ADDR] = {
.type = NLA_NUL_STRING,
.len = INET6_ADDRSTRLEN,
},
[RMNET_WLAN_GENL_ATTR_LL_DST_ADDR] = {
.type = NLA_NUL_STRING,
.len = INET6_ADDRSTRLEN,
},
[RMNET_WLAN_GENL_ATTR_LL_SRC_PORT] = {
.type = NLA_U16,
},
[RMNET_WLAN_GENL_ATTR_LL_DST_PORT] = {
.type = NLA_U16,
},
};
#define RMNET_WLAN_GENL_OP(_cmd, _func) \
{ \
.cmd = _cmd, \
.doit = _func, \
}
static struct genl_family rmnet_wlan_genl_family;
static int rmnet_wlan_genl_add_tuples(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *tb[RMNET_WLAN_GENL_TUPLE_ATTR_MAX + 1];
struct rmnet_wlan_tuple *tuples;
struct nlattr *nla;
u32 tuple_count = 0;
int tuple_len;
int rc = 0;
if (!info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES]) {
/* Help me help you! */
GENL_SET_ERR_MSG(info, "Must supply tuple info");
return -EINVAL;
}
nla_for_each_nested(nla, info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES],
tuple_len)
tuple_count++;
tuples = kcalloc(tuple_count, sizeof(*tuples), GFP_KERNEL);
if (!tuples) {
GENL_SET_ERR_MSG(info, "Kernel OOM");
return -ENOMEM;
}
tuple_count = 0;
nla_for_each_nested(nla, info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES],
tuple_len) {
struct rmnet_wlan_tuple *tuple;
rc = nla_parse_nested(tb, RMNET_WLAN_GENL_TUPLE_ATTR_MAX, nla,
rmnet_wlan_genl_tuple_policy,
info->extack);
if (rc)
goto out;
if (!tb[RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE]) {
GENL_SET_ERR_MSG(info, "Must specify tuple entry");
goto out;
}
/* Sanitize. It's 2020 after all.
*
* ...Too soon?
*/
tuple = nla_data(tb[RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE]);
if (tuple->ip_proto != 4 && tuple->ip_proto != 6) {
GENL_SET_ERR_MSG(info, "Invalid IP protocol");
goto out;
}
if (tuple->trans_proto != IPPROTO_TCP &&
tuple->trans_proto != IPPROTO_UDP &&
tuple->trans_proto != IPPROTO_ESP) {
GENL_SET_ERR_MSG(info, "Invalid transport protocol");
goto out;
}
memcpy(&tuples[tuple_count], tuple, sizeof(*tuple));
tuple_count++;
}
rc = rmnet_wlan_add_tuples(tuples, tuple_count, info);
out:
kfree(tuples);
return rc;
}
static int rmnet_wlan_genl_del_tuples(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *tb[RMNET_WLAN_GENL_TUPLE_ATTR_MAX + 1];
struct rmnet_wlan_tuple *tuples;
struct nlattr *nla;
u32 tuple_count = 0;
int tuple_len;
int rc;
if (!info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES]) {
GENL_SET_ERR_MSG(info, "Must supply tuple info");
return -EINVAL;
}
nla_for_each_nested(nla, info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES],
tuple_len)
tuple_count++;
tuples = kcalloc(tuple_count, sizeof(*tuples), GFP_KERNEL);
if (!tuples) {
GENL_SET_ERR_MSG(info, "Kernel OOM");
return -ENOMEM;
}
tuple_count = 0;
nla_for_each_nested(nla, info->attrs[RMNET_WLAN_GENL_ATTR_TUPLES],
tuple_len) {
struct rmnet_wlan_tuple *tuple;
rc = nla_parse_nested(tb, RMNET_WLAN_GENL_TUPLE_ATTR_MAX, nla,
rmnet_wlan_genl_tuple_policy,
info->extack);
if (rc)
goto out;
if (!tb[RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE]) {
GENL_SET_ERR_MSG(info, "Must specify tuple entry");
rc = -EINVAL;
goto out;
}
tuple = nla_data(tb[RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE]);
memcpy(&tuples[tuple_count], tuple, sizeof(*tuple));
tuple_count++;
}
rc = rmnet_wlan_del_tuples(tuples, tuple_count, info);
out:
kfree(tuples);
return rc;
}
static int rmnet_wlan_genl_set_device(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
int net_type;
int err;
if (!info->attrs[RMNET_WLAN_GENL_ATTR_DEV] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]) {
GENL_SET_ERR_MSG(info, "Must specify device and network info");
return -EINVAL;
}
nla = info->attrs[RMNET_WLAN_GENL_ATTR_DEV];
net_type = nla_get_u8(info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]);
if(net_type != DATA_PATH_PROXY_NET_WLAN &&
net_type != DATA_PATH_PROXY_NET_WWAN &&
net_type != DATA_PATH_PROXY_NET_LBO) {
GENL_SET_ERR_MSG(info, "Network type not supported!");
return -EINVAL;
}
if(net_type == DATA_PATH_PROXY_NET_WWAN) {
err = rmnet_wwan_set_device(nla_data(nla), info);
} else {
err = rmnet_wlan_set_device(nla_data(nla), info);
}
return err;
}
static int rmnet_wlan_genl_unset_device(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
int net_type;
int err;
if(!info->attrs[RMNET_WLAN_GENL_ATTR_DEV] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]) {
GENL_SET_ERR_MSG(info, "Must specify device and network info");
return -EINVAL;
}
net_type = nla_get_u8(info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]);
/* Still don't care about you */
nla = info->attrs[RMNET_WLAN_GENL_ATTR_DEV];
if(net_type != DATA_PATH_PROXY_NET_WLAN &&
net_type != DATA_PATH_PROXY_NET_WWAN &&
net_type != DATA_PATH_PROXY_NET_LBO) {
GENL_SET_ERR_MSG(info, "Network type not supported!");
return -EINVAL;
}
if(net_type == DATA_PATH_PROXY_NET_WWAN) {
err = rmnet_wwan_unset_device(nla_data(nla), info);
} else {
err = rmnet_wlan_unset_device(nla_data(nla), info);
}
if (err)
GENL_SET_ERR_MSG(info,
"Kernel error, unregistering notifier failed");
return err;
}
static int rmnet_wlan_genl_add_fwd_info(struct sk_buff *skb,
struct genl_info *info)
{
struct rmnet_wlan_fwd_info fwd_info = {};
struct nlattr *nla;
char *addr_str;
int err;
/* Must provide the address and device to forward to */
if (!info->attrs[RMNET_WLAN_GENL_ATTR_FWD_ADDR] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_FWD_DEV] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]) {
GENL_SET_ERR_MSG(info,
"Must specify FWD device, address, and network");
return -EINVAL;
}
fwd_info.net_type = nla_get_u8(info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]);
nla = info->attrs[RMNET_WLAN_GENL_ATTR_FWD_ADDR];
addr_str = nla_data(nla);
if (strnchr(addr_str, nla_len(nla), RMNET_WLAN_CHAR_COLON)) {
if (in6_pton(addr_str, nla_len(nla),
fwd_info.v6_addr.s6_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"FWD address is invalid in IPv6");
return -EINVAL;
}
fwd_info.ip_proto = 6;
} else {
if (in4_pton(addr_str, nla_len(nla),
(u8 *)&fwd_info.v4_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"FWD address is invalid in IPv4");
return -EINVAL;
}
fwd_info.ip_proto = 4;
}
nla = info->attrs[RMNET_WLAN_GENL_ATTR_FWD_DEV];
fwd_info.fwd_dev = dev_get_by_name(genl_info_net(info), nla_data(nla));
if (!fwd_info.fwd_dev) {
GENL_SET_ERR_MSG(info, "Invalid FWD device name");
return -EINVAL;
}
err = rmnet_wlan_add_fwd_info(&fwd_info, info);
dev_put(fwd_info.fwd_dev);
return err;
}
static int rmnet_wlan_genl_del_fwd_info(struct sk_buff *skb,
struct genl_info *info)
{
struct rmnet_wlan_fwd_info fwd_info = {};
struct nlattr *nla;
char *addr_str;
int err;
/* Must provide the address and device to forward to */
if (!info->attrs[RMNET_WLAN_GENL_ATTR_FWD_ADDR] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_FWD_DEV] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]) {
GENL_SET_ERR_MSG(info,
"Must specify FWD device, address, and network");
return -EINVAL;
}
fwd_info.net_type = nla_get_u8(info->attrs[RMNET_WLAN_GENL_ATTR_NET_TYPE]);
nla = info->attrs[RMNET_WLAN_GENL_ATTR_FWD_ADDR];
addr_str = nla_data(nla);
if (strnchr(addr_str, nla_len(nla), RMNET_WLAN_CHAR_COLON)) {
if (in6_pton(addr_str, nla_len(nla),
fwd_info.v6_addr.s6_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"FWD address is invalid in IPv6");
return -EINVAL;
}
fwd_info.ip_proto = 6;
} else {
if (in4_pton(addr_str, nla_len(nla),
(u8 *)&fwd_info.v4_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"FWD address is invalid in IPv4");
return -EINVAL;
}
fwd_info.ip_proto = 4;
}
nla = info->attrs[RMNET_WLAN_GENL_ATTR_FWD_DEV];
fwd_info.fwd_dev = dev_get_by_name(genl_info_net(info), nla_data(nla));
if (!fwd_info.fwd_dev) {
GENL_SET_ERR_MSG(info, "Invalid FWD device name");
return -EINVAL;
}
err = rmnet_wlan_del_fwd_info(&fwd_info, info);
dev_put(fwd_info.fwd_dev);
return err;
}
static int rmnet_wlan_genl_set_encap_port(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
nla = info->attrs[RMNET_WLAN_GENL_ATTR_ENCAP_PORT];
if (!nla) {
GENL_SET_ERR_MSG(info, "Must specify encap port");
return -EINVAL;
}
return rmnet_wlan_set_encap_port(nla_get_be16(nla), info);
}
static int rmnet_wlan_genl_unset_encap_port(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
nla = info->attrs[RMNET_WLAN_GENL_ATTR_ENCAP_PORT];
if (!nla) {
GENL_SET_ERR_MSG(info, "Must specify encap port");
return -EINVAL;
}
return rmnet_wlan_unset_encap_port(nla_get_be16(nla), info);
}
static int rmnet_wlan_genl_act_encap_port_pass_through(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
nla = info->attrs[RMNET_WLAN_GENL_ATTR_ENCAP_PORT];
if (!nla) {
GENL_SET_ERR_MSG(info, "Must specify encap port");
return -EINVAL;
}
return rmnet_wlan_act_encap_port_pass_through(nla_get_be16(nla), info);
}
static int rmnet_wlan_genl_act_encap_port_drop(struct sk_buff *skb,
struct genl_info *info)
{
struct nlattr *nla;
nla = info->attrs[RMNET_WLAN_GENL_ATTR_ENCAP_PORT];
if (!nla) {
GENL_SET_ERR_MSG(info, "Must specify encap port");
return -EINVAL;
}
return rmnet_wlan_act_encap_port_drop(nla_get_be16(nla), info);
}
static int rmnet_wlan_genl_reset(struct sk_buff *skb, struct genl_info *info)
{
(void)skb;
(void)info;
rmnet_wlan_reset();
return 0;
}
static int rmnet_wlan_genl_add_ll_tuple(struct sk_buff *skb,
struct genl_info *info)
{
struct rmnet_wlan_ll_tuple tuple = {};
struct nlattr *nla;
char *addr_str;
/* Must provide the saddr, daddr, sport, dport */
if (!info->attrs[RMNET_WLAN_GENL_ATTR_LL_SRC_ADDR] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_LL_DST_ADDR] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_LL_SRC_PORT] ||
!info->attrs[RMNET_WLAN_GENL_ATTR_LL_DST_PORT]) {
GENL_SET_ERR_MSG(info,
"Must specify LL source/destination address and port");
return -EINVAL;
}
/* Set SRC address and set IPv4 or IPv6 Protocol */
nla = info->attrs[RMNET_WLAN_GENL_ATTR_LL_SRC_ADDR];
addr_str = nla_data(nla);
if (strnchr(addr_str, nla_len(nla), RMNET_WLAN_CHAR_COLON)) {
if (in6_pton(addr_str, nla_len(nla),
tuple.v6_saddr.s6_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"SRC address is invalid in IPv6");
return -EINVAL;
}
tuple.ip_proto = 6;
} else {
if (in4_pton(addr_str, nla_len(nla),
(u8 *)&tuple.v4_saddr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"SRC address is invalid in IPv4");
return -EINVAL;
}
tuple.ip_proto = 4;
}
/* Set DST address */
nla = info->attrs[RMNET_WLAN_GENL_ATTR_LL_DST_ADDR];
addr_str = nla_data(nla);
if (strnchr(addr_str, nla_len(nla), RMNET_WLAN_CHAR_COLON)) {
if (in6_pton(addr_str, nla_len(nla),
tuple.v6_daddr.s6_addr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"DST address is invalid in IPv6");
return -EINVAL;
}
} else {
if (in4_pton(addr_str, nla_len(nla),
(u8 *)&tuple.v4_daddr, -1, NULL) != 1) {
GENL_SET_ERR_MSG(info,
"DST address is invalid in IPv4");
return -EINVAL;
}
}
/* Set Source and Destination Port */
nla = info->attrs[RMNET_WLAN_GENL_ATTR_LL_SRC_PORT];
tuple.sport = nla_get_be16(nla);
nla = info->attrs[RMNET_WLAN_GENL_ATTR_LL_DST_PORT];
tuple.dport = nla_get_be16(nla);
rmnet_wlan_add_ll_tuple(&tuple);
return 0;
}
static int rmnet_wlan_genl_del_ll_tuple(struct sk_buff *skb,
struct genl_info *info)
{
(void)skb;
(void)info;
rmnet_wlan_del_ll_tuple();
return 0;
}
static int rmnet_wlan_genl_get_tuple(struct sk_buff *skb,
struct genl_info *info)
{
struct sk_buff *skb_out = NULL;
int err = 0;
/* Create a buffer and write the internal tuples */
err = rmnet_wlan_get_tuples(&skb_out, &rmnet_wlan_genl_family, info);
if (err)
goto out;
if (!skb_out) {
err = -EINVAL;
goto out;
}
genlmsg_reply(skb_out, info);
out:
return err;
}
static const struct genl_ops rmnet_wlan_genl_ops[] = {
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_ADD_TUPLES,
rmnet_wlan_genl_add_tuples),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_DEL_TUPLES,
rmnet_wlan_genl_del_tuples),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_SET_DEV,
rmnet_wlan_genl_set_device),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_UNSET_DEV,
rmnet_wlan_genl_unset_device),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_ADD_FWD_INFO,
rmnet_wlan_genl_add_fwd_info),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_DEL_FWD_INFO,
rmnet_wlan_genl_del_fwd_info),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_SET_ENCAP_PORT,
rmnet_wlan_genl_set_encap_port),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_UNSET_ENCAP_PORT,
rmnet_wlan_genl_unset_encap_port),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_RESET,
rmnet_wlan_genl_reset),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_ENCAP_PORT_ACT_PASS_THROUGH,
rmnet_wlan_genl_act_encap_port_pass_through),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_ENCAP_PORT_ACT_DROP,
rmnet_wlan_genl_act_encap_port_drop),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_LL_ADDR_ADD,
rmnet_wlan_genl_add_ll_tuple),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_LL_ADDR_DEL,
rmnet_wlan_genl_del_ll_tuple),
RMNET_WLAN_GENL_OP(RMNET_WLAN_GENL_CMD_GET_TUPLES,
rmnet_wlan_genl_get_tuple),
};
static struct genl_family rmnet_wlan_genl_family = {
.name = RMNET_WLAN_GENL_FAMILY_NAME,
.version = RMNET_WLAN_GENL_VERSION,
.maxattr = RMNET_WLAN_GENL_ATTR_MAX,
.policy = rmnet_wlan_genl_attr_policy,
.ops = rmnet_wlan_genl_ops,
.n_ops = ARRAY_SIZE(rmnet_wlan_genl_ops),
};
static int __init rmnet_wlan_genl_init(void)
{
int ret = 0;
pr_info("%s(): rmnet_wlan initializing\n", __func__);
ret = genl_register_family(&rmnet_wlan_genl_family);
if (ret) {
pr_err("%s(): registering family failed: %i\n", __func__, ret);
goto err0;
}
ret = rmnet_wlan_connection_init();
if (ret) {
pr_err("%s(): connection management init failed: %i\n", __func__, ret);
goto err1;
}
ret = rmnet_wlan_fragment_init();
if (ret) {
pr_err("%s(): fragment management init failed: %i\n", __func__,
ret);
goto err2;
}
rmnet_wlan_set_hooks();
pr_info("%s(): rmnet_wlan_set_hooks set\n", __func__);
return 0;
err2:
rmnet_wlan_connection_deinit();
err1:
genl_unregister_family(&rmnet_wlan_genl_family);
err0:
return ret;
}
static void __exit rmnet_wlan_genl_exit(void)
{
int ret;
pr_info("%s(): rmnet_wlan exiting\n", __func__);
ret = rmnet_wlan_connection_deinit();
if (ret)
pr_err("%s(): connection management de-init failed: %i\n", __func__, ret);
rmnet_wlan_deinit();
ret = genl_unregister_family(&rmnet_wlan_genl_family);
if (ret)
pr_err("%s(): unregister family failed: %i\n", __func__, ret);
rmnet_wlan_unset_hooks();
pr_info("%s(): rmnet_wlan_unset_hooks unset\n", __func__);
}
MODULE_LICENSE("GPL v2");
module_init(rmnet_wlan_genl_init);
module_exit(rmnet_wlan_genl_exit);

View File

@@ -1,61 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN Generic netlink */
#ifndef __RMNET_WLAN_GENL_H__
#define __RMNET_WLAN_GENL_H__
#define RMNET_WLAN_GENL_VERSION 1
#define RMNET_WLAN_GENL_FAMILY_NAME "RMNET_WLAN"
enum {
RMNET_WLAN_GENL_CMD_UNSPEC,
RMNET_WLAN_GENL_CMD_ADD_TUPLES,
RMNET_WLAN_GENL_CMD_DEL_TUPLES,
RMNET_WLAN_GENL_CMD_SET_DEV,
RMNET_WLAN_GENL_CMD_UNSET_DEV,
RMNET_WLAN_GENL_CMD_ADD_FWD_INFO,
RMNET_WLAN_GENL_CMD_DEL_FWD_INFO,
RMNET_WLAN_GENL_CMD_SET_ENCAP_PORT,
RMNET_WLAN_GENL_CMD_UNSET_ENCAP_PORT,
RMNET_WLAN_GENL_CMD_RESET,
RMNET_WLAN_GENL_CMD_ENCAP_PORT_ACT_PASS_THROUGH,
RMNET_WLAN_GENL_CMD_ENCAP_PORT_ACT_DROP,
RMNET_WLAN_GENL_CMD_LL_ADDR_ADD,
RMNET_WLAN_GENL_CMD_LL_ADDR_DEL,
RMNET_WLAN_GENL_CMD_GET_TUPLES,
__RMNET_WLAN_GENL_CMD_MAX,
};
enum {
RMNET_WLAN_GENL_ATTR_UNSPEC,
RMNET_WLAN_GENL_ATTR_TUPLES,
RMNET_WLAN_GENL_ATTR_DEV,
RMNET_WLAN_GENL_ATTR_FWD_ADDR,
RMNET_WLAN_GENL_ATTR_FWD_DEV,
RMNET_WLAN_GENL_ATTR_ENCAP_PORT,
RMNET_WLAN_GENL_ATTR_NET_TYPE,
RMNET_WLAN_GENL_ATTR_LL_SRC_ADDR,
RMNET_WLAN_GENL_ATTR_LL_DST_ADDR,
RMNET_WLAN_GENL_ATTR_LL_SRC_PORT,
RMNET_WLAN_GENL_ATTR_LL_DST_PORT,
__RMNET_WLAN_GENL_ATTR_MAX,
};
#define RMNET_WLAN_GENL_ATTR_MAX (__RMNET_WLAN_GENL_ATTR_MAX - 1)
enum {
RMNET_WLAN_GENL_TUPLE_ATTR_UNSPEC,
RMNET_WLAN_GENL_TUPLE_ATTR_TUPLE,
__RMNET_WLAN_GENL_TUPLE_ATTR_MAX,
};
#define RMNET_WLAN_GENL_TUPLE_ATTR_MAX (__RMNET_WLAN_GENL_TUPLE_ATTR_MAX - 1)
#define DATA_PATH_PROXY_NET_MAX (__DATA_PATH_PROXY_NET_MAX - 1)
#endif
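For illustration only, a hedged sketch of how a userspace client might drive this family with libnl-3. The command and attribute numbers are taken from the enums above and the family name from RMNET_WLAN_GENL_FAMILY_NAME; the network-type value is a placeholder, since the DATA_PATH_PROXY_NET_* constants live in rmnet_wlan.h and are not reproduced here.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

#define RMNET_WLAN_GENL_CMD_SET_DEV	3	/* from the command enum above */
#define RMNET_WLAN_GENL_ATTR_DEV	2	/* from the attribute enum above */
#define RMNET_WLAN_GENL_ATTR_NET_TYPE	6	/* from the attribute enum above */
#define NET_TYPE_PLACEHOLDER		0	/* assumption: substitute the real DATA_PATH_PROXY_NET_* value */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int fam;

	if (!sk || genl_connect(sk) < 0)
		return 1;
	fam = genl_ctrl_resolve(sk, "RMNET_WLAN");	/* RMNET_WLAN_GENL_FAMILY_NAME */
	if (fam < 0)
		return 1;
	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	/* SET_DEV requires both the device name and the network type (see
	 * rmnet_wlan_genl_set_device() above).
	 */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
		    RMNET_WLAN_GENL_CMD_SET_DEV, 1 /* RMNET_WLAN_GENL_VERSION */);
	nla_put_string(msg, RMNET_WLAN_GENL_ATTR_DEV, "wlan0");
	nla_put_u8(msg, RMNET_WLAN_GENL_ATTR_NET_TYPE, NET_TYPE_PLACEHOLDER);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}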

File diff suppressed because it is too large Load Diff

View File

@@ -1,28 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN stats framework */
#include <linux/moduleparam.h>
#include "rmnet_wlan_stats.h"
static u64 rmnet_wlan_stats[RMNET_WLAN_STAT_MAX];
module_param_array_named(rmnet_wlan_stat, rmnet_wlan_stats, ullong, NULL, 0444);
static u64 rmnet_wlan_forward_stats[RMNET_WLAN_F_S_R0_MAX];
module_param_array_named(rmnet_wlan_forward_stat, rmnet_wlan_forward_stats, ullong, NULL, 0444);
void rmnet_wlan_stats_update(u32 stat)
{
if (stat < RMNET_WLAN_STAT_MAX)
rmnet_wlan_stats[stat] += 1;
}
void rmnet_wlan_forward_stats_update(u32 stat)
{
if (stat < RMNET_WLAN_F_S_R0_MAX)
rmnet_wlan_forward_stats[stat] += 1;
}
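The arrays are registered read-only (0444), so the counters should be visible under sysfs at runtime. A minimal sketch, assuming the module is loaded under the name rmnet_wlan (the actual module name is not confirmed by this diff):

#include <stdio.h>

int main(void)
{
	char buf[4096];
	/* Assumed path: /sys/module/<module>/parameters/rmnet_wlan_stat */
	FILE *f = fopen("/sys/module/rmnet_wlan/parameters/rmnet_wlan_stat", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		/* Comma-separated u64 counters, indexed by the enums in rmnet_wlan_stats.h */
		printf("rmnet_wlan_stat: %s", buf);
	fclose(f);
	return 0;
}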

View File

@@ -1,95 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* RMNET WLAN stats framework */
#ifndef __RMNET_WLAN_STATS_H__
#define __RMNET_WLAN_STATS_H__
#include <linux/types.h>
enum {
RMNET_WLAN_STAT_TUPLE_ADD,
RMNET_WLAN_STAT_TUPLE_DEL,
RMNET_WLAN_STAT_PKT_RX,
RMNET_WLAN_STAT_PKT_FWD,
RMNET_WLAN_STAT_PKT_FWD_NO_DEV,
RMNET_WLAN_STAT_FRAG_RX,
RMNET_WLAN_STAT_FRAG_FWD,
RMNET_WLAN_STAT_FRAG_FWD_NO_DEV,
RMNET_WLAN_STAT_FRAG_QUEUE,
RMNET_WLAN_STAT_FRAG_EXP,
RMNET_WLAN_STAT_ENCAP_DELIVER,
RMNET_WLAN_STAT_MSS_CLAMP,
RMNET_WLAN_STAT_MSS_CLAMP_SKIP,
RMNET_WLAN_STAT_MSS_CLAMP_ERR,
RMNET_WLAN_STAT_ENCAP_PORT_ADD,
RMNET_WLAN_STAT_ENCAP_PORT_DEL,
RMNET_WLAN_STAT_ENCAP_PORT_DROP_ADD,
RMNET_WLAN_STAT_ENCAP_PORT_DROP_DEL,
RMNET_WLAN_STAT_ENCAP_DROP,
RMNET_WWAN_STAT_PKT_RX,
RMNET_WLAN_STAT_ENCAP_HDRP_FAIL,
RMNET_WLAN_STAT_LL_TX,
RMNET_WLAN_STAT_CIWLAN_DDEV_GET_FAIL,
RMNET_WLAN_STAT_MAX,
};
/* forwarding stats */
enum {
RMNET_WLAN_F_S_R0_IP_XMIT_SUCCESS,
RMNET_WLAN_F_S_R0_IPV6_XMIT_SUCCESS,
RMNET_WLAN_F_S_R0_IP_XMIT_DROP,
RMNET_WLAN_F_S_R0_IPV6_XMIT_DROP,
RMNET_WLAN_F_S_R0_IP_HDRP_FAIL,
RMNET_WLAN_F_S_R0_IP_DEV_GET_FAIL,
RMNET_WLAN_F_S_R0_IP_ROUTE_FAIL,
RMNET_WLAN_F_S_R0_IP_NEIGH_LOOKUP_FAIL,
RMNET_WLAN_F_S_R0_IP_HARD_HEADER_FAIL,
RMNET_WLAN_F_S_R0_IPV6_HDRP_FAIL,
RMNET_WLAN_F_S_R0_IPV6_DEV_GET_FAIL,
RMNET_WLAN_F_S_R0_IPV6_ROUTE_FAIL,
RMNET_WLAN_F_S_R0_IPV6_NEIGH_LOOKUP_FAIL,
RMNET_WLAN_F_S_R0_IPV6_HARD_HEADER_FAIL,
RMNET_WLAN_F_S_NON_R0_IPV6_HDRP_FAIL,
RMNET_WLAN_F_S_NON_R0_IPV6_DST_LOOKUP_FAIL,
RMNET_WLAN_F_S_NON_R0_IPV6_XFRM_LOOKUP_FAIL,
RMNET_WLAN_F_S_NON_R0_IPV6_DST_OUTPUT_SUCCESS,
RMNET_WLAN_F_S_NON_R0_IP_HDRP_FAIL,
RMNET_WLAN_F_S_NON_R0_IP_RT_LOOKUP_FAIL,
RMNET_WLAN_F_S_NON_R0_IP_XFRM_LOOKUP_FAIL,
RMNET_WLAN_F_S_NON_R0_IP_DST_OUTPUT_SUCCESS,
RMNET_WLAN_F_S_R0_IPV6_DDEV_GET_FAIL,
RMNET_WLAN_F_S_NON_R0_IPV6_DDST_LOOKUP_FAIL,
RMNET_WLAN_F_S_R0_IP_DDEV_GET_FAIL,
RMNET_WLAN_F_S_NON_R0_IP_DRT_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IPV6_HDRP_FAIL,
RMNET_WWAN_F_S_NON_R0_IPV6_DST_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IPV6_XFRM_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IPV6_DST_OUTPUT_SUCCESS,
RMNET_WWAN_F_S_R0_IPV6_DDEV_GET_FAIL,
RMNET_WWAN_F_S_NON_R0_IPV6_DDST_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IP_HDRP_FAIL,
RMNET_WWAN_F_S_NON_R0_IP_RT_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IP_XFRM_LOOKUP_FAIL,
RMNET_WWAN_F_S_NON_R0_IP_DST_OUTPUT_SUCCESS,
RMNET_WWAN_F_S_R0_IP_DDEV_GET_FAIL,
RMNET_WWAN_F_S_NON_R0_IP_DRT_LOOKUP_FAIL,
RMNET_WWAN_F_S_R0_IP_DEV_GET_FAIL,
RMNET_WWAN_F_S_R0_IPV6_DEV_GET_FAIL,
RMNET_WWAN_F_S_R0_IP_ROUTE_FAIL,
RMNET_WWAN_F_S_R0_IP_HDRP_FAIL,
RMNET_WLAN_F_S_NON_R0_IP_DEV_GET_FAIL,
RMNET_F_S_PULL_PROTO_MISMATCH,
RMNET_F_S_PULL_SUCCESS,
RMNET_F_S_PULL_FAILURE,
RMNET_WLAN_F_S_R0_MAX,
};
void rmnet_wlan_stats_update(u32 stat);
void rmnet_wlan_forward_stats_update(u32 stat);
#endif

View File

@@ -6,7 +6,6 @@ RMNET_WLAN_DLKM_PLATFORMS_LIST := pineapple
RMNET_WLAN_DLKM_PLATFORMS_LIST += sun
RMNET_WLAN_DLKM_PLATFORMS_LIST += parrot
RMNET_WLAN_DLKM_PLATFORMS_LIST += monaco
RMNET_WLAN_DLKM_PLATFORMS_LIST += tuna
ifeq ($(call is-board-platform-in-list, $(RMNET_WLAN_DLKM_PLATFORMS_LIST)),true)
LOCAL_PATH := $(call my-dir)

View File

@@ -12,6 +12,3 @@ define_wlan("parrot", "perf")
define_wlan("monaco", "consolidate")
define_wlan("monaco", "perf")
define_wlan("tuna", "consolidate")
define_wlan("tuna", "perf")

View File

@@ -167,68 +167,60 @@ DATARMNET63b1a086d5->DATARMNET94b1f9ee09,DATARMNETfb0677cc3c);
DATARMNET61c2303133=(0xd2d+202-0xdf7);DATARMNET5ca94dbc3c(DATARMNETe75ad1a949);
goto DATARMNETbf4095f79e;}if(DATARMNET3396919a68->DATARMNET4924e79411==
IPPROTO_TCP||DATARMNET3396919a68->DATARMNET4924e79411==IPPROTO_UDP){struct
udphdr*DATARMNETd115b38943,DATARMNETbd022e3f5b;DATARMNETd115b38943=
skb_header_pointer(DATARMNET543491eb0f,DATARMNET54338da2ff->DATARMNET611d08d671,
sizeof(*DATARMNETd115b38943),&DATARMNETbd022e3f5b);if(!DATARMNETd115b38943){
DATARMNET5ca94dbc3c(DATARMNETf1f7e2c408);goto DATARMNETbf4095f79e;}
DATARMNET3396919a68->DATARMNETf0d9de7e2f=DATARMNETd115b38943->dest;if(
DATARMNETa8b2566e6a(DATARMNET543491eb0f,DATARMNET3396919a68,DATARMNET54338da2ff
->DATARMNET611d08d671)){if(DATARMNET0a4704e5e0(DATARMNET3396919a68)){kfree_skb(
DATARMNET543491eb0f);DATARMNET61c2303133=(0xd2d+202-0xdf7);DATARMNET5ca94dbc3c(
DATARMNET0981317411);goto DATARMNETbf4095f79e;}DATARMNET5ca94dbc3c(
DATARMNETd1ad664d00);goto DATARMNET07fc49caf2;}}else if(DATARMNET3396919a68->
DATARMNET4924e79411==IPPROTO_ESP){struct ip_esp_hdr*DATARMNET568b3d4b19,
DATARMNET3f4e206745;DATARMNET568b3d4b19=skb_header_pointer(DATARMNET543491eb0f,
DATARMNET54338da2ff->DATARMNET611d08d671,sizeof(*DATARMNET568b3d4b19),&
DATARMNET3f4e206745);if(!DATARMNET568b3d4b19){DATARMNET5ca94dbc3c(
DATARMNETf1f7e2c408);goto DATARMNETbf4095f79e;}DATARMNET3396919a68->
DATARMNET906b2ee561=DATARMNET568b3d4b19->spi;}if(DATARMNET4eafcdee07(
DATARMNET3396919a68)){u32 DATARMNET248f120dd5;DATARMNET63b1a086d5->
DATARMNET7ed5754a5c=DATARMNET2d4b4cfc9e;if(!DATARMNET4899053671(
DATARMNET543491eb0f,DATARMNET2d4b4cfc9e)){DATARMNET248f120dd5=
DATARMNET7a58a5c1fc;DATARMNET61c2303133=(0xd2d+202-0xdf7);}else{
DATARMNET248f120dd5=DATARMNETba232077da;}DATARMNET5ca94dbc3c(DATARMNET248f120dd5
);DATARMNETc7c83f614f(DATARMNET63b1a086d5,true);goto DATARMNETbf4095f79e;}
DATARMNET07fc49caf2:DATARMNET63b1a086d5->DATARMNET7ed5754a5c=ERR_PTR(-EINVAL);
DATARMNETc7c83f614f(DATARMNET63b1a086d5,true);DATARMNETbf4095f79e:if(
DATARMNET61c2303133)DATARMNETa9599f487c->tskey=(0xd2d+202-0xdf7);return
DATARMNET61c2303133;}int DATARMNET579f75aa50(struct sk_buff*DATARMNET543491eb0f,
int DATARMNET611d08d671,struct DATARMNETb89ecedefc*DATARMNET3396919a68,struct
DATARMNET8d3c2559ca*DATARMNET2d4b4cfc9e)__must_hold(RCU){struct
DATARMNETdadb4e2c65 DATARMNET54338da2ff={};struct iphdr*DATARMNET86f1f2cdc9=
ip_hdr(DATARMNET543491eb0f);if(DATARMNET3396919a68->DATARMNET4924e79411!=
IPPROTO_TCP&&DATARMNET3396919a68->DATARMNET4924e79411!=IPPROTO_UDP&&
DATARMNET3396919a68->DATARMNET4924e79411!=IPPROTO_ESP)return-(0xd26+209-0xdf6);
DATARMNET54338da2ff.DATARMNET0d956cc77a=(0xd11+230-0xdf3);DATARMNET54338da2ff.
DATARMNETdfe430c2d6=DATARMNET86f1f2cdc9->saddr;DATARMNET54338da2ff.
DATARMNET2cb607d686=DATARMNET86f1f2cdc9->daddr;DATARMNET54338da2ff.id=htonl((u32
)ntohs(DATARMNET86f1f2cdc9->id));DATARMNET54338da2ff.DATARMNETb65c469a15=htons(
DATARMNET86f1f2cdc9->frag_off)&IP_OFFSET;DATARMNET54338da2ff.DATARMNET611d08d671
=(u16)DATARMNET611d08d671;return DATARMNET9d6ad3b16f(DATARMNET543491eb0f,
DATARMNET3396919a68,&DATARMNET54338da2ff,DATARMNET2d4b4cfc9e);}int
DATARMNETaca8ca54ed(struct sk_buff*DATARMNET543491eb0f,int DATARMNET611d08d671,
struct DATARMNETb89ecedefc*DATARMNET3396919a68,struct DATARMNET8d3c2559ca*
DATARMNET2d4b4cfc9e)__must_hold(RCU){struct DATARMNETdadb4e2c65
DATARMNET54338da2ff={};struct ipv6hdr*DATARMNETbf55123e5b=ipv6_hdr(
DATARMNET543491eb0f);struct frag_hdr*DATARMNET2d5a34898d,DATARMNET1879b194d6;
udphdr*DATARMNET75be5f3406=(struct udphdr*)(DATARMNET543491eb0f->data+
DATARMNET54338da2ff->DATARMNET611d08d671);DATARMNET3396919a68->
DATARMNETf0d9de7e2f=DATARMNET75be5f3406->dest;if(DATARMNETa8b2566e6a(
DATARMNET543491eb0f,DATARMNET3396919a68,DATARMNET54338da2ff->DATARMNET611d08d671
)){if(DATARMNET0a4704e5e0(DATARMNET3396919a68)){kfree_skb(DATARMNET543491eb0f);
DATARMNET61c2303133=(0xd2d+202-0xdf7);DATARMNET5ca94dbc3c(DATARMNET0981317411);
goto DATARMNETbf4095f79e;}DATARMNET5ca94dbc3c(DATARMNETd1ad664d00);goto
DATARMNET07fc49caf2;}}else if(DATARMNET3396919a68->DATARMNET4924e79411==
IPPROTO_ESP){struct ip_esp_hdr*DATARMNET73ee6a7020=(struct ip_esp_hdr*)(
DATARMNET543491eb0f->data+DATARMNET54338da2ff->DATARMNET611d08d671);
DATARMNET3396919a68->DATARMNET906b2ee561=DATARMNET73ee6a7020->spi;}if(
DATARMNET4eafcdee07(DATARMNET3396919a68)){u32 DATARMNET248f120dd5;
DATARMNET63b1a086d5->DATARMNET7ed5754a5c=DATARMNET2d4b4cfc9e;if(!
DATARMNET4899053671(DATARMNET543491eb0f,DATARMNET2d4b4cfc9e)){
DATARMNET248f120dd5=DATARMNET7a58a5c1fc;DATARMNET61c2303133=(0xd2d+202-0xdf7);}
else{DATARMNET248f120dd5=DATARMNETba232077da;}DATARMNET5ca94dbc3c(
DATARMNET248f120dd5);DATARMNETc7c83f614f(DATARMNET63b1a086d5,true);goto
DATARMNETbf4095f79e;}DATARMNET07fc49caf2:DATARMNET63b1a086d5->
DATARMNET7ed5754a5c=ERR_PTR(-EINVAL);DATARMNETc7c83f614f(DATARMNET63b1a086d5,
true);DATARMNETbf4095f79e:if(DATARMNET61c2303133)DATARMNETa9599f487c->tskey=
(0xd2d+202-0xdf7);return DATARMNET61c2303133;}int DATARMNET579f75aa50(struct
sk_buff*DATARMNET543491eb0f,int DATARMNET611d08d671,struct DATARMNETb89ecedefc*
DATARMNET3396919a68,struct DATARMNET8d3c2559ca*DATARMNET2d4b4cfc9e)__must_hold(
RCU){struct DATARMNETdadb4e2c65 DATARMNET54338da2ff={};struct iphdr*
DATARMNET86f1f2cdc9=ip_hdr(DATARMNET543491eb0f);if(DATARMNET3396919a68->
DATARMNET4924e79411!=IPPROTO_TCP&&DATARMNET3396919a68->DATARMNET4924e79411!=
IPPROTO_UDP&&DATARMNET3396919a68->DATARMNET4924e79411!=IPPROTO_ESP)return-
(0xd26+209-0xdf6);DATARMNET54338da2ff.DATARMNET0d956cc77a=(0xd11+230-0xdf3);
DATARMNET54338da2ff.DATARMNETdfe430c2d6=DATARMNET86f1f2cdc9->saddr;
DATARMNET54338da2ff.DATARMNET2cb607d686=DATARMNET86f1f2cdc9->daddr;
DATARMNET54338da2ff.id=htonl((u32)ntohs(DATARMNET86f1f2cdc9->id));
DATARMNET54338da2ff.DATARMNETb65c469a15=htons(DATARMNET86f1f2cdc9->frag_off)&
IP_OFFSET;DATARMNET54338da2ff.DATARMNET611d08d671=(u16)DATARMNET611d08d671;
return DATARMNET9d6ad3b16f(DATARMNET543491eb0f,DATARMNET3396919a68,&
DATARMNET54338da2ff,DATARMNET2d4b4cfc9e);}int DATARMNETaca8ca54ed(struct sk_buff
*DATARMNET543491eb0f,int DATARMNET611d08d671,struct DATARMNETb89ecedefc*
DATARMNET3396919a68,struct DATARMNET8d3c2559ca*DATARMNET2d4b4cfc9e)__must_hold(
RCU){struct DATARMNETdadb4e2c65 DATARMNET54338da2ff={};struct ipv6hdr*
DATARMNETbf55123e5b=ipv6_hdr(DATARMNET543491eb0f);struct frag_hdr*frag_hdr;
unsigned int DATARMNET7b34b7b5be;if(DATARMNET3396919a68->DATARMNET4924e79411!=
IPPROTO_TCP&&DATARMNET3396919a68->DATARMNET4924e79411!=IPPROTO_UDP&&
DATARMNET3396919a68->DATARMNET4924e79411!=IPPROTO_ESP&&DATARMNET3396919a68->
DATARMNET4924e79411!=NEXTHDR_FRAGMENT)return-(0xd26+209-0xdf6);if(
DATARMNET24669a931d(DATARMNET543491eb0f,&DATARMNET7b34b7b5be,NEXTHDR_FRAGMENT,
NULL,NULL)<(0xd2d+202-0xdf7))return-(0xd26+209-0xdf6);DATARMNET2d5a34898d=
skb_header_pointer(DATARMNET543491eb0f,DATARMNET7b34b7b5be,sizeof(*
DATARMNET2d5a34898d),&DATARMNET1879b194d6);if(!DATARMNET2d5a34898d){
DATARMNET5ca94dbc3c(DATARMNETf1f7e2c408);return-(0xd26+209-0xdf6);}
DATARMNET54338da2ff.DATARMNET0d956cc77a=(0xd03+244-0xdf1);memcpy(&
DATARMNET54338da2ff.DATARMNET815cbb4bf5,&DATARMNETbf55123e5b->saddr,sizeof(
DATARMNETbf55123e5b->saddr));memcpy(&DATARMNET54338da2ff.DATARMNETc3f31215b7,&
DATARMNETbf55123e5b->daddr,sizeof(DATARMNETbf55123e5b->daddr));
DATARMNET54338da2ff.id=DATARMNET2d5a34898d->identification;DATARMNET54338da2ff.
DATARMNETb65c469a15=htons(DATARMNET2d5a34898d->frag_off)&IP6_OFFSET;
DATARMNET54338da2ff.DATARMNET611d08d671=(u16)DATARMNET611d08d671;if(
NULL,NULL)<(0xd2d+202-0xdf7))return-(0xd26+209-0xdf6);frag_hdr=(struct frag_hdr*
)(DATARMNET543491eb0f->data+DATARMNET7b34b7b5be);DATARMNET54338da2ff.
DATARMNET0d956cc77a=(0xd03+244-0xdf1);memcpy(&DATARMNET54338da2ff.
DATARMNET815cbb4bf5,&DATARMNETbf55123e5b->saddr,sizeof(DATARMNETbf55123e5b->
saddr));memcpy(&DATARMNET54338da2ff.DATARMNETc3f31215b7,&DATARMNETbf55123e5b->
daddr,sizeof(DATARMNETbf55123e5b->daddr));DATARMNET54338da2ff.id=frag_hdr->
identification;DATARMNET54338da2ff.DATARMNETb65c469a15=htons(frag_hdr->frag_off)
&IP6_OFFSET;DATARMNET54338da2ff.DATARMNET611d08d671=(u16)DATARMNET611d08d671;if(
DATARMNET3396919a68->DATARMNET4924e79411==NEXTHDR_FRAGMENT)DATARMNET54338da2ff.
DATARMNET611d08d671+=sizeof(*DATARMNET2d5a34898d);return DATARMNET9d6ad3b16f(
DATARMNET611d08d671+=sizeof(*frag_hdr);return DATARMNET9d6ad3b16f(
DATARMNET543491eb0f,DATARMNET3396919a68,&DATARMNET54338da2ff,DATARMNET2d4b4cfc9e
);}int DATARMNET49c2c17e77(void){INIT_DELAYED_WORK(&DATARMNETa41953a37b.
DATARMNET190b4452e8,DATARMNETad2b7fd8f5);return(0xd2d+202-0xdf7);}void

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* RMNET WLAN Generic Netlink
*
@@ -139,11 +139,10 @@ DATARMNET54338da2ff,
);return-EINVAL;}DATARMNET2d4b4cfc9e.DATARMNET9954a624ac=nla_get_u8(
DATARMNET54338da2ff->attrs[DATARMNET149cafb1b7]);DATARMNETef7cdd7b6b=
DATARMNET54338da2ff->attrs[DATARMNETea4b56dc2b];DATARMNETd7f4d7c495=nla_data(
DATARMNETef7cdd7b6b);if(strnchr(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b)
,DATARMNET5fe2c6571f)){if(in6_pton(DATARMNETd7f4d7c495,nla_len(
DATARMNETef7cdd7b6b),DATARMNET2d4b4cfc9e.DATARMNET5700daac01.s6_addr,-
(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){GENL_SET_ERR_MSG(DATARMNET54338da2ff
,
DATARMNETef7cdd7b6b);if(strchr(DATARMNETd7f4d7c495,DATARMNET5fe2c6571f)){if(
in6_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),DATARMNET2d4b4cfc9e.
DATARMNET5700daac01.s6_addr,-(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){
GENL_SET_ERR_MSG(DATARMNET54338da2ff,
"\x46\x57\x44\x20\x61\x64\x64\x72\x65\x73\x73\x20\x69\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x20\x49\x50\x76\x36"
);return-EINVAL;}DATARMNET2d4b4cfc9e.DATARMNET0d956cc77a=(0xd03+244-0xdf1);}else
{if(in4_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),(u8*)&
@@ -169,11 +168,10 @@ DATARMNET54338da2ff,
);return-EINVAL;}DATARMNET2d4b4cfc9e.DATARMNET9954a624ac=nla_get_u8(
DATARMNET54338da2ff->attrs[DATARMNET149cafb1b7]);DATARMNETef7cdd7b6b=
DATARMNET54338da2ff->attrs[DATARMNETea4b56dc2b];DATARMNETd7f4d7c495=nla_data(
DATARMNETef7cdd7b6b);if(strnchr(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b)
,DATARMNET5fe2c6571f)){if(in6_pton(DATARMNETd7f4d7c495,nla_len(
DATARMNETef7cdd7b6b),DATARMNET2d4b4cfc9e.DATARMNET5700daac01.s6_addr,-
(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){GENL_SET_ERR_MSG(DATARMNET54338da2ff
,
DATARMNETef7cdd7b6b);if(strchr(DATARMNETd7f4d7c495,DATARMNET5fe2c6571f)){if(
in6_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),DATARMNET2d4b4cfc9e.
DATARMNET5700daac01.s6_addr,-(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){
GENL_SET_ERR_MSG(DATARMNET54338da2ff,
"\x46\x57\x44\x20\x61\x64\x64\x72\x65\x73\x73\x20\x69\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x20\x49\x50\x76\x36"
);return-EINVAL;}DATARMNET2d4b4cfc9e.DATARMNET0d956cc77a=(0xd03+244-0xdf1);}else
{if(in4_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),(u8*)&
@@ -228,11 +226,11 @@ DATARMNETd3dacf7559]||!DATARMNET54338da2ff->attrs[DATARMNETeaebe95912]){
GENL_SET_ERR_MSG(DATARMNET54338da2ff,
"\x4d\x75\x73\x74\x20\x73\x70\x65\x63\x69\x66\x79\x20\x46\x57\x44\x20\x64\x65\x76\x69\x63\x65\x20\x61\x6e\x64\x20\x61\x64\x64\x72\x65\x73\x73"
);return-EINVAL;}DATARMNETef7cdd7b6b=DATARMNET54338da2ff->attrs[
DATARMNET185fd3de68];DATARMNETd7f4d7c495=nla_data(DATARMNETef7cdd7b6b);if(
strnchr(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),DATARMNET5fe2c6571f)){
if(in6_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),DATARMNET3396919a68
.DATARMNET815cbb4bf5.s6_addr,-(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){
GENL_SET_ERR_MSG(DATARMNET54338da2ff,
DATARMNET185fd3de68];DATARMNETd7f4d7c495=nla_data(DATARMNETef7cdd7b6b);if(strchr
(DATARMNETd7f4d7c495,DATARMNET5fe2c6571f)){if(in6_pton(DATARMNETd7f4d7c495,
nla_len(DATARMNETef7cdd7b6b),DATARMNET3396919a68.DATARMNET815cbb4bf5.s6_addr,-
(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){GENL_SET_ERR_MSG(DATARMNET54338da2ff
,
"\x53\x52\x43\x20\x61\x64\x64\x72\x65\x73\x73\x20\x69\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x20\x49\x50\x76\x36"
);return-EINVAL;}DATARMNET3396919a68.DATARMNET0d956cc77a=(0xd03+244-0xdf1);}else
{if(in4_pton(DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),(u8*)&
@@ -241,11 +239,11 @@ DATARMNET3396919a68.DATARMNETdfe430c2d6,-(0xd26+209-0xdf6),NULL)!=
"\x53\x52\x43\x20\x61\x64\x64\x72\x65\x73\x73\x20\x69\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x20\x49\x50\x76\x34"
);return-EINVAL;}DATARMNET3396919a68.DATARMNET0d956cc77a=(0xd11+230-0xdf3);}
DATARMNETef7cdd7b6b=DATARMNET54338da2ff->attrs[DATARMNETd996a18fa6];
DATARMNETd7f4d7c495=nla_data(DATARMNETef7cdd7b6b);if(strnchr(DATARMNETd7f4d7c495
,nla_len(DATARMNETef7cdd7b6b),DATARMNET5fe2c6571f)){if(in6_pton(
DATARMNETd7f4d7c495,nla_len(DATARMNETef7cdd7b6b),DATARMNET3396919a68.
DATARMNETc3f31215b7.s6_addr,-(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){
GENL_SET_ERR_MSG(DATARMNET54338da2ff,
DATARMNETd7f4d7c495=nla_data(DATARMNETef7cdd7b6b);if(strchr(DATARMNETd7f4d7c495,
DATARMNET5fe2c6571f)){if(in6_pton(DATARMNETd7f4d7c495,nla_len(
DATARMNETef7cdd7b6b),DATARMNET3396919a68.DATARMNETc3f31215b7.s6_addr,-
(0xd26+209-0xdf6),NULL)!=(0xd26+209-0xdf6)){GENL_SET_ERR_MSG(DATARMNET54338da2ff
,
"\x44\x53\x54\x20\x61\x64\x64\x72\x65\x73\x73\x20\x69\x73\x20\x69\x6e\x76\x61\x6c\x69\x64\x20\x69\x6e\x20\x49\x50\x76\x36"
);return-EINVAL;}}else{if(in4_pton(DATARMNETd7f4d7c495,nla_len(
DATARMNETef7cdd7b6b),(u8*)&DATARMNET3396919a68.DATARMNET2cb607d686,-
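
The hunks above change the separator check (strnchr() vs strchr()) used to decide whether a forwarding address attribute should be parsed as IPv6 before handing it to the kernel's in6_pton()/in4_pton() helpers, which return 1 on success (hence the != 1 error paths and the GENL_SET_ERR_MSG() replies). A rough userspace analogue of that branch, using inet_pton() as a stand-in for the kernel helpers and a hypothetical parse_fwd_addr() wrapper, assuming the separator constant is ':':

/* Illustrative sketch only; the driver itself parses netlink string
 * attributes with in4_pton()/in6_pton() in kernel context. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static int parse_fwd_addr(const char *str, void *out, int *family)
{
	/* Presence of ':' selects IPv6, matching the strchr() check. */
	*family = strchr(str, ':') ? AF_INET6 : AF_INET;
	/* inet_pton() returns 1 on success, mirroring the != 1 checks
	 * on in6_pton()/in4_pton() in the hunks above. */
	return inet_pton(*family, str, out) == 1 ? 0 : -1;
}

int main(void)
{
	unsigned char buf[16];
	int family;

	if (parse_fwd_addr("2001:db8::1", buf, &family))
		fprintf(stderr, "FWD address is invalid\n");
	else
		printf("parsed as %s\n",
		       family == AF_INET6 ? "IPv6" : "IPv4");
	return 0;
}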

View File

@@ -25,12 +25,12 @@
#include "rmnet_wlan_stats.h"
#include "rmnet_wlan_fragment.h"
#include "rmnet_wlan_connection.h"
static char*verinfo[]={"\x30\x37\x63\x39\x61\x34\x34\x37",
"\x32\x30\x62\x31\x61\x65\x62\x31","\x34\x63\x31\x32\x61\x66\x39\x63",
static char*verinfo[]={"\x34\x63\x31\x32\x61\x66\x39\x63",
"\x61\x35\x38\x36\x66\x61\x31\x66","\x36\x36\x64\x66\x61\x32\x39\x34",
"\x63\x32\x30\x33\x65\x36\x39\x39","\x62\x33\x30\x63\x65\x32\x36\x36",
"\x35\x38\x61\x61\x39\x62\x65\x65","\x37\x32\x39\x63\x61\x37\x33\x37",
"\x66\x34\x35\x34\x32\x32\x62\x64","\x30\x32\x39\x33\x31\x66\x62\x66",};
"\x66\x34\x35\x34\x32\x32\x62\x64","\x30\x32\x39\x33\x31\x66\x62\x66",
"\x32\x31\x39\x38\x30\x64\x66\x62","\x61\x33\x31\x34\x63\x62\x32\x35",};
module_param_array(verinfo,charp,NULL,(0xcb7+5769-0x221c));MODULE_PARM_DESC(
verinfo,
"\x56\x65\x72\x73\x69\x6f\x6e\x20\x6f\x66\x20\x74\x68\x65\x20\x64\x72\x69\x76\x65\x72"
@@ -363,7 +363,7 @@ DATARMNET6396f657b3+(0xd26+209-0xdf6)])return(0xd26+209-0xdf6);return
DATARMNET54fdee4fda[DATARMNET6396f657b3+(0xd26+209-0xdf6)];}void
DATARMNET7ca470d54b(struct sk_buff*DATARMNET543491eb0f,u32 DATARMNET1464100e7a){
struct tcphdr*DATARMNET668416551c;u8 DATARMNET630b15102e[(0xf07+1090-0x130d)];u8
*DATARMNET54fdee4fda;__be16 DATARMNETdda9f3dd51=htons((0xef7+3481-0x181c));int
*DATARMNET54fdee4fda;__be16 DATARMNETdda9f3dd51=htons((0xef7+3481-0x181c));u16
DATARMNET611d08d671;u16 DATARMNET7fa3427233;unsigned int DATARMNETefc9df3df2;
__be32 DATARMNET572f0d1999;u8 DATARMNET0d956cc77a;if(DATARMNET543491eb0f->
protocol==htons(ETH_P_IP)){struct iphdr*DATARMNET86f1f2cdc9,DATARMNETbf6548198e;
@@ -536,25 +536,23 @@ DATARMNETd6c25fe6b5;int DATARMNET268a8314cf=(0xd2d+202-0xdf7);void*
DATARMNET9eab1e957c;int DATARMNET5c2fd31d7b;mutex_lock(&DATARMNET63a2b7773e);
DATARMNETd6c25fe6b5=nla_total_size(nla_total_size(sizeof(DATARMNET63b1a086d5->
DATARMNET54338da2ff))*DATARMNET7c77d83017);DATARMNET49b2094b56=genlmsg_new(
DATARMNETd6c25fe6b5,GFP_KERNEL);if(!DATARMNET49b2094b56){DATARMNET268a8314cf=-
ENOMEM;goto DATARMNET27d4697979;}DATARMNET9eab1e957c=genlmsg_put_reply(
DATARMNET49b2094b56,DATARMNET54338da2ff,DATARMNET4a4aca7220,(0xd2d+202-0xdf7),
DATARMNET7c479706fb);if(!DATARMNET9eab1e957c){DATARMNET268a8314cf=-EINVAL;goto
DATARMNETb042feb7e2;}DATARMNETa5d4001a4a=nla_nest_start(DATARMNET49b2094b56,
DATARMNET4e9cd7b8bf);if(!DATARMNETa5d4001a4a){DATARMNET268a8314cf=-EINVAL;goto
DATARMNETb042feb7e2;}hash_for_each(DATARMNET1903907456,DATARMNET5c2fd31d7b,
DATARMNET63b1a086d5,DATARMNETe8608dd267)DATARMNETed41dd2d3f(DATARMNET49b2094b56,
&DATARMNET63b1a086d5->DATARMNET54338da2ff);nla_nest_end(DATARMNET49b2094b56,
DATARMNETa5d4001a4a);genlmsg_end(DATARMNET49b2094b56,DATARMNET9eab1e957c);*
DATARMNET89946cec52=DATARMNET49b2094b56;goto DATARMNET27d4697979;
DATARMNETb042feb7e2:kfree(DATARMNET49b2094b56);DATARMNET27d4697979:mutex_unlock(
&DATARMNET63a2b7773e);return DATARMNET268a8314cf;}int DATARMNET078f6bd384(void){
DATARMNETbb4efa5b3d();return(0xd2d+202-0xdf7);}void DATARMNETfae36afa03(void){
DATARMNETbb4efa5b3d();}char*DATARMNET934406764d(void){return DATARMNET30500ba48c
;}char*DATARMNETe447822105(void){return DATARMNET755f0f0df8;}int
DATARMNETaba2beb199(const char*DATARMNETf6f1ac8bbf,const char*
DATARMNET8faed3a82a,size_t DATARMNET1dfbbc30a5){while(DATARMNET1dfbbc30a5--){if(
*DATARMNETf6f1ac8bbf!=*DATARMNET8faed3a82a)return((unsigned char)*
DATARMNETf6f1ac8bbf-(unsigned char)*DATARMNET8faed3a82a);DATARMNETf6f1ac8bbf++;
DATARMNET8faed3a82a++;if(!(*DATARMNETf6f1ac8bbf))break;}return(0xd2d+202-0xdf7);
}
DATARMNETd6c25fe6b5,GFP_KERNEL);if(!DATARMNET49b2094b56)return-ENOMEM;
DATARMNET9eab1e957c=genlmsg_put_reply(DATARMNET49b2094b56,DATARMNET54338da2ff,
DATARMNET4a4aca7220,(0xd2d+202-0xdf7),DATARMNET7c479706fb);if(!
DATARMNET9eab1e957c){kfree(DATARMNET49b2094b56);return-EINVAL;}
DATARMNETa5d4001a4a=nla_nest_start(DATARMNET49b2094b56,DATARMNET4e9cd7b8bf);if(!
DATARMNETa5d4001a4a){kfree(DATARMNET49b2094b56);return-EINVAL;}hash_for_each(
DATARMNET1903907456,DATARMNET5c2fd31d7b,DATARMNET63b1a086d5,DATARMNETe8608dd267)
DATARMNETed41dd2d3f(DATARMNET49b2094b56,&DATARMNET63b1a086d5->
DATARMNET54338da2ff);nla_nest_end(DATARMNET49b2094b56,DATARMNETa5d4001a4a);
genlmsg_end(DATARMNET49b2094b56,DATARMNET9eab1e957c);*DATARMNET89946cec52=
DATARMNET49b2094b56;mutex_unlock(&DATARMNET63a2b7773e);return
DATARMNET268a8314cf;}int DATARMNET078f6bd384(void){DATARMNETbb4efa5b3d();return
(0xd2d+202-0xdf7);}void DATARMNETfae36afa03(void){DATARMNETbb4efa5b3d();}char*
DATARMNET934406764d(void){return DATARMNET30500ba48c;}char*DATARMNETe447822105(
void){return DATARMNET755f0f0df8;}int DATARMNETaba2beb199(const char*
DATARMNETf6f1ac8bbf,const char*DATARMNET8faed3a82a,size_t DATARMNET1dfbbc30a5){
while(DATARMNET1dfbbc30a5--){if(*DATARMNETf6f1ac8bbf!=*DATARMNET8faed3a82a)
return((unsigned char)*DATARMNETf6f1ac8bbf-(unsigned char)*DATARMNET8faed3a82a);
DATARMNETf6f1ac8bbf++;DATARMNET8faed3a82a++;if(!(*DATARMNETf6f1ac8bbf))break;}
return(0xd2d+202-0xdf7);}