Replace common qcom sources with Samsung ones

SaschaNes
2025-08-12 22:13:00 +02:00
parent ba24dcded9
commit 6f7753de11
5682 changed files with 2450203 additions and 103634 deletions

View File

@@ -13,9 +13,6 @@ define_modules("parrot", "perf")
define_modules("monaco", "consolidate")
define_modules("monaco", "perf")
define_modules("tuna", "consolidate")
define_modules("tuna", "perf")
package(
default_visibility = [
"//visibility:public",

View File

@@ -380,25 +380,6 @@ TRACE_EVENT(dfc_set_powersave_mode,
__entry->enable ? "enable" : "disable")
);
TRACE_EVENT(dfc_pm_event,
TP_PROTO(int event),
TP_ARGS(event),
TP_STRUCT__entry(
__field(int, event)
),
TP_fast_assign(
__entry->event = event;
),
TP_printk("Got PM event from notifier %d",
__entry->event)
);
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */

View File

@@ -6,6 +6,7 @@
#include <net/pkt_sched.h>
#include <linux/module.h>
#include <linux/ipc_logging.h>
#include "rmnet_qmap.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
@@ -502,6 +503,7 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi)
{
struct dfc_qmi_data *data;
int rc;
if (!port || !qmi)
return -EINVAL;
@@ -522,7 +524,9 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
qmi->dfc_clients[index] = (void *)data;
rcu_assign_pointer(qmap_dfc_data, data);
rmnet_qmap_init(port);
if ((rc = rmnet_qmap_init(port)) < 0) {
net_log("%s(%d) QMAP INIT FAILED:%d\n", __func__, __LINE__, rc);
}
trace_dfc_client_state_up(data->index, data->svc.instance,
data->svc.ep_type, data->svc.iface_id);
@@ -546,7 +550,7 @@ void dfc_qmap_client_exit(void *dfc_data)
struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
if (!data) {
pr_err("%s() data is null\n", __func__);
net_log("%s() data is null\n", __func__);
return;
}
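
Throughout the commit, pr_err() calls and several tracepoints are swapped for net_log() calls backed by the linux/ipc_logging.h include added at the top of this file. The macro itself does not appear in any hunk; a minimal sketch of how such a wrapper is typically built on the ipc_logging interface might look like the following (the context name, page count, and init helper are assumptions, not part of the commit):

#include <linux/ipc_logging.h>
#include <linux/errno.h>

#define NET_LOG_PAGES	10		/* assumed size of the log buffer */

static void *net_log_ctx;		/* hypothetical shared log context */

/* assumed definition: silently drop messages until the context exists */
#define net_log(fmt, ...)						\
do {									\
	if (net_log_ctx)						\
		ipc_log_string(net_log_ctx, fmt, ##__VA_ARGS__);	\
} while (0)

static int net_log_ctx_init(void)
{
	/* entries become readable from debugfs under ipc_logging/ */
	net_log_ctx = ipc_log_context_create(NET_LOG_PAGES,
					     "rmnet_net_log", 0);
	return net_log_ctx ? 0 : -ENOMEM;
}

The existing module init path would call net_log_ctx_init() once; the real hookup is not visible in this diff.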

View File

@@ -20,6 +20,8 @@
#define CREATE_TRACE_POINTS
#include "dfc.h"
#include <linux/ipc_logging.h>
struct dfc_qmap_header {
u8 pad_len:6;
u8 reserved_bit:1;
@@ -935,10 +937,17 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
* If there is non zero grant but tcp ancillary is false,
* send out ACKs anyway
*/
if (bearer->ack_mq_idx != INVALID_MQ)
if (bearer->ack_mq_idx != INVALID_MQ) {
net_log("m=%d b=%u gr=%u ack_mq_idx %s",
qos->mux_id, bearer->bearer_id, bearer->grant_size,
enable ? "en" : "dis");
qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
enable || bearer->tcp_bidir);
}
net_log("m=%d b=%u q=%d gr=%u mq %s",
qos->mux_id, bearer->bearer_id, bearer->mq_idx, bearer->grant_size,
enable ? "en" : "dis");
qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
if (!enable && bearer->ack_req)
@@ -1079,9 +1088,16 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
itm->last_seq = fc_info->seq_num;
itm->last_adjusted_grant = adjusted_grant;
if (action)
if (action) {
net_log("I> m=%d b=%d gr=%d agr=%d s=%d a=%d\n",
fc_info->mux_id, fc_info->bearer_id,
fc_info->num_bytes, itm->grant_size, fc_info->seq_num,
ancillary);
rc = dfc_bearer_flow_ctl(dev, itm, qos);
}
} else {
net_log("no match bearer_map! m=%d b=%d\n", fc_info->mux_id, fc_info->bearer_id);
}
return rc;
}
@@ -1148,9 +1164,14 @@ void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
}
}
if (unlikely(flow_status->bearer_id == 0xFF))
if (unlikely(flow_status->bearer_id == 0xFF)) {
net_log("I> m=%d b=%d gr=%d s=%d a=%d\n",
flow_status->mux_id, flow_status->bearer_id,
flow_status->num_bytes, flow_status->seq_num,
ancillary);
dfc_all_bearer_flow_ctl(
dev, qos, ack_req, ancillary, flow_status);
}
else
dfc_update_fc_map(
dev, qos, ack_req, ancillary, flow_status,
@@ -1177,6 +1198,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
if (itm->tx_off == !tx_status)
return;
net_log("Link> %s, b=%d, gr=%d, rs=%d, status %d\n", dev->name,
binfo->bearer_id, itm->grant_size, itm->rat_switch, tx_status);
if (itm->grant_size && !tx_status) {
itm->grant_size = 0;
itm->tcp_bidir = false;

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,8 +27,8 @@
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/suspend.h>
#include <linux/notifier.h>
#include <linux/ipc_logging.h>
#include <linux/rtc.h>
#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
@@ -212,6 +212,8 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
{
struct netdev_queue *q;
struct timespec64 ts64;
struct rtc_time tm;
if (unlikely(mq_idx >= dev->num_tx_queues))
return 0;
@@ -220,6 +222,13 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
if (unlikely(!q))
return 0;
ktime_get_real_ts64(&ts64);
rtc_time64_to_tm(ts64.tv_sec - (sys_tz.tz_minuteswest * 60), &tm);
net_log("%d-%02d-%02d %02d:%02d:%02d.%06lu, %s[%d] %s_queue\n",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, ts64.tv_nsec/1000,
dev->name, mq_idx, enable ? "wake" : "stop");
if (enable)
netif_tx_wake_queue(q);
else
@@ -339,6 +348,9 @@ static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
timer_setup(&bearer->ch_switch.guard_timer,
rmnet_ll_guard_fn, 0);
list_add(&bearer->list, &qos_info->bearer_head);
net_log("create bearer: %s m=%d b=%u",
qos_info->vnd_dev, qos_info->mux_id, bearer->bearer_id);
}
return bearer;
@@ -373,6 +385,14 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
/* Remove from bearer map */
list_del(&bearer->list);
qos_info->removed_bearer = bearer;
net_log("remove bearer: %s m=%d b=%u gr=%u q=%d",
dev->name, qos_info->mux_id, bearer->bearer_id,
bearer->grant_size, bearer->mq_idx);
} else if (bearer && bearer->flow_ref >= 0) {
/* bearer is still being used by other mq */
net_log("can't remove bearer_map m=%d b=%u ref:%d q=%d",
qos_info->mux_id, bearer->bearer_id, bearer->flow_ref, bearer->mq_idx);
}
}
@@ -389,6 +409,11 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
mq = &qos_info->mq[itm->mq_idx];
if (!mq->bearer) {
/* TODO: seems to be assigned multiple times */
if (bearer->mq_idx != INVALID_MQ && bearer->mq_idx != itm->mq_idx)
net_log("WARN: multiple assign! %s m=%d b=%u f=%u ip=%d prev_q=%d next_q=%d",
dev->name, qos_info->mux_id, itm->bearer_id, itm->flow_id, itm->ip_type,
bearer->mq_idx, itm->mq_idx);
mq->bearer = bearer;
mq->is_ll_ch = bearer->ch_switch.current_ch;
mq->drop_on_remove = false;
@@ -407,9 +432,22 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
bearer->grant_thresh =
qmi_rmnet_grant_per(DEFAULT_GRANT);
}
net_log("update_mq %s m=%d b=%u gr=%u f=%u q=%d en",
dev->name, qos_info->mux_id, itm->bearer_id,
bearer->grant_size, itm->flow_id, itm->mq_idx);
qmi_rmnet_flow_control(dev, itm->mq_idx, 1);
if (dfc_mode == DFC_MODE_SA)
if (dfc_mode == DFC_MODE_SA) {
net_log("update_mq %s m=%d b=%u gr=%u f=%u q=%d en",
dev->name, qos_info->mux_id, itm->bearer_id,
bearer->grant_size, itm->flow_id, bearer->ack_mq_idx);
qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, 1);
}
} else {
if (mq->bearer->bearer_id != itm->bearer_id)
net_log("WARN: skip update_mp! %s m=%d current(b=%u q=%d) new(b=%u f=%u ip=%d q=%d)",
dev->name, qos_info->mux_id,
mq->bearer->bearer_id, mq->bearer->mq_idx,
itm->bearer_id, itm->flow_id, itm->ip_type, itm->mq_idx);
}
}
@@ -459,7 +497,10 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
new_map.mq_idx = tcm->tcm_handle;
trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
new_map.ip_type, new_map.mq_idx, 1);
net_log("add flow: %s m=%d b=%d f=%d ip=%d q=%d\n",
dev->name, qos_info->mux_id, new_map.bearer_id,
new_map.flow_id,
new_map.ip_type, new_map.mq_idx);
again:
spin_lock_bh(&qos_info->qos_lock);
@@ -493,6 +534,10 @@ again:
qmi_rmnet_update_flow_map(itm, &new_map);
list_add(&itm->list, &qos_info->flow_head);
net_log("create flow: %s m=%d b=%d f=%d ip=%d q=%d\n",
dev->name, qos_info->mux_id, itm->bearer_id,
itm->flow_id, itm->ip_type, itm->mq_idx);
/* Create or update bearer map */
bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
if (!bearer) {
@@ -540,7 +585,10 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
trace_dfc_flow_info(dev->name, new_map.bearer_id,
new_map.flow_id, new_map.ip_type,
itm->mq_idx, 0);
net_log("delete flow: %s m=%d b=%d f=%d ip=%d q=%d\n",
dev->name, qos_info->mux_id, new_map.bearer_id,
new_map.flow_id,
new_map.ip_type, itm->mq_idx);
__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
/* Remove from flow map */
@@ -548,8 +596,11 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
kfree(itm);
}
if (list_empty(&qos_info->flow_head))
if (list_empty(&qos_info->flow_head)) {
netif_tx_wake_all_queues(dev);
net_log("wake: %s m=%d b=%u gr=%u f=%u q=%d en",
dev->name, qos_info->mux_id, 0xff, DEFAULT_GRANT, 0, 0);
}
spin_unlock_bh(&qos_info->qos_lock);
@@ -581,9 +632,6 @@ struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
return bearer;
}
static int qmi_rmnet_pm_notify_cb(struct notifier_block *notifier,
unsigned long pm_event, void *unused);
#else
static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
@@ -630,12 +678,6 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
return -ENOMEM;
rmnet_init_qmi_pt(port, qmi);
/* pm-register is only needed once when first client is setup
* and not per client
*/
((struct rmnet_port *)port)->dfc_pm_notifier.notifier_call
= qmi_rmnet_pm_notify_cb;
register_pm_notifier(&(((struct rmnet_port *)port)->dfc_pm_notifier));
}
qmi->flag = tcm->tcm_ifindex;
@@ -659,6 +701,9 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
err = wda_qmi_client_init(port, &svc, qmi);
}
net_log("%s: mode %d idx %d svc [%d:%d:%d], err=%d\n", __func__,
dfc_mode, idx, svc.instance, svc.ep_type, svc.iface_id, err);
return err;
}
@@ -667,6 +712,8 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
{
void *data = NULL;
net_log("%s: idx %d\n", __func__, idx);
ASSERT_RTNL();
if (qmi->dfc_clients[idx])
@@ -724,11 +771,9 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
{
struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
struct notifier_block *nb;
void *wda_data = NULL;
int rc = 0;
switch (tcm->tcm_family) {
case NLMSG_FLOW_ACTIVATE:
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
@@ -758,8 +803,6 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
if (qmi &&
!qmi_rmnet_has_client(qmi) &&
!qmi_rmnet_has_pending(qmi)) {
nb = &(((struct rmnet_port *)port)->dfc_pm_notifier);
unregister_pm_notifier(nb);
rmnet_reset_qmi_pt(port);
kfree(qmi);
}
@@ -1238,57 +1281,6 @@ int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
}
EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
static int qmi_rmnet_pm_notify_cb(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
struct qmi_info *qmi;
struct rmnet_port *port;
u8 num_bearers;
port = container_of(notifier, struct rmnet_port, dfc_pm_notifier);
qmi = port->qmi_info;
trace_dfc_pm_event(pm_event);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
cancel_delayed_work_sync(&rmnet_work->work);
if (!qmi->ps_enabled) {
qmi->ps_ignore_grant = true;
qmi->ps_enabled = true;
rmnet_module_hook_aps_data_inactive();
/* Needed Memory barrier */
smp_mb();
num_bearers = sizeof(ps_bearer_id);
memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
rmnet_prepare_ps_bearers(port, &num_bearers,
ps_bearer_id);
/* Enter powersave */
if (dfc_qmap)
dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
else
qmi_rmnet_set_powersave_mode(port, 1,
num_bearers, ps_bearer_id);
if (rmnet_get_powersave_notif(port))
qmi_rmnet_ps_on_notify(port);
}
break;
case PM_POST_SUSPEND:
/* Clear the bit before enabling flow so pending packets
* can trigger the work again
*/
clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
break;
default:
break;
}
return NOTIFY_DONE;
}
static void qmi_rmnet_work_restart(void *port)
{
rcu_read_lock();
@@ -1536,8 +1528,6 @@ void qmi_rmnet_work_exit(void *port)
rmnet_work_quit = true;
synchronize_rcu();
unregister_pm_notifier(&(((struct rmnet_port *)port)->dfc_pm_notifier));
rmnet_work_inited = false;
cancel_delayed_work_sync(&rmnet_work->work);
destroy_workqueue(rmnet_ps_wq);
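
The qmi_rmnet_flow_control() hunk earlier in this file prefixes every queue wake/stop log with local wall-clock time derived from ktime_get_real_ts64() and rtc_time64_to_tm(). Isolated from the diff, the same conversion compiles as a small helper (the helper name is hypothetical):

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/rtc.h>
#include <linux/time.h>

/* hypothetical helper: format the current local time the way the
 * qmi_rmnet_flow_control() log above does
 */
static void rmnet_fmt_walltime(char *buf, size_t len)
{
	struct timespec64 ts64;
	struct rtc_time tm;

	ktime_get_real_ts64(&ts64);
	/* sys_tz.tz_minuteswest is minutes west of UTC, so subtract it */
	rtc_time64_to_tm(ts64.tv_sec - (sys_tz.tz_minuteswest * 60), &tm);
	snprintf(buf, len, "%d-%02d-%02d %02d:%02d:%02d.%06lu",
		 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		 tm.tm_hour, tm.tm_min, tm.tm_sec, ts64.tv_nsec / 1000);
}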

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -79,8 +79,6 @@ struct rmnet_port_priv_stats {
u64 dl_frag_stat[5];
u64 pb_marker_count;
u64 pb_marker_seq;
u64 chained_packets_recvd;
u64 packets_chained;
};
struct rmnet_egress_agg_params {
@@ -150,7 +148,6 @@ struct rmnet_port {
/* Descriptor pool */
spinlock_t desc_pool_lock;
struct rmnet_frag_descriptor_pool *frag_desc_pool;
struct notifier_block dfc_pm_notifier;
};
extern struct rtnl_link_ops rmnet_link_ops;

View File

@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipa.h>
#include <linux/ipc_logging.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"
@@ -80,7 +81,7 @@ static void rmnet_ctl_ipa_ready(void *user_data)
&ctl_ipa_dev);
if (rc)
pr_err("%s: %d\n", __func__, rc);
net_log("%s: %d\n", __func__, rc);
else
rmnet_ctl_ipa_registered = true;
}
@@ -90,10 +91,12 @@ static int __init rmnet_ctl_init(void)
int rc;
rc = ipa_register_ipa_ready_cb(rmnet_ctl_ipa_ready, NULL);
if (rc == -EEXIST)
if (rc == -EEXIST) {
rmnet_ctl_ipa_ready(NULL);
else if (rc)
pr_err("%s: %d\n", __func__, rc);
}
else if (rc) {
net_log("%s: rmnet CTL INIT failed %d\n", __func__, rc);
}
return 0;
}
@@ -103,6 +106,7 @@ static void __exit rmnet_ctl_exit(void)
if (rmnet_ctl_ipa_registered) {
ipa_unregister_rmnet_ctl_cb();
rmnet_ctl_ipa_registered = false;
net_log("%s: rmnet CTL exited \n", __func__);
}
}

View File

@@ -19,6 +19,7 @@
#include <linux/inet.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/ipc_logging.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
@@ -28,6 +29,7 @@
#include "rmnet_qmi.h"
#include "rmnet_trace.h"
#include "qmi_rmnet.h"
#include "rmnet_qmap.h"
#include "rmnet_mem.h"
#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
@@ -595,6 +597,9 @@ void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
break;
default:
net_log("%s() n:%d type:%d tr:%d\n",
__func__, cmd->command_name, cmd->cmd_type,
cmd->transaction_id);
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
break;
}
@@ -631,6 +636,9 @@ int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
rmnet_frag_process_pb_ind(frag_desc, cmd, port, pkt_len);
break;
default:
net_log("%s() n:%d type:%d Tr:%d\n",
__func__, cmd->command_name, cmd->cmd_type,
cmd->transaction_id);
return 1;
}
@@ -1877,6 +1885,33 @@ int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
return rc;
}
static int
rmnet_check_frag_dfc_qmap_packets(struct rmnet_frag_descriptor *frag_desc)
{
struct rmnet_map_control_command_header *cmd, __cmd;
cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
sizeof(*cmd), &__cmd);
if (!cmd)
return 0;
switch (cmd->command_name) {
case QMAP_DFC_CONFIG:
case QMAP_DFC_IND:
case QMAP_DFC_QUERY:
case QMAP_DFC_END_MARKER:
case QMAP_DFC_POWERSAVE:
net_log("%s(%d) n:%d t:%d trans:%d\n",
__func__, __LINE__,
cmd->command_name, cmd->cmd_type,
cmd->transaction_id);
return 1;
default:
return 0;
}
return 1;
}
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
@@ -1907,6 +1942,9 @@ __rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
rmnet_frag_command(frag_desc, qmap, port);
if (rmnet_check_frag_dfc_qmap_packets(frag_desc))
BUG_ON(1);
goto recycle;
}

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -227,8 +227,7 @@ static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
int is_chained_skb = 0;
struct sk_buff *skbn, *head_skb;
struct sk_buff *skbn;
if (skb->dev->type == ARPHRD_ETHER) {
if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
@@ -256,13 +255,10 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
/* Deaggregation and freeing of HW originating
* buffers is done within here
*/
head_skb = skb;
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
struct sk_buff *skb_next = skb->next;
skb_shinfo(skb)->frag_list = NULL;
skb->next = NULL;
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
__rmnet_map_ingress_handler(skbn, port);
@@ -272,20 +268,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
consume_skb(skb);
next_skb:
if (head_skb == skb) {
skb = skb_frag;
if (skb_frag) {
is_chained_skb = 1;
port->stats.chained_packets_recvd++;
}
} else {
skb = skb_next;
/* No longer expected to use older skb chain format */
WARN_ON(skb_frag != NULL);
}
if (is_chained_skb)
port->stats.packets_chained++;
skb = skb_frag;
}
}

View File

@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
#include <linux/ipc_logging.h>
#include "dfc.h"
#include "rmnet_qmi.h"
#include "rmnet_ctl.h"
@@ -40,7 +41,7 @@ int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush)
}
if (rmnet_ctl->send(rmnet_ctl_handle, skb)) {
pr_err("Failed to send to rmnet ctl\n");
net_log("Failed to send to rmnet ctl\n");
return -ECOMM;
}
@@ -48,6 +49,42 @@ int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush)
}
EXPORT_SYMBOL(rmnet_qmap_send);
static void rmnet_print_packet(const struct sk_buff *skb, char *dir)
{
struct qmap_cmd_hdr *cmd;
char buffer[200];
unsigned int len, printlen;
int i, buffloc = 0;
printlen = 48;
if (skb->len > 0)
len = skb->len;
else
len = ((unsigned int)(uintptr_t)skb->end) -
((unsigned int)(uintptr_t)skb->data);
cmd = (struct qmap_cmd_hdr *)skb->data;
net_log("%s() QMAP cmd %d %s-ed type:%d len:%d",
__func__, cmd->cmd_name, dir,
cmd->cmd_type, len);
memset(buffer, 0, sizeof(buffer));
for (i = 0; (i < printlen) && (i < len); i++) {
if ((i % 16) == 0) {
net_log("[%s] - PKT%s\n", dir, buffer);
memset(buffer, 0, sizeof(buffer));
buffloc = 0;
buffloc += snprintf(&buffer[buffloc],
sizeof(buffer) - buffloc, "%04X:",
i);
}
buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
" %02x", skb->data[i]);
}
net_log("[%s] - PKT%s\n", dir, buffer);
}
static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
@@ -73,6 +110,7 @@ static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
case QMAP_DFC_QUERY:
case QMAP_DFC_END_MARKER:
case QMAP_DFC_POWERSAVE:
rmnet_print_packet(skb, "RX");
rc = dfc_qmap_cmd_handler(skb);
break;
@@ -107,6 +145,7 @@ static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
rmnet_qmap_send(skb, RMNET_CH_DEFAULT, false);
} else {
cmd->cmd_type = rc;
rmnet_print_packet(skb, "TX");
rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
rcu_read_unlock();
@@ -145,7 +184,7 @@ int rmnet_qmap_init(void *port)
rmnet_ctl = rmnet_ctl_if();
if (!rmnet_ctl) {
pr_err("rmnet_ctl module not loaded\n");
net_log("rmnet_ctl module not loaded\n");
return -EFAULT;
}
@@ -153,7 +192,7 @@ int rmnet_qmap_init(void *port)
rmnet_ctl_handle = rmnet_ctl->reg(&cb);
if (!rmnet_ctl_handle) {
pr_err("Failed to register with rmnet ctl\n");
net_log("Failed to register with rmnet ctl\n");
return -EFAULT;
}
@@ -162,8 +201,12 @@ int rmnet_qmap_init(void *port)
void rmnet_qmap_exit(void)
{
if (rmnet_ctl && rmnet_ctl->dereg)
rmnet_ctl->dereg(rmnet_ctl_handle);
int rc = 0;
if (rmnet_ctl && rmnet_ctl->dereg) {
rc = rmnet_ctl->dereg(rmnet_ctl_handle);
net_log("%s() deregistered with rmnet ctl: %d\n", __func__, rc);
}
rmnet_ctl_handle = NULL;
real_data_dev = NULL;
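
rmnet_print_packet() above dumps up to the first 48 bytes of each DFC control frame into the IPC log in 16-byte rows, each row prefixed with its byte offset. A compact standalone variant of that row loop (helper name assumed, reusing the net_log() the commit introduces) illustrates the shape of the resulting lines:

#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical variant of the row loop in rmnet_print_packet():
 * log up to 48 bytes in 16-byte rows prefixed "offset:"
 */
static void qmap_hexdump_rows(const u8 *data, unsigned int len)
{
	char row[64];			/* "xxxx:" plus 16 * " xx" fits easily */
	unsigned int i, n = len < 48 ? len : 48;
	int pos = 0;

	for (i = 0; i < n; i++) {
		if ((i % 16) == 0) {
			if (pos)
				net_log("PKT%s\n", row);
			pos = scnprintf(row, sizeof(row), "%04X:", i);
		}
		pos += scnprintf(&row[pos], sizeof(row) - pos,
				 " %02x", data[i]);
	}
	if (pos)
		net_log("PKT%s\n", row);
}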

View File

@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -550,9 +550,6 @@ static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"DL chaining frags [12-15]",
"DL chaining frags = 16",
"PB Byte Marker Count",
"PB Byte Marker Seq",
"Chained packets received",
"Packets chained",
};
static const char rmnet_ll_gstrings_stats[][ETH_GSTRING_LEN] = {

View File

@@ -14,7 +14,6 @@ ifeq ($(TARGET_DATARMNET_ENABLE), true)
DATA_DLKM_BOARD_PLATFORMS_LIST += sun
DATA_DLKM_BOARD_PLATFORMS_LIST += parrot
DATA_DLKM_BOARD_PLATFORMS_LIST += monaco
DATA_DLKM_BOARD_PLATFORMS_LIST += tuna
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)

View File

@@ -38,11 +38,6 @@ def define_modules(target, variant):
"core/rmnet_ctl_client.c",
],
},
"CONFIG_ARCH_TUNA": {
True: [
"core/rmnet_ctl_client.c",
],
},
},
kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
deps = [