Add Samsung specific changes
net/Kconfig
@@ -266,10 +266,8 @@ source "net/hsr/Kconfig"
source "net/switchdev/Kconfig"
source "net/l3mdev/Kconfig"
source "net/qrtr/Kconfig"
source "net/qmsgq/Kconfig"
source "net/ncsi/Kconfig"
# SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
source "net/ncm/Kconfig"
# SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

config PCPU_DEV_REFCNT
	bool "Use percpu variables to maintain network device refcount"
net/Makefile
@@ -75,10 +75,8 @@ obj-$(CONFIG_HSR) += hsr/
obj-$(CONFIG_NET_SWITCHDEV) += switchdev/
obj-$(CONFIG_NET_L3_MASTER_DEV) += l3mdev/
obj-$(CONFIG_QRTR) += qrtr/
obj-$(CONFIG_QMSGQ) += qmsgq/
obj-$(CONFIG_NET_NCSI) += ncsi/
# SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
obj-$(CONFIG_KNOX_NCM) += ncm/
# SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
obj-$(CONFIG_XDP_SOCKETS) += xdp/
obj-$(CONFIG_MPTCP) += mptcp/
obj-$(CONFIG_MCTP) += mctp/
net/bluetooth/hci_sock.c
@@ -39,7 +39,7 @@
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);
// static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

@@ -95,7 +95,7 @@ u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

/*
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;
@@ -122,7 +122,7 @@ static void hci_sock_free_cookie(struct sock *sk)
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

*/
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
@@ -548,7 +548,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)

	return skb;
}

/*
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
@@ -557,7 +557,7 @@ static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	// No message needed when cookie is not present
	if (!hci_pi(sk)->cookie)
		return NULL;

@@ -577,7 +577,7 @@ static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		// No message for unsupported format
		return NULL;
	}

@@ -614,7 +614,7 @@ static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	// No message needed when cookie is not present
	if (!hci_pi(sk)->cookie)
		return NULL;

@@ -624,7 +624,7 @@ static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		// No message for unsupported format
		return NULL;
	}

@@ -648,7 +648,7 @@ static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)

	return skb;
}

*/
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
@@ -677,7 +677,7 @@ static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,

	return skb;
}

/*
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
@@ -773,7 +773,7 @@ static void send_monitor_control_replay(struct sock *mon_sk)

	read_unlock(&hci_sk_list.lock);
}

*/
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
@@ -893,6 +893,7 @@ EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	/*
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
@@ -911,7 +912,7 @@ static int hci_sock_release(struct socket *sock)
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		// Send event to monitor
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -929,20 +930,20 @@ static int hci_sock_release(struct socket *sock)
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been complete and hdev will get released when we put
			 * below.
			 */
			// When releasing a user channel exclusive access,
			// call hci_dev_do_close directly instead of calling
			// hci_dev_close to ensure the exclusive access will
			// be released and the controller brought back down.
			//
			// The checking of HCI_AUTO_OFF is not needed in this
			// case since it will have been cleared already when
			// opening the user channel.
			//
			// Make sure to also check that we haven't already
			// unregistered since all the cleanup will have already
			// been complete and hdev will get released when we put
			// below.

			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
@@ -955,9 +956,10 @@ static int hci_sock_release(struct socket *sock)
	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	*/
	return 0;
}

/*
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
@@ -992,7 +994,7 @@ static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
	return err;
}

/* Ioctls that require bound socket */
// Ioctls that require bound socket
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
@@ -1032,17 +1034,18 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,

	return -ENOIOCTLCMD;
}

*/
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	/*
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	// Make sure the cmd is valid before doing anything
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
@@ -1077,26 +1080,26 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only send once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	// When calling an ioctl on an unbound raw socket, then ensure
	// that the monitor gets informed. Ensure that the resulting event
	// is only send once by checking if the cookie exists or not. The
	// socket cookie will be only ever generated once for the lifetime
	// of a given socket.

	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		// Perform careful checks before setting the HCI_SOCK_TRUSTED
		// flag. Make sure that not only the current task but also
		// the socket opener has the required capability, since
		// privileged programs can be tricked into making ioctl calls
		// on HCI sockets, and the socket should not be marked as
		// trusted simply because the ioctl caller is privileged.
		//
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		// Send event to monitor
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1160,6 +1163,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
done:
	release_sock(sk);
	return err;
	*/
	return 0;
}

#ifdef CONFIG_COMPAT
@@ -1181,6 +1186,7 @@ static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	/*
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
@@ -1201,10 +1207,10 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	// Allow detaching from dead device and attaching to alive device, if
	// the caller wants to re-bind (instead of close) this socket in
	// response to hci_sock_dev_event(HCI_DEV_UNREG) notification.

	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
@@ -1238,12 +1244,12 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bounded.
			 */
			// In the case when a cookie has already been assigned,
			// then there has been already an ioctl issued against
			// an unbound socket and with that triggered an open
			// notification. Send a close notification first to
			// allow the state transition to bounded.

			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1257,7 +1263,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		// Send event to monitor
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1309,13 +1315,13 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				// In case the transport is already up and
				// running, clear the error here.

				// This can happen when opening a user
				// channel and HCI_AUTO_OFF grace period
				// is still active.

				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
@@ -1328,11 +1334,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			// In the case when a cookie has already been assigned,
			// this socket will transition from a raw socket into
			// a user channel socket. For a clean transition, send
			// the close notification first.

			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1341,14 +1347,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		// The user channel is restricted to CAP_NET_ADMIN
		// capabilities and with that implicitly trusted.

		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		// Send event to monitor
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1372,9 +1378,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		// The monitor interface is restricted to CAP_NET_RAW
		// capabilities and with that implicitly trusted.

		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
@@ -1413,34 +1419,34 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		// Users with CAP_NET_ADMIN capabilities are allowed
		// access to all management commands and events. For
		// untrusted users the interface is restricted and
		// also only untrusted events are sent.

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		// At the moment the index and unconfigured index events
		// are enabled unconditionally. Setting them on each
		// socket when binding keeps this functionality. They
		// however might be cleared later and then sending of these
		// events will be disabled, but that is then intentional.

		// This also enables generic events that are safe to be
		// received by untrusted users. Example for such events
		// are changes to settings, class of device, name etc.

		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				// In the case when a cookie has already been
				// assigned, this socket will transition from
				// a raw socket into a control socket. To
				// allow for a clean transition, send the
				// close notification first.

				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1449,7 +1455,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
				}
			}

			/* Send event to monitor */
			// Send event to monitor
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -1467,7 +1473,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	// Default MTU to HCI_MAX_FRAME_SIZE if not set
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

@@ -1476,11 +1482,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
done:
	release_sock(sk);
	return err;
	*/
	return 0;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	/*
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
@@ -1507,6 +1516,8 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
done:
	release_sock(sk);
	return err;
	*/
	return 0;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
@@ -2155,14 +2166,14 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
	release_sock(sk);
	return err;
}

/*
static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

*/
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
@@ -2195,6 +2206,7 @@ static struct proto hci_sk_proto = {
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	/*
	struct sock *sk;

	BT_DBG("sock %p", sock);
@@ -2213,6 +2225,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	*/
	return 0;
}
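The net effect of these hunks is that every HCI socket operation body is fenced off in block comments and unconditionally returns 0. A hedged user-space sketch of what that looks like from the outside, assuming a kernel built with this patch (the behavior, not the constants, is the point; AF_BLUETOOTH/BTPROTO_HCI values are the standard Linux ones):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#endif
#ifndef BTPROTO_HCI
#define BTPROTO_HCI 1
#endif

int main(void)
{
	/* hci_sock_create() is a stub now, so this "succeeds" without
	 * allocating the protocol-private sock state it used to. */
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* hci_sock_release() is likewise a stub: close() tears down only
	 * generic socket state, and no monitor close event is emitted. */
	close(fd);
	return 0;
}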
net/core/neighbour.c
@@ -1033,11 +1033,6 @@ out:
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	if (n->dev != NULL && !strcmp(n->dev->name, "aware_data0")) {
		return (NEIGH_VAR(p, UCAST_PROBES) * 2) + NEIGH_VAR(p, APP_PROBES) +
		       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
			NEIGH_VAR(p, MCAST_PROBES));
	}
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
@@ -1141,10 +1136,6 @@ static void neigh_timer_handler(struct timer_list *t)
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		if (neigh->dev != NULL && !strcmp(neigh->dev->name, "aware_data0")) {
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME)/5,
					 HZ/100);
		} else
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

@@ -1205,11 +1196,6 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		next = now + 1;
	} else {
		immediate_probe = true;
		if (neigh->dev != NULL && !strcmp(neigh->dev->name, "aware_data0")) {
			next = now + max(NEIGH_VAR(neigh->parms,
						   RETRANS_TIME)/25,
					 HZ/100);
		} else
			next = now + max(NEIGH_VAR(neigh->parms,
						   RETRANS_TIME),
					 HZ / 100);
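For reference, a minimal standalone sketch of the probe timing these hunks produce for the "aware_data0" interface, assuming the stock default of RETRANS_TIME = 1 s (the value is per-interface and tunable, so the numbers are illustrative only):

#include <stdio.h>

/* Hypothetical model of the neigh_timer_handler / __neigh_event_send
 * changes above: "aware_data0" retransmits 5x faster on the timer
 * path and 25x faster on the first immediate probe, floored at one
 * scheduler tick (HZ/100). */
#define HZ 100

static long next_interval(long retrans_time, int divisor)
{
	long v = retrans_time / divisor;
	return v > HZ / 100 ? v : HZ / 100;	/* max(..., HZ/100) */
}

int main(void)
{
	long base = HZ;				/* RETRANS_TIME = 1 s */

	printf("timer path:     %ld jiffies (200 ms)\n",
	       next_interval(base, 5));	/* 100/5  = 20 */
	printf("immediate path: %ld jiffies (40 ms)\n",
	       next_interval(base, 25));	/* 100/25 = 4  */
	return 0;
}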
net/core/rtnetlink.c
@@ -63,11 +63,6 @@
#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

#define CONFIG_DEBUG_RTNL_LATENCY
#define RTNL_LATENCY_THRESHOLD_MS 500
#define ADD_LATENCY_THRESHOLD(jiffie_time) \
	((jiffie_time) + msecs_to_jiffies(RTNL_LATENCY_THRESHOLD_MS))

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
@@ -78,56 +73,15 @@ struct rtnl_link {

static DEFINE_MUTEX(rtnl_mutex);

#ifdef CONFIG_DEBUG_RTNL_LATENCY
static unsigned long time_latency;
static char owner_comm[TASK_COMM_LEN];
#endif

void rtnl_lock(void)
{
#ifdef CONFIG_DEBUG_RTNL_LATENCY
	unsigned long local_time_latency = jiffies;

	mutex_lock(&rtnl_mutex);

	if (time_after(jiffies, ADD_LATENCY_THRESHOLD(local_time_latency))) {
		pr_err("%s: %s took over %u msec\n",
		       __func__, current->comm,
		       jiffies_delta_to_msecs(jiffies - local_time_latency));
		dump_stack();
	}

	memcpy(owner_comm, current->comm, TASK_COMM_LEN);
	time_latency = jiffies;
#else
	mutex_lock(&rtnl_mutex);
#endif
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
#ifdef CONFIG_DEBUG_RTNL_LATENCY
	unsigned long local_time_latency = jiffies;
	int ret = mutex_lock_killable(&rtnl_mutex);

	if (time_after(jiffies, ADD_LATENCY_THRESHOLD(local_time_latency))) {
		pr_err("%s: %s took over %u msec, ret=%d\n",
		       __func__, current->comm,
		       jiffies_delta_to_msecs(jiffies - local_time_latency),
		       ret);
		dump_stack();
	}

	if (!ret) {
		memcpy(owner_comm, current->comm, TASK_COMM_LEN);
		time_latency = jiffies;
	}

	return ret;
#else
	return mutex_lock_killable(&rtnl_mutex);
#endif
}
EXPORT_SYMBOL(rtnl_lock_killable);

@@ -180,16 +134,6 @@ void __rtnl_unlock(void)
	 */
	WARN_ON(!list_empty(&net_todo_list));

#ifdef CONFIG_DEBUG_RTNL_LATENCY
	if (time_after(jiffies, ADD_LATENCY_THRESHOLD(time_latency))) {
		pr_err("%s: %s(%s) took over %u msec to unlock\n",
		       __func__, owner_comm, current->comm,
		       jiffies_delta_to_msecs(jiffies - time_latency));
		dump_stack();
	}
	time_latency = jiffies;
#endif

	mutex_unlock(&rtnl_mutex);

	while (head) {
@@ -210,21 +154,7 @@ EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
#ifdef CONFIG_DEBUG_RTNL_LATENCY
	int ret;

	ret = mutex_trylock(&rtnl_mutex);

	if (ret) {
		/* succeed to grab lock */
		memcpy(owner_comm, current->comm, TASK_COMM_LEN);
		time_latency = jiffies;
	}

	return ret;
#else
	return mutex_trylock(&rtnl_mutex);
#endif
}
EXPORT_SYMBOL(rtnl_trylock);
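These hunks bolt a lock-latency watchdog onto the RTNL mutex: record a timestamp before acquisition, compare after, and log the holder's comm plus a stack dump when a threshold is crossed. A minimal user-space sketch of the same pattern, assuming pthreads and CLOCK_MONOTONIC as stand-ins for the kernel's mutex and jiffies (the 500 ms constant mirrors RTNL_LATENCY_THRESHOLD_MS):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define LATENCY_THRESHOLD_MS 500	/* mirrors RTNL_LATENCY_THRESHOLD_MS */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Acquire the lock and complain if we had to wait too long,
 * just as the patched rtnl_lock() does with jiffies. */
static void watched_lock(const char *who)
{
	long start = now_ms();

	pthread_mutex_lock(&lock);
	if (now_ms() - start > LATENCY_THRESHOLD_MS)
		fprintf(stderr, "%s waited over %d ms for the lock\n",
			who, LATENCY_THRESHOLD_MS);
}

int main(void)
{
	watched_lock("main");
	pthread_mutex_unlock(&lock);
	return 0;
}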
net/core/sock.c
@@ -146,13 +146,6 @@
#include <net/busy_poll.h>
#include <net/phonet/phonet.h>

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
#include <linux/pid.h>
#define PROCESS_NAME_LEN_NAP 128
#define DOMAIN_NAME_LEN_NAP 255
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
#include <linux/ethtool.h>

#include "dev.h"
@@ -160,12 +153,6 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
extern unsigned int check_ncm_flag(void);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

@@ -776,175 +763,6 @@ out:
	return ret;
}

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
/** The function sets the domain name associated with the socket. **/
static int sock_set_domain_name(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -EADDRNOTAVAIL;
	char domain[DOMAIN_NAME_LEN_NAP];

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	if (optlen > DOMAIN_NAME_LEN_NAP - 1)
		optlen = DOMAIN_NAME_LEN_NAP - 1;

	memset(domain, 0, sizeof(domain));

	ret = -EFAULT;
	if (copy_from_sockptr(domain, optval, optlen))
		goto out;

	if (SOCK_NPA_VENDOR_DATA_GET(sk)) {
		memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->domain_name, domain,
		       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->domain_name) - 1);
		ret = 0;
	}

out:
	return ret;
}

/** The function sets the uid associated with the dns socket. **/
static int sock_set_dns_uid(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -EADDRNOTAVAIL;

	if (optlen < 0)
		goto out;

	if (optlen == sizeof(uid_t)) {
		uid_t dns_uid;

		ret = -EFAULT;
		if (copy_from_sockptr(&dns_uid, optval, sizeof(dns_uid)))
			goto out;

		if (SOCK_NPA_VENDOR_DATA_GET(sk)) {
			memcpy(&SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_uid, &dns_uid,
			       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_uid));
			ret = 0;
		}
	}

out:
	return ret;
}

/** The function sets the pid and the process name associated with the dns socket. **/
static int sock_set_dns_pid(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -EADDRNOTAVAIL;
	struct pid *pid_struct = NULL;
	struct task_struct *task = NULL;
	int process_returnvalue = -1;
	pid_t dns_pid;
	char full_process_name[PROCESS_NAME_LEN_NAP] = { 0 };

	if (optlen < 0)
		goto out;
	if (optlen != sizeof(pid_t))
		goto out;

	ret = -EFAULT;
	if (copy_from_sockptr(&dns_pid, optval, sizeof(dns_pid)))
		goto out;
	if (!SOCK_NPA_VENDOR_DATA_GET(sk))
		goto out;
	ret = 0;
	memcpy(&SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_pid, &dns_pid,
	       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_pid));
	if (!check_ncm_flag())
		goto out;
	pid_struct = find_get_pid(dns_pid);
	if (pid_struct) {
		task = pid_task(pid_struct, PIDTYPE_PID);
		if (task) {
			process_returnvalue = get_cmdline(task, full_process_name,
							  sizeof(full_process_name) - 1);
			if (process_returnvalue > 0) {
				memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->dns_process_name,
				       full_process_name,
				       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->dns_process_name) - 1);
			} else {
				memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->dns_process_name, task->comm,
				       sizeof(task->comm) - 1);
			}
		}
	}

out:
	return ret;
}

/** This function used to set parent data like process_name ,uid and pid. **/
void set_parent_process_name(struct sock *sk, struct task_struct *task)
{
	int parent_returnvalue = -1;
	char full_parent_process_name[PROCESS_NAME_LEN_NAP] = { 0 };
	struct task_struct *parent_task;
	struct pid *parent_pid_struct = NULL;

	parent_pid_struct = find_get_pid(task->parent->tgid);
	if (!parent_pid_struct)
		return;
	parent_task = pid_task(parent_pid_struct, PIDTYPE_PID);
	if (!parent_task)
		return;
	parent_returnvalue = get_cmdline(parent_task, full_parent_process_name,
					 sizeof(full_parent_process_name) - 1);
	if (parent_returnvalue > 0) {
		memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->parent_process_name, full_parent_process_name,
		       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->parent_process_name) - 1);
	} else {
		memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->parent_process_name, parent_task->comm,
		       sizeof(parent_task->comm) - 1);
	}
	SOCK_NPA_VENDOR_DATA_GET(sk)->knox_puid = parent_task->cred->uid.val;
	SOCK_NPA_VENDOR_DATA_GET(sk)->knox_ppid = parent_task->tgid;
}

/** This function used to set process name in sock **/
void set_process_name(struct sock *sk)
{
	int process_returnvalue = -1;
	char full_process_name[PROCESS_NAME_LEN_NAP] = { 0 };
	struct pid *pid_struct = NULL;
	struct task_struct *task = NULL;

	pid_struct = find_get_pid(current->tgid);
	if (!pid_struct)
		return;
	task = pid_task(pid_struct, PIDTYPE_PID);
	if (!task)
		return;
	if (task->parent)
		set_parent_process_name(sk, task);
	process_returnvalue = get_cmdline(task, full_process_name,
					  sizeof(full_process_name) - 1);
	if (process_returnvalue > 0) {
		memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->process_name, full_process_name,
		       sizeof(SOCK_NPA_VENDOR_DATA_GET(sk)->process_name) - 1);
	} else {
		memcpy(SOCK_NPA_VENDOR_DATA_GET(sk)->process_name, task->comm,
		       sizeof(task->comm) - 1);
	}
}
/**This function is used to get process metadata. **/
void set_npa_process_metadata(struct sock *sk)
{
	if (!SOCK_NPA_VENDOR_DATA_GET(sk))
		return;
	SOCK_NPA_VENDOR_DATA_GET(sk)->knox_uid = current->cred->uid.val;
	SOCK_NPA_VENDOR_DATA_GET(sk)->knox_pid = current->tgid;
	if (check_ncm_flag())
		set_process_name(sk);
}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
@@ -1295,16 +1113,6 @@ int sk_setsockopt(struct sock *sk, int level, int optname,

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if (optname == SO_SET_DOMAIN_NAME)
		return sock_set_domain_name(sk, optval, optlen);
	if (optname == SO_SET_DNS_UID)
		return sock_set_dns_uid(sk, optval, optlen);
	if (optname == SO_SET_DNS_PID)
		return sock_set_dns_pid(sk, optval, optlen);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	if (optlen < sizeof(int))
		return -EINVAL;
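For context, a hedged user-space sketch of how a client would drive these vendor socket options. SO_SET_DOMAIN_NAME, SO_SET_DNS_UID and SO_SET_DNS_PID are Samsung-defined names from this patch; their numeric values ship in Samsung's uapi headers and are not part of mainline, so the constant below is a placeholder assumption (on a stock kernel the call simply fails with ENOPROTOOPT):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Placeholder value - the real one comes from Samsung's headers. */
#ifndef SO_SET_DOMAIN_NAME
#define SO_SET_DOMAIN_NAME 10000
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	const char *domain = "example.com";

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Tag the socket with the domain it is about to resolve;
	 * the kernel side lands in sock_set_domain_name() above. */
	if (setsockopt(fd, SOL_SOCKET, SO_SET_DOMAIN_NAME,
		       domain, strlen(domain)) < 0)
		perror("SO_SET_DOMAIN_NAME");

	close(fd);
	return 0;
}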
@@ -2255,11 +2063,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	u64 android_vendor_data_npa = nsk->android_oem_data1;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	/* If we move sk_tx_queue_mapping out of the private section,
	 * we must check if sk_tx_queue_clear() is called after
@@ -2279,11 +2082,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	nsk->android_oem_data1 = android_vendor_data_npa;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
@@ -2303,21 +2101,9 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		sk->android_oem_data1 = (u64)NULL;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		sk->android_oem_data1 =
			(u64)kzalloc(sizeof(struct sock_npa_vendor_data), GFP_NOWAIT);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

		if (!try_module_get(prot->owner))
			goto out_free_sec;
	}
@@ -2326,14 +2112,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,

out_free_sec:
	security_sk_free(sk);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if (SOCK_NPA_VENDOR_DATA_GET(sk)) {
		kfree(SOCK_NPA_VENDOR_DATA_GET(sk));
		sk->android_oem_data1 = (u64)NULL;
	}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
@@ -2354,14 +2132,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
	mem_cgroup_sk_free(sk);
	trace_android_vh_sk_free(sk);
	security_sk_free(sk);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if (SOCK_NPA_VENDOR_DATA_GET(sk)) {
		kfree(SOCK_NPA_VENDOR_DATA_GET(sk));
		sk->android_oem_data1 = (u64)NULL;
	}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
@@ -2385,11 +2155,6 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		set_npa_process_metadata(sk);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
net/ipv4/udp.c
@@ -2375,87 +2375,6 @@ static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
 *	All we need to do is get the socket, and then do a checksum.
 */

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
#include <net/ncm.h>
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA_ADITYA {
#ifdef CONFIG_KNOX_NCM
/* function to handle open flows with incoming udp packets */
void collect_udp_start_flow_metadata(struct sock *sk, struct sk_buff *skb)
{
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_tuple *tuple = NULL;
	char srcaddr[INET6_ADDRSTRLEN_NAP];
	char dstaddr[INET6_ADDRSTRLEN_NAP];

	if (!check_ncm_flag())
		return;
	if (sk->sk_protocol != IPPROTO_UDP)
		return;
	if (!SOCK_NPA_VENDOR_DATA_GET(sk))
		return;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return;
	if (!NF_CONN_NPA_VENDOR_DATA_GET(ct))
		return;
	if (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow))
		return;
	if (nf_ct_is_dying(ct))
		return;

	tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	if (!tuple)
		return;

	sprintf(srcaddr, "%pI4", (void *)&tuple->src.u3.ip);
	sprintf(dstaddr, "%pI4", (void *)&tuple->dst.u3.ip);
	if (isIpv4AddressEqualsNull(srcaddr, dstaddr))
		return;
	atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow, 1);
	if (check_intermediate_flag()) {
		NF_CONN_NPA_VENDOR_DATA_GET(ct)->npa_timeout =
			((u32)(jiffies)) + (get_intermediate_timeout() * HZ);
		atomic_set(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->intermediateFlow, 1);
	}
	NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid = SOCK_NPA_VENDOR_DATA_GET(sk)->knox_uid;
	NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_pid = SOCK_NPA_VENDOR_DATA_GET(sk)->knox_pid;
	memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name,
	       SOCK_NPA_VENDOR_DATA_GET(sk)->process_name,
	       sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->process_name) - 1);
	NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid = SOCK_NPA_VENDOR_DATA_GET(sk)->knox_puid;
	NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid = SOCK_NPA_VENDOR_DATA_GET(sk)->knox_ppid;
	memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,
	       SOCK_NPA_VENDOR_DATA_GET(sk)->parent_process_name,
	       sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name) - 1);
	memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name,
	       SOCK_NPA_VENDOR_DATA_GET(sk)->domain_name,
	       sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->domain_name) - 1);
	if ((skb->dev)) {
		memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name, skb->dev->name,
		       sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name) - 1);
	} else {
		sprintf(NF_CONN_NPA_VENDOR_DATA_GET(ct)->interface_name, "%s", "null");
	}
	if ((ntohs(tuple->dst.u.udp.port) == DNS_PORT_NAP) &&
	    (NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_uid == INIT_UID_NAP) &&
	    (SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_uid > INIT_UID_NAP)) {
		NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_puid =
			SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_uid;
		NF_CONN_NPA_VENDOR_DATA_GET(ct)->knox_ppid =
			SOCK_NPA_VENDOR_DATA_GET(sk)->knox_dns_pid;
		memcpy(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name,
		       SOCK_NPA_VENDOR_DATA_GET(sk)->dns_process_name,
		       sizeof(NF_CONN_NPA_VENDOR_DATA_GET(ct)->parent_process_name) - 1);
	}
	knox_collect_conntrack_data(ct, NCM_FLOW_TYPE_OPEN, 3);
}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA_ADITYA }
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
@@ -2506,12 +2425,6 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp_sk_rx_dst_set(sk, dst);

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		collect_udp_start_flow_metadata(sk, skb);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

		ret = udp_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
@@ -2523,16 +2436,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
					saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		collect_udp_start_flow_metadata(sk, skb);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	if (sk)
		return udp_unicast_rcv_skb(sk, skb, uh);
	}

no_sk:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
net/ipv6/addrconf.c
@@ -4240,16 +4240,6 @@ static void addrconf_dad_work(struct work_struct *w)
	}

	ifp->dad_probes--;
	if (ifp->idev->dev != NULL && !strcmp(ifp->idev->dev->name, "aware_data0")) {
		pr_info("Reduce wating time from %d to %d (HZ=%d) to send NS for quick transmission for %s\n",
			max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100),
			max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)/100, HZ/100),
			HZ,
			ifp->idev->dev->name);
		addrconf_mod_dad_work(ifp,
			max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)/100,
			    HZ/100));
	} else
		addrconf_mod_dad_work(ifp,
			max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
			    HZ/100));
net/ipv6/ndisc.c
@@ -953,15 +953,6 @@ have_ifp:
			     NEIGH_UPDATE_F_WEAK_OVERRIDE|
			     NEIGH_UPDATE_F_OVERRIDE,
			     NDISC_NEIGHBOUR_SOLICITATION, &ndopts);

	if (neigh != NULL && neigh->dev != NULL && !strcmp(neigh->dev->name, "aware_data0")) {
		pr_info("ipv6 neigh_lookup is done by receiving NS"
			" from [:%02x%02x] to [:%02x%02x] and sending NA for %s\n",
			saddr->s6_addr[14], saddr->s6_addr[15],
			daddr->s6_addr[14], daddr->s6_addr[15],
			neigh->dev->name);
	}

	if (neigh || !dev->header_ops) {
		ndisc_send_na(dev, saddr, &msg->target, !!is_router,
			      true, (ifp != NULL && inc), inc);
@@ -1123,14 +1114,6 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
			   (msg->icmph.icmp6_router ? NEIGH_UPDATE_F_ISROUTER : 0),
			   NDISC_NEIGHBOUR_ADVERTISEMENT, &ndopts);

	if (neigh->dev != NULL && !strcmp(neigh->dev->name, "aware_data0")) {
		pr_info("ipv6 neigh_lookup is done by receiving NA"
			" from [:%02x%02x] to [:%02x%02x] for %s\n",
			saddr->s6_addr[14], saddr->s6_addr[15],
			daddr->s6_addr[14], daddr->s6_addr[15],
			dev->name);
	}

	if ((old_flags & ~neigh->flags) & NTF_ROUTER) {
		/*
		 * Change: router to host
net/netfilter/Kconfig
@@ -1682,18 +1682,6 @@ config NETFILTER_XT_MATCH_U32

	  Details and examples are in the kernel module source.

config NETFILTER_XT_MATCH_DOMAIN
	tristate 'Domain Filter Support'
	depends on NETFILTER_XTABLES
	depends on KNOX_NCM
	default y
	help
	  This option allow or block user network traffic based on domain

	  If you want to compile it as a module, say M here.
	  If unsure, say N.

endif # NETFILTER_XTABLES

endmenu
net/netfilter/Makefile
@@ -227,7 +227,6 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DOMAIN) += xt_domainfilter.o

# ipset
obj-$(CONFIG_IP_SET) += ipset/
net/netfilter/nf_conntrack_core.c
@@ -51,14 +51,8 @@
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
#include <net/ncm.h>
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

#include "nf_internals.h"
#include <linux/time.h>

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);
@@ -81,11 +75,6 @@ struct conntrack_gc_work {

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
static DEFINE_SPINLOCK(knox_nf_conntrack);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
static __read_mostly bool nf_conntrack_locks_all;

/* serialize hash resizes and nf_ct_iterate_cleanup */
@@ -586,17 +575,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	unsigned long flags;
	spin_lock_irqsave(&knox_nf_conntrack, flags);
	if (NF_CONN_NPA_VENDOR_DATA_GET(ct)) {
		kfree(NF_CONN_NPA_VENDOR_DATA_GET(ct));
		ct->android_oem_data1 = (u64)NULL;
	}
	spin_unlock_irqrestore(&knox_nf_conntrack, flags);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	WARN_ON(refcount_read(&nfct->use) != 0);

	if (unlikely(nf_ct_is_template(ct))) {
@@ -1031,13 +1009,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
	nf_conntrack_get(&ct->ct_general);

	nf_ct_acct_merge(ct, ctinfo, loser_ct);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if ( (check_ncm_flag()) && (loser_ct != NULL) && (NF_CONN_NPA_VENDOR_DATA_GET(loser_ct)) && (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(loser_ct)->startFlow)) ) {
		knox_collect_conntrack_data(loser_ct, NCM_FLOW_TYPE_CLOSE, 10);
	}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	nf_ct_put(loser_ct);
	nf_ct_set(skb, ct, ctinfo);

@@ -1170,13 +1141,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
	return ret;

drop:
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if ( (check_ncm_flag()) && (loser_ct != NULL) && (NF_CONN_NPA_VENDOR_DATA_GET(loser_ct)) && (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(loser_ct)->startFlow)) ) {
		knox_collect_conntrack_data(loser_ct, NCM_FLOW_TYPE_CLOSE, 10);
	}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	NF_CT_STAT_INC(net, drop);
	NF_CT_STAT_INC(net, insert_failed);
	return NF_DROP;
@@ -1250,13 +1214,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
	ct->status |= IPS_CONFIRMED;

	if (unlikely(nf_ct_is_dying(ct))) {
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
		if ( (check_ncm_flag()) && (ct != NULL) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)) && (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow)) ) {
			knox_collect_conntrack_data(ct, NCM_FLOW_TYPE_CLOSE, 10);
		}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
		NF_CT_STAT_INC(net, insert_failed);
		goto dying;
	}
@@ -1280,13 +1237,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
			goto out;
		if (chainlen++ > max_chainlen) {
chaintoolong:
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
			if ( (check_ncm_flag()) && (ct != NULL) && (NF_CONN_NPA_VENDOR_DATA_GET(ct)) && (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(ct)->startFlow)) ) {
				knox_collect_conntrack_data(ct, NCM_FLOW_TYPE_CLOSE, 10);
			}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
			NF_CT_STAT_INC(net, chaintoolong);
			NF_CT_STAT_INC(net, insert_failed);
			ret = NF_DROP;
@@ -1574,27 +1524,6 @@ static void gc_worker(struct work_struct *work)
				nf_ct_gc_expired(tmp);
				expired_count++;
				continue;
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
			} else if ((tmp) && (check_ncm_flag()) &&
				   (check_intermediate_flag()) &&
				   (NF_CONN_NPA_VENDOR_DATA_GET(tmp)) &&
				   (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(tmp)->startFlow)) &&
				   (atomic_read(&NF_CONN_NPA_VENDOR_DATA_GET(tmp)
						->intermediateFlow))) {
				s32 npa_timeout =
					NF_CONN_NPA_VENDOR_DATA_GET(tmp)->npa_timeout -
					((u32)(jiffies));

				if (npa_timeout <= 0) {
					NF_CONN_NPA_VENDOR_DATA_GET(tmp)->npa_timeout =
						((u32)(jiffies)) +
						(get_intermediate_timeout() * HZ);
					knox_collect_conntrack_data(tmp,
						NCM_FLOW_TYPE_INTERMEDIATE, 20);
				}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
			}

			expires = clamp(nf_ct_expires(tmp), GC_SCAN_INTERVAL_MIN, GC_SCAN_INTERVAL_CLAMP);
@@ -1663,12 +1592,6 @@ early_exit:

	if (next_run)
		gc_work->early_drop = false;
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	if ((check_ncm_flag()) && (check_intermediate_flag()))
		next_run = 0;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}
@@ -1689,11 +1612,6 @@ __nf_conntrack_alloc(struct net *net,
	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
	unsigned int ct_count;
	struct nf_conn *ct;
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	struct timespec64 open_timespec;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	/* We don't want any race condition at early drop stage */
	ct_count = atomic_inc_return(&cnet->count);
@@ -1717,11 +1635,6 @@ __nf_conntrack_alloc(struct net *net,
		goto out;

	spin_lock_init(&ct->lock);
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	ct->android_oem_data1 = (u64)NULL;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
@@ -1734,15 +1647,6 @@ __nf_conntrack_alloc(struct net *net,

	nf_ct_zone_add(ct, zone);

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	ct->android_oem_data1 = (u64)kzalloc(sizeof(struct nf_conn_npa_vendor_data), gfp);
	ktime_get_ts64(&open_timespec);
	if (NF_CONN_NPA_VENDOR_DATA_GET(ct))
		NF_CONN_NPA_VENDOR_DATA_GET(ct)->open_time = open_timespec.tv_sec;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
@@ -1765,11 +1669,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	unsigned long flags;
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_net *cnet;

@@ -1793,16 +1692,6 @@ void nf_conntrack_free(struct nf_conn *ct)
	cnet = nf_ct_pernet(net);

	smp_mb__before_atomic();
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#ifdef CONFIG_KNOX_NCM
	spin_lock_irqsave(&knox_nf_conntrack, flags);
	if (NF_CONN_NPA_VENDOR_DATA_GET(ct)) {
		kfree(NF_CONN_NPA_VENDOR_DATA_GET(ct));
		ct->android_oem_data1 = (u64)NULL;
	}
	spin_unlock_irqrestore(&knox_nf_conntrack, flags);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
	atomic_dec(&cnet->count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
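All of the conntrack hunks follow one lifecycle: stash a kzalloc'd vendor struct in the opaque ct->android_oem_data1 slot at allocation time, and free it exactly once, under a dedicated spinlock, on either teardown path. A minimal self-contained sketch of that pattern, with stand-in names rather than the KNOX definitions (the real struct lives in Samsung's <net/ncm.h>, which is not in mainline):

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for struct nf_conn_npa_vendor_data. */
struct vendor_data { long open_time; };

/* Stand-in for nf_conn's opaque u64 vendor slot. */
struct conn { uint64_t oem_data; };

static void conn_attach_vendor_data(struct conn *ct)
{
	/* Allocation may fail; every reader must tolerate a NULL slot. */
	ct->oem_data = (uint64_t)(uintptr_t)calloc(1, sizeof(struct vendor_data));
}

static void conn_detach_vendor_data(struct conn *ct)
{
	/* Free once and clear the slot so the second teardown path
	 * (nf_ct_destroy vs. nf_conntrack_free) cannot double-free;
	 * the kernel version additionally serializes with a spinlock. */
	struct vendor_data *vd = (struct vendor_data *)(uintptr_t)ct->oem_data;

	if (vd) {
		free(vd);
		ct->oem_data = 0;
	}
}

int main(void)
{
	struct conn ct = { 0 };

	conn_attach_vendor_data(&ct);
	conn_detach_vendor_data(&ct);
	conn_detach_vendor_data(&ct);	/* safe: slot already cleared */
	return 0;
}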
net/netfilter/xt_connmark.c
@@ -15,16 +15,6 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connmark.h>

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN {
#ifdef CONFIG_KNOX_NCM
#include <linux/pid.h>
#include <linux/types.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <net/ip.h>
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN }

MODULE_AUTHOR("Henrik Nordstrom <hno@marasystems.com>");
MODULE_DESCRIPTION("Xtables: connection mark operations");
MODULE_LICENSE("GPL");
@@ -33,46 +23,6 @@ MODULE_ALIAS("ip6t_CONNMARK");
MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");

// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN {
#ifdef CONFIG_KNOX_NCM
#define META_MARK_BASE_LOWER 100
#define META_MARK_BASE_UPPER 500

static void knoxvpn_uidpid(struct sk_buff *skb, u_int32_t newmark)
{
	struct skb_shared_info *knox_shinfo = NULL;

	if (skb) {
		knox_shinfo = skb_shinfo(skb);
	} else {
		pr_err("KNOX: NULL SKB - no KNOX processing");
		return;
	}

	if (!skb->sk) {
		pr_err("KNOX: skb->sk value is null");
		return;
	}

	if (!knox_shinfo) {
		pr_err("KNOX: knox_shinfo is null");
		return;
	}

	if (newmark < META_MARK_BASE_LOWER || newmark > META_MARK_BASE_UPPER) {
		pr_err("KNOX: The mark is out of range");
		return;
	} else {
		if ((current) && (current->cred))
			knox_shinfo->android_oem_data1[0] = (u64)current->cred->uid.val;
		if (current)
			knox_shinfo->android_oem_data1[1] = (u64)current->tgid;
		knox_shinfo->android_oem_data1[2] = (u64)newmark;
	}
}
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN }

static unsigned int
connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
{
@@ -124,11 +74,6 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
		newmark = (skb->mark & ~info->nfmask) ^
			  new_targetmark;
		skb->mark = newmark;
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN {
#ifdef CONFIG_KNOX_NCM
		knoxvpn_uidpid(skb, newmark);
#endif
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN }
		break;
	}
	return XT_CONTINUE;
net/qmsgq/Kconfig (new file, 27 lines)
@@ -0,0 +1,27 @@
# SPDX-License-Identifier: GPL-2.0-only
# QTI Message Queue Socket configuration
#

config QMSGQ
	tristate "QTI Message Queue Socket"
	select VSOCKETS
	help
	  Say Y if you intend to use QTI Message Queue Socket protocol. The
	  protocol is used to communicate with baremetal VM and platforms
	  that use the Gunyah Hypervisor. This protocol will mimic the vsock
	  address space with cid and port id but allow for guest to guest
	  communication.

if QMSGQ

config QMSGQ_GUNYAH
	tristate "QTI Message Queue Socket Gunyah Transport"
	depends on GH_MSGQ
	depends on QMSGQ
	help
	  Say Y here to support Gunyah Message Queue based transport for the
	  QMSGQ Socket Transport. This transport is intended to facilitate
	  Guest to Guest communication on the Gunyah Hypervisor. This transport
	  supports Datagram and Seqpacket operations.

endif # QMSGQ
net/qmsgq/Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_QMSGQ) += af_qmsgq.o
obj-$(CONFIG_QMSGQ_GUNYAH) += qmsgq_gunyah.o
804
net/qmsgq/af_qmsgq.c
Normal file
804
net/qmsgq/af_qmsgq.c
Normal file
@@ -0,0 +1,804 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "af_qmsgq.h"

#ifndef AF_QMSGQ
#define AF_QMSGQ 27
#endif

#ifndef PF_QMSGQ
#define PF_QMSGQ AF_QMSGQ
#endif

struct qmsgq_cb {
	u32 src_cid;
	u32 src_port;
	u32 dst_cid;
	u32 dst_port;
};

static const struct qmsgq_endpoint *registered_ep;
static DEFINE_MUTEX(qmsgq_register_mutex);

/* auto-bind range */
#define QMSGQ_MIN_EPH_SOCKET 0x4000
#define QMSGQ_MAX_EPH_SOCKET 0x7fff
#define QMSGQ_EPH_PORT_RANGE \
		XA_LIMIT(QMSGQ_MIN_EPH_SOCKET, QMSGQ_MAX_EPH_SOCKET)

/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qmsgq_ports);
u32 qmsgq_ports_next = QMSGQ_MIN_EPH_SOCKET;
static DEFINE_SPINLOCK(qmsgq_port_lock);

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define QMSGQ_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define QMSGQ_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define QMSGQ_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define QMSGQ_DEFAULT_BUFFER_MIN_SIZE 128

static void qmsgq_deassign_ep(struct qmsgq_sock *qsk)
{
	if (!qsk->ep)
		return;

	qsk->ep->destruct(qsk);
	module_put(qsk->ep->module);
	qsk->ep = NULL;
}

int qmsgq_assign_ep(struct qmsgq_sock *qsk, struct qmsgq_sock *psk)
{
	const struct qmsgq_endpoint *new_ep;
	int ret;

	new_ep = registered_ep;
	if (qsk->ep) {
		if (qsk->ep == new_ep)
			return 0;

		qsk->ep->release(qsk);
		qmsgq_deassign_ep(qsk);
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_ep || !try_module_get(new_ep->module))
		return -ENODEV;

	ret = new_ep->init(qsk, psk);
	if (ret) {
		module_put(new_ep->module);
		return ret;
	}

	qsk->ep = new_ep;

	return 0;
}

static bool qmsgq_find_cid(unsigned int cid)
{
	if (registered_ep && cid == registered_ep->get_local_cid())
		return true;

	return false;
}

static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}

static struct qmsgq_sock *qmsgq_port_lookup(int port)
{
	struct qmsgq_sock *qsk;
	unsigned long flags;

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	qsk = xa_load(&qmsgq_ports, port);
	if (qsk)
		sock_hold(qsk_sk(qsk));
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);

	return qsk;
}

static void qmsgq_port_put(struct qmsgq_sock *qsk)
{
	sock_put(qsk_sk(qsk));
}

static void qmsgq_port_remove(struct qmsgq_sock *qsk)
{
	int port = qsk->local_addr.svm_port;
	unsigned long flags;

	__sock_put(qsk_sk(qsk));

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	xa_erase(&qmsgq_ports, port);
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);
}

static int qmsgq_port_assign(struct qmsgq_sock *qsk, int *port)
{
	int rc;

	if (!*port || *port < 0) {
		rc = xa_alloc_cyclic(&qmsgq_ports, port, qsk,
				     QMSGQ_EPH_PORT_RANGE, &qmsgq_ports_next,
				     GFP_ATOMIC);
	} else if (*port < QMSGQ_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else {
		rc = xa_insert(&qmsgq_ports, *port, qsk, GFP_ATOMIC);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(qsk_sk(qsk));
	return 0;
}
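The assignment helper above distinguishes three cases: ephemeral allocation, privileged fixed ports, and ordinary fixed ports. A standalone sketch of just the ephemeral path, using the same xarray helpers (a hypothetical wrapper, not part of the patch):

/* Hypothetical helper showing the cyclic allocation in isolation. */
static int example_ephemeral_port(struct qmsgq_sock *qsk)
{
	u32 port;
	int rc;

	/* Scan forward from qmsgq_ports_next, wrapping within the
	 * [QMSGQ_MIN_EPH_SOCKET, QMSGQ_MAX_EPH_SOCKET] limit, so a just
	 * released port is not handed out again immediately.
	 */
	rc = xa_alloc_cyclic(&qmsgq_ports, &port, qsk,
			     QMSGQ_EPH_PORT_RANGE, &qmsgq_ports_next,
			     GFP_ATOMIC);
	if (rc < 0)
		return rc == -EBUSY ? -EADDRINUSE : rc;

	return port;
}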
static int qmsgq_send_shutdown(struct sock *sk, int mode)
{
	struct qmsgq_sock *qsk = sk_qsk(sk);

	if (!qsk->ep)
		return -ENODEV;

	return qsk->ep->shutdown(qsk, mode);
}

static void qmsgq_connect_timeout(struct work_struct *work)
{
}

static void qmsgq_pending_work(struct work_struct *work)
{
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qmsgq_bind(struct socket *sock,
			const struct sockaddr_vm *addr, int zapped)
{
	struct qmsgq_sock *qsk = sk_qsk(sock->sk);
	struct sock *sk = sock->sk;
	unsigned long flags;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->svm_port == qsk->local_addr.svm_port)
		return 0;

	if (addr->svm_cid != VMADDR_CID_ANY && !qmsgq_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	port = addr->svm_port;
	rc = qmsgq_port_assign(qsk, &port);
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qmsgq_port_remove(qsk);

	vsock_addr_init(&qsk->local_addr, VMADDR_CID_HOST, port);
	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qmsgq_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_vm addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;
	vsock_addr_init(&addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __qmsgq_bind(sock, &addr, 1);
}

static int qmsgq_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sockaddr_vm *vm_addr;
	struct sock *sk = sock->sk;
	int rc;

	if (vsock_addr_cast(addr, len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	rc = __qmsgq_bind(sock, vm_addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

static int qmsgq_dgram_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags)
{
	struct sockaddr_vm *remote_addr;
	struct qmsgq_sock *qsk;
	struct sock *sk;
	int rc;

	sk = sock->sk;
	qsk = sk_qsk(sk);

	rc = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (rc == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (rc != 0) {
		return -EINVAL;
	}

	lock_sock(sk);
	rc = qmsgq_autobind(sock);
	if (rc)
		goto out;

	if (!qsk->ep->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) {
		rc = -EINVAL;
		goto out;
	}
	memcpy(&qsk->remote_addr, remote_addr, sizeof(qsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return rc;
}

static int qmsgq_getname(struct socket *sock, struct sockaddr *addr, int peer)
{
	struct sockaddr_vm *vm_addr = NULL;
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	int rc = 0;

	qsk = sk_qsk(sk);

	lock_sock(sk);
	if (peer) {
		if (sock->state != SS_CONNECTED) {
			rc = -ENOTCONN;
			goto out;
		}
		vm_addr = &qsk->remote_addr;
	} else {
		vm_addr = &qsk->local_addr;
	}
	if (!vm_addr) {
		rc = -EINVAL;
		goto out;
	}

	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	rc = sizeof(*vm_addr);

out:
	release_sock(sk);
	return rc;
}

static int qmsgq_shutdown(struct socket *sock, int mode)
{
	struct sock *sk;
	int rc;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a connection oriented socket and it is not connected then
	 * bail out immediately. If it is a DGRAM socket then we must first
	 * kick the socket so that it wakes up from any sleeping calls, for
	 * example recv(), and then afterwards return the error.
	 */
	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		rc = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		rc = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			qmsgq_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return rc;
}
static __poll_t qmsgq_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	__poll_t mask;

	qsk = sk_qsk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (qsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    qsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} /* TODO Connected POLL */

	return mask;
}

static int qmsgq_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	const struct qmsgq_endpoint *ep;
	struct sockaddr_vm *remote_addr;
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	int rc = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	qsk = sk_qsk(sk);

	lock_sock(sk);
	ep = qsk->ep;

	rc = qmsgq_autobind(sock);
	if (rc)
		goto out;

	if (msg->msg_name) {
		rc = vsock_addr_cast(msg->msg_name, msg->msg_namelen, &remote_addr);
		if (rc)
			goto out;
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &qsk->remote_addr;
	} else {
		rc = -EINVAL;
		goto out;
	}

	if (remote_addr->svm_cid == VMADDR_CID_ANY)
		remote_addr->svm_cid = ep->get_local_cid();

	if (!vsock_addr_bound(remote_addr)) {
		rc = -EINVAL;
		goto out;
	}

	if (!ep->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) {
		rc = -EINVAL;
		goto out;
	}

	rc = ep->dgram_enqueue(qsk, remote_addr, msg, len);
	if (!rc)
		rc = len;

out:
	release_sock(sk);
	return rc;
}

static int qmsgq_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	struct sk_buff *skb;
	struct qmsgq_cb *cb;
	int copied;
	int rc = 0;

	qsk = sk_qsk(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		pr_err("%s: Invalid socket error\n", __func__);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	lock_sock(sk);
	cb = (struct qmsgq_cb *)skb->cb;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0) {
		pr_err("%s: skb_copy_datagram_msg failed: %d\n", __func__, rc);
		goto out;
	}
	rc = copied;

	if (vm_addr) {
		vsock_addr_init(vm_addr, VMADDR_CID_HOST, cb->src_port);
		msg->msg_namelen = sizeof(*vm_addr);
	}
out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
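recvmsg() trusts that the addressing metadata stashed in skb->cb by qmsgq_post() is still in place. skb->cb is 48 bytes and struct qmsgq_cb uses 16 of them; a compile-time check in that spirit (a sketch, not in the patch):

static_assert(sizeof(struct qmsgq_cb) <= sizeof_field(struct sk_buff, cb),
	      "qmsgq_cb must fit inside skb->cb");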
static void __qmsgq_release(struct sock *sk, int level)
{
	if (sk) {
		struct qmsgq_sock *qsk = sk_qsk(sk);

		lock_sock_nested(sk, level);
		if (qsk->ep)
			qsk->ep->release(qsk);

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);

		if (!sock_flag(sk, SOCK_ZAPPED))
			qmsgq_port_remove(qsk);

		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;
		skb_queue_purge(&sk->sk_receive_queue);
		release_sock(sk);
		sock_put(sk);
	}
}

static int qmsgq_release(struct socket *sock)
{
	__qmsgq_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static const struct proto_ops qmsgq_dgram_ops = {
	.owner = THIS_MODULE,
	.family = AF_QMSGQ,
	.release = qmsgq_release,
	.bind = qmsgq_bind,
	.connect = qmsgq_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = qmsgq_getname,
	.poll = qmsgq_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = qmsgq_shutdown,
	.sendmsg = qmsgq_dgram_sendmsg,
	.recvmsg = qmsgq_dgram_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto qmsgq_proto = {
	.name = "QMSGQ",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct qmsgq_sock),
};

static void sk_qsk_destruct(struct sock *sk)
{
	struct qmsgq_sock *qsk = sk_qsk(sk);

	qmsgq_deassign_ep(qsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&qsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(qsk->owner);
}

static int qmsgq_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

static struct sock *__qmsgq_create(struct net *net, struct socket *sock, struct sock *parent,
				   gfp_t priority, unsigned short type, int kern)
{
	struct qmsgq_sock *psk;
	struct qmsgq_sock *qsk;
	struct sock *sk;

	sk = sk_alloc(net, AF_QMSGQ, priority, &qmsgq_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	if (!sock)
		sk->sk_type = type;

	qsk = sk_qsk(sk);
	vsock_addr_init(&qsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = sk_qsk_destruct;
	sk->sk_backlog_rcv = qmsgq_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);
	sock_set_flag(sk, SOCK_ZAPPED);

	INIT_LIST_HEAD(&qsk->bound_table);
	INIT_LIST_HEAD(&qsk->connected_table);
	qsk->listener = NULL;
	INIT_LIST_HEAD(&qsk->pending_links);
	INIT_LIST_HEAD(&qsk->accept_queue);
	qsk->rejected = false;
	qsk->sent_request = false;
	qsk->ignore_connecting_rst = false;
	qsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&qsk->connect_work, qmsgq_connect_timeout);
	INIT_DELAYED_WORK(&qsk->pending_work, qmsgq_pending_work);

	psk = parent ? sk_qsk(parent) : NULL;
	if (parent) {
		qsk->trusted = psk->trusted;
		qsk->owner = get_cred(psk->owner);
		qsk->connect_timeout = psk->connect_timeout;
		qsk->buffer_size = psk->buffer_size;
		qsk->buffer_min_size = psk->buffer_min_size;
		qsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		qsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		qsk->owner = get_current_cred();
		qsk->connect_timeout = QMSGQ_DEFAULT_CONNECT_TIMEOUT;
		qsk->buffer_size = QMSGQ_DEFAULT_BUFFER_SIZE;
		qsk->buffer_min_size = QMSGQ_DEFAULT_BUFFER_MIN_SIZE;
		qsk->buffer_max_size = QMSGQ_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static int qmsgq_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	struct qmsgq_sock *qsk;
	struct sock *sk;
	int rc;

	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_QMSGQ)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &qmsgq_dgram_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}
	sock->state = SS_UNCONNECTED;

	sk = __qmsgq_create(net, sock, NULL, GFP_KERNEL, 0, kern);
	if (!sk)
		return -ENOMEM;

	qsk = sk_qsk(sk);
	if (sock->type == SOCK_DGRAM) {
		rc = qmsgq_assign_ep(qsk, NULL);
		if (rc < 0) {
			sock_put(sk);
			return rc;
		}
	}

	return 0;
}

int qmsgq_post(const struct qmsgq_endpoint *ep, struct sockaddr_vm *src, struct sockaddr_vm *dst,
	       void *data, int len)
{
	struct qmsgq_sock *qsk;
	struct qmsgq_cb *cb;
	struct sk_buff *skb;
	int rc;

	skb = alloc_skb_with_frags(0, len, 0, &rc, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: Unable to get skb with len:%d\n", __func__, len);
		return -ENOMEM;
	}
	cb = (struct qmsgq_cb *)skb->cb;
	cb->src_cid = src->svm_cid;
	cb->src_port = src->svm_port;
	cb->dst_cid = dst->svm_cid;
	cb->dst_port = dst->svm_port;

	skb->data_len = len;
	skb->len = len;
	skb_store_bits(skb, 0, data, len);

	qsk = qmsgq_port_lookup(dst->svm_port);
	if (!qsk || qsk->ep != ep) {
		pr_err("%s: invalid dst port:%d\n", __func__, dst->svm_port);
		/* drop the reference taken by qmsgq_port_lookup() on an
		 * endpoint mismatch, otherwise the socket leaks
		 */
		if (qsk)
			qmsgq_port_put(qsk);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (sock_queue_rcv_skb(qsk_sk(qsk), skb)) {
		pr_err("%s: sock_queue_rcv_skb failed\n", __func__);
		qmsgq_port_put(qsk);
		kfree_skb(skb);
		return -EINVAL;
	}
	qmsgq_port_put(qsk);
	return 0;
}
EXPORT_SYMBOL_GPL(qmsgq_post);

int qmsgq_endpoint_register(const struct qmsgq_endpoint *ep)
{
	int rc = 0;

	if (!ep)
		return -EINVAL;

	mutex_lock(&qmsgq_register_mutex);
	if (registered_ep) {
		rc = -EBUSY;
		goto error;
	}
	registered_ep = ep;

error:
	mutex_unlock(&qmsgq_register_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qmsgq_endpoint_register);

void qmsgq_endpoint_unregister(const struct qmsgq_endpoint *ep)
{
	mutex_lock(&qmsgq_register_mutex);
	/* clear the global, not the local parameter, or the stale endpoint
	 * would stay registered after the transport goes away
	 */
	if (registered_ep == ep)
		registered_ep = NULL;
	mutex_unlock(&qmsgq_register_mutex);
}
EXPORT_SYMBOL_GPL(qmsgq_endpoint_unregister);

static const struct net_proto_family qmsgq_family = {
	.owner = THIS_MODULE,
	.family = AF_QMSGQ,
	.create = qmsgq_create,
};

static int __init qmsgq_proto_init(void)
{
	int rc;

	registered_ep = NULL;

	rc = proto_register(&qmsgq_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qmsgq_family);
	if (rc)
		goto err_proto;

	return 0;

err_proto:
	proto_unregister(&qmsgq_proto);

	return rc;
}

static void __exit qmsgq_proto_fini(void)
{
	sock_unregister(qmsgq_family.family);
	proto_unregister(&qmsgq_proto);
}
module_init(qmsgq_proto_init);
module_exit(qmsgq_proto_fini);

MODULE_DESCRIPTION("QTI Gunyah MSGQ Socket driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_QMSGQ);
87	net/qmsgq/af_qmsgq.h	Normal file
@@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __AF_QMSGQ_H_
#define __AF_QMSGQ_H_

#include <net/af_vsock.h>

struct qmsgq_endpoint;

struct qmsgq_sock {
	/* sk must be the first member. */
	struct sock sk;
	const struct qmsgq_endpoint *ep;
	struct sockaddr_vm local_addr;
	struct sockaddr_vm remote_addr;
	/* Links for the global tables of bound and connected sockets. */
	struct list_head bound_table;
	struct list_head connected_table;
	/* Accessed without the socket lock held. This means it can never be
	 * modified outside of socket create or destruct.
	 */
	bool trusted;
	bool cached_peer_allow_dgram;	/* Dgram communication allowed to
					 * cached peer?
					 */
	u32 cached_peer;  /* Context ID of last dgram destination check. */
	const struct cred *owner;
	/* Rest are SOCK_STREAM only. */
	long connect_timeout;
	/* Listening socket that this came from. */
	struct sock *listener;
	/* Used for pending list and accept queue during connection handshake.
	 * The listening socket is the head for both lists. Sockets created
	 * for connection requests are placed in the pending list until they
	 * are connected, at which point they are put in the accept queue list
	 * so they can be accepted in accept(). If accept() cannot accept the
	 * connection, it is marked as rejected so the cleanup function knows
	 * to clean up the socket.
	 */
	struct list_head pending_links;
	struct list_head accept_queue;
	bool rejected;
	struct delayed_work connect_work;
	struct delayed_work pending_work;
	struct delayed_work close_work;
	bool close_work_scheduled;
	u32 peer_shutdown;
	bool sent_request;
	bool ignore_connecting_rst;

	/* Protected by lock_sock(sk) */
	u64 buffer_size;
	u64 buffer_min_size;
	u64 buffer_max_size;
};

#define qsk_sk(__qsk) (&(__qsk)->sk)
#define sk_qsk(__sk) ((struct qmsgq_sock *)__sk)

struct qmsgq_endpoint {
	struct module *module;

	/* Initialize/tear-down socket. */
	int (*init)(struct qmsgq_sock *qsk, struct qmsgq_sock *psk);
	void (*destruct)(struct qmsgq_sock *qsk);
	void (*release)(struct qmsgq_sock *qsk);

	/* DGRAM. */
	int (*dgram_enqueue)(struct qmsgq_sock *qsk, struct sockaddr_vm *addr,
			     struct msghdr *msg, size_t len);
	bool (*dgram_allow)(u32 cid, u32 port);

	/* Shutdown. */
	int (*shutdown)(struct qmsgq_sock *qsk, int mode);

	/* Addressing. */
	u32 (*get_local_cid)(void);
};

int qmsgq_post(const struct qmsgq_endpoint *ep, struct sockaddr_vm *src, struct sockaddr_vm *dst,
	       void *data, int len);
int qmsgq_endpoint_register(const struct qmsgq_endpoint *ep);
void qmsgq_endpoint_unregister(const struct qmsgq_endpoint *ep);

#endif
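For reference, a minimal transport fills in this ops table and registers it; a hedged loopback-style sketch against the header above (all lb_* names are hypothetical, and only one endpoint may be registered at a time):

#include <linux/module.h>
#include "af_qmsgq.h"

static int lb_init(struct qmsgq_sock *qsk, struct qmsgq_sock *psk) { return 0; }
static void lb_destruct(struct qmsgq_sock *qsk) { }
static void lb_release(struct qmsgq_sock *qsk) { }
static int lb_shutdown(struct qmsgq_sock *qsk, int mode) { return 0; }
static bool lb_dgram_allow(u32 cid, u32 port) { return true; }
static u32 lb_get_local_cid(void) { return VMADDR_CID_HOST; }

static int lb_dgram_enqueue(struct qmsgq_sock *qsk, struct sockaddr_vm *addr,
			    struct msghdr *msg, size_t len)
{
	/* A real transport would copy the payload out of msg and deliver it
	 * (for example back through qmsgq_post()); this stub only sketches
	 * the shape of the callback.
	 */
	return -EOPNOTSUPP;
}

static const struct qmsgq_endpoint lb_ep = {
	.module = THIS_MODULE,
	.init = lb_init,
	.destruct = lb_destruct,
	.release = lb_release,
	.dgram_enqueue = lb_dgram_enqueue,
	.dgram_allow = lb_dgram_allow,
	.shutdown = lb_shutdown,
	.get_local_cid = lb_get_local_cid,
};

static int __init lb_init_module(void)
{
	return qmsgq_endpoint_register(&lb_ep);
}
module_init(lb_init_module);

static void __exit lb_exit_module(void)
{
	qmsgq_endpoint_unregister(&lb_ep);
}
module_exit(lb_exit_module);
MODULE_LICENSE("GPL");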
523	net/qmsgq/qmsgq_gunyah.c	Normal file
@@ -0,0 +1,523 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/skbuff.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/of.h>

#include "af_qmsgq.h"

#define QMSGQ_GH_PROTO_VER_1 1
#define MAX_PKT_SZ SZ_64K

#define QMSGQ_SKB_WAKEUP_MS 500

enum qmsgq_gh_pkt_type {
	QMSGQ_GH_TYPE_DATA = 1,
};

/**
 * struct qmsgq_gh_hdr - qmsgq gunyah packet header
 * @version: protocol version
 * @type: packet type; one of qmsgq_gh_pkt_type
 * @flags: reserved for future use
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_rsvd: source cid, reserved
 * @src_port_id: source port
 * @dst_rsvd: destination cid, reserved
 * @dst_port_id: destination port
 */
struct qmsgq_gh_hdr {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le32 src_rsvd;
	__le32 src_port_id;
	__le32 dst_rsvd;
	__le32 dst_port_id;
};
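All multi-byte fields are little-endian on the wire, so the header is a fixed 24 bytes (four u8 fields plus five __le32 fields) with no padding. A layout check one might add (a sketch, not in the patch):

static_assert(sizeof(struct qmsgq_gh_hdr) == 24,
	      "qmsgq_gh wire header must stay 24 bytes");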
/* struct qmsgq_gh_recv_buf - gunyah transport receive buffer
 * @lock: lock for the buffer
 * @len: hdrlen + packet size
 * @copied: size of buffer copied
 * @hdr_received: true if the header is already saved, else false
 * @buf: buffer saved
 */
struct qmsgq_gh_recv_buf {
	struct mutex lock;
	size_t len;
	size_t copied;
	bool hdr_received;

	char buf[MAX_PKT_SZ];
};

/* struct qmsgq_gh_device - vm devices attached to this transport
 * @item: list item of all vm devices
 * @dev: device from platform_device
 * @ep: qmsgq endpoint
 * @peer_cid: remote cid
 * @msgq_label: msgq label
 * @msgq_hdl: msgq handle
 * @rm_nb: notifier block for vm status from rm
 * @sock_ws: wakeup source
 * @tx_lock: tx lock to queue only one packet at a time
 * @rx_thread: rx thread to receive incoming packets
 * @rx_buf: receive buffer state
 */
struct qmsgq_gh_device {
	struct list_head item;
	struct device *dev;
	struct qmsgq_endpoint ep;

	unsigned int peer_cid;
	enum gh_msgq_label msgq_label;
	void *msgq_hdl;
	struct notifier_block rm_nb;

	struct wakeup_source *sock_ws;

	struct mutex tx_lock;
	struct task_struct *rx_thread;
	struct qmsgq_gh_recv_buf rx_buf;
};

static void reset_buf(struct qmsgq_gh_recv_buf *rx_buf)
{
	memset(rx_buf->buf, 0, MAX_PKT_SZ);
	rx_buf->hdr_received = false;
	rx_buf->copied = 0;
	rx_buf->len = 0;
}

static int qmsgq_gh_post(struct qmsgq_gh_device *qdev, struct qmsgq_gh_recv_buf *rx_buf)
{
	unsigned int cid, port, len;
	struct qmsgq_gh_hdr *hdr;
	struct sockaddr_vm src;
	struct sockaddr_vm dst;
	void *data;
	int rc;

	if (rx_buf->len < sizeof(*hdr)) {
		pr_err("%s: len: %zu < hdr size\n", __func__, rx_buf->len);
		return -EINVAL;
	}
	hdr = (struct qmsgq_gh_hdr *)rx_buf->buf;

	if (hdr->type != QMSGQ_GH_TYPE_DATA)
		return -EINVAL;

	cid = le32_to_cpu(hdr->src_rsvd);
	port = le32_to_cpu(hdr->src_port_id);
	vsock_addr_init(&src, cid, port);

	cid = le32_to_cpu(hdr->dst_rsvd);
	port = le32_to_cpu(hdr->dst_port_id);
	vsock_addr_init(&dst, cid, port);

	data = rx_buf->buf + sizeof(*hdr);
	len = rx_buf->len - sizeof(*hdr);

	rc = qmsgq_post(&qdev->ep, &src, &dst, data, len);

	return rc;
}

static void qmsgq_process_recv(struct qmsgq_gh_device *qdev, void *buf, size_t len)
{
	struct qmsgq_gh_recv_buf *rx_buf = &qdev->rx_buf;
	struct qmsgq_gh_hdr *hdr;
	size_t n;

	mutex_lock(&rx_buf->lock);

	/* Copy message into the local buffer */
	n = (rx_buf->copied + len < MAX_PKT_SZ) ? len : MAX_PKT_SZ - rx_buf->copied;
	memcpy(rx_buf->buf + rx_buf->copied, buf, n);
	rx_buf->copied += n;

	if (!rx_buf->hdr_received) {
		hdr = (struct qmsgq_gh_hdr *)rx_buf->buf;

		if (hdr->version != QMSGQ_GH_PROTO_VER_1) {
			pr_err("%s: Incorrect version:%d\n", __func__, hdr->version);
			goto err;
		}
		if (hdr->type != QMSGQ_GH_TYPE_DATA) {
			pr_err("%s: Incorrect type:%d\n", __func__, hdr->type);
			goto err;
		}
		if (hdr->size > MAX_PKT_SZ - sizeof(*hdr)) {
			pr_err("%s: Packet size too big:%d\n", __func__, hdr->size);
			goto err;
		}

		rx_buf->len = sizeof(*hdr) + hdr->size;
		rx_buf->hdr_received = true;
	}

	/* Check len; it cannot be smaller than the amount already copied */
	if (rx_buf->len < rx_buf->copied) {
		pr_err("%s: Size mismatch len:%zu, copied:%zu\n", __func__,
		       rx_buf->len, rx_buf->copied);
		goto err;
	}

	if (rx_buf->len == rx_buf->copied) {
		qmsgq_gh_post(qdev, rx_buf);
		reset_buf(rx_buf);
	}

	mutex_unlock(&rx_buf->lock);
	return;

err:
	reset_buf(rx_buf);
	mutex_unlock(&rx_buf->lock);
}

static int qmsgq_gh_msgq_recv(void *data)
{
	struct qmsgq_gh_device *qdev = data;
	size_t size;
	void *buf;
	int rc;

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		rc = gh_msgq_recv(qdev->msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size,
				  GH_MSGQ_TX_PUSH);
		if (rc)
			continue;

		if (size <= 0)
			continue;

		qmsgq_process_recv(qdev, buf, size);
		pm_wakeup_ws_event(qdev->sock_ws, QMSGQ_SKB_WAKEUP_MS, true);
	}
	kfree(buf);

	return 0;
}

static int qmsgq_gh_send(struct qmsgq_gh_device *qdev, void *buf, size_t len)
{
	size_t left, chunk, offset;
	int rc = 0;

	left = len;
	chunk = 0;
	offset = 0;

	mutex_lock(&qdev->tx_lock);
	while (left > 0) {
		chunk = (left > GH_MSGQ_MAX_MSG_SIZE_BYTES) ? GH_MSGQ_MAX_MSG_SIZE_BYTES : left;
		rc = gh_msgq_send(qdev->msgq_hdl, buf + offset, chunk, GH_MSGQ_TX_PUSH);
		if (rc) {
			if (rc == -ERESTARTSYS) {
				continue;
			} else {
				pr_err("%s: gh_msgq_send failed: %d\n", __func__, rc);
				mutex_unlock(&qdev->tx_lock);
				goto err;
			}
		}
		left -= chunk;
		offset += chunk;
	}
	mutex_unlock(&qdev->tx_lock);
	return 0;

err:
	return rc;
}
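Worked example of the chunking above: the largest datagram dgram_enqueue() accepts is MAX_PKT_SZ - 24 = 65512 bytes of payload, so header plus payload is exactly 65536 bytes. With a hypothetical GH_MSGQ_MAX_MSG_SIZE_BYTES of 240 (the real constant comes from gh_msgq.h) that is ceil(65536 / 240) = 274 gh_msgq_send() calls, all serialized under tx_lock so fragments from concurrent senders never interleave on the wire.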
static int qmsgq_gh_dgram_enqueue(struct qmsgq_sock *qsk, struct sockaddr_vm *remote,
				  struct msghdr *msg, size_t len)
{
	struct sockaddr_vm *local_addr = &qsk->local_addr;
	const struct qmsgq_endpoint *ep;
	struct qmsgq_gh_device *qdev;
	struct qmsgq_gh_hdr *hdr;
	char *buf;
	int rc;

	ep = qsk->ep;
	if (!ep)
		return -ENXIO;
	qdev = container_of(ep, struct qmsgq_gh_device, ep);

	if (!qdev->msgq_hdl) {
		pr_err("%s: Transport not ready\n", __func__);
		return -ENODEV;
	}

	if (len > MAX_PKT_SZ - sizeof(*hdr)) {
		pr_err("%s: Invalid pkt size: len: %zu\n", __func__, len);
		return -EMSGSIZE;
	}

	/* Allocate a buffer for the user's message and our packet header. */
	buf = kmalloc(len + sizeof(*hdr), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Populate header */
	hdr = (struct qmsgq_gh_hdr *)buf;
	hdr->version = QMSGQ_GH_PROTO_VER_1;
	hdr->type = QMSGQ_GH_TYPE_DATA;
	hdr->flags = 0;
	hdr->optlen = 0;
	hdr->size = len;
	hdr->src_rsvd = 0;
	hdr->src_port_id = local_addr->svm_port;
	hdr->dst_rsvd = 0;
	hdr->dst_port_id = remote->svm_port;
	rc = memcpy_from_msg((void *)buf + sizeof(*hdr), msg, len);
	if (rc) {
		pr_err("%s failed: memcpy_from_msg rc: %d\n", __func__, rc);
		goto send_err;
	}

	pr_debug("TX DATA: Len:0x%zx src[0x%x] dst[0x%x]\n", len, hdr->src_port_id,
		 hdr->dst_port_id);

	rc = qmsgq_gh_send(qdev, buf, len + sizeof(*hdr));
	if (rc < 0) {
		pr_err("%s: failed to send msg rc: %d\n", __func__, rc);
		goto send_err;
	}
	kfree(buf);

	return 0;

send_err:
	kfree(buf);
	return rc;
}

static int qmsgq_gh_socket_init(struct qmsgq_sock *qsk, struct qmsgq_sock *psk)
{
	return 0;
}

static void qmsgq_gh_destruct(struct qmsgq_sock *qsk)
{
}

static void qmsgq_gh_release(struct qmsgq_sock *qsk)
{
}

static bool qmsgq_gh_allow_rsvd_cid(u32 cid)
{
	/* Allowing for cid 0 as of now as af_qmsgq sends 0 if no cid is
	 * passed by the client.
	 */
	if (cid == 0)
		return true;

	return false;
}

static bool qmsgq_gh_dgram_allow(u32 cid, u32 port)
{
	if (qmsgq_gh_allow_rsvd_cid(cid) || cid == VMADDR_CID_ANY || cid == VMADDR_CID_HOST)
		return true;

	pr_err("%s: dgram not allowed for cid 0x%x\n", __func__, cid);

	return false;
}

static int qmsgq_gh_shutdown(struct qmsgq_sock *qsk, int mode)
{
	return 0;
}

static u32 qmsgq_gh_get_local_cid(void)
{
	return VMADDR_CID_HOST;
}

static int qmsgq_gh_msgq_start(struct qmsgq_gh_device *qdev)
{
	struct device *dev = qdev->dev;
	int rc;

	if (qdev->msgq_hdl) {
		dev_err(qdev->dev, "Already have msgq handle!\n");
		return NOTIFY_DONE;
	}

	qdev->msgq_hdl = gh_msgq_register(qdev->msgq_label);
	if (IS_ERR_OR_NULL(qdev->msgq_hdl)) {
		rc = PTR_ERR(qdev->msgq_hdl);
		dev_err(dev, "msgq register failed rc:%d\n", rc);
		return rc;
	}

	qdev->rx_thread = kthread_run(qmsgq_gh_msgq_recv, qdev, "qmsgq_gh_rx");
	if (IS_ERR_OR_NULL(qdev->rx_thread)) {
		rc = PTR_ERR(qdev->rx_thread);
		dev_err(dev, "Failed to create rx thread rc:%d\n", rc);
		return rc;
	}

	return 0;
}

static int qmsgq_gh_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct qmsgq_gh_device *qdev = container_of(nb, struct qmsgq_gh_device, rm_nb);
	struct gh_rm_notif_vm_status_payload *vm_status_payload = data;
	u8 vm_status = vm_status_payload->vm_status;
	int rc;

	if (cmd != GH_RM_NOTIF_VM_STATUS)
		return NOTIFY_DONE;

	/* TODO - check for peer */
	switch (vm_status) {
	case GH_RM_VM_STATUS_READY:
		rc = qmsgq_gh_msgq_start(qdev);
		break;
	default:
		pr_debug("Unknown notification for vmid = %d vm_status = %d\n",
			 vm_status_payload->vmid, vm_status);
	}

	return NOTIFY_DONE;
}

static int qmsgq_gh_peer_lookup(struct qmsgq_gh_device *qdev)
{
	struct device_node *node;
	u32 peer_vmid;
	int rc;

	node = of_get_child_by_name(of_find_node_by_path("/hypervisor"),
				    "msgqsock-msgq-pair");
	if (!node) {
		dev_err(qdev->dev, "failed to get msgqsock-msgq-pair node\n");
		return -EINVAL;
	}

	/* The peer-vmid property indicates both VMs are ready to communicate */
	rc = of_property_read_u32(node, "qcom,peer-vmid", &peer_vmid);
	if (!rc) {
		rc = qmsgq_gh_msgq_start(qdev);
		if (rc) {
			dev_err(qdev->dev, "msgq start failed rc[%d]\n", rc);
			of_node_put(node);
			return rc;
		}
	}

	of_node_put(node);
	return 0;
}

static int qmsgq_gh_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct qmsgq_gh_device *qdev;
	int rc;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return -ENOMEM;
	qdev->dev = dev;
	dev_set_drvdata(&pdev->dev, qdev);

	mutex_init(&qdev->tx_lock);
	mutex_init(&qdev->rx_buf.lock);
	qdev->rx_buf.len = 0;
	qdev->rx_buf.copied = 0;
	qdev->rx_buf.hdr_received = false;

	qdev->ep.module = THIS_MODULE;
	qdev->ep.init = qmsgq_gh_socket_init;
	qdev->ep.destruct = qmsgq_gh_destruct;
	qdev->ep.release = qmsgq_gh_release;
	qdev->ep.dgram_enqueue = qmsgq_gh_dgram_enqueue;
	qdev->ep.dgram_allow = qmsgq_gh_dgram_allow;
	qdev->ep.shutdown = qmsgq_gh_shutdown;
	qdev->ep.get_local_cid = qmsgq_gh_get_local_cid;

	/* TODO: properly set this */
	qdev->peer_cid = 0;

	qdev->sock_ws = wakeup_source_register(NULL, "qmsgq_sock_ws");

	rc = of_property_read_u32(np, "msgq-label", &qdev->msgq_label);
	if (rc) {
		dev_err(dev, "failed to read msgq-label info %d\n", rc);
		return rc;
	}

	qdev->rm_nb.notifier_call = qmsgq_gh_rm_cb;
	gh_rm_register_notifier(&qdev->rm_nb);

	rc = qmsgq_gh_peer_lookup(qdev);
	if (rc) {
		gh_rm_unregister_notifier(&qdev->rm_nb);
		return rc;
	}

	qmsgq_endpoint_register(&qdev->ep);
	return 0;
}

static int qmsgq_gh_remove(struct platform_device *pdev)
{
	struct qmsgq_gh_device *qdev = dev_get_drvdata(&pdev->dev);

	gh_rm_unregister_notifier(&qdev->rm_nb);

	if (qdev->rx_thread)
		kthread_stop(qdev->rx_thread);

	qmsgq_endpoint_unregister(&qdev->ep);

	return 0;
}

static const struct of_device_id qmsgq_gh_of_match[] = {
	{ .compatible = "qcom,qmsgq-gh" },
	{}
};
MODULE_DEVICE_TABLE(of, qmsgq_gh_of_match);

static struct platform_driver qmsgq_gh_driver = {
	.probe = qmsgq_gh_probe,
	.remove = qmsgq_gh_remove,
	.driver = {
		.name = "qmsgq-gh",
		.of_match_table = qmsgq_gh_of_match,
	}
};
module_platform_driver(qmsgq_gh_driver);

MODULE_ALIAS("gunyah:QMSGQ");
MODULE_DESCRIPTION("Gunyah QMSGQ Transport driver");
MODULE_LICENSE("GPL");
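The driver binds by compatible string, reads msgq-label from its own node, and checks /hypervisor/msgqsock-msgq-pair for a peer. A hedged devicetree sketch with illustrative values:

qmsgq-gh {
	compatible = "qcom,qmsgq-gh";
	msgq-label = <3>;		/* illustrative label value */
};

hypervisor {
	msgqsock-msgq-pair {
		qcom,peer-vmid = <45>;	/* presence gates msgq start */
	};
};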
@@ -14,6 +14,28 @@ config QRTR

if QRTR

config QRTR_NODE_ID
	int "QRTR Local Node ID"
	default 1
	help
	  This option is used to configure the QRTR node ID for the local
	  processor. The node ID is published to other nodes within the
	  system. This value can be overridden by the name service
	  application. This option is for configurations where the node ID
	  needs to be customized but the name service application is not
	  privileged enough to use netlink sockets.

config QRTR_WAKEUP_MS
	int "QRTR Wakeup timeout"
	default 0
	help
	  This option is used to configure the wakeup-source timeout that
	  QRTR should take when a packet is received. The qrtr driver can
	  guarantee that the packet gets queued to the socket but cannot
	  guarantee the client process will get time to run if autosleep is
	  enabled. This config helps mitigate missed packets on systems
	  where autosleep is aggressive.

config QRTR_SMD
	tristate "SMD IPC Router channels"
	depends on RPMSG || (COMPILE_TEST && RPMSG=n)
@@ -35,4 +57,21 @@ config QRTR_MHI
	  Say Y here to support MHI based ipcrouter channels. MHI is the
	  transport used for communicating to external modems.

config QRTR_GUNYAH
	tristate "Gunyah IPC Router channels"
	help
	  Say Y here to support a fifo based ipcrouter channel with Gunyah
	  hypervisor signaling. The Gunyah transport layer enables IPC
	  Router communication between two virtual machines. The transport
	  uses dynamically shared memory and Gunyah doorbells.

config QRTR_GENPOOL
	tristate "Genpool FIFO IPC Router channels"
	help
	  Say Y here to support a fifo based ipcrouter channel with genpool
	  and IPCC signaling. The genpool fifo transport layer enables IPC
	  Router communication between two endpoints. The transport utilizes
	  a reserved memory region created and owned by another device,
	  which is shared through the genpool framework.

endif # QRTR

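A hedged example of a product config fragment exercising the new options (values illustrative):

CONFIG_QRTR=y
CONFIG_QRTR_NODE_ID=3
CONFIG_QRTR_WAKEUP_MS=500
CONFIG_QRTR_GUNYAH=m
CONFIG_QRTR_GENPOOL=m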
@@ -8,3 +8,7 @@ obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o
qrtr-tun-y := tun.o
obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o
qrtr-mhi-y := mhi.o
obj-$(CONFIG_QRTR_GUNYAH) += qrtr-gunyah.o
qrtr-gunyah-y := gunyah.o
obj-$(CONFIG_QRTR_GENPOOL) += qrtr-genpool.o
qrtr-genpool-y := genpool.o
1428	net/qrtr/af_qrtr.c
File diff suppressed because it is too large
771	net/qrtr/genpool.c	Normal file
@@ -0,0 +1,771 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/sizes.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "qrtr.h"

#define MAX_PKT_SZ SZ_64K
#define LABEL_SIZE 32

#define FIFO_FULL_RESERVE 8
#define FIFO_SIZE 0x4000

#define HDR_KEY_VALUE 0xdead

#define MAGIC_KEY_VALUE 0x24495043 /* "$IPC" */
#define MAGIC_KEY 0x0
#define BUFFER_SIZE 0x4

#define FIFO_0_START_OFFSET 0x1000
#define FIFO_0_BASE 0x8
#define FIFO_0_SIZE 0xc
#define FIFO_0_TAIL 0x10
#define FIFO_0_HEAD 0x14
#define FIFO_0_NOTIFY 0x18

#define FIFO_1_START_OFFSET (FIFO_0_START_OFFSET + FIFO_SIZE)
#define FIFO_1_BASE 0x1c
#define FIFO_1_SIZE 0x20
#define FIFO_1_TAIL 0x24
#define FIFO_1_HEAD 0x28
#define FIFO_1_NOTIFY 0x2c

#define LOCAL_STATE 0x30

#define IRQ_SETUP_IDX 0
#define IRQ_XFER_IDX 1

#define STATE_WAIT_TIMEOUT msecs_to_jiffies(1000)

enum {
	LOCAL_STATE_DEFAULT,
	LOCAL_STATE_INIT,
	LOCAL_STATE_START,
	LOCAL_STATE_PREPARE_REBOOT,
	LOCAL_STATE_REBOOT,
};

struct qrtr_genpool_hdr {
	__le16 len;
	__le16 magic;
};

struct qrtr_genpool_ring {
	void *buf;
	size_t len;
	u32 offset;
};

struct qrtr_genpool_pipe {
	__le32 *tail;
	__le32 *head;
	__le32 *read_notify;

	void *fifo;
	size_t length;
};

/**
 * struct qrtr_genpool_dev - qrtr genpool fifo transport structure
 * @ep: qrtr endpoint specific info
 * @ep_registered: tracks the registration state of the qrtr endpoint
 * @dev: device from platform_device
 * @label: label of the edge on the other side
 * @ring: buf for reading from fifo
 * @rx_pipe: RX genpool fifo specific info
 * @tx_pipe: TX genpool fifo specific info
 * @tx_avail_notify: wait queue for available tx
 * @pool: handle to gen_pool framework
 * @dma_addr: dma_addr of shared fifo
 * @base: base of the shared fifo
 * @size: fifo size
 * @mbox_client: mailbox client signaling
 * @mbox_setup_chan: mailbox channel for setup
 * @mbox_xfer_chan: mailbox channel for transfers
 * @irq_setup: IRQ for signaling completion of fifo setup
 * @irq_setup_label: IRQ name for irq_setup
 * @setup_work: worker to maintain shared memory between edges
 * @irq_xfer: IRQ for incoming transfers
 * @irq_xfer_label: IRQ name for irq_xfer
 * @state: current state of the local side
 * @lock: lock for updating local state
 * @state_wait: wait queue for specific state
 * @reboot_handler: handle for getting reboot notifications
 */
struct qrtr_genpool_dev {
	struct qrtr_endpoint ep;
	bool ep_registered;
	struct device *dev;
	const char *label;
	struct qrtr_genpool_ring ring;
	struct qrtr_genpool_pipe rx_pipe;
	struct qrtr_genpool_pipe tx_pipe;
	wait_queue_head_t tx_avail_notify;

	struct gen_pool *pool;
	dma_addr_t dma_addr;
	void *base;
	size_t size;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_setup_chan;
	struct mbox_chan *mbox_xfer_chan;

	int irq_setup;
	char irq_setup_label[LABEL_SIZE];
	struct work_struct setup_work;

	int irq_xfer;
	char irq_xfer_label[LABEL_SIZE];

	u32 state;
	spinlock_t lock; /* lock for local state updates */
	wait_queue_head_t state_wait;

	struct notifier_block reboot_handler;
};

static void qrtr_genpool_set_state(struct qrtr_genpool_dev *qdev, u32 state)
{
	qdev->state = state;
	*(u32 *)(qdev->base + LOCAL_STATE) = cpu_to_le32(qdev->state);
}

static void qrtr_genpool_signal(struct qrtr_genpool_dev *qdev,
				struct mbox_chan *mbox_chan)
{
	mbox_send_message(mbox_chan, NULL);
	mbox_client_txdone(mbox_chan, 0);
}

static void qrtr_genpool_signal_setup(struct qrtr_genpool_dev *qdev)
{
	qrtr_genpool_signal(qdev, qdev->mbox_setup_chan);
}

static void qrtr_genpool_signal_xfer(struct qrtr_genpool_dev *qdev)
{
	qrtr_genpool_signal(qdev, qdev->mbox_xfer_chan);
}

static void qrtr_genpool_tx_write(struct qrtr_genpool_pipe *pipe, const void *data,
				  size_t count)
{
	size_t len;
	u32 head;

	head = le32_to_cpu(*pipe->head);

	len = min_t(size_t, count, pipe->length - head);
	if (len)
		memcpy_toio(pipe->fifo + head, data, len);

	if (len != count)
		memcpy_toio(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->length)
		head %= pipe->length;

	/* Ensure ordering of fifo and head update */
	smp_wmb();

	*pipe->head = cpu_to_le32(head);
}

static void qrtr_genpool_clr_tx_notify(struct qrtr_genpool_dev *qdev)
{
	*qdev->tx_pipe.read_notify = 0;
}

static void qrtr_genpool_set_tx_notify(struct qrtr_genpool_dev *qdev)
{
	*qdev->tx_pipe.read_notify = cpu_to_le32(1);
}

static size_t qrtr_genpool_tx_avail(struct qrtr_genpool_pipe *pipe)
{
	u32 avail;
	u32 head;
	u32 tail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->length - head + tail;
	else
		avail = tail - head;

	if (avail < FIFO_FULL_RESERVE)
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE;

	return avail;
}
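The availability math keeps FIFO_FULL_RESERVE bytes unused so head can never catch up to tail, which would be indistinguishable from an empty fifo. A standalone sketch of the same arithmetic with a worked case:

/* head = 0x3ff0, tail = 0x10, length = 0x4000:
 * avail = 0x4000 - 0x3ff0 + 0x10 = 0x20; minus the reserve of 8 -> 0x18.
 */
static u32 example_ring_avail(u32 head, u32 tail, u32 length, u32 reserve)
{
	u32 avail = (tail <= head) ? length - head + tail : tail - head;

	return (avail < reserve) ? 0 : avail - reserve;
}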

static void qrtr_genpool_wait_for_tx_avail(struct qrtr_genpool_dev *qdev)
{
	qrtr_genpool_set_tx_notify(qdev);
	wait_event_timeout(qdev->tx_avail_notify,
			   qrtr_genpool_tx_avail(&qdev->tx_pipe), 10 * HZ);
}

static void qrtr_genpool_generate_hdr(struct qrtr_genpool_dev *qdev,
				      struct qrtr_genpool_hdr *hdr)
{
	size_t hdr_len = sizeof(*hdr);

	while (qrtr_genpool_tx_avail(&qdev->tx_pipe) < hdr_len)
		qrtr_genpool_wait_for_tx_avail(qdev);

	qrtr_genpool_tx_write(&qdev->tx_pipe, hdr, hdr_len);
}

/* from qrtr to genpool fifo */
static int qrtr_genpool_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	struct qrtr_genpool_dev *qdev;
	struct qrtr_genpool_hdr hdr;
	size_t tx_avail;
	int chunk_size;
	int left_size;
	int offset;
	int rc;

	qdev = container_of(ep, struct qrtr_genpool_dev, ep);

	rc = skb_linearize(skb);
	if (rc) {
		kfree_skb(skb);
		return rc;
	}

	hdr.len = cpu_to_le16(skb->len);
	hdr.magic = cpu_to_le16(HDR_KEY_VALUE);
	qrtr_genpool_generate_hdr(qdev, &hdr);

	left_size = skb->len;
	offset = 0;
	while (left_size > 0) {
		tx_avail = qrtr_genpool_tx_avail(&qdev->tx_pipe);
		if (!tx_avail) {
			qrtr_genpool_wait_for_tx_avail(qdev);
			continue;
		}

		if (tx_avail < left_size)
			chunk_size = tx_avail;
		else
			chunk_size = left_size;

		qrtr_genpool_tx_write(&qdev->tx_pipe, skb->data + offset,
				      chunk_size);
		offset += chunk_size;
		left_size -= chunk_size;

		qrtr_genpool_signal_xfer(qdev);
	}
	qrtr_genpool_clr_tx_notify(qdev);
	kfree_skb(skb);

	return 0;
}

static size_t qrtr_genpool_rx_avail(struct qrtr_genpool_pipe *pipe)
{
	size_t len;
	u32 head;
	u32 tail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		len = pipe->length - tail + head;
	else
		len = head - tail;

	if (WARN_ON_ONCE(len > pipe->length))
		len = 0;

	return len;
}

static void qrtr_genpool_rx_advance(struct qrtr_genpool_pipe *pipe, size_t count)
{
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += count;
	if (tail >= pipe->length)
		tail %= pipe->length;

	*pipe->tail = cpu_to_le32(tail);
}

static void qrtr_genpool_rx_peak(struct qrtr_genpool_pipe *pipe, void *data,
				 unsigned int offset, size_t count)
{
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->length)
		tail %= pipe->length;

	len = min_t(size_t, count, pipe->length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, count - len);
}

static bool qrtr_genpool_get_read_notify(struct qrtr_genpool_dev *qdev)
{
	return le32_to_cpu(*qdev->rx_pipe.read_notify);
}

static void qrtr_genpool_read_new(struct qrtr_genpool_dev *qdev)
{
	struct qrtr_genpool_ring *ring = &qdev->ring;
	struct qrtr_genpool_hdr hdr = {0, 0};
	size_t rx_avail;
	size_t pkt_len;
	size_t hdr_len;
	int rc;

	/* copy hdr from rx_pipe and check hdr for pkt size */
	hdr_len = sizeof(hdr);
	qrtr_genpool_rx_peak(&qdev->rx_pipe, &hdr, 0, hdr_len);
	pkt_len = le16_to_cpu(hdr.len);
	if (pkt_len > MAX_PKT_SZ) {
		dev_err(qdev->dev, "invalid pkt_len %zu\n", pkt_len);
		return;
	}
	qrtr_genpool_rx_advance(&qdev->rx_pipe, hdr_len);

	rx_avail = qrtr_genpool_rx_avail(&qdev->rx_pipe);
	if (rx_avail > pkt_len)
		rx_avail = pkt_len;

	qrtr_genpool_rx_peak(&qdev->rx_pipe, ring->buf, 0, rx_avail);
	qrtr_genpool_rx_advance(&qdev->rx_pipe, rx_avail);

	if (rx_avail == pkt_len) {
		rc = qrtr_endpoint_post(&qdev->ep, ring->buf, pkt_len);
		if (rc == -EINVAL)
			dev_err(qdev->dev, "invalid ipcrouter packet\n");
	} else {
		ring->len = pkt_len;
		ring->offset = rx_avail;
	}
}

static void qrtr_genpool_read_frag(struct qrtr_genpool_dev *qdev)
{
	struct qrtr_genpool_ring *ring = &qdev->ring;
	size_t rx_avail;
	int rc;

	rx_avail = qrtr_genpool_rx_avail(&qdev->rx_pipe);
	if (rx_avail + ring->offset > ring->len)
		rx_avail = ring->len - ring->offset;

	qrtr_genpool_rx_peak(&qdev->rx_pipe, ring->buf + ring->offset, 0, rx_avail);
	qrtr_genpool_rx_advance(&qdev->rx_pipe, rx_avail);

	if (rx_avail + ring->offset == ring->len) {
		rc = qrtr_endpoint_post(&qdev->ep, ring->buf, ring->len);
		if (rc == -EINVAL)
			dev_err(qdev->dev, "invalid ipcrouter packet\n");
		ring->offset = 0;
		ring->len = 0;
	} else {
		ring->offset += rx_avail;
	}
}

static void qrtr_genpool_read(struct qrtr_genpool_dev *qdev)
{
	wake_up_all(&qdev->tx_avail_notify);

	while (qrtr_genpool_rx_avail(&qdev->rx_pipe)) {
		if (qdev->ring.offset)
			qrtr_genpool_read_frag(qdev);
		else
			qrtr_genpool_read_new(qdev);

		if (qrtr_genpool_get_read_notify(qdev))
			qrtr_genpool_signal_xfer(qdev);
	}
}

static void qrtr_genpool_memory_free(struct qrtr_genpool_dev *qdev)
|
||||
{
|
||||
if (!qdev->base)
|
||||
return;
|
||||
|
||||
gen_pool_free(qdev->pool, (unsigned long)qdev->base, qdev->size);
|
||||
qdev->base = NULL;
|
||||
qdev->dma_addr = 0;
|
||||
qdev->size = 0;
|
||||
}
|
||||
|
||||
static int qrtr_genpool_memory_alloc(struct qrtr_genpool_dev *qdev)
|
||||
{
|
||||
qdev->size = gen_pool_size(qdev->pool);
|
||||
qdev->base = gen_pool_dma_alloc(qdev->pool, qdev->size, &qdev->dma_addr);
|
||||
if (!qdev->base) {
|
||||
dev_err(qdev->dev, "failed to dma alloc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static irqreturn_t qrtr_genpool_setup_intr(int irq, void *data)
|
||||
{
|
||||
struct qrtr_genpool_dev *qdev = data;
|
||||
|
||||
schedule_work(&qdev->setup_work);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t qrtr_genpool_xfer_intr(int irq, void *data)
|
||||
{
|
||||
struct qrtr_genpool_dev *qdev = data;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qdev->lock, flags);
|
||||
if (qdev->state != LOCAL_STATE_START) {
|
||||
spin_unlock_irqrestore(&qdev->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
spin_unlock_irqrestore(&qdev->lock, flags);
|
||||
|
||||
qrtr_genpool_read(qdev);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int qrtr_genpool_irq_init(struct qrtr_genpool_dev *qdev)
|
||||
{
|
||||
struct device *dev = qdev->dev;
|
||||
int irq, rc;
|
||||
|
||||
irq = of_irq_get(dev->of_node, IRQ_XFER_IDX);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
qdev->irq_xfer = irq;
|
||||
snprintf(qdev->irq_xfer_label, LABEL_SIZE, "%s-xfer", qdev->label);
|
||||
rc = devm_request_irq(dev, qdev->irq_xfer, qrtr_genpool_xfer_intr, 0,
|
||||
qdev->irq_xfer_label, qdev);
|
||||
if (rc) {
|
||||
dev_err(dev, "failed to request xfer IRQ: %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
enable_irq_wake(qdev->irq_xfer);
|
||||
|
||||
irq = of_irq_get(dev->of_node, IRQ_SETUP_IDX);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
qdev->irq_setup = irq;
|
||||
snprintf(qdev->irq_setup_label, LABEL_SIZE, "%s-setup", qdev->label);
|
||||
rc = devm_request_irq(dev, qdev->irq_setup, qrtr_genpool_setup_intr, 0,
|
||||
qdev->irq_setup_label, qdev);
|
||||
if (rc) {
|
||||
dev_err(dev, "failed to request setup IRQ: %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
enable_irq_wake(qdev->irq_setup);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qrtr_genpool_mbox_init(struct qrtr_genpool_dev *qdev)
{
	struct device *dev = qdev->dev;
	int rc;

	qdev->mbox_client.dev = dev;
	qdev->mbox_client.knows_txdone = true;
	qdev->mbox_setup_chan = mbox_request_channel(&qdev->mbox_client, IRQ_SETUP_IDX);
	if (IS_ERR(qdev->mbox_setup_chan)) {
		rc = PTR_ERR(qdev->mbox_setup_chan);
		if (rc != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire IPC setup channel %d\n", rc);
		return rc;
	}

	qdev->mbox_xfer_chan = mbox_request_channel(&qdev->mbox_client, IRQ_XFER_IDX);
	if (IS_ERR(qdev->mbox_xfer_chan)) {
		rc = PTR_ERR(qdev->mbox_xfer_chan);
		if (rc != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire IPC xfer channel %d\n", rc);
		return rc;
	}

	return 0;
}

/**
 * qrtr_genpool_fifo_init() - init genpool fifo configs
 *
 * @qdev: internal qrtr genpool state variable
 *
 * This function is called to initialize the genpool fifo pointers with
 * the genpool fifo configurations.
 */
static void qrtr_genpool_fifo_init(struct qrtr_genpool_dev *qdev)
{
	u8 *descs;

	memset(qdev->base, 0, FIFO_0_START_OFFSET);
	descs = qdev->base;
	*(u32 *)(descs + MAGIC_KEY) = MAGIC_KEY_VALUE;
	*(u32 *)(descs + BUFFER_SIZE) = qdev->size;

	*(u32 *)(descs + FIFO_0_BASE) = FIFO_0_START_OFFSET;
	*(u32 *)(descs + FIFO_0_SIZE) = FIFO_SIZE;
	qdev->tx_pipe.fifo = (u32 *)(descs + FIFO_0_START_OFFSET);
	qdev->tx_pipe.tail = (u32 *)(descs + FIFO_0_TAIL);
	qdev->tx_pipe.head = (u32 *)(descs + FIFO_0_HEAD);
	qdev->tx_pipe.read_notify = (u32 *)(descs + FIFO_0_NOTIFY);
	qdev->tx_pipe.length = FIFO_SIZE;

	*(u32 *)(descs + FIFO_1_BASE) = FIFO_1_START_OFFSET;
	*(u32 *)(descs + FIFO_1_SIZE) = FIFO_SIZE;
	qdev->rx_pipe.fifo = (u32 *)(descs + FIFO_1_START_OFFSET);
	qdev->rx_pipe.tail = (u32 *)(descs + FIFO_1_TAIL);
	qdev->rx_pipe.head = (u32 *)(descs + FIFO_1_HEAD);
	qdev->rx_pipe.read_notify = (u32 *)(descs + FIFO_1_NOTIFY);
	qdev->rx_pipe.length = FIFO_SIZE;

	/* Reset respective indices */
	*qdev->tx_pipe.head = 0;
	*qdev->rx_pipe.tail = 0;
}

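As a reading aid, the offset writes above amount to a descriptor header at the base of the shared region, followed by the two FIFOs. A hypothetical struct view of that header (names invented here; the real offsets come from the MAGIC_KEY/BUFFER_SIZE/FIFO_* macros defined earlier in this file, so the field ordering below is an assumption):

/* Illustrative sketch only; the driver writes these words through
 * u32 casts at fixed macro offsets rather than through a struct. */
struct genpool_shm_hdr {
	u32 magic;        /* MAGIC_KEY: set to MAGIC_KEY_VALUE */
	u32 buffer_size;  /* BUFFER_SIZE: total shared region size */
	u32 fifo0_base;   /* FIFO_0_BASE: set to FIFO_0_START_OFFSET */
	u32 fifo0_size;   /* FIFO_0_SIZE: set to FIFO_SIZE */
	u32 fifo0_tail;   /* FIFO_0_TAIL: tx pipe read index */
	u32 fifo0_head;   /* FIFO_0_HEAD: tx pipe write index */
	u32 fifo0_notify; /* FIFO_0_NOTIFY: reader-wants-signal flag */
	/* FIFO_1_* mirrors the above for the rx pipe */
};
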
static int qrtr_genpool_memory_init(struct qrtr_genpool_dev *qdev)
{
	struct device_node *np;

	np = of_parse_phandle(qdev->dev->of_node, "gen-pool", 0);
	if (!np) {
		dev_err(qdev->dev, "failed to parse gen-pool\n");
		return -ENODEV;
	}

	qdev->pool = of_gen_pool_get(np, "qrtr-gen-pool", 0);
	of_node_put(np);
	if (!qdev->pool)
		return -EPROBE_DEFER;

	/* check if pool has any entries */
	if (!gen_pool_avail(qdev->pool))
		return -EPROBE_DEFER;

	return 0;
}

static void qrtr_genpool_setup_work(struct work_struct *work)
{
	struct qrtr_genpool_dev *qdev = container_of(work, struct qrtr_genpool_dev, setup_work);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&qdev->lock, flags);
	if (qdev->state == LOCAL_STATE_REBOOT) {
		spin_unlock_irqrestore(&qdev->lock, flags);
		return;
	}

	if (qdev->state == LOCAL_STATE_PREPARE_REBOOT) {
		qrtr_genpool_set_state(qdev, LOCAL_STATE_REBOOT);
		spin_unlock_irqrestore(&qdev->lock, flags);
		wake_up_all(&qdev->state_wait);
		return;
	}
	spin_unlock_irqrestore(&qdev->lock, flags);

	disable_irq(qdev->irq_xfer);

	if (qdev->ep_registered) {
		qrtr_endpoint_unregister(&qdev->ep);
		qdev->ep_registered = false;
	}

	qrtr_genpool_memory_free(qdev);

	rc = qrtr_genpool_memory_alloc(qdev);
	if (rc)
		return;

	qrtr_genpool_fifo_init(qdev);

	qdev->ep.xmit = qrtr_genpool_send;
	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NET_ID_AUTO, false, NULL);
	if (rc) {
		dev_err(qdev->dev, "failed to register qrtr endpoint rc: %d\n", rc);
		return;
	}
	qdev->ep_registered = true;

	enable_irq(qdev->irq_xfer);

	spin_lock_irqsave(&qdev->lock, flags);
	qrtr_genpool_set_state(qdev, LOCAL_STATE_START);
	qrtr_genpool_signal_setup(qdev);
	spin_unlock_irqrestore(&qdev->lock, flags);
}

static int qrtr_genpool_reboot_cb(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct qrtr_genpool_dev *qdev = container_of(nb, struct qrtr_genpool_dev, reboot_handler);
	unsigned long flags;
	int rc;

	cancel_work_sync(&qdev->setup_work);

	spin_lock_irqsave(&qdev->lock, flags);
	qrtr_genpool_set_state(qdev, LOCAL_STATE_PREPARE_REBOOT);
	qrtr_genpool_signal_setup(qdev);

	rc = wait_event_lock_irq_timeout(qdev->state_wait,
					 qdev->state == LOCAL_STATE_REBOOT,
					 qdev->lock,
					 STATE_WAIT_TIMEOUT);
	if (!rc)
		dev_dbg(qdev->dev, "timed out waiting for reboot state change\n");
	spin_unlock_irqrestore(&qdev->lock, flags);

	disable_irq(qdev->irq_xfer);

	if (qdev->ep_registered) {
		qrtr_endpoint_unregister(&qdev->ep);
		qdev->ep_registered = false;
	}

	qrtr_genpool_memory_free(qdev);

	vfree(qdev->ring.buf);
	qdev->ring.buf = NULL;

	return NOTIFY_DONE;
}

/**
 * qrtr_genpool_probe() - Probe a genpool fifo transport
 *
 * @pdev: Platform device corresponding to genpool fifo transport.
 *
 * @return: 0 on success, standard Linux error codes on error.
 *
 * This function is called when the underlying device tree driver registers
 * a platform device, mapped to a genpool fifo transport.
 */
static int qrtr_genpool_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qrtr_genpool_dev *qdev;
	int rc;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return -ENOMEM;

	qdev->dev = dev;
	dev_set_drvdata(dev, qdev);

	rc = of_property_read_string(dev->of_node, "label", &qdev->label);
	if (rc < 0)
		qdev->label = dev->of_node->name;

	qdev->ring.buf = vzalloc(MAX_PKT_SZ);
	if (!qdev->ring.buf)
		return -ENOMEM;

	rc = qrtr_genpool_memory_init(qdev);
	if (rc)
		goto err;

	init_waitqueue_head(&qdev->tx_avail_notify);
	init_waitqueue_head(&qdev->state_wait);
	INIT_WORK(&qdev->setup_work, qrtr_genpool_setup_work);
	spin_lock_init(&qdev->lock);

	qdev->reboot_handler.notifier_call = qrtr_genpool_reboot_cb;
	rc = devm_register_reboot_notifier(qdev->dev, &qdev->reboot_handler);
	if (rc) {
		dev_err(qdev->dev, "failed to register reboot notifier rc: %d\n", rc);
		goto err;
	}

	rc = qrtr_genpool_mbox_init(qdev);
	if (rc)
		goto err;

	rc = qrtr_genpool_irq_init(qdev);
	if (rc)
		goto err;

	return 0;

err:
	vfree(qdev->ring.buf);

	return rc;
}

static int qrtr_genpool_remove(struct platform_device *pdev)
{
	struct qrtr_genpool_dev *qdev = platform_get_drvdata(pdev);

	cancel_work_sync(&qdev->setup_work);

	if (qdev->ep_registered)
		qrtr_endpoint_unregister(&qdev->ep);

	vfree(qdev->ring.buf);

	return 0;
}

static const struct of_device_id qrtr_genpool_match_table[] = {
	{ .compatible = "qcom,qrtr-genpool" },
	{}
};
MODULE_DEVICE_TABLE(of, qrtr_genpool_match_table);

static struct platform_driver qrtr_genpool_driver = {
	.probe = qrtr_genpool_probe,
	.remove = qrtr_genpool_remove,
	.driver = {
		.name = "qcom_genpool_qrtr",
		.of_match_table = qrtr_genpool_match_table,
	},
};
module_platform_driver(qrtr_genpool_driver);

MODULE_DESCRIPTION("QTI IPC-Router FIFO interface driver");
MODULE_LICENSE("GPL");

872	net/qrtr/gunyah.c	Normal file
@@ -0,0 +1,872 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_vm.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include "qrtr.h"

#define GUNYAH_MAGIC_KEY	0x24495043 /* "$IPC" */
#define FIFO_SIZE		0x4000
#define FIFO_FULL_RESERVE	8
#define FIFO_0_START		0x1000
#define FIFO_1_START		(FIFO_0_START + FIFO_SIZE)
#define GUNYAH_MAGIC_IDX	0x0
#define TAIL_0_IDX		0x1
#define HEAD_0_IDX		0x2
#define TAIL_1_IDX		0x3
#define HEAD_1_IDX		0x4
#define NOTIFY_0_IDX		0x5
#define NOTIFY_1_IDX		0x6
#define QRTR_DBL_MASK		0x1

/* Add potential padding and header space to 64k */
#define MAX_PKT_SZ		(SZ_64K + SZ_32)

struct gunyah_ring {
	void *buf;
	size_t len;
	u32 offset;
};

struct gunyah_pipe {
	__le32 *tail;
	__le32 *head;
	__le32 *read_notify;

	void *fifo;
	size_t length;
};

/**
 * qrtr_gunyah_dev - qrtr gunyah transport structure
 * @ep: qrtr endpoint specific info
 * @dev: device from platform_device
 * @ring: ring buffer information for bouncing data
 * @res: resource of reserved mem region
 * @memparcel: memparcel handle returned from sharing mem
 * @base: Base of the shared fifo
 * @size: fifo size
 * @master: primary vm indicator
 * @peer_name: name of vm peer
 * @vm_nb: notifier block for vm status from rm
 * @state_lock: lock to protect registered state
 * @registered: state of endpoint
 * @label: label for gunyah resources
 * @tx_dbl: doorbell for tx notifications
 * @rx_dbl: doorbell for rx notifications
 * @work: work for retrying the tx doorbell
 * @dbl_lock: lock to prevent read races
 * @tx_pipe: TX gunyah specific info
 * @rx_pipe: RX gunyah specific info
 * @tx_avail_notify: wait until tx space available
 */
struct qrtr_gunyah_dev {
	struct qrtr_endpoint ep;
	struct device *dev;
	struct gunyah_ring ring;

	struct resource res;
	u32 memparcel;
	void *base;
	size_t size;
	bool master;

	u32 peer_name;
	struct notifier_block vm_nb;
	/* lock to protect registered */
	struct mutex state_lock;
	bool registered;

	u32 label;
	void *tx_dbl;
	void *rx_dbl;
	struct work_struct work;
	/* lock to protect dbl_running */
	spinlock_t dbl_lock;

	struct gunyah_pipe tx_pipe;
	struct gunyah_pipe rx_pipe;
	wait_queue_head_t tx_avail_notify;
};

static void qrtr_gunyah_read(struct qrtr_gunyah_dev *qdev);
static void qrtr_gunyah_fifo_init(struct qrtr_gunyah_dev *qdev);

static void qrtr_gunyah_kick(struct qrtr_gunyah_dev *qdev)
{
	gh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;
	int ret;

	ret = gh_dbl_send(qdev->tx_dbl, &dbl_mask, GH_DBL_NONBLOCK);
	if (ret) {
		if (ret != -EAGAIN)
			dev_err(qdev->dev, "failed to raise doorbell %d\n", ret);
		if (!qdev->master)
			schedule_work(&qdev->work);
	}
}

static void qrtr_gunyah_retry_work(struct work_struct *work)
{
	struct qrtr_gunyah_dev *qdev = container_of(work, struct qrtr_gunyah_dev,
						    work);
	gh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;

	gh_dbl_send(qdev->tx_dbl, &dbl_mask, 0);
}

static void qrtr_gunyah_cb(int irq, void *data)
{
	qrtr_gunyah_read((struct qrtr_gunyah_dev *)data);
}

static size_t gunyah_rx_avail(struct gunyah_pipe *pipe)
{
	size_t len;
	u32 head;
	u32 tail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		len = pipe->length - tail + head;
	else
		len = head - tail;

	if (WARN_ON_ONCE(len > pipe->length))
		len = 0;

	return len;
}

static void gunyah_rx_peak(struct gunyah_pipe *pipe, void *data,
			   unsigned int offset, size_t count)
{
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->length)
		tail -= pipe->length;

	if (WARN_ON_ONCE(tail > pipe->length))
		return;

	len = min_t(size_t, count, pipe->length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, (count - len));
}

static void gunyah_rx_advance(struct gunyah_pipe *pipe, size_t count)
{
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->length)
		tail %= pipe->length;

	*pipe->tail = cpu_to_le32(tail);
}

static size_t gunyah_tx_avail(struct gunyah_pipe *pipe)
{
	u32 avail;
	u32 head;
	u32 tail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->length - head + tail;
	else
		avail = tail - head;

	if (avail < FIFO_FULL_RESERVE)
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE;

	if (WARN_ON_ONCE(head > pipe->length))
		avail = 0;

	return avail;
}

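The head/tail arithmetic in gunyah_rx_avail()/gunyah_tx_avail() is easy to sanity-check outside the kernel. A minimal user-space sketch of the same accounting, using the FIFO_SIZE and FIFO_FULL_RESERVE values defined above (illustrative only, not driver code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define FIFO_SIZE         0x4000
#define FIFO_FULL_RESERVE 8

/* mirrors gunyah_tx_avail(): free space between writer head and reader tail */
static size_t tx_avail(uint32_t head, uint32_t tail)
{
	size_t avail = (tail <= head) ? FIFO_SIZE - head + tail : tail - head;

	return (avail < FIFO_FULL_RESERVE) ? 0 : avail - FIFO_FULL_RESERVE;
}

int main(void)
{
	/* writer nearly caught up with the reader across the wrap point */
	assert(tx_avail(0x3f00, 0x0100) == 0x0200 - FIFO_FULL_RESERVE);
	/* head == tail means empty, so almost the whole ring is writable */
	assert(tx_avail(0, 0) == FIFO_SIZE - FIFO_FULL_RESERVE);
	return 0;
}

The reserve keeps the ring from ever filling completely, so head == tail can unambiguously mean empty rather than full.
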
static void gunyah_tx_write(struct gunyah_pipe *pipe, const void *data,
			    size_t count)
{
	size_t len;
	u32 head;

	head = le32_to_cpu(*pipe->head);
	if (WARN_ON_ONCE(head > pipe->length))
		return;

	len = min_t(size_t, count, pipe->length - head);
	if (len)
		memcpy_toio(pipe->fifo + head, data, len);

	if (len != count)
		memcpy_toio(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->length)
		head -= pipe->length;

	/* Ensure ordering of fifo and head update */
	smp_wmb();

	*pipe->head = cpu_to_le32(head);
}

static size_t gunyah_sg_copy_toio(struct scatterlist *sg, unsigned int nents,
				  void *buf, size_t buflen, off_t skip)
{
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
	struct sg_mapping_iter miter;
	unsigned int offset = 0;

	sg_miter_start(&miter, sg, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memcpy_toio(buf + offset, miter.addr, len);
		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}

static void gunyah_sg_write(struct gunyah_pipe *pipe, struct scatterlist *sg,
			    int offset, size_t count)
{
	size_t len;
	u32 head;
	int rc = 0;

	head = le32_to_cpu(*pipe->head);
	if (WARN_ON_ONCE(head > pipe->length))
		return;

	len = min_t(size_t, count, pipe->length - head);
	if (len) {
		rc = gunyah_sg_copy_toio(sg, sg_nents(sg), pipe->fifo + head,
					 len, offset);
		offset += rc;
	}

	if (len != count)
		rc = gunyah_sg_copy_toio(sg, sg_nents(sg), pipe->fifo,
					 count - len, offset);

	head += count;
	if (head >= pipe->length)
		head -= pipe->length;

	/* ensure contents are in fifo before updating head */
	smp_wmb();

	*pipe->head = cpu_to_le32(head);
}

static void gunyah_set_tx_notify(struct qrtr_gunyah_dev *qdev)
{
	*qdev->tx_pipe.read_notify = cpu_to_le32(1);
}

static void gunyah_clr_tx_notify(struct qrtr_gunyah_dev *qdev)
{
	*qdev->tx_pipe.read_notify = 0;
}

static bool gunyah_get_read_notify(struct qrtr_gunyah_dev *qdev)
{
	return le32_to_cpu(*qdev->rx_pipe.read_notify);
}

static int gunyah_wait_for_tx_avail(struct qrtr_gunyah_dev *qdev)
{
	int ret;

	gunyah_set_tx_notify(qdev);
	qrtr_gunyah_kick(qdev);
	ret = wait_event_timeout(qdev->tx_avail_notify,
				 gunyah_tx_avail(&qdev->tx_pipe), 10 * HZ);

	return ret;
}

/* from qrtr to gunyah */
static int qrtr_gunyah_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	struct qrtr_gunyah_dev *qdev;
	size_t tx_avail;
	int chunk_size;
	int left_size;
	int offset;
	int rc = 0;

	qdev = container_of(ep, struct qrtr_gunyah_dev, ep);

	left_size = skb->len;
	offset = 0;
	while (left_size > 0) {
		tx_avail = gunyah_tx_avail(&qdev->tx_pipe);
		if (!tx_avail) {
			if (!gunyah_wait_for_tx_avail(qdev)) {
				dev_err(qdev->dev, "transport stalled\n");
				rc = -ETIMEDOUT;
				break;
			}
			continue;
		}
		if (tx_avail < left_size)
			chunk_size = tx_avail;
		else
			chunk_size = left_size;

		if (skb_is_nonlinear(skb)) {
			struct scatterlist sg[MAX_SKB_FRAGS + 1];

			sg_init_table(sg, skb_shinfo(skb)->nr_frags + 1);
			rc = skb_to_sgvec(skb, sg, 0, skb->len);
			if (rc < 0) {
				dev_err(qdev->dev, "failed skb_to_sgvec rc:%d\n", rc);
				break;
			}
			gunyah_sg_write(&qdev->tx_pipe, sg, offset,
					chunk_size);
		} else {
			gunyah_tx_write(&qdev->tx_pipe, skb->data + offset,
					chunk_size);
		}

		offset += chunk_size;
		left_size -= chunk_size;

		qrtr_gunyah_kick(qdev);
	}
	gunyah_clr_tx_notify(qdev);
	kfree_skb(skb);

	return (rc < 0) ? rc : 0;
}

static void qrtr_gunyah_read_new(struct qrtr_gunyah_dev *qdev)
{
	struct gunyah_ring *ring = &qdev->ring;
	size_t rx_avail;
	size_t pkt_len;
	u32 hdr[8];
	int rc;
	size_t hdr_len = sizeof(hdr);

	gunyah_rx_peak(&qdev->rx_pipe, &hdr, 0, hdr_len);
	pkt_len = qrtr_peek_pkt_size((void *)&hdr);
	if ((int)pkt_len < 0 || pkt_len > MAX_PKT_SZ) {
		/* Corrupted packet, reset the pipe and discard existing data */
		rx_avail = gunyah_rx_avail(&qdev->rx_pipe);
		dev_err(qdev->dev, "invalid pkt_len:%zu dropping:%zu bytes\n",
			pkt_len, rx_avail);
		gunyah_rx_advance(&qdev->rx_pipe, rx_avail);
		return;
	}

	rx_avail = gunyah_rx_avail(&qdev->rx_pipe);
	if (rx_avail > pkt_len)
		rx_avail = pkt_len;

	gunyah_rx_peak(&qdev->rx_pipe, ring->buf, 0, rx_avail);
	gunyah_rx_advance(&qdev->rx_pipe, rx_avail);

	if (rx_avail == pkt_len) {
		rc = qrtr_endpoint_post(&qdev->ep, ring->buf, pkt_len);
		if (rc == -EINVAL)
			dev_err(qdev->dev, "invalid ipcrouter packet\n");
	} else {
		ring->len = pkt_len;
		ring->offset = rx_avail;
	}
}

static void qrtr_gunyah_read_frag(struct qrtr_gunyah_dev *qdev)
{
	struct gunyah_ring *ring = &qdev->ring;
	size_t rx_avail;
	int rc;

	rx_avail = gunyah_rx_avail(&qdev->rx_pipe);
	if (rx_avail + ring->offset > ring->len)
		rx_avail = ring->len - ring->offset;

	gunyah_rx_peak(&qdev->rx_pipe, ring->buf + ring->offset, 0, rx_avail);
	gunyah_rx_advance(&qdev->rx_pipe, rx_avail);

	if (rx_avail + ring->offset == ring->len) {
		rc = qrtr_endpoint_post(&qdev->ep, ring->buf, ring->len);
		if (rc == -EINVAL)
			dev_err(qdev->dev, "invalid ipcrouter packet\n");
		ring->offset = 0;
		ring->len = 0;
	} else {
		ring->offset += rx_avail;
	}
}

static void qrtr_gunyah_read(struct qrtr_gunyah_dev *qdev)
{
	unsigned long flags;

	if (!qdev) {
		pr_err("%s: Invalid data.\n", __func__);
		return;
	}

	spin_lock_irqsave(&qdev->dbl_lock, flags);
	wake_up_all(&qdev->tx_avail_notify);

	while (gunyah_rx_avail(&qdev->rx_pipe)) {
		if (qdev->ring.offset)
			qrtr_gunyah_read_frag(qdev);
		else
			qrtr_gunyah_read_new(qdev);

		if (gunyah_get_read_notify(qdev))
			qrtr_gunyah_kick(qdev);
	}
	spin_unlock_irqrestore(&qdev->dbl_lock, flags);
}

static int qrtr_gunyah_share_mem(struct qrtr_gunyah_dev *qdev, gh_vmid_t self,
				 gh_vmid_t peer)
{
	struct qcom_scm_vmperm src_vmlist[] = {{self,
						PERM_READ | PERM_WRITE | PERM_EXEC}};
	struct qcom_scm_vmperm dst_vmlist[] = {{self, PERM_READ | PERM_WRITE},
					       {peer, PERM_READ | PERM_WRITE}};
	u64 srcvmids = BIT(src_vmlist[0].vmid);
	u64 dstvmids = BIT(dst_vmlist[0].vmid) | BIT(dst_vmlist[1].vmid);
	struct gh_acl_desc *acl;
	struct gh_sgl_desc *sgl;
	int ret;

	ret = qcom_scm_assign_mem(qdev->res.start, resource_size(&qdev->res),
				  &srcvmids, dst_vmlist, ARRAY_SIZE(dst_vmlist));
	if (ret) {
		dev_err(qdev->dev, "qcom_scm_assign_mem failed addr=%llx size=%zu err=%d\n",
			qdev->res.start, qdev->size, ret);
		return ret;
	}

	acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
	if (!sgl) {
		kfree(acl);
		return -ENOMEM;
	}
	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = (u16)self;
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = (u16)peer;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = qdev->res.start;
	sgl->sgl_entries[0].size = resource_size(&qdev->res);

	ret = ghd_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, qdev->label,
			       acl, sgl, NULL, &qdev->memparcel);
	if (ret) {
		dev_err(qdev->dev, "gh_rm_mem_share failed addr=%llx size=%zu err=%d\n",
			qdev->res.start, qdev->size, ret);
		/* Attempt to give resource back to HLOS */
		if (qcom_scm_assign_mem(qdev->res.start, resource_size(&qdev->res),
					&dstvmids, src_vmlist, ARRAY_SIZE(src_vmlist)))
			dev_err(qdev->dev, "qcom_scm_assign_mem failed addr=%llx size=%zu err=%d\n",
				qdev->res.start, qdev->size, ret);
	}

	kfree(acl);
	kfree(sgl);

	return ret;
}

static void qrtr_gunyah_unshare_mem(struct qrtr_gunyah_dev *qdev,
				    gh_vmid_t self, gh_vmid_t peer)
{
	u64 src_vmlist = BIT(self) | BIT(peer);
	struct qcom_scm_vmperm dst_vmlist[1] = {{self, PERM_READ | PERM_WRITE | PERM_EXEC}};
	int ret;

	ret = ghd_rm_mem_reclaim(qdev->memparcel, 0);
	if (ret)
		dev_err(qdev->dev, "Gunyah reclaim failed\n");

	ret = qcom_scm_assign_mem(qdev->res.start, resource_size(&qdev->res),
				  &src_vmlist, dst_vmlist, 1);
	if (ret)
		dev_err(qdev->dev, "qcom_scm_assign_mem failed addr=%llx size=%llu err=%d\n",
			qdev->res.start, resource_size(&qdev->res), ret);
}

static int qrtr_gunyah_vm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct qrtr_gunyah_dev *qdev = container_of(nb, struct qrtr_gunyah_dev, vm_nb);
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;
	gh_vmid_t vmid;

	if (!data)
		return NOTIFY_DONE;
	vmid = *((gh_vmid_t *)data);

	if (ghd_rm_get_vmid(qdev->peer_name, &peer_vmid))
		return NOTIFY_DONE;
	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		return NOTIFY_DONE;
	if (peer_vmid != vmid)
		return NOTIFY_DONE;

	mutex_lock(&qdev->state_lock);
	switch (cmd) {
	case GH_VM_BEFORE_POWERUP:
		if (qdev->registered)
			break;
		qrtr_gunyah_fifo_init(qdev);
		if (qrtr_endpoint_register(&qdev->ep, QRTR_EP_NET_ID_AUTO, false, NULL)) {
			dev_err(qdev->dev, "endpoint register failed\n");
			break;
		}
		if (qrtr_gunyah_share_mem(qdev, self_vmid, peer_vmid)) {
			dev_err(qdev->dev, "failed to share memory\n");
			qrtr_endpoint_unregister(&qdev->ep);
			break;
		}
		qdev->registered = true;
		break;
	case GH_VM_POWERUP_FAIL:
		fallthrough;
	case GH_VM_EARLY_POWEROFF:
		if (qdev->registered) {
			qrtr_endpoint_unregister(&qdev->ep);
			qrtr_gunyah_unshare_mem(qdev, self_vmid, peer_vmid);
			qdev->registered = false;
		}
		break;
	}
	mutex_unlock(&qdev->state_lock);

	return NOTIFY_DONE;
}

/**
 * qrtr_gunyah_fifo_init() - init gunyah xprt configs
 *
 * @qdev: internal qrtr gunyah state variable
 *
 * This function is called to initialize the gunyah XPRT pointers with
 * the gunyah XPRT configurations either from device tree or static arrays.
 */
static void qrtr_gunyah_fifo_init(struct qrtr_gunyah_dev *qdev)
{
	__le32 *descs;

	if (qdev->master)
		memset(qdev->base, 0, sizeof(*descs) * 10);

	descs = qdev->base;
	descs[GUNYAH_MAGIC_IDX] = GUNYAH_MAGIC_KEY;

	if (qdev->master) {
		qdev->tx_pipe.tail = &descs[TAIL_0_IDX];
		qdev->tx_pipe.head = &descs[HEAD_0_IDX];
		qdev->tx_pipe.fifo = qdev->base + FIFO_0_START;
		qdev->tx_pipe.length = FIFO_SIZE;
		qdev->tx_pipe.read_notify = &descs[NOTIFY_0_IDX];

		qdev->rx_pipe.tail = &descs[TAIL_1_IDX];
		qdev->rx_pipe.head = &descs[HEAD_1_IDX];
		qdev->rx_pipe.fifo = qdev->base + FIFO_1_START;
		qdev->rx_pipe.length = FIFO_SIZE;
		qdev->rx_pipe.read_notify = &descs[NOTIFY_1_IDX];
	} else {
		qdev->tx_pipe.tail = &descs[TAIL_1_IDX];
		qdev->tx_pipe.head = &descs[HEAD_1_IDX];
		qdev->tx_pipe.fifo = qdev->base + FIFO_1_START;
		qdev->tx_pipe.length = FIFO_SIZE;
		qdev->tx_pipe.read_notify = &descs[NOTIFY_1_IDX];

		qdev->rx_pipe.tail = &descs[TAIL_0_IDX];
		qdev->rx_pipe.head = &descs[HEAD_0_IDX];
		qdev->rx_pipe.fifo = qdev->base + FIFO_0_START;
		qdev->rx_pipe.length = FIFO_SIZE;
		qdev->rx_pipe.read_notify = &descs[NOTIFY_0_IDX];
	}

	/* Reset respective indices */
	*qdev->tx_pipe.head = 0;
	*qdev->tx_pipe.read_notify = 0;
	*qdev->rx_pipe.tail = 0;
}

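The *_IDX defines at the top of this file index consecutive __le32 words at the base of the shared buffer, so the control block the two VMs agree on can be pictured as the following struct (an illustrative sketch; the driver indexes descs[] directly and never declares such a type):

struct gunyah_shm_hdr {
	__le32 magic;    /* GUNYAH_MAGIC_IDX: "$IPC" */
	__le32 tail_0;   /* TAIL_0_IDX: FIFO 0 read index */
	__le32 head_0;   /* HEAD_0_IDX: FIFO 0 write index */
	__le32 tail_1;   /* TAIL_1_IDX: FIFO 1 read index */
	__le32 head_1;   /* HEAD_1_IDX: FIFO 1 write index */
	__le32 notify_0; /* NOTIFY_0_IDX: FIFO 0 reader wants a doorbell */
	__le32 notify_1; /* NOTIFY_1_IDX: FIFO 1 reader wants a doorbell */
};

The master transmits on FIFO 0 and receives on FIFO 1, while the SVM does the reverse, so each side's tx indices are the other side's rx indices.
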
static struct device_node *qrtr_gunyah_svm_of_parse(struct qrtr_gunyah_dev *qdev)
{
	const char *compat = "qcom,qrtr-gunyah-gen";
	struct device_node *np = NULL;
	struct device_node *shm_np;
	u32 label;
	int ret;

	while ((np = of_find_compatible_node(np, NULL, compat))) {
		ret = of_property_read_u32(np, "qcom,label", &label);
		if (ret) {
			of_node_put(np);
			continue;
		}
		if (label == qdev->label)
			break;

		of_node_put(np);
	}
	if (!np)
		return NULL;

	shm_np = of_parse_phandle(np, "memory-region", 0);
	if (!shm_np)
		dev_err(qdev->dev, "can't parse svm shared mem node!\n");

	of_node_put(np);
	return shm_np;
}

static int qrtr_gunyah_alloc_fifo(struct qrtr_gunyah_dev *qdev)
{
	struct device *dev = qdev->dev;
	resource_size_t size;

	size = FIFO_1_START + FIFO_SIZE;

	qdev->base = dma_alloc_attrs(dev, size, &qdev->res.start, GFP_KERNEL,
				     DMA_ATTR_FORCE_CONTIGUOUS);
	if (!qdev->base)
		return -ENOMEM;

	qdev->res.end = qdev->res.start + size - 1;
	qdev->size = size;

	return 0;
}

static int qrtr_gunyah_map_memory(struct qrtr_gunyah_dev *qdev)
{
	struct device *dev = qdev->dev;
	struct device_node *np;
	resource_size_t size;
	int ret;

	if (qdev->master) {
		np = of_parse_phandle(dev->of_node, "shared-buffer", 0);
		if (!np)
			return qrtr_gunyah_alloc_fifo(qdev);
	} else {
		np = qrtr_gunyah_svm_of_parse(qdev);
		if (!np) {
			dev_err(dev, "can't parse shared mem node!\n");
			return -EINVAL;
		}
	}

	ret = of_address_to_resource(np, 0, &qdev->res);
	of_node_put(np);
	if (ret) {
		dev_err(dev, "of_address_to_resource failed!\n");
		return -EINVAL;
	}
	size = resource_size(&qdev->res);

	qdev->base = devm_ioremap_resource(dev, &qdev->res);
	if (IS_ERR(qdev->base)) {
		dev_err(dev, "ioremap failed!\n");
		return PTR_ERR(qdev->base);
	}
	qdev->size = size;

	return 0;
}

/**
 * qrtr_gunyah_probe() - Probe a gunyah xprt
 *
 * @pdev: Platform device corresponding to gunyah xprt.
 *
 * @return: 0 on success, standard Linux error codes on error.
 *
 * This function is called when the underlying device tree driver registers
 * a platform device, mapped to a gunyah transport.
 */
static int qrtr_gunyah_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct qrtr_gunyah_dev *qdev;
	enum gh_dbl_label dbl_label;
	int ret;

	qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return -ENOMEM;
	qdev->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, qdev);

	qdev->ring.buf = devm_kzalloc(&pdev->dev, MAX_PKT_SZ, GFP_KERNEL);
	if (!qdev->ring.buf)
		return -ENOMEM;

	mutex_init(&qdev->state_lock);
	qdev->registered = false;
	spin_lock_init(&qdev->dbl_lock);

	ret = of_property_read_u32(node, "gunyah-label", &qdev->label);
	if (ret) {
		dev_err(qdev->dev, "failed to read label info %d\n", ret);
		return ret;
	}
	qdev->master = of_property_read_bool(node, "qcom,master");

	ret = qrtr_gunyah_map_memory(qdev);
	if (ret)
		return ret;

	if (!qdev->master)
		qrtr_gunyah_fifo_init(qdev);
	init_waitqueue_head(&qdev->tx_avail_notify);

	if (qdev->master) {
		ret = of_property_read_u32(node, "peer-name", &qdev->peer_name);
		if (ret)
			qdev->peer_name = GH_SELF_VM;

		qdev->vm_nb.notifier_call = qrtr_gunyah_vm_cb;
		qdev->vm_nb.priority = INT_MAX;
		gh_register_vm_notifier(&qdev->vm_nb);
	}

	dbl_label = qdev->label;
	qdev->tx_dbl = gh_dbl_tx_register(dbl_label);
	if (IS_ERR_OR_NULL(qdev->tx_dbl)) {
		ret = PTR_ERR(qdev->tx_dbl);
		dev_err(qdev->dev, "failed to get gunyah tx dbl %d\n", ret);
		return ret;
	}
	INIT_WORK(&qdev->work, qrtr_gunyah_retry_work);

	qdev->ep.xmit = qrtr_gunyah_send;
	if (!qdev->master) {
		ret = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NET_ID_AUTO,
					     false, NULL);
		if (ret)
			goto register_fail;
	}

	qdev->rx_dbl = gh_dbl_rx_register(dbl_label, qrtr_gunyah_cb, qdev);
	if (IS_ERR_OR_NULL(qdev->rx_dbl)) {
		ret = PTR_ERR(qdev->rx_dbl);
		dev_err(qdev->dev, "failed to get gunyah rx dbl %d\n", ret);
		goto fail_rx_dbl;
	}

	if (!qdev->master && gunyah_rx_avail(&qdev->rx_pipe))
		qrtr_gunyah_read(qdev);

	return 0;

fail_rx_dbl:
	qrtr_endpoint_unregister(&qdev->ep);
register_fail:
	cancel_work_sync(&qdev->work);
	gh_dbl_tx_unregister(qdev->tx_dbl);

	return ret;
}

static int qrtr_gunyah_remove(struct platform_device *pdev)
{
	struct qrtr_gunyah_dev *qdev = dev_get_drvdata(&pdev->dev);
	struct device_node *np;
	gh_vmid_t peer_vmid;
	gh_vmid_t self_vmid;

	cancel_work_sync(&qdev->work);
	gh_dbl_tx_unregister(qdev->tx_dbl);
	gh_dbl_rx_unregister(qdev->rx_dbl);

	if (!qdev->master)
		return 0;
	gh_unregister_vm_notifier(&qdev->vm_nb);

	if (ghd_rm_get_vmid(qdev->peer_name, &peer_vmid))
		return 0;
	if (ghd_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		return 0;
	qrtr_gunyah_unshare_mem(qdev, self_vmid, peer_vmid);

	np = of_parse_phandle(qdev->dev->of_node, "shared-buffer", 0);
	if (np) {
		of_node_put(np);
		return 0;
	}

	dma_free_attrs(qdev->dev, qdev->size, qdev->base, qdev->res.start,
		       DMA_ATTR_FORCE_CONTIGUOUS);

	return 0;
}

static const struct of_device_id qrtr_gunyah_match_table[] = {
	{ .compatible = "qcom,qrtr-gunyah" },
	{}
};
MODULE_DEVICE_TABLE(of, qrtr_gunyah_match_table);

static struct platform_driver qrtr_gunyah_driver = {
	.driver = {
		.name = "qcom_gunyah_qrtr",
		.of_match_table = qrtr_gunyah_match_table,
	},
	.probe = qrtr_gunyah_probe,
	.remove = qrtr_gunyah_remove,
};
module_platform_driver(qrtr_gunyah_driver);

MODULE_DESCRIPTION("QTI IPC-Router Gunyah interface driver");
MODULE_LICENSE("GPL");

net/qrtr/mhi.c
@@ -1,12 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/of.h>
#include <net/sock.h>

#include "qrtr.h"
@@ -15,6 +19,8 @@ struct qrtr_mhi_dev {
	struct qrtr_endpoint ep;
	struct mhi_device *mhi_dev;
	struct device *dev;
	struct completion prepared;
	struct completion ringfull;
};

/* From MHI to QRTR */
@@ -38,14 +44,17 @@ static void qcom_mhi_qrtr_ul_callback(struct mhi_device *mhi_dev,
				      struct mhi_result *mhi_res)
{
	struct sk_buff *skb = mhi_res->buf_addr;
	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);

	if (skb->sk)
		sock_put(skb->sk);
	consume_skb(skb);

	complete_all(&qdev->ringfull);
}

/* Send data over MHI */
static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
static int __qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
	int rc;
@@ -53,13 +62,17 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
	if (skb->sk)
		sock_hold(skb->sk);

	rc = wait_for_completion_interruptible(&qdev->prepared);
	if (rc)
		goto free_skb;

	rc = skb_linearize(skb);
	if (rc)
		goto free_skb;

	rc = mhi_queue_skb(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len,
			   MHI_EOT);
	if (rc)
	if (rc && rc != -EAGAIN)
		goto free_skb;

	return rc;

@@ -72,10 +85,55 @@ free_skb:
	return rc;
}

static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
	int rc;

	do {
		reinit_completion(&qdev->ringfull);
		rc = __qcom_mhi_qrtr_send(ep, skb);
		if (rc == -EAGAIN)
			wait_for_completion(&qdev->ringfull);
	} while (rc == -EAGAIN);

	return rc;
}

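The wrapper turns the ring-full -EAGAIN from mhi_queue_skb() into a blocking retry: qcom_mhi_qrtr_ul_callback() fires complete_all(&qdev->ringfull) whenever a transfer finishes and frees a ring element. The same lost-wakeup-safe pattern in isolation (a hedged sketch with invented names, not driver code):

#include <linux/completion.h>
#include <linux/errno.h>

/* Hypothetical blocking producer over a ring that can return -EAGAIN. */
static int send_blocking(struct completion *slot_freed,
			 int (*try_send)(void *msg), void *msg)
{
	int rc;

	do {
		/* re-arm before trying, so a completion that fires between
		 * try_send() and wait_for_completion() is not lost */
		reinit_completion(slot_freed);
		rc = try_send(msg);
		if (rc == -EAGAIN)
			wait_for_completion(slot_freed);
	} while (rc == -EAGAIN);

	return rc;
}

Reinitializing the completion before each attempt is what makes the ordering safe; re-arming only after a failed attempt could miss a wakeup raced in by the callback.
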
static void qrtr_mhi_of_parse(struct mhi_device *mhi_dev,
			      u32 *net_id, bool *rt)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_node *np = NULL;
	struct pci_dev *pci_device;
	u32 dev_id, nid;
	int rc;

	*net_id = QRTR_EP_NET_ID_AUTO;

	np = of_find_compatible_node(np, NULL, "qcom,qrtr-mhi");
	if (!np)
		return;

	rc = of_property_read_u32(np, "qcom,dev-id", &dev_id);
	if (!rc) {
		pci_device = to_pci_dev(mhi_cntrl->cntrl_dev);
		if (pci_device->device == dev_id) {
			rc = of_property_read_u32(np, "qcom,net-id", &nid);
			if (!rc)
				*net_id = nid;
			*rt = of_property_read_bool(np, "qcom,low-latency");
		}
	}
	of_node_put(np);
}

static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
			       const struct mhi_device_id *id)
{
	struct qrtr_mhi_dev *qdev;
	u32 net_id;
	bool rt;
	int rc;

	qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
@@ -85,9 +143,14 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
	qdev->mhi_dev = mhi_dev;
	qdev->dev = &mhi_dev->dev;
	qdev->ep.xmit = qcom_mhi_qrtr_send;
	init_completion(&qdev->prepared);
	init_completion(&qdev->ringfull);

	dev_set_drvdata(&mhi_dev->dev, qdev);
	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);

	qrtr_mhi_of_parse(mhi_dev, &net_id, &rt);

	rc = qrtr_endpoint_register(&qdev->ep, net_id, rt, NULL);
	if (rc)
		return rc;

@@ -97,6 +160,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
		qrtr_endpoint_unregister(&qdev->ep);
		return rc;
	}
	complete_all(&qdev->prepared);

	dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");

154	net/qrtr/ns.c
@@ -3,11 +3,16 @@
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020, Linaro Ltd.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "qrtr: %s(): " fmt, __func__

#include <linux/ipc_logging.h>
#include <linux/module.h>
#include <linux/qrtr.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include <net/sock.h>

#include "qrtr.h"
@@ -16,14 +21,19 @@
#define CREATE_TRACE_POINTS
#include <trace/events/qrtr.h>

#define NS_LOG_PAGE_CNT 4
static void *ns_ilc;
#define NS_INFO(x, ...) ipc_log_string(ns_ilc, x, ##__VA_ARGS__)

static DEFINE_XARRAY(nodes);

static struct {
	struct socket *sock;
	struct sockaddr_qrtr bcast_sq;
	struct list_head lookups;
	struct workqueue_struct *workqueue;
	struct work_struct work;
	struct kthread_worker kworker;
	struct kthread_work work;
	struct task_struct *task;
	int local_node;
} qrtr_ns;

@@ -78,14 +88,14 @@ static struct qrtr_node *node_get(unsigned int node_id)
		return node;

	/* If node didn't exist, allocate and insert it to the tree */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	node->id = node_id;
	xa_init(&node->servers);

	if (xa_store(&nodes, node_id, node, GFP_KERNEL)) {
	if (xa_store(&nodes, node_id, node, GFP_ATOMIC)) {
		kfree(node);
		return NULL;
	}
@@ -93,6 +103,25 @@ static struct qrtr_node *node_get(unsigned int node_id)
	return node;
}

int qrtr_get_service_id(unsigned int node_id, unsigned int port_id)
{
	struct qrtr_server *srv;
	struct qrtr_node *node;
	unsigned long index;

	node = xa_load(&nodes, node_id);
	if (!node)
		return -EINVAL;

	xa_for_each(&node->servers, index, srv) {
		if (srv->node == node_id && srv->port == port_id)
			return srv->service;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_get_service_id);

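The new export lets other kernel code resolve a (node, port) address back to the service registered there, for example when attributing a wakeup packet. A hedged usage sketch (the caller and IDs are invented):

/* Hypothetical caller: label a packet's source with its service ID. */
static void log_pkt_service(unsigned int node, unsigned int port)
{
	int service = qrtr_get_service_id(node, port);

	if (service < 0)
		pr_debug("no service registered at [0x%x:0x%x]\n", node, port);
	else
		pr_debug("packet from service 0x%x at [0x%x:0x%x]\n",
			 service, node, port);
}
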
static int server_match(const struct qrtr_server *srv,
			const struct qrtr_server_filter *f)
{
@@ -116,6 +145,8 @@ static int service_announce_new(struct sockaddr_qrtr *dest,
	trace_qrtr_ns_service_announce_new(srv->service, srv->instance,
					   srv->node, srv->port);

	NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service,
		srv->instance, srv->node, srv->port);
	iv.iov_base = &pkt;
	iv.iov_len = sizeof(pkt);

@@ -143,6 +174,9 @@ static int service_announce_del(struct sockaddr_qrtr *dest,
	trace_qrtr_ns_service_announce_del(srv->service, srv->instance,
					   srv->node, srv->port);

	NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service,
		srv->instance, srv->node, srv->port);

	iv.iov_base = &pkt;
	iv.iov_len = sizeof(pkt);

@@ -157,8 +191,8 @@ static int service_announce_del(struct sockaddr_qrtr *dest,
	msg.msg_namelen = sizeof(*dest);

	ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
	if (ret < 0)
		pr_err("failed to announce del service\n");
	if (ret < 0 && ret != -ENODEV)
		pr_err_ratelimited("failed to announce del service %d\n", ret);

	return ret;
}
@@ -188,27 +222,34 @@ static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
	msg.msg_namelen = sizeof(*to);

	ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
	if (ret < 0)
		pr_err("failed to send lookup notification\n");
	if (ret < 0 && ret != -ENODEV)
		pr_err_ratelimited("failed to send lookup notification %d\n",
				   ret);
}

static int announce_servers(struct sockaddr_qrtr *sq)
{
	struct qrtr_server *srv;
	struct qrtr_node *node;
	unsigned long node_idx;
	unsigned long index;
	int ret;

	node = node_get(qrtr_ns.local_node);
	if (!node)
		return 0;

	/* Announce the list of servers registered in this node */
	xa_for_each(&node->servers, index, srv) {
		ret = service_announce_new(sq, srv);
		if (ret < 0) {
			pr_err("failed to announce new service\n");
			return ret;
	xa_for_each(&nodes, node_idx, node) {
		if (node->id == sq->sq_node) {
			pr_info("Avoiding duplicate announce for NODE ID %u\n", node->id);
			continue;
		}
		xa_for_each(&node->servers, index, srv) {
			ret = service_announce_new(sq, srv);
			if (ret < 0) {
				if (ret == -ENODEV)
					continue;

				pr_err("failed to announce new service %d\n", ret);
				return ret;
			}
		}
	}
	return 0;
@@ -254,6 +295,9 @@ static struct qrtr_server *server_add(unsigned int service,
	trace_qrtr_ns_server_add(srv->service, srv->instance,
				 srv->node, srv->port);

	NS_INFO("%s: [0x%x:0x%x]@[0x%x:0x%x]\n", __func__, srv->service,
		srv->instance, srv->node, srv->port);

	return srv;

err:
@@ -269,7 +313,7 @@ static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)

	srv = xa_load(&node->servers, port);
	if (!srv)
		return -ENOENT;
		return 0;

	xa_erase(&node->servers, port);

@@ -311,7 +355,7 @@ static int say_hello(struct sockaddr_qrtr *dest)

	ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
	if (ret < 0)
		pr_err("failed to send hello msg\n");
		pr_err("failed to send hello msg %d\n", ret);

	return ret;
}
@@ -369,11 +413,12 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
		msg.msg_namelen = sizeof(sq);

		ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
		if (ret < 0) {
			pr_err("failed to send bye cmd\n");
			return ret;
		}
		if (ret < 0 && ret != -ENODEV)
			pr_err_ratelimited("send bye failed: [0x%x:0x%x] 0x%x ret: %d\n",
					   srv->service, srv->instance,
					   srv->port, ret);
	}

	return 0;
}

@@ -443,11 +488,12 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
		msg.msg_namelen = sizeof(sq);

		ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
		if (ret < 0) {
			pr_err("failed to send del client cmd\n");
			return ret;
		}
		if (ret < 0 && ret != -ENODEV)
			pr_err_ratelimited("del client cmd failed: [0x%x:0x%x] 0x%x %d\n",
					   srv->service, srv->instance,
					   srv->port, ret);
	}

	return 0;
}

@@ -473,7 +519,7 @@ static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
	if (srv->node == qrtr_ns.local_node) {
		ret = service_announce_new(&qrtr_ns.bcast_sq, srv);
		if (ret < 0) {
			pr_err("failed to announce new service\n");
			pr_err("failed to announce new service %d\n", ret);
			return ret;
		}
	}
@@ -582,7 +628,30 @@ static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
	}
}

static void qrtr_ns_worker(struct work_struct *work)
static void ns_log_msg(const struct qrtr_ctrl_pkt *pkt,
		       struct sockaddr_qrtr *sq)
{
	unsigned int cmd = le32_to_cpu(pkt->cmd);

	if (cmd == QRTR_TYPE_HELLO || cmd == QRTR_TYPE_BYE)
		NS_INFO("cmd:0x%x node[0x%x]\n", cmd, sq->sq_node);
	else if (cmd == QRTR_TYPE_DEL_CLIENT)
		NS_INFO("cmd:0x%x addr[0x%x:0x%x]\n", cmd,
			le32_to_cpu(pkt->client.node),
			le32_to_cpu(pkt->client.port));
	else if (cmd == QRTR_TYPE_NEW_SERVER || cmd == QRTR_TYPE_DEL_SERVER)
		NS_INFO("cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n", cmd,
			le32_to_cpu(pkt->server.service),
			le32_to_cpu(pkt->server.instance),
			le32_to_cpu(pkt->server.node),
			le32_to_cpu(pkt->server.port));
	else if (cmd == QRTR_TYPE_NEW_LOOKUP || cmd == QRTR_TYPE_DEL_LOOKUP)
		NS_INFO("cmd:0x%x SVC[0x%x:0x%x]\n", cmd,
			le32_to_cpu(pkt->server.service),
			le32_to_cpu(pkt->server.instance));
}

static void qrtr_ns_worker(struct kthread_work *work)
{
	const struct qrtr_ctrl_pkt *pkt;
	size_t recv_buf_size = 4096;
@@ -623,6 +692,8 @@ static void qrtr_ns_worker(struct work_struct *work)
		trace_qrtr_ns_message(qrtr_ctrl_pkt_strings[cmd],
				      sq.sq_node, sq.sq_port);

		ns_log_msg(pkt, &sq);

		ret = 0;
		switch (cmd) {
		case QRTR_TYPE_HELLO:
@@ -678,16 +749,20 @@ static void qrtr_ns_data_ready(struct sock *sk)
{
	trace_sk_data_ready(sk);

	queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
	kthread_queue_work(&qrtr_ns.kworker, &qrtr_ns.work);
}

int qrtr_ns_init(void)
{
	struct sockaddr_qrtr sq;
	int rx_buf_sz = INT_MAX;
	int ret;

	INIT_LIST_HEAD(&qrtr_ns.lookups);
	INIT_WORK(&qrtr_ns.work, qrtr_ns_worker);
	kthread_init_worker(&qrtr_ns.kworker);
	kthread_init_work(&qrtr_ns.work, qrtr_ns_worker);

	ns_ilc = ipc_log_context_create(NS_LOG_PAGE_CNT, "qrtr_ns", 0);

	ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
			       PF_QIPCRTR, &qrtr_ns.sock);
@@ -700,9 +775,11 @@ int qrtr_ns_init(void)
		goto err_sock;
	}

	qrtr_ns.workqueue = alloc_ordered_workqueue("qrtr_ns_handler", 0);
	if (!qrtr_ns.workqueue) {
		ret = -ENOMEM;
	qrtr_ns.task = kthread_run(kthread_worker_fn, &qrtr_ns.kworker,
				   "qrtr_ns");
	if (IS_ERR(qrtr_ns.task)) {
		pr_err("failed to spawn worker thread %ld\n",
		       PTR_ERR(qrtr_ns.task));
		goto err_sock;
	}

|
||||
goto err_wq;
|
||||
}
|
||||
|
||||
sock_setsockopt(qrtr_ns.sock, SOL_SOCKET, SO_RCVBUF,
|
||||
KERNEL_SOCKPTR((void *)&rx_buf_sz), sizeof(rx_buf_sz));
|
||||
|
||||
qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
|
||||
qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
|
||||
qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;
|
||||
@@ -728,7 +808,7 @@ int qrtr_ns_init(void)
|
||||
return 0;
|
||||
|
||||
err_wq:
|
||||
destroy_workqueue(qrtr_ns.workqueue);
|
||||
kthread_stop(qrtr_ns.task);
|
||||
err_sock:
|
||||
sock_release(qrtr_ns.sock);
|
||||
return ret;
|
||||
@@ -737,8 +817,8 @@ EXPORT_SYMBOL_GPL(qrtr_ns_init);

void qrtr_ns_remove(void)
{
	cancel_work_sync(&qrtr_ns.work);
	destroy_workqueue(qrtr_ns.workqueue);
	kthread_flush_worker(&qrtr_ns.kworker);
	kthread_stop(qrtr_ns.task);
	sock_release(qrtr_ns.sock);
}
EXPORT_SYMBOL_GPL(qrtr_ns_remove);

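The name service moves from an ordered workqueue onto a dedicated kthread_worker, giving its handler a private thread instead of a slot on the shared kworker pool (and with it, the option of per-thread scheduling control). The same migration pattern in isolation (a sketch with invented names, assuming nothing beyond the standard kthread API):

#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker example_kworker;
static struct kthread_work example_work;
static struct task_struct *example_task;

static void example_handler(struct kthread_work *work)
{
	/* drain pending control messages here, as qrtr_ns_worker() does */
}

static int example_start(void)
{
	kthread_init_worker(&example_kworker);
	kthread_init_work(&example_work, example_handler);

	example_task = kthread_run(kthread_worker_fn, &example_kworker,
				   "example_ns");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);

	/* queued from a data-ready callback in place of queue_work() */
	kthread_queue_work(&example_kworker, &example_work);
	return 0;
}
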
net/qrtr/qrtr.h
@@ -8,10 +8,14 @@ struct sk_buff;

/* endpoint node id auto assignment */
#define QRTR_EP_NID_AUTO (-1)
#define QRTR_EP_NET_ID_AUTO (1)

#define QRTR_DEL_PROC_MAGIC 0xe111

/**
 * struct qrtr_endpoint - endpoint handle
 * @xmit: Callback for outgoing packets
 * @in_thread: To indicate if the data callback runs in thread context
 *
 * The socket buffer passed to the xmit function becomes owned by the endpoint
 * driver. As such, when the driver is done with the buffer, it should
@@ -21,9 +25,23 @@ struct qrtr_endpoint {
	int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb);
	/* private: not for endpoint use */
	struct qrtr_node *node;
	bool in_thread;
};

int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid);
/**
 * struct qrtr_array - array with size
 * @arr: elements in the array
 * @size: number of elements
 *
 * An array with its size provided.
 */
struct qrtr_array {
	u32 *arr;
	size_t size;
};

int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
			   bool rt, struct qrtr_array *no_wake);

void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);

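With the widened signature, an endpoint driver now passes a net ID, a realtime flag, and an optional qrtr_array of service IDs whose traffic should not be treated as a wakeup source. A hedged usage sketch (the service IDs are invented):

/* Hypothetical endpoint setup against the extended registration API. */
static u32 example_no_wake_svcs[] = { 0x14, 0x2f };
static struct qrtr_array example_no_wake = {
	.arr = example_no_wake_svcs,
	.size = ARRAY_SIZE(example_no_wake_svcs),
};

static int example_register(struct qrtr_endpoint *ep)
{
	/* auto net id, no realtime boost, skip wakeups for listed services */
	return qrtr_endpoint_register(ep, QRTR_EP_NET_ID_AUTO, false,
				      &example_no_wake);
}
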
@@ -33,4 +51,9 @@ int qrtr_ns_init(void);

void qrtr_ns_remove(void);

int qrtr_peek_pkt_size(const void *data);

int qrtr_get_service_id(unsigned int node_id, unsigned int port_id);

void qrtr_print_wakeup_reason(const void *data);
#endif

net/qrtr/smd.c
@@ -2,11 +2,14 @@
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/of.h>

#include "qrtr.h"

@@ -21,18 +24,27 @@ static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev,
				  void *data, int len, void *priv, u32 addr)
{
	struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
	unsigned char *bytedata = (unsigned char *)data;
	int rc;

	if (!qdev)
	if (!qdev) {
		pr_err_ratelimited("%s: Not ready\n", __func__);
		return -EAGAIN;
	}

	rc = qrtr_endpoint_post(&qdev->ep, data, len);
	if (rc == -EINVAL) {
		dev_err(qdev->dev, "invalid ipcrouter packet\n");
		dev_err(qdev->dev, "invalid ipcrouter packet, len=%d\n", len);
		if (len > 0)
			print_hex_dump(KERN_INFO, "invalid packet : ", DUMP_PREFIX_ADDRESS, 16, 1,
				       bytedata, len, false);
		/* return 0 to let smd drop the packet */
		rc = 0;
	}

	if (qcom_glink_is_wakeup(true))
		qrtr_print_wakeup_reason(data);

	return rc;
}
|
||||
@@ -58,7 +70,11 @@ out:
|
||||
|
||||
static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
|
||||
{
|
||||
struct qrtr_array svc_arr = {NULL, 0};
|
||||
struct qrtr_smd_dev *qdev;
|
||||
u32 net_id;
|
||||
int size;
|
||||
bool rt;
|
||||
int rc;
|
||||
|
||||
qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
|
||||
@@ -69,13 +85,36 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
	qdev->dev = &rpdev->dev;
	qdev->ep.xmit = qcom_smd_qrtr_send;

	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
	if (rc)
	/* data callback runs in threaded context */
	qdev->ep.in_thread = true;

	rc = of_property_read_u32(rpdev->dev.of_node, "qcom,net-id", &net_id);
	if (rc < 0)
		net_id = QRTR_EP_NET_ID_AUTO;

	rt = of_property_read_bool(rpdev->dev.of_node, "qcom,low-latency");

	size = of_property_count_u32_elems(rpdev->dev.of_node, "qcom,no-wake-svc");
	if (size > 0) {
		svc_arr.size = size;
		svc_arr.arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
		if (!svc_arr.arr)
			return -ENOMEM;

		of_property_read_u32_array(rpdev->dev.of_node, "qcom,no-wake-svc",
					   svc_arr.arr, size);
	}

	rc = qrtr_endpoint_register(&qdev->ep, net_id, rt, &svc_arr);
	kfree(svc_arr.arr);
	if (rc) {
		dev_err(qdev->dev, "endpoint register failed: %d\n", rc);
		return rc;
	}

	dev_set_drvdata(&rpdev->dev, qdev);

	dev_dbg(&rpdev->dev, "Qualcomm SMD QRTR driver probed\n");
	pr_debug("SMD QRTR driver probed\n");

	return 0;
}

net/qrtr/tun.c
@@ -44,7 +44,7 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp)

	filp->private_data = tun;

	ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
	ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NET_ID_AUTO, 0, NULL);
	if (ret)
		goto out;
