Add samsung specific changes

This commit is contained in:
2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions

View File

@@ -119,11 +119,6 @@
#define SO_DETACH_REUSEPORT_BPF 68
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA {
#define SO_SET_DOMAIN_NAME 1000
#define SO_SET_DNS_UID 1001
#define SO_SET_DNS_PID 1002
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_NPA }
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70

View File

@@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_ESOC_CTRL_H_
#define _UAPI_ESOC_CTRL_H_
#include <linux/types.h>
/* Client hook identifiers for external-SoC (esoc) link clients. */
enum esoc_client_hook_prio {
ESOC_MHI_HOOK, /* MHI transport client hook */
ESOC_MAX_HOOKS /* number of hook slots; not a valid hook id */
};
/*
 * Argument for the ESOC_GET_LINK_ID ioctl (_IOWR, defined below):
 * @prio selects the client hook, @link_id carries the link identifier.
 */
struct esoc_link_data {
enum esoc_client_hook_prio prio; /* which client hook this query refers to */
__u64 link_id; /* link identifier exchanged with the driver */
};
#define ESOC_CODE 0xCC
#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, unsigned int)
#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
#define ESOC_GET_ERR_FATAL _IOR(ESOC_CODE, 5, unsigned int)
#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
#define ESOC_GET_LINK_ID _IOWR(ESOC_CODE, 9, struct esoc_link_data)
#define ESOC_SET_BOOT_FAIL_ACT _IOW(ESOC_CODE, 10, unsigned int)
#define ESOC_SET_N_PON_TRIES _IOW(ESOC_CODE, 11, unsigned int)
/*
 * Feature-presence guards: each macro expands to itself so userspace can
 * use #ifdef to detect at compile time that the corresponding enum value
 * exists in this header version.
 */
#define ESOC_REQ_SEND_SHUTDOWN ESOC_REQ_SEND_SHUTDOWN
#define ESOC_REQ_CRASH_SHUTDOWN ESOC_REQ_CRASH_SHUTDOWN
#define ESOC_PON_RETRY ESOC_PON_RETRY
/*
 * NOTE(review): unlike the guards above, this one expands to nothing
 * instead of to itself. Harmless for plain #ifdef checks, but the
 * asymmetry looks accidental — confirm it is intentional.
 */
#define ESOC_BOOT_FAIL_ACTION
/*
 * Recovery policy applied when the external SoC fails to boot; selected
 * from userspace via the ESOC_SET_BOOT_FAIL_ACT ioctl above. Entries are
 * named after the recovery step they request.
 */
enum esoc_boot_fail_action {
BOOT_FAIL_ACTION_RETRY,
BOOT_FAIL_ACTION_COLD_RESET,
BOOT_FAIL_ACTION_SHUTDOWN,
BOOT_FAIL_ACTION_PANIC,
BOOT_FAIL_ACTION_NOP,
BOOT_FAIL_ACTION_S3_RESET, /* platform-specific S3 reset — confirm exact semantics */
BOOT_FAIL_ACTION_LAST, /* sentinel: first invalid action value */
};
/*
 * Event/state codes for the external SoC, numbered from 0x1. Presumably
 * reported to userspace through the status/wait ioctls above
 * (ESOC_GET_STATUS, ESOC_WAIT_FOR_CRASH) — confirm against the driver.
 */
enum esoc_evt {
ESOC_RUN_STATE = 0x1,
ESOC_UNEXPECTED_RESET,
ESOC_ERR_FATAL,
ESOC_IN_DEBUG,
ESOC_REQ_ENG_ON,
ESOC_REQ_ENG_OFF,
ESOC_CMD_ENG_ON,
ESOC_CMD_ENG_OFF,
ESOC_INVALID_STATE,
ESOC_RETRY_PON_EVT,
ESOC_BOOT_STATE,
};
/*
 * Commands issued to the esoc driver, numbered from 1. Presumably passed
 * as the unsigned int argument of ESOC_CMD_EXE — confirm against driver.
 */
enum esoc_cmd {
ESOC_PWR_ON = 1,
ESOC_PWR_OFF,
ESOC_FORCE_PWR_OFF,
ESOC_RESET,
ESOC_PREPARE_DEBUG,
ESOC_EXE_DEBUG,
ESOC_EXIT_DEBUG,
};
/*
 * Notification codes, numbered from 1. Presumably passed as the unsigned
 * int argument of the ESOC_NOTIFY ioctl — confirm against the driver.
 */
enum esoc_notify {
ESOC_IMG_XFER_DONE = 1,
ESOC_BOOT_DONE,
ESOC_BOOT_FAIL,
ESOC_IMG_XFER_RETRY,
ESOC_IMG_XFER_FAIL,
ESOC_UPGRADE_AVAILABLE,
ESOC_DEBUG_DONE,
ESOC_DEBUG_FAIL,
ESOC_PRIMARY_CRASH,
ESOC_PRIMARY_REBOOT,
ESOC_PON_RETRY,
};
/*
 * Requests delivered to userspace, numbered from 1. Presumably returned
 * through the ESOC_WAIT_FOR_REQ ioctl — confirm against the driver.
 */
enum esoc_req {
ESOC_REQ_IMG = 1,
ESOC_REQ_DEBUG,
ESOC_REQ_SHUTDOWN,
ESOC_REQ_SEND_SHUTDOWN,
ESOC_REQ_CRASH_SHUTDOWN,
};
#endif

View File

@@ -45,8 +45,6 @@
#define F2FS_IOC_START_ATOMIC_REPLACE _IO(F2FS_IOCTL_MAGIC, 25)
#define F2FS_IOC_GET_VALID_NODE_COUNT _IOR(F2FS_IOCTL_MAGIC, 32, __u32)
#define F2FS_IOC_STAT_COMPRESS_FILE _IOWR(F2FS_IOCTL_MAGIC, 33, \
struct f2fs_sec_stat_compfile)
#define F2FS_IOC_SET_RELIABLE_WRITE _IO(F2FS_IOCTL_MAGIC, 34)
/*
@@ -101,20 +99,4 @@ struct f2fs_comp_option {
__u8 log_cluster_size;
};
struct f2fs_sec_stat_compfile {
union {
struct {
__u32 in_init:1;
__u32 in_scan:1;
__u32 in_commit:1;
__u32 in_reserved:13;
__u32 out_compressed:1;
__u32 out_reserved:15;
};
__u32 flags;
};
__u64 st_blocks;
__u64 st_compressed_blocks;
};
#endif /* _UAPI_LINUX_F2FS_H */

View File

@@ -216,12 +216,6 @@ struct fsxattr {
#define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX])
#define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX])
/** KNOX DDAR */
#define FS_IOC_GET_DD_POLICY _IO('P', 0x00)
#define FS_IOC_SET_DD_POLICY _IO('P', 0x01)
#define FS_IOC_GET_DD_INODE_COUNT _IOR('P', 0x02, long)
#define FS_IOC_HAS_DD_POLICY _IO('P', 0x03) /* KNOX_SUPPORT_DAR_DUAL_DO */
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*

View File

@@ -0,0 +1,118 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_VIRTIO_BACKEND_H
#define _UAPI_LINUX_VIRTIO_BACKEND_H
#include <linux/virtio_types.h>
#define VIRTIO_BE_IOC_MAGIC 0xBC
#define VBE_ASSIGN_IOEVENTFD 1
#define VBE_DEASSIGN_IOEVENTFD 2
#define VBE_ASSIGN_IRQFD 1
#define VBE_DEASSIGN_IRQFD 2
#define EVENT_NEW_BUFFER 1
#define EVENT_RESET_RQST 2
#define EVENT_INTERRUPT_ACK 4
#define EVENT_DRIVER_OK 8
#define EVENT_DRIVER_FAILED 0x10
#define EVENT_MODULE_EXIT 0x20
#define EVENT_VM_EXIT 0x40
#define EVENT_APP_EXIT 0x100
/*
* IOCTLs supported by virtio backend driver
*/
#define GH_GET_SHARED_MEMORY_SIZE _IOR(VIRTIO_BE_IOC_MAGIC, 1, __u64)
#define GH_IOEVENTFD _IOW(VIRTIO_BE_IOC_MAGIC, 2, \
struct virtio_eventfd)
#define GH_IRQFD _IOW(VIRTIO_BE_IOC_MAGIC, 3, \
struct virtio_irqfd)
#define GH_WAIT_FOR_EVENT _IOWR(VIRTIO_BE_IOC_MAGIC, 4, \
struct virtio_event)
#define GH_SET_DEVICE_FEATURES _IOW(VIRTIO_BE_IOC_MAGIC, 5, \
struct virtio_dev_features)
#define GH_SET_QUEUE_NUM_MAX _IOW(VIRTIO_BE_IOC_MAGIC, 6, \
struct virtio_queue_max)
#define GH_SET_DEVICE_CONFIG_DATA _IOW(VIRTIO_BE_IOC_MAGIC, 7, \
struct virtio_config_data)
#define GH_GET_DRIVER_CONFIG_DATA _IOWR(VIRTIO_BE_IOC_MAGIC, 8, \
struct virtio_config_data)
#define GH_GET_QUEUE_INFO _IOWR(VIRTIO_BE_IOC_MAGIC, 9, \
struct virtio_queue_info)
#define GH_GET_DRIVER_FEATURES _IOWR(VIRTIO_BE_IOC_MAGIC, 10, \
struct virtio_driver_features)
#define GH_ACK_DRIVER_OK _IOWR(VIRTIO_BE_IOC_MAGIC, 11, __u32)
#define GH_SET_APP_READY _IO(VIRTIO_BE_IOC_MAGIC, 12)
#define GH_ACK_RESET _IOW(VIRTIO_BE_IOC_MAGIC, 13, struct virtio_ack_reset)
/* Argument for GH_ACK_RESET: acknowledge a device reset for @label. */
struct virtio_ack_reset {
__u32 label; /* virtio device label */
__u32 reserved; /* padding/reserved */
};
/* Argument for GH_GET_DRIVER_FEATURES: read driver feature bits. */
struct virtio_driver_features {
__u32 label; /* virtio device label */
__u32 reserved; /* padding/reserved */
__u32 features_sel; /* selects which 32-bit feature word (virtio convention) */
__u32 features; /* feature bits of the selected word */
};
/* Argument for GH_GET_QUEUE_INFO: per-virtqueue configuration. */
struct virtio_queue_info {
__u32 label; /* virtio device label */
__u32 queue_sel; /* virtqueue selector */
__u32 queue_num; /* queue size */
__u32 queue_ready; /* queue ready flag */
__u64 queue_desc; /* descriptor area address — assumed guest-physical; confirm */
__u64 queue_driver; /* driver (available) area address */
__u64 queue_device; /* device (used) area address */
};
/* Argument for GH_SET_DEVICE_CONFIG_DATA / GH_GET_DRIVER_CONFIG_DATA. */
struct virtio_config_data {
__u32 label; /* virtio device label */
__u32 config_size; /* size in bytes of the blob at @config_data */
__u64 config_data; /* presumably a userspace pointer to the config blob — confirm */
};
/* Argument for GH_SET_DEVICE_FEATURES: write device feature bits. */
struct virtio_dev_features {
__u32 label; /* virtio device label */
__u32 reserved; /* padding/reserved */
__u32 features_sel; /* selects which 32-bit feature word (virtio convention) */
__u32 features; /* feature bits of the selected word */
};
/* Argument for GH_SET_QUEUE_NUM_MAX: max size of the selected virtqueue. */
struct virtio_queue_max {
__u32 label; /* virtio device label */
__u32 reserved; /* padding/reserved */
__u32 queue_sel; /* virtqueue selector */
__u32 queue_num_max; /* maximum queue size supported */
};
/* Argument for GH_WAIT_FOR_EVENT: filled in with the event that occurred. */
struct virtio_event {
__u32 label; /* virtio device label */
__u32 event; /* EVENT_* value(s) describing what happened */
__u32 event_data; /* event-specific payload */
__u32 reserved; /* padding/reserved */
};
/* Argument for GH_IOEVENTFD: (de)register an eventfd for a virtqueue. */
struct virtio_eventfd {
__u32 label; /* virtio device label */
__u32 flags; /* VBE_ASSIGN_IOEVENTFD or VBE_DEASSIGN_IOEVENTFD */
__u32 queue_num; /* virtqueue this eventfd is tied to */
__s32 fd; /* eventfd file descriptor */
};
/* Argument for GH_IRQFD: (de)register an interrupt eventfd. */
struct virtio_irqfd {
__u32 label; /* virtio device label */
__u32 flags; /* VBE_ASSIGN_IRQFD or VBE_DEASSIGN_IRQFD */
__s32 fd; /* eventfd file descriptor */
__u32 reserved; /* padding/reserved */
};
#endif /* _UAPI_LINUX_VIRTIO_BACKEND_H */

View File

@@ -0,0 +1,372 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_GUNYAH
#define _UAPI_LINUX_GUNYAH
/*
* Userspace interface for /dev/gunyah - gunyah based virtual machine
*
* Note: this interface is considered experimental and may change without
* notice.
*/
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/virtio_types.h>
#define GH_IOCTL_TYPE 0xB2
/*
* fw_name is used to find the secure VM image by name to be loaded.
*/
#define GH_VM_FW_NAME_MAX 16
/** @struct gh_fw_name
 * A structure to be passed to GH_VM_SET_FW_NAME ioctl
* @name - name of the secure VM image
*/
struct gh_fw_name {
char name[GH_VM_FW_NAME_MAX];
};
/** @struct gh_userspace_memory_region
* A structure to be passed to GH_VM_SET_USER_MEM_REGION ioctl
* @memory_size - size of userspace memory address
* @userspace_addr - the userspace memory address
* @fw_name - name of the secure VM image
*/
struct gh_userspace_memory_region {
__u64 memory_size;
__u64 userspace_addr;
struct gh_fw_name fw_name;
};
#define VBE_ASSIGN_IOEVENTFD 1
#define VBE_DEASSIGN_IOEVENTFD 2
#define VBE_ASSIGN_IRQFD 1
#define VBE_DEASSIGN_IRQFD 2
#define EVENT_NEW_BUFFER 1
#define EVENT_RESET_RQST 2
#define EVENT_INTERRUPT_ACK 4
#define EVENT_DRIVER_OK 8
#define EVENT_DRIVER_FAILED 0x10
#define EVENT_MODULE_EXIT 0x20
#define EVENT_VM_EXIT 0x40
#define EVENT_APP_EXIT 0x100
/*
* gh_vm_exit_reasons specifies the various reasons why
* the secondary VM ended its execution. VCPU_RUN returns these values
* to userspace.
*/
#define GH_VM_EXIT_REASON_UNKNOWN 0
#define GH_VM_EXIT_REASON_SHUTDOWN 1
#define GH_VM_EXIT_REASON_RESTART 2
#define GH_VM_EXIT_REASON_PANIC 3
#define GH_VM_EXIT_REASON_NSWD 4
#define GH_VM_EXIT_REASON_HYP_ERROR 5
#define GH_VM_EXIT_REASON_ASYNC_EXT_ABORT 6
#define GH_VM_EXIT_REASON_FORCE_STOPPED 7
#define GH_VM_EXIT_REASONS_MAX 8
/*
* ioctls for /dev/gunyah fds:
*/
/**
 * GH_CREATE_VM - Driver creates a VM specific structure. An anon file is
 * also created per VM. This would be the first IOCTL made
 * on /dev/gunyah node to obtain a per VM fd for further
 * VM specific operations like VCPU creation, memory etc.
 *
 * Return: an fd for the per VM file created, -errno on failure
 */
#define GH_CREATE_VM _IO(GH_IOCTL_TYPE, 0x01)
/*
* ioctls for VM fd.
*/
/**
 * GH_CREATE_VCPU - Driver creates a VCPU specific structure. It takes
* vcpu id as the input. This also creates an anon file
* per vcpu which is used for further vcpu specific
* operations.
*
* Return: an fd for the per VCPU file created, -errno on failure
*/
#define GH_CREATE_VCPU _IO(GH_IOCTL_TYPE, 0x40)
/*
* ioctls for VM properties
*/
/**
* GH_VM_SET_FW_NAME - Userspace will specify the name of the firmware
* image that needs to be loaded into VM's memory
* after authentication. The loaded VM memory details
* are forwarded to Gunyah Hypervisor underneath.
*
* Input: gh_fw_name structure with Secure VM name as name attribute of
* the struct.
* Return: 0 if success, -errno on failure
*/
#define GH_VM_SET_FW_NAME _IOW(GH_IOCTL_TYPE, 0x41, struct gh_fw_name)
/**
* GH_VM_GET_FW_NAME - Userspace can use this IOCTL to query the name of
* the secure VM image that was loaded.
*
* Input: gh_fw_name structure to be filled with Secure VM name as the
* name attribute of the struct.
* Return: 0 if success and firmware name in struct fw_name that
* represents the firmware image name currently associated with
* the VM if a call to GH_VM_SET_FW_NAME ioctl previously was
* successful, -errno on failure
*/
#define GH_VM_GET_FW_NAME _IOR(GH_IOCTL_TYPE, 0x42, struct gh_fw_name)
/**
* GH_VM_GET_VCPU_COUNT - Userspace can use this IOCTL to query the number
* of vcpus that are supported for the VM. Userspace
* can further use this count to create VCPUs.
*
* Return: nr_vcpus for proxy scheduled VMs, 1 for hypervisor scheduled VMs,
* -errno on failure
*/
#define GH_VM_GET_VCPU_COUNT _IO(GH_IOCTL_TYPE, 0x43)
/**
* GH_VM_GET_FW_RESV_MEM_SIZE - Userspace can use this IOCTL to query the CMA or
* carve out memory size of the VM.
*
* Input: gh_fw_name structure to be filled with Secure VM name as the
* name attribute of the struct.
*
* Return: 0 if success with memory size as u64 in the third argument,
* -errno on failure
*/
#define GH_VM_GET_FW_RESV_MEM_SIZE _IOW(GH_IOCTL_TYPE, 0x44, struct gh_fw_name)
/**
* GH_VM_SET_FW_USER_MEM_REGION - Userspace will specify the userspace memory
* address and size that will to be appended to VM system memory
* The loaded VM memory details are forwarded to Gunyah Hypervisor
* underneath.
*
 * Input: gh_userspace_memory_region structure for userspace memory description
* Return: 0 if success, -errno on failure
*/
#define GH_VM_SET_FW_USER_MEM_REGION _IOW(GH_IOCTL_TYPE, 0x45, \
struct gh_userspace_memory_region)
/*
* IOCTLs supported by virtio backend driver
*/
/**
* GH_GET_SHARED_MEMORY_SIZE - Userspace can use this IOCTL to query the virtio
 * shared memory size of the VM. Userspace can use
* it for mmap.
*
* Input: 64 bit unsigned integer variable to be filled with shared memory size.
*
* Return: 0 if success with shared memory size as u64 in the third argument,
* -errno on failure
*/
#define GH_GET_SHARED_MEMORY_SIZE _IOR(GH_IOCTL_TYPE, 0x61, __u64)
/**
* GH_IOEVENTFD - Eventfd created in userspace is passed to kernel using this
* ioctl. Userspace is signalled by virtio backend driver through
* this fd when data is available in the ring.
*
* Input: virtio_eventfd structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_IOEVENTFD _IOW(GH_IOCTL_TYPE, 0x62, \
struct virtio_eventfd)
/**
* GH_IRQFD - Eventfd created in userspace is passed to kernel using this ioctl.
 * Virtio backend driver is signalled by userspace using this fd when
* the ring is serviced.
*
* Input: virtio_irqfd structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_IRQFD _IOW(GH_IOCTL_TYPE, 0x63, \
struct virtio_irqfd)
/**
* GH_WAIT_FOR_EVENT - Userspace waits for events from the virtio backend driver
* for indefinite time. For example when hypervisor detects
* a DRIVER_OK event, it is passed to userspace using this
* ioctl.
*
* Input: virtio_event structure with required attributes.
*
* Return: 0 if success, with the event data in struct virtio_event
* -errno on failure
*/
#define GH_WAIT_FOR_EVENT _IOWR(GH_IOCTL_TYPE, 0x64, \
struct virtio_event)
/**
* GH_SET_DEVICE_FEATURES - This ioctl writes virtio device features supported
* by the userspace to a page that is shared with
* guest VM.
*
* Input: virtio_dev_features structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_SET_DEVICE_FEATURES _IOW(GH_IOCTL_TYPE, 0x65, \
struct virtio_dev_features)
/**
* GH_SET_QUEUE_NUM_MAX - This ioctl writes max virtio queue size supported by
* the userspace to a page that is shared with guest VM.
*
* Input: virtio_queue_max structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_SET_QUEUE_NUM_MAX _IOW(GH_IOCTL_TYPE, 0x66, \
struct virtio_queue_max)
/**
* GH_SET_DEVICE_CONFIG_DATA - This ioctl writes device configuration data
* to a page that is shared with guest VM.
*
* Input: virtio_config_data structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_SET_DEVICE_CONFIG_DATA _IOW(GH_IOCTL_TYPE, 0x67, \
struct virtio_config_data)
/**
* GH_GET_DRIVER_CONFIG_DATA - This ioctl reads the driver supported virtio
* device configuration data from a page that is
* shared with guest VM.
*
* Input: virtio_config_data structure with required attributes.
*
* Return: 0 if success with driver config data in struct virtio_config_data,
* -errno on failure
*/
#define GH_GET_DRIVER_CONFIG_DATA _IOWR(GH_IOCTL_TYPE, 0x68, \
struct virtio_config_data)
/**
* GH_GET_QUEUE_INFO - This ioctl reads the driver supported virtqueue info from
* a page that is shared with guest VM.
*
* Input: virtio_queue_info structure with required attributes.
*
* Return: 0 if success with virtqueue info in struct virtio_queue_info,
* -errno on failure
*/
#define GH_GET_QUEUE_INFO _IOWR(GH_IOCTL_TYPE, 0x69, \
struct virtio_queue_info)
/**
* GH_GET_DRIVER_FEATURES - This ioctl reads the driver supported features from
* a page that is shared with guest VM.
*
* Input: virtio_driver_features structure with required attributes.
*
* Return: 0 if success with driver features in struct virtio_driver_features,
* -errno on failure
*/
#define GH_GET_DRIVER_FEATURES _IOWR(GH_IOCTL_TYPE, 0x6a, \
struct virtio_driver_features)
/**
* GH_ACK_DRIVER_OK - This ioctl acknowledges the DRIVER_OK event from virtio
* backend driver.
*
* Input: 32 bit unsigned integer virtio device label.
*
* Return: 0 if success, -errno on failure
*/
#define GH_ACK_DRIVER_OK _IOWR(GH_IOCTL_TYPE, 0x6b, __u32)
/**
* GH_ACK_RESET - This ioctl acknowledges the RESET event from virtio
* backend driver.
*
* Input: virtio_ack_reset structure with required attributes.
*
* Return: 0 if success, -errno on failure
*/
#define GH_ACK_RESET _IOW(GH_IOCTL_TYPE, 0x6d, struct virtio_ack_reset)
/*
* ioctls for vcpu fd.
*/
/**
* GH_VCPU_RUN - This command is used to run the vcpus created. VCPU_RUN
* is called on vcpu fd created previously. VCPUs are
* started individually if proxy scheduling is chosen as the
* scheduling policy and vcpus are started simultaneously
* in case of VMs whose scheduling is controlled by the
* hypervisor. In the latter case, VCPU_RUN is blocked
* until the VM terminates.
*
* Return: Reason for vm termination, -errno on failure
*/
#define GH_VCPU_RUN _IO(GH_IOCTL_TYPE, 0x80)
/*
 * NOTE(review): this struct (and the eight that follow it) duplicates the
 * definition in include/uapi/linux/virtio_backend.h added in this same
 * change. Including both headers in one translation unit will fail with a
 * struct redefinition error — confirm whether the headers can ever be
 * co-included, or move the shared structs to a common header.
 */
struct virtio_ack_reset {
__u32 label;
__u32 reserved;
};
struct virtio_driver_features {
__u32 label;
__u32 reserved;
__u32 features_sel;
__u32 features;
};
struct virtio_queue_info {
__u32 label;
__u32 queue_sel;
__u32 queue_num;
__u32 queue_ready;
__u64 queue_desc;
__u64 queue_driver;
__u64 queue_device;
};
struct virtio_config_data {
__u32 label;
__u32 config_size;
__u64 config_data;
};
struct virtio_dev_features {
__u32 label;
__u32 reserved;
__u32 features_sel;
__u32 features;
};
struct virtio_queue_max {
__u32 label;
__u32 reserved;
__u32 queue_sel;
__u32 queue_num_max;
};
struct virtio_event {
__u32 label;
__u32 event;
__u32 event_data;
__u32 reserved;
};
struct virtio_eventfd {
__u32 label;
__u32 flags;
__u32 queue_num;
__s32 fd;
};
struct virtio_irqfd {
__u32 label;
__u32 flags;
__s32 fd;
__u32 reserved;
};
#endif /* _UAPI_LINUX_GUNYAH */

View File

@@ -93,15 +93,6 @@
#define TUN_F_USO4 0x20 /* I can handle USO for IPv4 packets */
#define TUN_F_USO6 0x40 /* I can handle USO for IPv6 packets */
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN {
#define TUN_META_HDR 0x0020
#define TUNGETMETAPARAM _IOR('T', 218, int)
#define IFF_META_HDR 0x0004
#define TUN_GET_META_HDR_SZ 0
#define TUN_GET_META_MARK_OFFSET 1
#define DEFAULT_IHL 5
// SEC_PRODUCT_FEATURE_KNOX_SUPPORT_VPN }
/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */
#define TUN_PKT_STRIP 0x0001
struct tun_pi {

View File

@@ -338,8 +338,6 @@
#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
#define KEY_RECENT 254
/* Code 255 is reserved for special needs of AT keyboard driver */
#define BTN_MISC 0x100
@@ -399,7 +397,6 @@
#define BTN_MODE 0x13c
#define BTN_THUMBL 0x13d
#define BTN_THUMBR 0x13e
#define BTN_GAME 0x13f
#define BTN_DIGI 0x140
#define BTN_TOOL_PEN 0x140
@@ -807,11 +804,6 @@
#define BTN_TRIGGER_HAPPY39 0x2e6
#define BTN_TRIGGER_HAPPY40 0x2e7
#define KEY_DEX_ON 0x2bd
#define BTN_HOTKEY_APP1 0x2f5
#define BTN_HOTKEY_APP2 0x2f6
#define BTN_HOTKEY_APP3 0x2f7
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
#define KEY_MAX 0x2ff

150
include/uapi/linux/ion.h Normal file
View File

@@ -0,0 +1,150 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
/**
* ion_heap_types - list of all possible types of heaps that Android can use
*
* @ION_HEAP_TYPE_SYSTEM: Reserved heap id for ion heap that allocates
* memory using alloc_page(). Also, supports
* deferred free and allocation pools.
* @ION_HEAP_TYPE_DMA: Reserved heap id for ion heap that manages
* single CMA (contiguous memory allocator)
* region. Uses standard DMA APIs for
* managing memory within the CMA region.
*/
/*
 * Standard heap types (see the DOC block above). Value 1 is intentionally
 * skipped — presumably the removed legacy contiguous type; confirm before
 * reusing it.
 */
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM = 0,
ION_HEAP_TYPE_DMA = 2,
/* reserved range for future standard heap types */
ION_HEAP_TYPE_CUSTOM = 16,
ION_HEAP_TYPE_MAX = 31,
};
/**
* ion_heap_id - list of standard heap ids that Android can use
*
* @ION_HEAP_SYSTEM Id for the ION_HEAP_TYPE_SYSTEM
* @ION_HEAP_DMA_START Start of reserved id range for heaps of type
* ION_HEAP_TYPE_DMA
* @ION_HEAP_DMA_END End of reserved id range for heaps of type
* ION_HEAP_TYPE_DMA
* @ION_HEAP_CUSTOM_START Start of reserved id range for heaps of custom
* type
* @ION_HEAP_CUSTOM_END End of reserved id range for heaps of custom
* type
*/
/*
 * Heap id bit positions (each id is a single bit in heap_id_mask).
 * NOTE(review): ION_HEAP_CUSTOM_END lands on bit 31; left-shifting into
 * the sign bit of an int-typed enumerator is formally undefined behavior
 * even though common compilers accept it — confirm this matches the
 * in-tree definition before changing anything.
 */
enum ion_heap_id {
ION_HEAP_SYSTEM = (1 << ION_HEAP_TYPE_SYSTEM), /* bit 0 */
ION_HEAP_DMA_START = (ION_HEAP_SYSTEM << 1), /* bit 1 */
ION_HEAP_DMA_END = (ION_HEAP_DMA_START << 7), /* bit 8 */
ION_HEAP_CUSTOM_START = (ION_HEAP_DMA_END << 1), /* bit 9 */
ION_HEAP_CUSTOM_END = (ION_HEAP_CUSTOM_START << 22), /* bit 31 */
};
#define ION_NUM_MAX_HEAPS (32)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
/*
* mappings of this buffer should be cached, ion will do cache maintenance
* when the buffer is mapped for dma
*/
#define ION_FLAG_CACHED 1
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
 * @fd: dma-buf file descriptor populated on a successful allocation,
 * used to refer to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
struct ion_allocation_data {
__u64 len; /* size of the allocation in bytes */
__u32 heap_id_mask; /* mask of heap id bits to allocate from */
__u32 flags; /* ION_FLAG_* allocation flags */
__u32 fd; /* presumably a dma-buf fd filled in on success — confirm against driver */
__u32 unused; /* padding; keep zeroed */
};
#define MAX_HEAP_NAME 32
/**
* struct ion_heap_data - data about a heap
* @name - first 32 characters of the heap name
* @type - heap type
* @heap_id - heap id for the heap
*/
struct ion_heap_data {
char name[MAX_HEAP_NAME]; /* heap name; only the first 32 chars are reported */
__u32 type; /* enum ion_heap_type value */
__u32 heap_id; /* heap id for the heap */
__u32 reserved0; /* reserved; keep zeroed */
__u32 reserved1;
__u32 reserved2;
};
/**
* struct ion_heap_query - collection of data about all heaps
* @cnt - total number of heaps to be copied
* @heaps - buffer to copy heap data
*/
struct ion_heap_query {
__u32 cnt; /* Total number of heaps to be copied */
__u32 reserved0; /* align to 64bits */
__u64 heaps; /* buffer to be populated */
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
*/
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct ion_allocation_data)
/**
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
*
* Takes an ion_heap_query structure and populates information about
* available Ion heaps.
*/
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
struct ion_heap_query)
/**
* DOC: ION_IOC_HEAP_ABI_VERSION - return ABI version
*
* Returns ABI version for this driver
*/
#define ION_IOC_ABI_VERSION _IOR(ION_IOC_MAGIC, 9, \
__u32)
#endif /* _UAPI_LINUX_ION_H */

View File

@@ -0,0 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_MEM_BUF_H
#define _UAPI_LINUX_MEM_BUF_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define MEM_BUF_IOC_MAGIC 'M'
/**
* enum mem_buf_mem_type: Types of memory that can be allocated from and to
* @MEM_BUF_ION_MEM_TYPE: The memory for the source or destination is ION memory
*/
enum mem_buf_mem_type {
MEM_BUF_ION_MEM_TYPE, /* ION memory (see struct mem_buf_ion_data) */
MEM_BUF_MAX_MEM_TYPE, /* sentinel for the enum proper */
};
/*
 * NOTE(review): MEM_BUF_DMAHEAP_MEM_TYPE (defined just below) is numbered
 * past MEM_BUF_MAX_MEM_TYPE, so validation of the form
 * "type < MEM_BUF_MAX_MEM_TYPE" would reject it — confirm the driver
 * handles the out-of-range value deliberately.
 */
#define MEM_BUF_DMAHEAP_MEM_TYPE (MEM_BUF_ION_MEM_TYPE + 1)
/* RESERVED for MEM_BUF_BUDDY_MEM_TYPE: MEM_BUF_ION_MEM_TYPE + 2 */
/* The mem-buf values that represent VMIDs for an ACL. */
#define MEM_BUF_VMID_PRIMARY_VM 0
#define MEM_BUF_VMID_TRUSTED_VM 1
#define MEM_BUF_PERM_FLAG_READ (1U << 0)
#define MEM_BUF_PERM_FLAG_WRITE (1U << 1)
#define MEM_BUF_PERM_FLAG_EXEC (1U << 2)
#define MEM_BUF_PERM_VALID_FLAGS\
(MEM_BUF_PERM_FLAG_READ | MEM_BUF_PERM_FLAG_WRITE |\
MEM_BUF_PERM_FLAG_EXEC)
#define MEM_BUF_MAX_NR_ACL_ENTS 16
/**
* struct acl_entry: Represents the access control permissions for a VMID.
* @vmid: The mem-buf VMID specifier associated with the VMID that will access
* the memory.
* @perms: The access permissions for the VMID in @vmid. This flag is
* interpreted as a bitmap, and thus, should be a combination of one or more
* of the MEM_BUF_PERM_FLAG_* flags.
*/
struct acl_entry {
__u32 vmid;
__u32 perms;
};
/**
* struct mem_buf_ion_data: Data that is unique to memory that is of type
* MEM_BUF_ION_MEM_TYPE.
* @heap_id: The heap ID of where memory should be allocated from or added to.
*/
struct mem_buf_ion_data {
__u32 heap_id;
};
#define MEM_BUF_MAX_DMAHEAP_NAME_LEN 128
/**
* struct mem_buf_dmaheap_data: Data that is unique to memory that is of type
* MEM_BUF_DMAHEAP_MEM_TYPE.
* @heap_name: array of characters containing the heap name.
*/
struct mem_buf_dmaheap_data {
__u64 heap_name;
};
/**
* struct mem_buf_alloc_ioctl_arg: A request to allocate memory from another
* VM to other VMs.
* @size: The size of the allocation.
* @acl_list: An array of structures, where each structure specifies a VMID
* and the access permissions that the VMID will have to the memory to be
* allocated.
* @nr_acl_entries: The number of ACL entries in @acl_list.
* @src_mem_type: The type of memory that the source VM should allocate from.
* This should be one of the mem_buf_mem_type enum values.
* @src_data: A pointer to data that the source VM should interpret when
* performing the allocation.
* @dst_mem_type: The type of memory that the destination VM should treat the
* incoming allocation from the source VM as. This should be one of the
* mem_buf_mem_type enum values.
* @mem_buf_fd: A file descriptor representing the memory that was allocated
* from the source VM and added to the current VM. Calling close() on this file
* descriptor will deallocate the memory from the current VM, and return it
* to the source VM.
 * @dst_data: A pointer to data that the destination VM should interpret when
* adding the memory to the current VM.
*
* All reserved fields must be zeroed out by the caller prior to invoking the
* allocation IOCTL command with this argument.
*/
struct mem_buf_alloc_ioctl_arg {
__u64 size;
__u64 acl_list;
__u32 nr_acl_entries;
__u32 src_mem_type;
__u64 src_data;
__u32 dst_mem_type;
__u32 mem_buf_fd;
__u64 dst_data;
__u64 reserved0;
__u64 reserved1;
__u64 reserved2;
};
#define MEM_BUF_IOC_ALLOC _IOWR(MEM_BUF_IOC_MAGIC, 0,\
struct mem_buf_alloc_ioctl_arg)
/**
* struct mem_buf_lend_ioctl_arg: A request to lend memory from the local VM
* VM to one or more remote VMs.
* @dma_buf_fd: The fd of the dma-buf that will be exported to another VM.
* @nr_acl_entries: The number of ACL entries in @acl_list.
* @acl_list: An array of structures, where each structure specifies a VMID
* and the access permissions that the VMID will have to the memory to be
* exported. Must not include the local VMID.
* @memparcel_hdl: The handle associated with the memparcel that was created by
* granting access to the dma-buf for the VMIDs specified in @acl_list.
*
* All reserved fields must be zeroed out by the caller prior to invoking the
* import IOCTL command with this argument.
*/
struct mem_buf_lend_ioctl_arg {
__u32 dma_buf_fd;
__u32 nr_acl_entries;
__u64 acl_list;
__u64 memparcel_hdl;
__u64 reserved0;
__u64 reserved1;
__u64 reserved2;
};
#define MEM_BUF_IOC_LEND _IOWR(MEM_BUF_IOC_MAGIC, 3,\
struct mem_buf_lend_ioctl_arg)
#define MEM_BUF_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
/**
* struct mem_buf_retrieve_ioctl_arg: A request to retrieve memory from another
* VM as a dma-buf
 * @sender_vm_fd: An open file descriptor identifying the VM that sent the handle.
* @nr_acl_entries: The number of ACL entries in @acl_list.
* @acl_list: An array of structures, where each structure specifies a VMID
* and the access permissions that the VMID should have for the memparcel.
* @memparcel_hdl: The handle that corresponds to the memparcel we are
* importing.
* @dma_buf_import_fd: A dma-buf file descriptor that the client can use to
* access the buffer. This fd must be closed to release the memory.
* @fd_flags: file descriptor flags used when allocating
*
* All reserved fields must be zeroed out by the caller prior to invoking the
* import IOCTL command with this argument.
*/
struct mem_buf_retrieve_ioctl_arg {
__u32 sender_vm_fd;
__u32 nr_acl_entries;
__u64 acl_list;
__u64 memparcel_hdl;
__u32 dma_buf_import_fd;
__u32 fd_flags;
__u64 reserved0;
__u64 reserved1;
__u64 reserved2;
};
#define MEM_BUF_IOC_RETRIEVE _IOWR(MEM_BUF_IOC_MAGIC, 4,\
struct mem_buf_retrieve_ioctl_arg)
/**
* struct mem_buf_reclaim_ioctl_arg: A request to reclaim memory from another
* VM. The other VM must have relinquished access, and the current VM must be
* the original owner of the memory. The dma-buf file will not be closed by
* this operation.
* @memparcel_hdl: The handle that corresponds to the memparcel we are
* reclaiming.
* @dma_buf_fd: A dma-buf file descriptor that the client can use to
* access the buffer.
*
* All reserved fields must be zeroed out by the caller prior to invoking the
* import IOCTL command with this argument.
*/
struct mem_buf_reclaim_ioctl_arg {
__u64 memparcel_hdl; /* handle of the memparcel being reclaimed */
__u32 dma_buf_fd; /* dma-buf fd for the buffer; not closed by this op */
__u32 reserved0; /* reserved; must be zeroed by the caller */
__u64 reserved1;
__u64 reserved2;
};
/*
 * NOTE(review): command number 3 is also used by MEM_BUF_IOC_LEND above.
 * The encoded ioctl values still differ because _IOWR folds the argument
 * size into the number, but sharing an nr between two commands is fragile
 * — confirm this matches the in-tree driver before relying on it.
 */
#define MEM_BUF_IOC_RECLAIM _IOWR(MEM_BUF_IOC_MAGIC, 3,\
struct mem_buf_reclaim_ioctl_arg)
/**
 * struct mem_buf_share_ioctl_arg: A request to share memory between the
* local VM and one or more remote VMs.
* @dma_buf_fd: The fd of the dma-buf that will be exported to another VM.
* @nr_acl_entries: The number of ACL entries in @acl_list.
* @acl_list: An array of structures, where each structure specifies a VMID
* and the access permissions that the VMID will have to the memory to be
* exported. Must include the local VMID.
* @memparcel_hdl: The handle associated with the memparcel that was created by
* granting access to the dma-buf for the VMIDs specified in @acl_list.
*
* All reserved fields must be zeroed out by the caller prior to invoking the
* import IOCTL command with this argument.
*/
struct mem_buf_share_ioctl_arg {
__u32 dma_buf_fd;
__u32 nr_acl_entries;
__u64 acl_list;
__u64 memparcel_hdl;
__u64 reserved0;
__u64 reserved1;
__u64 reserved2;
};
#define MEM_BUF_IOC_SHARE _IOWR(MEM_BUF_IOC_MAGIC, 6,\
struct mem_buf_share_ioctl_arg)
/**
* struct mem_buf_exclusive_owner_ioctl_arg: A request to see if a DMA-BUF
* is owned by and belongs exclusively to this VM.
* @dma_buf_fd: The fd of the dma-buf the user wants to obtain information on
 * @is_exclusive_owner: set non-zero by the kernel if this VM exclusively
 * owns the buffer — confirm exact semantics against the driver
*/
struct mem_buf_exclusive_owner_ioctl_arg {
__u32 dma_buf_fd;
__u32 is_exclusive_owner;
};
#define MEM_BUF_IOC_EXCLUSIVE_OWNER _IOWR(MEM_BUF_IOC_MAGIC, 2,\
struct mem_buf_exclusive_owner_ioctl_arg)
/**
* struct mem_buf_get_memparcel_hdl_ioctl_arg: A request to get the Gunyah
* memparcel handle from a DMA-BUF, given that it has one.
* @memparcel_hdl: The handle associated with the DMA-BUF, if it exists
* @dma_buf_fd: The fd of the dma-buf the user wants to obtain a handle from
*/
struct mem_buf_get_memparcel_hdl_ioctl_arg {
__u64 memparcel_hdl;
__u32 dma_buf_fd;
__u32 padding;
};
#define MEM_BUF_IOC_GET_MEMPARCEL_HDL _IOWR(MEM_BUF_IOC_MAGIC, 5,\
struct mem_buf_get_memparcel_hdl_ioctl_arg)
#endif /* _UAPI_LINUX_MEM_BUF_H */

View File

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __UAPI_LINUX_MSM_GENI_SERIAL_H
#define __UAPI_LINUX_MSM_GENI_SERIAL_H
/* IOCTLS used by BT clients to control UART power state */
#define MSM_GENI_SERIAL_TIOCFAULT 0x54EC /* Uart fault */
#define MSM_GENI_SERIAL_TIOCPMGET 0x54ED /* PM get */
#define MSM_GENI_SERIAL_TIOCPMPUT 0x54EE /* PM put */
#define MSM_GENI_SERIAL_TIOCPMACT 0x54EF /* PM is active */
#endif /* __UAPI_LINUX_MSM_GENI_SERIAL_H */

View File

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_MSM_ION_H
#define _UAPI_LINUX_MSM_ION_H
#include <linux/types.h>
#include <linux/msm_ion_ids.h>
/**
* TARGET_ION_ABI_VERSION can be used by user space clients to ensure that at
* compile time only their code which uses the appropriate ION APIs for
* this kernel is included.
*/
#define TARGET_ION_ABI_VERSION 3
/* MSM-specific ION heap types; values start at 16 to stay clear of the generic ION heap types. */
enum msm_ion_heap_types {
ION_HEAP_TYPE_MSM_START = 16,
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START, /* aliases the first MSM type */
ION_HEAP_TYPE_SYSTEM_SECURE,
ION_HEAP_TYPE_HYP_CMA,
ION_HEAP_TYPE_MSM_CARVEOUT,
ION_HEAP_TYPE_SECURE_CARVEOUT,
ION_HEAP_TYPE_MSM_SYSTEM,
};
/**
* Flags to be used when allocating from the secure heap for
* content protection
*/
#define ION_FLAG_CP_TRUSTED_VM ION_BIT(15)
/* ION_FLAG_POOL_FORCE_ALLOC uses ION_BIT(16) */
#define ION_FLAG_CP_TOUCH ION_BIT(17)
#define ION_FLAG_CP_BITSTREAM ION_BIT(18)
#define ION_FLAG_CP_PIXEL ION_BIT(19)
#define ION_FLAG_CP_NON_PIXEL ION_BIT(20)
#define ION_FLAG_CP_CAMERA ION_BIT(21)
#define ION_FLAG_CP_HLOS ION_BIT(22)
#define ION_FLAG_CP_SPSS_SP ION_BIT(23)
#define ION_FLAG_CP_SPSS_SP_SHARED ION_BIT(24)
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
/* ION_FLAG_ALLOW_NON_CONTIG uses ION_BIT(28) */
#define ION_FLAG_CP_CDSP ION_BIT(29)
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
#define ION_FLAGS_CP_MASK 0x6FFE8000
/**
* Flag to allow non-contiguous allocation of memory from the secure
* heap
*/
#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(28)
/**
* Flag to use when allocating to indicate that a heap is secure.
* Do NOT use BIT macro since it is defined in #ifdef __KERNEL__
*/
#define ION_FLAG_SECURE ION_BIT(31)
/*
* Used in conjunction with heap which pool memory to force an allocation
* to come from the page allocator directly instead of from the pool allocation
*/
#define ION_FLAG_POOL_FORCE_ALLOC ION_BIT(16)
/**
* Macro should be used with ion_heap_ids defined above.
*/
#define ION_HEAP(bit) bit
#define ION_IOC_MSM_MAGIC 'M'
/*
 * Per-VMID region descriptor used by ION_IOC_PREFETCH / ION_IOC_DRAIN.
 * @sizes: presumably a user-space pointer (carried as __u64) to an array of
 *         @nr_sizes size values -- TODO confirm against the msm_ion driver.
 * @vmid: VMID the sizes apply to.
 * @nr_sizes: number of entries behind @sizes.
 */
struct ion_prefetch_regions {
__u64 sizes;
__u32 vmid;
__u32 nr_sizes;
};
/*
 * Top-level argument for ION_IOC_PREFETCH / ION_IOC_DRAIN.
 * @len: total length to prefetch or drain.
 * @regions: presumably a user-space pointer (carried as __u64) to an array of
 *           @nr_regions struct ion_prefetch_regions -- TODO confirm.
 * @heap_id: target heap id (see msm_ion_ids.h).
 * @nr_regions: number of entries behind @regions.
 */
struct ion_prefetch_data {
__u64 len;
__u64 regions;
__u32 heap_id;
__u32 nr_regions;
};
#define ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \
struct ion_prefetch_data)
#define ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \
struct ion_prefetch_data)
#endif /* _UAPI_LINUX_MSM_ION_H */

View File

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _MSM_ION_IDS_H
#define _MSM_ION_IDS_H
#define ION_BIT(nr) (1U << (nr))
/**
* These are the only ids that should be used for Ion heap ids.
* The ids listed are the order in which allocation will be attempted
* if specified. Don't swap the order of heap ids unless you know what
* you are doing!
* Id's are spaced by purpose to allow new Id's to be inserted in-between (for
* possible fallbacks)
*/
/* ION_BIT(0) is reserved for the generic system heap. */
#define ION_QSECOM_TA_HEAP_ID ION_BIT(1)
#define ION_CAMERA_HEAP_ID ION_BIT(2)
#define ION_DISPLAY_HEAP_ID ION_BIT(3)
#define ION_ADSP_HEAP_ID ION_BIT(4)
#define ION_AUDIO_ML_HEAP_ID ION_BIT(5)
#define ION_USER_CONTIG_HEAP_ID ION_BIT(6)
#define ION_QSECOM_HEAP_ID ION_BIT(7)
#define ION_AUDIO_HEAP_ID ION_BIT(8)
#define ION_CP_MM_HEAP_ID ION_BIT(9)
#define ION_SECURE_HEAP_ID ION_BIT(10)
#define ION_SECURE_DISPLAY_HEAP_ID ION_BIT(11)
#define ION_SPSS_HEAP_ID ION_BIT(14)
#define ION_SECURE_CARVEOUT_HEAP_ID ION_BIT(15)
#define ION_TUI_CARVEOUT_HEAP_ID ION_BIT(16)
#define ION_SYSTEM_HEAP_ID ION_BIT(25)
#define ION_HEAP_ID_RESERVED ION_BIT(31)
#endif /* _MSM_ION_IDS_H */

View File

@@ -0,0 +1,365 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_MSM_RMNET_H_
#define _UAPI_MSM_RMNET_H_
#include <linux/types.h>
/* Bitmap macros for RmNET driver operation mode. */
#define RMNET_MODE_NONE (0x00)
#define RMNET_MODE_LLP_ETH (0x01)
#define RMNET_MODE_LLP_IP (0x02)
#define RMNET_MODE_QOS (0x04)
#define RMNET_MODE_MASK (RMNET_MODE_LLP_ETH | \
RMNET_MODE_LLP_IP | \
RMNET_MODE_QOS)
#define RMNET_IS_MODE_QOS(mode) \
((mode & RMNET_MODE_QOS) == RMNET_MODE_QOS)
#define RMNET_IS_MODE_IP(mode) \
((mode & RMNET_MODE_LLP_IP) == RMNET_MODE_LLP_IP)
/**
* IOCTL commands
* Values chosen to not conflict with other drivers in the ecosystem
*/
#define RMNET_IOCTL_SET_LLP_ETHERNET 0x000089F1 /* Set Ethernet protocol */
#define RMNET_IOCTL_SET_LLP_IP 0x000089F2 /* Set RAWIP protocol */
#define RMNET_IOCTL_GET_LLP 0x000089F3 /* Get link protocol */
#define RMNET_IOCTL_SET_QOS_ENABLE 0x000089F4 /* Set QoS header enabled */
#define RMNET_IOCTL_SET_QOS_DISABLE 0x000089F5 /* Set QoS header disabled */
#define RMNET_IOCTL_GET_QOS 0x000089F6 /* Get QoS header state */
#define RMNET_IOCTL_GET_OPMODE 0x000089F7 /* Get operation mode */
#define RMNET_IOCTL_OPEN 0x000089F8 /* Open transport port */
#define RMNET_IOCTL_CLOSE 0x000089F9 /* Close transport port */
#define RMNET_IOCTL_FLOW_ENABLE 0x000089FA /* Flow enable */
#define RMNET_IOCTL_FLOW_DISABLE 0x000089FB /* Flow disable */
#define RMNET_IOCTL_FLOW_SET_HNDL 0x000089FC /* Set flow handle */
#define RMNET_IOCTL_EXTENDED 0x000089FD /* Extended IOCTLs */
#define RMNET_IOCTL_EXTENDED_V2 0x000089FE /* Extended V2 IOCTLs */
/* RmNet Data Required IOCTLs */
#define RMNET_IOCTL_GET_SUPPORTED_FEATURES 0x0000 /* Get features */
#define RMNET_IOCTL_SET_MRU 0x0001 /* Set MRU */
#define RMNET_IOCTL_GET_MRU 0x0002 /* Get MRU */
#define RMNET_IOCTL_GET_EPID 0x0003 /* Get endpoint ID */
#define RMNET_IOCTL_GET_DRIVER_NAME 0x0004 /* Get driver name */
#define RMNET_IOCTL_ADD_MUX_CHANNEL 0x0005 /* Add MUX ID */
#define RMNET_IOCTL_SET_EGRESS_DATA_FORMAT 0x0006 /* Set EDF */
#define RMNET_IOCTL_SET_INGRESS_DATA_FORMAT 0x0007 /* Set IDF */
#define RMNET_IOCTL_SET_AGGREGATION_COUNT 0x0008 /* Set agg count */
#define RMNET_IOCTL_GET_AGGREGATION_COUNT 0x0009 /* Get agg count */
#define RMNET_IOCTL_SET_AGGREGATION_SIZE 0x000A /* Set agg size */
#define RMNET_IOCTL_GET_AGGREGATION_SIZE 0x000B /* Get agg size */
#define RMNET_IOCTL_FLOW_CONTROL 0x000C /* Do flow control */
#define RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL 0x000D /* For legacy use */
#define RMNET_IOCTL_GET_HWSW_MAP 0x000E /* Get HW/SW map */
#define RMNET_IOCTL_SET_RX_HEADROOM 0x000F /* RX Headroom */
#define RMNET_IOCTL_GET_EP_PAIR 0x0010 /* Endpoint pair */
#define RMNET_IOCTL_SET_QOS_VERSION 0x0011 /* 8/6 byte QoS hdr*/
#define RMNET_IOCTL_GET_QOS_VERSION 0x0012 /* 8/6 byte QoS hdr*/
#define RMNET_IOCTL_GET_SUPPORTED_QOS_MODES 0x0013 /* Get QoS modes */
#define RMNET_IOCTL_SET_SLEEP_STATE 0x0014 /* Set sleep state */
#define RMNET_IOCTL_SET_XLAT_DEV_INFO 0x0015 /* xlat dev name */
#define RMNET_IOCTL_DEREGISTER_DEV 0x0016 /* Dereg a net dev */
#define RMNET_IOCTL_GET_SG_SUPPORT 0x0017 /* Query sg support*/
#define RMNET_IOCTL_SET_OFFLOAD 0x0018 /* Set IPA offload */
#define RMNET_IOCTL_GET_MTU 0x0019 /* Get v4/v6 MTU */
#define RMNET_IOCTL_SET_MTU 0x0020 /* Set v4/v6 MTU */
#define RMNET_IOCTL_GET_EPID_LL 0x0021 /* Get LL ep ID */
#define RMNET_IOCTL_GET_EP_PAIR_LL 0x0022 /* LL ep pair */
/**
* RMNET_IOCTL_EXTENDED_V2 ioctl types.
* Should be sent through "extended_ioctl_type" variable.
* Any number of new IOCTL type can be added.
*/
/**
* Set EDF with config values
* Includes all the egress pipe's config in one single ioctl
*/
#define RMNET_IOCTL_SET_EGRESS_DATA_FORMAT_V2 0x0000
/**
* Set IDF with config values
* Includes all the ingress pipe's config in one single ioctl
*/
#define RMNET_IOCTL_SET_INGRESS_DATA_FORMAT_V2 0x0001
/* Return values for the RMNET_IOCTL_GET_SUPPORTED_FEATURES IOCTL */
#define RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL (1<<0)
#define RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT (1<<1)
#define RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT (1<<2)
#define RMNET_IOCTL_FEAT_SET_AGGREGATION_COUNT (1<<3)
#define RMNET_IOCTL_FEAT_GET_AGGREGATION_COUNT (1<<4)
#define RMNET_IOCTL_FEAT_SET_AGGREGATION_SIZE (1<<5)
#define RMNET_IOCTL_FEAT_GET_AGGREGATION_SIZE (1<<6)
#define RMNET_IOCTL_FEAT_FLOW_CONTROL (1<<7)
#define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL (1<<8)
#define RMNET_IOCTL_FEAT_GET_HWSW_MAP (1<<9)
/* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL */
#define RMNET_IOCTL_EGRESS_FORMAT_MAP (1<<1)
#define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION (1<<2)
#define RMNET_IOCTL_EGRESS_FORMAT_MUXING (1<<3)
#define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM (1<<4)
/* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */
#define RMNET_IOCTL_INGRESS_FORMAT_MAP (1<<1)
#define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION (1<<2)
#define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING (1<<3)
#define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM (1<<4)
#define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA (1<<5)
/* Input values for the RMNET_IOCTL_SET_OFFLOAD */
#define RMNET_IOCTL_OFFLOAD_FORMAT_NONE (0)
#define RMNET_IOCTL_COALESCING_FORMAT_TCP (1<<0)
#define RMNET_IOCTL_COALESCING_FORMAT_UDP (1<<1)
/* User space may not have this defined. */
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
/**
* enum rmnet_egress_ep_type - To specify pipe type for egress
* @RMNET_EGRESS_DEFAULT: WAN Producer pipe
* @RMNET_EGRESS_LOW_LAT_CTRL: Low latency ctrl producer pipe
* @RMNET_EGRESS_LOW_LAT_DATA: Low latency data producer pipe
* Add any number of pipes before max
*/
enum rmnet_egress_ep_type {
RMNET_EGRESS_DEFAULT = 0x0000, /* WAN producer pipe */
RMNET_EGRESS_LOW_LAT_CTRL = 0x0001, /* low latency ctrl producer pipe */
RMNET_EGRESS_LOW_LAT_DATA = 0x0002, /* low latency data producer pipe */
RMNET_EGRESS_MAX = 0x0003, /* count of egress pipe types; not a valid pipe */
};
/**
* enum rmnet_ingress_ep_type - To specify pipe type for ingress
* @RMNET_INGRESS_COALS: Coalescing Consumer pipe
* @RMNET_INGRESS_DEFAULT: WAN Consumer pipe
* @RMNET_INGRESS_LOW_LAT_CTRL: Low latency ctrl consumer pipe
* @RMNET_INGRESS_LOW_LAT_DATA: Low latency data consumer pipe
* Add any number of pipes before max
*/
enum rmnet_ingress_ep_type {
RMNET_INGRESS_COALS = 0x0000, /* coalescing consumer pipe */
RMNET_INGRESS_DEFAULT = 0x0001, /* WAN consumer pipe */
RMNET_INGRESS_LOW_LAT_CTRL = 0x0002, /* low latency ctrl consumer pipe */
RMNET_INGRESS_LOW_LAT_DATA = 0x0003, /* low latency data consumer pipe */
RMNET_INGRESS_MAX = 0x0004, /* count of ingress pipe types; not a valid pipe */
};
/**
* enum rmnet_egress_ingress_pipe_setup_status - To give
* back the pipe setup status info to netmngr
* @IPA_PIPE_SETUP_SUCCESS: pipe setup successful
* @IPA_PIPE_SETUP_FAILURE: pipe setup failure
* @IPA_PIPE_SETUP_EXISTS: pipe already exists
*/
enum rmnet_egress_ingress_pipe_setup_status {
IPA_PIPE_SETUP_SUCCESS = 0x0000, /* pipe setup successful */
IPA_PIPE_SETUP_FAILURE = 0x0001, /* pipe setup failed */
IPA_PIPE_SETUP_EXISTS = 0x0002, /* pipe already exists */
};
/**
* struct rmnet_egress_param - Include all the egress params that
* needs to be configured. Structure should have even
* __u32 variables or add padding.
* @egress_ep_type: Should be from rmnet_egress_ep_type
* @pipe_setup_status: Out parameter.
* Need to place below enum
* rmnet_egress_ingress_pipe_setup_status
* @cs_offload_en: Checksum offload (1 - Enable)
* @aggr_en: Aggregation Enable (1 - Enable)
* @ulso_en: (1 - Enable)
* @ipid_min_max_idx(for ULSO): A value from the range [0, 2] determines
* the registers pair from which to read the minimum and maximum of
* IPv4 packets ID.
* @int_modt: GSI event ring interrupt moderation time
* cycles base interrupt moderation (32KHz clock)
* @int_modc: GSI event ring interrupt moderation packet counter
*/
struct rmnet_egress_param {
__u32 egress_ep_type; /* in: one of enum rmnet_egress_ep_type */
__u32 pipe_setup_status; /* out: one of enum rmnet_egress_ingress_pipe_setup_status */
__u32 cs_offload_en; /* in: checksum offload (1 - enable) */
__u32 aggr_en; /* in: aggregation enable (1 - enable) */
__u32 ulso_en; /* in: UL segmentation offload (1 - enable) */
__u32 ipid_min_max_idx; /* in: ULSO IPv4-ID min/max register pair index, range [0, 2] */
__u32 int_modt; /* in: GSI event ring interrupt moderation time (32KHz clock cycles) */
__u32 int_modc; /* in: GSI event ring interrupt moderation packet counter */
};
/**
* struct rmnet_ingress_param - Include all the ingress params that
* needs to be configured. Structure should have even
* __u32 variables or add padding.
* @ingress_ep_type: Should be from rmnet_ingress_ep_type
* @pipe_setup_status: Out parameter.
* Need to place below enum
* rmnet_egress_ingress_pipe_setup_status
* @cs_offload_en: Checksum offload (1 - Enable)
* @buff_size: Actual buff size of rx_pkt
* @agg_byte_limit: Aggregation byte limit
* @agg_time_limit: Aggregation time limit
* @agg_pkt_limit: Aggregation packet limit
* @int_modt: GSI event ring interrupt moderation time
* cycles base interrupt moderation (32KHz clock)
* @int_modc: GSI event ring interrupt moderation packet counter
* @padding: To make it 64 bit packed structure
*/
struct rmnet_ingress_param {
__u32 ingress_ep_type; /* in: one of enum rmnet_ingress_ep_type */
__u32 pipe_setup_status; /* out: one of enum rmnet_egress_ingress_pipe_setup_status */
__u32 cs_offload_en; /* in: checksum offload (1 - enable) */
__u32 buff_size; /* in: actual buffer size of rx_pkt */
__u32 agg_byte_limit; /* in: aggregation byte limit */
__u32 agg_time_limit; /* in: aggregation time limit */
__u32 agg_pkt_limit; /* in: aggregation packet limit */
__u32 int_modt; /* in: GSI event ring interrupt moderation time (32KHz clock cycles) */
__u32 int_modc; /* in: GSI event ring interrupt moderation packet counter */
__u32 padding; /* keeps the struct 64-bit packed */
};
/**
* Following uapi coding convention here
* struct mystruct {
* __u64 pointer;
* };
*
* In userspace code:
* mystruct.pointer = (__u64)(uintptr_t) &pointer;
* In kernelspace code:
* copy_from_user(&struct, u64_to_user_ptr(mystruct.pointer), size);
*/
/**
* struct ingress_format_v2 - To include all the ingress params that
* needs to be configured. Structure should have even
* __u32 variables or add padding.
* @ingress_param_ptr: Should be rmnet_ingress_param pointer.
* Array of ingress params for all the pipes.
* @ingress_param_size: = sizeof(rmnet_ingress_param);
* @number_of_eps: Number of ep_types, should be = RMNET_INGRESS_MAX
*/
struct ingress_format_v2 {
__u64 ingress_param_ptr; /* user pointer, cast as (__u64)(uintptr_t), to an array of struct rmnet_ingress_param */
__u32 ingress_param_size; /* sizeof(struct rmnet_ingress_param); lets the kernel sanity-check the ABI */
__u32 number_of_eps; /* entries behind the pointer; expected to equal RMNET_INGRESS_MAX */
};
/**
* struct egress_format_v2 - To include all the egress params that
* needs to be configured. Structure should have even
* __u32 variables or add padding.
* @egress_param_ptr: Should be rmnet_egress_param pointer.
* Array of egress params for all the pipes.
* @egress_param_size: = sizeof(rmnet_egress_param);
* @number_of_eps: Number of ep_types, should be = RMNET_EGRESS_MAX.
*/
struct egress_format_v2 {
__u64 egress_param_ptr; /* user pointer, cast as (__u64)(uintptr_t), to an array of struct rmnet_egress_param */
__u32 egress_param_size; /* sizeof(struct rmnet_egress_param); lets the kernel sanity-check the ABI */
__u32 number_of_eps; /* entries behind the pointer; expected to equal RMNET_EGRESS_MAX */
};
/**
* struct rmnet_ioctl_extended_s_v2: New structure to include any number of
* ioctl of any size. Structure should have even
* __u32 variables or add padding.
* @ioctl_data_size: Eg: For egress ioctl
* = sizeof(egress_format_v2)
* @ioctl_ptr: Has to be typecasted to (__u64)(uintptr_t).
* @extended_v2_ioctl_type: Should be hash defined above similar
* to RMNET_IOCTL_SET_EGRESS_DATA_FORMAT_V2.
*/
/* Argument for RMNET_IOCTL_EXTENDED_V2: a size-tagged user pointer so one ioctl can carry any payload. */
struct rmnet_ioctl_extended_s_v2 {
__u64 ioctl_ptr; /* user pointer, cast as (__u64)(uintptr_t), to the payload (e.g. struct egress_format_v2) */
__u32 ioctl_data_size; /* size of the payload behind ioctl_ptr */
__u32 extended_v2_ioctl_type; /* e.g. RMNET_IOCTL_SET_EGRESS_DATA_FORMAT_V2 */
};
/* Argument for RMNET_IOCTL_EXTENDED; extended_ioctl selects which union member is live. */
struct rmnet_ioctl_extended_s {
__u32 extended_ioctl; /* one of the RMNET_IOCTL_* sub-command codes above */
union {
__u32 data; /* Generic data field for most extended IOCTLs */
/* Return values for
 * RMNET_IOCTL_GET_DRIVER_NAME
 * RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL
 */
__s8 if_name[IFNAMSIZ];
/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */
struct {
__u32 mux_id;
__s8 vchannel_name[IFNAMSIZ];
} rmnet_mux_val;
/* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */
struct {
__u8 flow_mode;
__u8 mux_id;
} flow_control_prop;
/* Return values for RMNET_IOCTL_GET_EP_PAIR */
struct {
__u32 consumer_pipe_num;
__u32 producer_pipe_num;
} ipa_ep_pair;
/* Values for RMNET_IOCTL_SET_INGRESS_DATA_FORMAT */
struct {
__u32 __data; /* Placeholder for legacy data*/
__u32 agg_size;
__u32 agg_count;
} ingress_format;
/* Input values for the RMNET_IOCTL_SET_OFFLOAD */
struct {
__u32 flags;
__u8 mux_id;
} offload_params;
/* Input values for the RMNET_IOCTL_SET_MTU */
struct {
__s8 if_name[IFNAMSIZ];
/* if given non-zero value, mtu has changed */
__u16 mtu_v4;
__u16 mtu_v6;
} mtu_params;
} u;
};
/* Argument for the legacy single-word RMNET ioctls (e.g. get/set opmode). */
struct rmnet_ioctl_data_s {
union {
__u32 operation_mode; /* bitmap of RMNET_MODE_* values */
__u32 tcm_handle; /* flow handle for RMNET_IOCTL_FLOW_SET_HNDL */
} u;
};
#define RMNET_IOCTL_QOS_MODE_6 (1<<0)
#define RMNET_IOCTL_QOS_MODE_8 (1<<1)
/* QMI QoS header definition */
/* 6-byte QMI QoS header prepended to data when RMNET_IOCTL_QOS_MODE_6 is in use. */
struct QMI_QOS_HDR_S {
unsigned char version;
unsigned char flags;
__u32 flow_id;
} __attribute((__packed__)); /* NOTE(review): `__attribute` (GCC alias) rather than the conventional `__attribute__` -- works on GCC/Clang but worth confirming for other compilers */
/* QMI QoS 8-byte header. */
/* 8-byte QMI QoS header: the 6-byte header padded out for RMNET_IOCTL_QOS_MODE_8. */
struct qmi_qos_hdr8_s {
struct QMI_QOS_HDR_S hdr;
__u8 reserved[2]; /* pads the 6-byte header to 8 bytes */
} __attribute((__packed__));
#endif /* _UAPI_MSM_RMNET_H_ */

View File

@@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_MSM_SYSSTATS_H_
#define _UAPI_MSM_SYSSTATS_H_
#include <linux/types.h>
#define SYSSTATS_GENL_NAME "SYSSTATS"
#define SYSSTATS_GENL_VERSION 0x2
#define TS_COMM_LEN 32
#define SYSSTATS_TYPE_UNSPEC 0
#define SYSSTATS_TASK_TYPE_STATS 1
#define SYSSTATS_TYPE_NULL 2
#define SYSSTATS_TASK_TYPE_FOREACH 3
#define SYSSTATS_MEMINFO_TYPE_STATS 4
#define SYSSTATS_PID_TYPE_STATS 5
#define SYSSTATS_CMD_ATTR_UNSPEC 0
#define SYSSTATS_TASK_CMD_ATTR_PID 1
#define SYSSTATS_TASK_CMD_ATTR_FOREACH 2
#define SYSSTATS_TASK_CMD_ATTR_PIDS_OF_NAME 3
#define SYSSTATS_CMD_UNSPEC 0
#define SYSSTATS_TASK_CMD_GET 1
#define SYSSTATS_TASK_CMD_NEW 2
#define SYSSTATS_MEMINFO_CMD_GET 3
#define SYSSTATS_MEMINFO_CMD_NEW 4
#define SYSSTATS_PIDS_CMD_GET 5
#define SYSSTATS_PIDS_CMD_NEW 6
/* Per-task statistics returned by SYSSTATS_TASK_CMD_GET / foreach replies. */
struct sysstats_task {
__u64 anon_rss; /* KB */
__u64 file_rss; /* KB */
__u64 swap_rss; /* KB */
__u64 shmem_rss; /* KB */
__u64 unreclaimable; /* KB */
__u64 utime; /* User CPU time [usec] */
__u64 stime; /* System CPU time [usec] */
__u64 cutime; /* Cumulative User CPU time [usec] */
__u64 cstime; /* Cumulative System CPU time [usec] */
__s16 oom_score; /* OOM-kill priority of the task */
__s16 __padding; /* alignment filler; no meaning */
__u32 pid; /* process ID */
__u32 uid; /* user ID owning the task */
__u32 ppid; /* Parent process ID */
char name[TS_COMM_LEN]; /* Command name */
char state[TS_COMM_LEN]; /* Process state */
};
/*
* All values in KB.
*/
/* System-wide memory statistics (all values in KB) for SYSSTATS_MEMINFO_CMD_GET. */
struct sysstats_mem {
__u64 memtotal;
__u64 misc_reclaimable;
__u64 unreclaimable;
__u64 zram_compressed;
__u64 swap_used;
__u64 swap_total;
__u64 buffer;
__u64 vmalloc_total;
__u64 swapcache;
__u64 slab_reclaimable;
__u64 slab_unreclaimable;
__u64 free_cma;
__u64 file_mapped;
__u64 pagetable;
__u64 kernelstack;
__u64 shmem;
/* Per-zone page counts: DMA zone */
__u64 dma_nr_free;
__u64 dma_nr_active_anon;
__u64 dma_nr_inactive_anon;
__u64 dma_nr_active_file;
__u64 dma_nr_inactive_file;
/* Per-zone page counts: Normal zone */
__u64 normal_nr_free;
__u64 normal_nr_active_anon;
__u64 normal_nr_inactive_anon;
__u64 normal_nr_active_file;
__u64 normal_nr_inactive_file;
/* Per-zone page counts: Movable zone */
__u64 movable_nr_free;
__u64 movable_nr_active_anon;
__u64 movable_nr_inactive_anon;
__u64 movable_nr_active_file;
__u64 movable_nr_inactive_file;
/* Per-zone page counts: Highmem zone */
__u64 highmem_nr_free;
__u64 highmem_nr_active_anon;
__u64 highmem_nr_inactive_anon;
__u64 highmem_nr_active_file;
__u64 highmem_nr_inactive_file;
};
/* Single-PID reply payload for SYSSTATS_PIDS_CMD_GET. */
struct sysstats_pid {
__u64 pid;
};
#endif /* _UAPI_MSM_SYSSTATS_H_ */

View File

@@ -32,8 +32,6 @@
#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG
#define NETLINK_KFREECESS 27
#define MAX_LINKS 32
struct sockaddr_nl {

View File

@@ -0,0 +1,130 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_Q2SPI_H
#define _UAPI_LINUX_Q2SPI_H
#include <linux/types.h>
#include "asm-generic/errno-base.h"
/**
* enum cmd_type - q2spi request command type
* @LOCAL_REG_READ: this command is used to read Q2SPI slave local register space.
* @LOCAL_REG_WRITE: this command is used to write Q2SPI slave local register space.
* @DATA_READ: this command is used to read bulk data from Q2SPI slave.
* @DATA_WRITE: this command is used to write bulk data to Q2SPI slave.
* @HRF_READ: this command is used to read data from Q2SPI slave HRF.
* @HRF_WRITE: this command is used to write data to Q2SPI slave HRF.
* @SOFT_RESET: this command is used to reset Q2SPI slave.
* @ABORT: this command is used to abort the CR Q2SPI slave.
*/
enum cmd_type {
LOCAL_REG_READ = 0, /* read Q2SPI slave local register space */
LOCAL_REG_WRITE = 1, /* write Q2SPI slave local register space */
DATA_READ = 2, /* bulk data read from the slave */
DATA_WRITE = 3, /* bulk data write to the slave */
HRF_READ = 4, /* read from the slave HRF */
HRF_WRITE = 5, /* write to the slave HRF */
SOFT_RESET = 6, /* reset the Q2SPI slave */
ABORT = 7, /* abort the slave CR */
/* Negative sentinel; forces the enum to a signed type and requires errno-base.h above. */
INVALID_CMD = -EINVAL,
};
/**
* enum priority_type - priority of Q2SPI transfer request
* @NORMAL: user space client specifies this for normal priority transfer.
* @HIGH: user space client specifies this for high priority transfer.
* @LOW: same as NORMAL. Reserved for future use.
*/
enum priority_type {
NORMAL = 0, /* normal priority transfer */
HIGH = 1, /* high priority transfer */
LOW = NORMAL, /* alias of NORMAL; reserved for future use */
INVALID_TYPE = -EINVAL, /* negative sentinel */
};
/**
* enum xfer_status - indicate status of the transfer
* @SUCCESS: indicates success
* @FAILURE: indicates failure
* @OVERFLOW: indicates TX buffer overflow
* @UNDERFLOW: indicates RX buffer underflow
* @RESPONSE_ERROR: indicates AHB response error
* @CHECKSUM_ERROR: indicates checksum error
* @TIMEOUT: timeout for a transfer
* @OTHER: reserved for future purpose
*/
enum xfer_status {
SUCCESS = 0, /* transfer completed */
FAILURE = 1, /* generic failure */
OVERFLOW = 2, /* TX buffer overflow */
UNDERFLOW = 3, /* RX buffer underflow */
RESPONSE_ERROR = 4, /* AHB response error */
CHECKSUM_ERROR = 5, /* checksum mismatch */
TIMEOUT = 6, /* transfer timed out */
OTHER = 7, /* reserved for future use */
INVALID_STATUS = -EINVAL, /* negative sentinel */
};
/**
* struct q2spi_request - structure to pass Q2SPI transfer request(read/write) to driver
* @data_buff: stores data buffer pointer passed from user space client. First byte filled
* by user space client to specify Q2SPI slave(Ganges) configuration(HCI/UCI).
* @cmd: represents command type of a transfer request.
* @addr: user space client will use this field to indicate any specific address
* used for transfer request.
* @end_point: user space client specifies source endpoint information using this field.
* @proto_ind: user space client specifies protocol indicators BT or UWB using this field.
* @data_len: represents transfer length of the transaction.
* @priority: priority of Q2SPI transfer request, Valid only in async mode.
* @sync: by default synchronous transfer are used.
* user space client can use this to specify synchronous or asynchronous
* transfers are used.
* @flow_id: unique flow id of the transfer assigned by q2spi interface driver. In
* asynchronous mode write api returns this flow id to userspace.
* @reserved[20]: reserved for future purpose.
*
* This structure is to send information to q2spi driver from user space.
*/
/*
 * Userspace -> driver Q2SPI transfer request.
 * NOTE(review): `void *`, enum, and _Bool members make the layout of this
 * uapi struct differ between 32/64-bit userspace and depend on enum sizing;
 * the uapi convention (see msm_rmnet.h) is __u64 pointer carriers and
 * fixed-width types -- confirm whether compat handling exists in the driver.
 */
struct q2spi_request {
void *data_buff; /* user data buffer; first byte selects slave config (HCI/UCI) */
enum cmd_type cmd; /* request command type */
__u32 addr; /* target address for the request, when applicable */
__u8 end_point; /* source endpoint information */
__u8 proto_ind; /* protocol indicator: BT or UWB */
__u32 data_len; /* transfer length */
enum priority_type priority; /* transfer priority; valid only in async mode */
__u8 flow_id; /* out: unique flow id assigned by the q2spi interface driver */
_Bool sync; /* select synchronous (default) or asynchronous transfer */
__u32 reserved[20]; /* reserved for future use */
};
/**
* struct q2spi_client_request - structure to retrieve Q2SPI client request information.
* @data_buff: points to data buffer pointer passed from user space client
* @data_len: represents transfer length
* @end_point: q2spi driver copy CR data arg2 information in this field.
* @proto_ind: q2spi driver copy CR data arg3 information in this field.
* @cmd: represents command type of a transfer request.
* @status: response status for the request
* @flow_id: unique flow id of the transfer assigned by q2spi interface driver.
* In asynchronous mode write api returns this flow id to userspace,
* application can use this flow_id to match the response it
* received asynchronously from q2spi driver.
* @reserved[20]: reserved for future purpose
*
* q2spi driver will copy this to user space client as a part of read API which contains previous
* request response from Q2SPI slave or new client request from Q2SPI slave.
*/
/*
 * Driver -> userspace payload returned by the read API: either the response
 * to an earlier request or a new client request from the Q2SPI slave.
 * NOTE(review): same uapi-layout caveat as struct q2spi_request
 * (void * / enum members are not fixed-width).
 */
struct q2spi_client_request {
void *data_buff; /* user data buffer pointer */
__u32 data_len; /* transfer length */
__u8 end_point; /* CR data arg2, copied by the driver */
__u8 proto_ind; /* CR data arg3, copied by the driver */
enum cmd_type cmd; /* command type of the request */
enum xfer_status status; /* response status for the request */
__u8 flow_id; /* matches the flow id returned by the async write API */
__u32 reserved[20]; /* reserved for future use */
};
#endif /* _UAPI_LINUX_Q2SPI_H */

View File

@@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QBG_PROFILE_H__
#define __QBG_PROFILE_H__
#include <linux/types.h>
#define MAX_BP_LUT_ROWS 35
#define MAX_BP_LUT_COLS 8
#define MAX_PROFILE_NAME_LENGTH 256
#define QBG_BPIOCXBP 0x1
#define QBG_BPIOCXBPTABLE 0x2
/* Selects which battery profile LUT a BPIOCXBPTABLE request refers to. */
enum profile_table_type {
CHARGE_TABLE = 0,
DISCHARGE_TABLE,
};
/* One battery profile lookup table; only nrows x ncols of the fixed-size array are valid. */
struct battery_data_table {
unsigned short int table[MAX_BP_LUT_ROWS][MAX_BP_LUT_COLS]; /* raw LUT entries */
int unit_conv_factor[MAX_BP_LUT_COLS]; /* per-column unit conversion factor */
unsigned short int nrows; /* valid rows in @table */
unsigned short int ncols; /* valid columns in @table */
} __attribute__ ((__packed__));
/* Battery configuration exchanged via BPIOCXBP. Units are not stated here -- TODO confirm against the QBG driver. */
struct battery_config {
char bp_profile_name[MAX_PROFILE_NAME_LENGTH]; /* profile name string */
int bp_batt_id; /* battery id */
int capacity;
int bp_checksum; /* profile checksum */
int soh_range_high;
int soh_range_low;
int normal_impedance;
int aged_impedance;
int normal_capacity;
int aged_capacity;
int recharge_soc_delta;
int recharge_vflt_delta;
int recharge_iterm;
} __attribute__ ((__packed__));
/*
 * Argument for BPIOCXBPTABLE.
 * NOTE(review): a raw pointer and enum inside a packed uapi struct make the
 * layout differ between 32/64-bit userspace -- confirm compat handling.
 */
struct battery_profile_table {
enum profile_table_type table_type; /* charge or discharge table */
int table_index; /* which table of that type */
struct battery_data_table *table; /* user buffer to fill with the table */
} __attribute__ ((__packed__));
/* IOCTLs to query battery profile data */
#define BPIOCXBP _IOWR('B', 0x01, struct battery_config) /* Battery configuration */
#define BPIOCXBPTABLE _IOWR('B', 0x02, struct battery_profile_table) /* Battery profile table */
#endif

186
include/uapi/linux/qbg.h Normal file
View File

@@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QBG_H__
#define __QBG_H__
#include <linux/types.h>
#define MAX_FIFO_COUNT 36
#define QBG_MAX_STEP_CHG_ENTRIES 6
#define QBG_QBGIOCXCFG 0x01
#define QBG_QBGIOCXEPR 0x02
#define QBG_QBGIOCXEPW 0x03
#define QBG_QBGIOCXSTEPCHGCFG 0x04
/* QBG operating states; also indexes qbg_config.sample_time_us[]. */
enum QBG_STATE {
QBG_LPM, /* low power mode */
QBG_MPM, /* medium power mode */
QBG_HPM, /* high power mode */
QBG_FAST_CHAR, /* fast charge */
QBG_PON_OCV, /* power-on OCV measurement */
QBG_STATE_MAX, /* count; not a state */
};
/* Byte offsets of one FIFO sample within QBG SDAM; entries after
 * QBG_DATA_TAG_OFFSET (=12) continue auto-incrementing (13, 14, ...).
 */
enum QBG_SDAM_DATA_OFFSET {
QBG_ACC0_OFFSET = 0,
QBG_ACC1_OFFSET = 2,
QBG_ACC2_OFFSET = 4,
QBG_TBAT_OFFSET = 6,
QBG_IBAT_OFFSET = 8,
QBG_VREF_OFFSET = 10,
QBG_DATA_TAG_OFFSET = 12,
QBG_QG_STS_OFFSET,
QBG_STS1_OFFSET,
QBG_STS2_OFFSET,
QBG_STS3_OFFSET,
QBG_ONE_FIFO_LENGTH, /* total byte length of one FIFO sample */
};
/* Parameter indices for qbg_param arrays exchanged between driver and userspace. */
enum qbg {
QBG_PARAM_SOC,
QBG_PARAM_BATT_SOC,
QBG_PARAM_SYS_SOC,
QBG_PARAM_ESR,
QBG_PARAM_OCV_UV,
QBG_PARAM_MAX_LOAD_NOW,
QBG_PARAM_MAX_LOAD_AVG,
QBG_PARAM_HOLD_SOC_100PCT,
QBG_PARAM_CHARGE_CYCLE_COUNT,
QBG_PARAM_LEARNED_CAPACITY,
QBG_PARAM_TTF_100MS,
QBG_PARAM_TTE_100MS,
QBG_PARAM_SOH,
QBG_PARAM_TBAT,
QBG_PARAM_SYS_SOC_HOLD_100PCT,
QBG_PARAM_JEITA_COOL_THRESHOLD,
QBG_PARAM_TOTAL_IMPEDANCE,
QBG_PARAM_ESSENTIAL_PARAM_REVID,
QBG_PARAM_FIFO_TIMESTAMP,
QBG_PARAM_MAX, /* count; sizes the param arrays below */
};
/* Persistent gauge state read/written via QBGIOCXEPR / QBGIOCXEPW.
 * Field semantics/units are defined by the QBG algorithm -- TODO confirm.
 */
struct qbg_essential_params {
short int msoc; /* monotonic SOC */
short int cutoff_soc;
short int full_soc;
short int x0;
short int x1;
short int x2;
short int soh_r;
short int soh_c;
short int theta0;
short int theta1;
short int theta2;
short int i1full;
short int i2full;
short int i1cutoff;
short int i2cutoff;
short int syssoc;
int discharge_cycle_count;
int charge_cycle_count;
unsigned int rtc_time; /* RTC timestamp when the params were saved */
short int batt_therm;
unsigned short int ocv;
} __attribute__ ((__packed__));
/* One raw FIFO sample as stored in SDAM (see QBG_SDAM_DATA_OFFSET layout). */
struct fifo_data {
unsigned short int v1;
unsigned short int v2;
unsigned short int i;
unsigned short int tbat;
unsigned short int ibat;
unsigned short int vref;
char data_tag;
char qg_sts;
char sts1;
char sts2;
char sts3;
} __attribute__ ((__packed__));
/* Same sample as struct fifo_data but with every field widened to
 * unsigned int for the kernel -> userspace exchange buffer.
 */
struct k_fifo_data {
unsigned int v1;
unsigned int v2;
unsigned int i;
unsigned int tbat;
unsigned int ibat;
unsigned int vref;
unsigned int data_tag;
unsigned int qg_sts;
unsigned int sts1;
unsigned int sts2;
unsigned int sts3;
} __attribute__ ((__packed__));
/* Static QBG configuration read by userspace via QBGIOCXCFG. */
struct qbg_config {
unsigned int batt_id;
unsigned int pon_ocv; /* power-on open-circuit voltage */
unsigned int pon_ibat;
unsigned int pon_tbat;
unsigned int pon_soc;
unsigned int float_volt_uv; /* float voltage, uV */
unsigned int fastchg_curr_ma; /* fast charge current, mA */
unsigned int vbat_cutoff_mv; /* battery cutoff voltage, mV */
unsigned int ibat_cutoff_ma; /* battery cutoff current, mA */
unsigned int vph_min_mv; /* minimum PH voltage, mV */
unsigned int iterm_ma; /* termination current, mA */
unsigned int rconn_mohm; /* connector resistance, mOhm */
__u64 current_time;
unsigned int sdam_batt_id;
unsigned int essential_param_revid;
__u64 sample_time_us[QBG_STATE_MAX]; /* per-state sampling period, us */
} __attribute__ ((__packed__));
/* One parameter slot; data is meaningful only when valid is set. */
struct qbg_param {
unsigned int data;
_Bool valid;
};
/* Kernel -> userspace sample batch: FIFO contents plus current parameter values. */
struct qbg_kernel_data {
unsigned int seq_no; /* batch sequence number */
unsigned int fifo_time; /* timestamp of the FIFO batch */
unsigned int fifo_count; /* valid entries in fifo[] */
struct k_fifo_data fifo[MAX_FIFO_COUNT];
struct qbg_param param[QBG_PARAM_MAX]; /* indexed by enum qbg */
} __attribute__ ((__packed__));
/* Userspace -> kernel parameter update; indexed by enum qbg. */
struct qbg_user_data {
struct qbg_param param[QBG_PARAM_MAX];
} __attribute__ ((__packed__));
/* One step/JEITA range entry: value applies while the measurement is within [low, high]. */
struct range_data {
int low_threshold;
int high_threshold;
unsigned int value;
} __attribute__ ((__packed__));
/* A set of range_data entries; only range_count entries are meaningful, and only when valid is set. */
struct ranges {
struct range_data data[QBG_MAX_STEP_CHG_ENTRIES];
unsigned char range_count;
_Bool valid;
} __attribute__((__packed__));
/* Step-charge / JEITA configuration exchanged via QBGIOCXSTEPCHGCFG.
 * 10nv/10na suffixes suggest 10-nanovolt/10-nanoamp units -- TODO confirm.
 */
struct qbg_step_chg_jeita_params {
int jeita_full_fv_10nv; /* full float voltage */
int jeita_full_iterm_10na; /* full termination current */
int jeita_warm_adc_value; /* warm threshold, raw ADC */
int jeita_cool_adc_value; /* cool threshold, raw ADC */
int battery_beta; /* thermistor beta value */
int battery_therm_kohm; /* thermistor resistance, kOhm */
struct ranges step_fcc_cfg; /* step-charge FCC ranges */
struct ranges jeita_fcc_cfg; /* JEITA FCC ranges */
struct ranges jeita_fv_cfg; /* JEITA float-voltage ranges */
unsigned char ttf_calc_mode; /* time-to-full calculation mode */
} __attribute__ ((__packed__));
/* IOCTLs to read & write QBG config and essential params */
#define QBGIOCXCFG _IOR('B', 0x01, struct qbg_config)
#define QBGIOCXEPR _IOR('B', 0x02, struct qbg_essential_params)
#define QBGIOCXEPW _IOWR('B', 0x03, struct qbg_essential_params)
#define QBGIOCXSTEPCHGCFG _IOWR('B', 0x04, struct qbg_step_chg_jeita_params)
#endif

View File

@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QG_PROFILE_H__
#define __QG_PROFILE_H__
#include <linux/ioctl.h>
/**
* enum profile_table - Table index for battery profile data
*/
enum profile_table {
TABLE_SOC_OCV1, /* SOC <-> OCV tables */
TABLE_SOC_OCV2,
TABLE_FCC1, /* full charge capacity tables */
TABLE_FCC2,
TABLE_Z1, /* impedance tables */
TABLE_Z2,
TABLE_Z3,
TABLE_Z4,
TABLE_Z5,
TABLE_Z6,
TABLE_Y1, /* Y-axis tables -- exact semantics defined by the QG profile, TODO confirm */
TABLE_Y2,
TABLE_Y3,
TABLE_Y4,
TABLE_Y5,
TABLE_Y6,
TABLE_MAX, /* count; not a valid index */
};
/**
* struct battery_params - Battery profile data to be exchanged
* @soc: SOC (state of charge) of the battery
* @ocv_uv: OCV (open circuit voltage) of the battery
* @batt_temp: Battery temperature in deci-degree
* @var: 'X' axis param for interpolation
* @table_index:Table index to be used for interpolation
*/
struct battery_params {
int soc; /* state of charge; range QG_MIN_SOC..QG_MAX_SOC */
int ocv_uv; /* open circuit voltage, uV; range QG_MIN_OCV_UV..QG_MAX_OCV_UV */
int fcc_mah; /* full charge capacity, mAh; range QG_MIN_FCC_MAH..QG_MAX_FCC_MAH */
int slope; /* interpolation slope; range QG_MIN_SLOPE..QG_MAX_SLOPE */
int var; /* 'X' axis value for interpolation; range QG_MIN_VAR..QG_MAX_VAR */
int batt_temp; /* battery temperature, deci-degree */
int table_index; /* which enum profile_table entry to interpolate */
};
/* Profile MIN / MAX values */
#define QG_MIN_SOC 0
#define QG_MAX_SOC 10000
#define QG_MIN_OCV_UV 2000000
#define QG_MAX_OCV_UV 5000000
#define QG_MIN_VAR 0
#define QG_MAX_VAR 65535
#define QG_MIN_FCC_MAH 100
#define QG_MAX_FCC_MAH 16000
#define QG_MIN_SLOPE 1
#define QG_MAX_SLOPE 50000
#define QG_ESR_SF_MIN 5000
#define QG_ESR_SF_MAX 20000
/* IOCTLs to query battery profile data */
#define BPIOCXSOC _IOWR('B', 0x01, struct battery_params) /* SOC */
#define BPIOCXOCV _IOWR('B', 0x02, struct battery_params) /* OCV */
#define BPIOCXFCC _IOWR('B', 0x03, struct battery_params) /* FCC */
#define BPIOCXSLOPE _IOWR('B', 0x04, struct battery_params) /* Slope */
#define BPIOCXVAR _IOWR('B', 0x05, struct battery_params) /* All-other */
#endif /* __QG_PROFILE_H__ */

71
include/uapi/linux/qg.h Normal file
View File

@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QG_H__
#define __QG_H__
#include <linux/types.h>
#define MAX_FIFO_LENGTH 16
/* Parameter indices for qg_param arrays exchanged between driver and userspace. */
enum qg {
QG_SOC,
QG_OCV_UV,
QG_RBAT_MOHM,
QG_PON_OCV_UV,
QG_GOOD_OCV_UV,
QG_ESR,
QG_CHARGE_COUNTER,
QG_FIFO_TIME_DELTA,
QG_BATT_SOC,
QG_CC_SOC,
QG_ESR_CHARGE_DELTA,
QG_ESR_DISCHARGE_DELTA,
QG_ESR_CHARGE_SF,
QG_ESR_DISCHARGE_SF,
QG_FULL_SOC,
QG_CLEAR_LEARNT_DATA,
QG_SYS_SOC,
QG_V_IBAT,
QG_MAX, /* count; sizes the param arrays below */
};
#define QG_BATT_SOC QG_BATT_SOC
#define QG_CC_SOC QG_CC_SOC
#define QG_ESR_CHARGE_DELTA QG_ESR_CHARGE_DELTA
#define QG_ESR_DISCHARGE_DELTA QG_ESR_DISCHARGE_DELTA
#define QG_ESR_CHARGE_SF QG_ESR_CHARGE_SF
#define QG_ESR_DISCHARGE_SF QG_ESR_DISCHARGE_SF
#define QG_FULL_SOC QG_FULL_SOC
#define QG_CLEAR_LEARNT_DATA QG_CLEAR_LEARNT_DATA
#define QG_SYS_SOC QG_SYS_SOC
#define QG_V_IBAT QG_V_IBAT
/* One QG FIFO sample (voltage/current accumulator snapshot). */
struct fifo_data {
unsigned int v; /* voltage sample */
unsigned int i; /* current sample */
unsigned int count; /* accumulated sample count */
unsigned int interval; /* sampling interval */
};
/* One parameter slot; data is meaningful only when valid is set. */
struct qg_param {
unsigned int data;
_Bool valid;
};
/* Kernel -> userspace sample batch: FIFO contents plus current parameter values. */
struct qg_kernel_data {
unsigned int seq_no; /* batch sequence number */
unsigned int fifo_time; /* timestamp of the FIFO batch */
unsigned int fifo_length; /* valid entries in fifo[] */
struct fifo_data fifo[MAX_FIFO_LENGTH];
struct qg_param param[QG_MAX]; /* indexed by enum qg */
};
/* Userspace -> kernel parameter update; indexed by enum qg. */
struct qg_user_data {
struct qg_param param[QG_MAX];
};
#endif

View File

@@ -27,6 +27,7 @@ enum qrtr_pkt_type {
QRTR_TYPE_NEW_LOOKUP = 10,
QRTR_TYPE_DEL_LOOKUP = 11,
};
#define QRTR_TYPE_DEL_PROC 13
struct qrtr_ctrl_pkt {
__le32 cmd;
@@ -43,6 +44,11 @@ struct qrtr_ctrl_pkt {
__le32 node;
__le32 port;
} client;
struct {
__le32 rsvd;
__le32 node;
} proc;
};
} __packed;

View File

@@ -0,0 +1,414 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017, 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _QSEECOM_H_
#define _QSEECOM_H_
#include <linux/types.h>
#include <linux/ioctl.h>
/* Max number of ion fd/offset pairs per modfd request (qseecom_ion_fd_info). */
#define MAX_ION_FD 4
/* Max length of a TA/app name, including the NUL terminator. */
#define MAX_APP_NAME_SIZE 64
/* Size in bytes of the SHA-256 password hash used by the key-management reqs. */
#define QSEECOM_HASH_SIZE 32
/* qseecom_ta_heap allocation retry delay (ms) and max attempt count */
#define QSEECOM_TA_ION_ALLOCATE_DELAY 50
#define QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP 20
/* ICE (Inline Crypto Engine) key/salt sizes in bytes. */
#define ICE_KEY_SIZE 32
#define ICE_SALT_SIZE 32
/*
 * struct qseecom_register_listener_req -
 * for register listener ioctl request
 * @listener_id - service id (shared between userspace and QSE)
 * @ifd_data_fd - ion handle
 * @virt_sb_base - shared buffer base in user space
 * @sb_size - shared buffer size
 */
struct qseecom_register_listener_req {
	__u32 listener_id; /* in */
	__s32 ifd_data_fd; /* in */
	void *virt_sb_base; /* in */
	__u32 sb_size; /* in */
};
/*
 * struct qseecom_send_cmd_req - for send command ioctl request
 * @cmd_req_len - command buffer length
 * @cmd_req_buf - command buffer
 * @resp_len - response buffer length
 * @resp_buf - response buffer
 */
struct qseecom_send_cmd_req {
	void *cmd_req_buf; /* in */
	unsigned int cmd_req_len; /* in */
	void *resp_buf; /* in/out */
	unsigned int resp_len; /* in/out */
};
/*
 * struct qseecom_ion_fd_info - ion fd handle data information
 * @fd - ion handle to some memory allocated in user space
 * @cmd_buf_offset - command buffer offset
 */
struct qseecom_ion_fd_info {
	__s32 fd;
	__u32 cmd_buf_offset;
};
/*
 * struct qseecom_send_modfd_cmd_req - for send command ioctl request
 * @cmd_req_len - command buffer length
 * @cmd_req_buf - command buffer
 * @resp_len - response buffer length
 * @resp_buf - response buffer
 * @ifd_data - ion fd / command-buffer-offset pairs; the driver patches the
 *             physical address of each ion buffer into the command buffer at
 *             the given offset (up to MAX_ION_FD entries, unused fds < 0)
 */
struct qseecom_send_modfd_cmd_req {
	void *cmd_req_buf; /* in */
	unsigned int cmd_req_len; /* in */
	void *resp_buf; /* in/out */
	unsigned int resp_len; /* in/out */
	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
};
/*
 * struct qseecom_listener_send_resp_req - signal to continue the send_cmd req.
 * Used as a trigger from HLOS service to notify QSEECOM that it's done with its
 * operation and provide the response for QSEECOM can continue the incomplete
 * command execution
 * @resp_len - Length of the response
 * @resp_buf - Response buffer where the response of the cmd should go.
 */
struct qseecom_send_resp_req {
	void *resp_buf; /* in */
	unsigned int resp_len; /* in */
};
/*
 * struct qseecom_load_img_req - for sending image length information and
 * ion file descriptor to the qseecom driver. ion file descriptor is used
 * for retrieving the ion file handle and in turn the physical address of
 * the image location.
 * @mdt_len - Length of the .mdt file in bytes.
 * @img_len - Length of the .mdt + .b00 +..+.bxx images files in bytes
 * @ifd_data_fd - Ion file descriptor used when allocating memory.
 * @img_name - Name of the image.
 * @app_arch - Architecture of the image, i.e. 32bit or 64bit app
 * @app_id - Filled by the driver with the id of the loaded application.
 */
struct qseecom_load_img_req {
	__u32 mdt_len; /* in */
	__u32 img_len; /* in */
	__s32 ifd_data_fd; /* in */
	char img_name[MAX_APP_NAME_SIZE]; /* in */
	__u32 app_arch; /* in */
	__u32 app_id; /* out*/
};
/*
 * struct qseecom_set_sb_mem_param_req - establish the client shared buffer
 * @ifd_data_fd - ion fd backing the shared buffer
 * @virt_sb_base - shared buffer base address in user space
 * @sb_len - shared buffer length in bytes
 */
struct qseecom_set_sb_mem_param_req {
	__s32 ifd_data_fd; /* in */
	void *virt_sb_base; /* in */
	__u32 sb_len; /* in */
};
/*
 * struct qseecom_qseos_version_req - get qseos version
 * @qseos_version - version number
 */
struct qseecom_qseos_version_req {
	unsigned int qseos_version; /* in */
};
/*
 * struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
 * @app_name[MAX_APP_NAME_SIZE]- name of the app.
 * @app_id - app id.
 * @app_arch - architecture (32/64 bit) reported for the loaded app.
 */
struct qseecom_qseos_app_load_query {
	char app_name[MAX_APP_NAME_SIZE]; /* in */
	__u32 app_id; /* out */
	__u32 app_arch;
};
/*
 * struct qseecom_send_svc_cmd_req - send a command to a QSEOS service
 * @cmd_id - service command identifier
 * @cmd_req_buf/@cmd_req_len - command buffer and length
 * @resp_buf/@resp_len - response buffer and length
 */
struct qseecom_send_svc_cmd_req {
	__u32 cmd_id;
	void *cmd_req_buf; /* in */
	unsigned int cmd_req_len; /* in */
	void *resp_buf; /* in/out */
	unsigned int resp_len; /* in/out */
};
/* Storage-encryption key slots managed via the create/wipe/update ioctls. */
enum qseecom_key_management_usage_type {
	QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
	QSEOS_KM_USAGE_FILE_ENCRYPTION = 0x02,
	QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION = 0x03,
	QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION = 0x04,
	QSEOS_KM_USAGE_MAX
};
/*
 * struct qseecom_create_key_req - create a key for the given usage slot
 * @hash32 - 32-byte password hash tied to the key
 * @usage - which encryption use case the key belongs to
 */
struct qseecom_create_key_req {
	unsigned char hash32[QSEECOM_HASH_SIZE];
	enum qseecom_key_management_usage_type usage;
};
struct qseecom_wipe_key_req {
	enum qseecom_key_management_usage_type usage;
	int wipe_key_flag;/* 1->remove key from storage(alone with clear key) */
	/* 0->do not remove from storage (clear key) */
};
/*
 * struct qseecom_update_key_userinfo_req - re-bind a key to a new user hash
 * @current_hash32 - current 32-byte password hash
 * @new_hash32 - replacement 32-byte password hash
 * @usage - which encryption use case the key belongs to
 */
struct qseecom_update_key_userinfo_req {
	unsigned char current_hash32[QSEECOM_HASH_SIZE];
	unsigned char new_hash32[QSEECOM_HASH_SIZE];
	enum qseecom_key_management_usage_type usage;
};
/* SHA-256 digest size in bytes (32). */
#define SHA256_DIGEST_LENGTH (256/8)
/*
 * struct qseecom_save_partition_hash_req
 * @partition_id - partition id.
 * @digest[SHA256_DIGEST_LENGTH] - sha256 digest of the partition.
 */
struct qseecom_save_partition_hash_req {
	int partition_id; /* in */
	char digest[SHA256_DIGEST_LENGTH]; /* in */
};
/*
 * struct qseecom_is_es_activated_req
 * @is_activated - 1=true , 0=false
 */
struct qseecom_is_es_activated_req {
	int is_activated; /* out */
};
/*
 * struct qseecom_mdtp_cipher_dip_req
 * @in_buf - input buffer
 * @in_buf_size - input buffer size
 * @out_buf - output buffer
 * @out_buf_size - output buffer size
 * @direction - 0=encrypt, 1=decrypt
 */
struct qseecom_mdtp_cipher_dip_req {
	__u8 *in_buf;
	__u32 in_buf_size;
	__u8 *out_buf;
	__u32 out_buf_size;
	__u32 direction;
};
/* Bus-bandwidth vote levels used with QSEECOM_IOCTL_SET_BUS_SCALING_REQ. */
enum qseecom_bandwidth_request_mode {
	INACTIVE = 0,
	LOW,
	MEDIUM,
	HIGH,
};
/*
 * struct qseecom_send_modfd_listener_resp - listener response with ion fds
 * @resp_buf_ptr - response buffer
 * @resp_len - response buffer length
 * @ifd_data - ion fd / offset pairs to be patched into the response
 */
struct qseecom_send_modfd_listener_resp {
	void *resp_buf_ptr; /* in */
	unsigned int resp_len; /* in */
	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
};
/* GP-TEE (QTEEC) request/response buffer pair. */
struct qseecom_qteec_req {
	void *req_ptr;
	__u32 req_len;
	void *resp_ptr;
	__u32 resp_len;
};
/* As qseecom_qteec_req, plus ion fd/offset pairs for shared-memory refs. */
struct qseecom_qteec_modfd_req {
	void *req_ptr;
	__u32 req_len;
	void *resp_ptr;
	__u32 resp_len;
	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
};
/* Scatter-gather entry, 32-bit physical address (legacy format). */
struct qseecom_sg_entry {
	__u32 phys_addr;
	__u32 len;
};
/* Scatter-gather entry, 64-bit physical address; packed, no padding. */
struct qseecom_sg_entry_64bit {
	__u64 phys_addr;
	__u32 len;
} __attribute__ ((packed));
/*
 * sg list buf format version
 * 1: Legacy format to support only 512 SG list entries
 * 2: new format to support > 512 entries
 */
#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1 1
#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2 2
/*
 * Header placed at the start of a version-2 SG list buffer. The all-zero
 * blank_entry distinguishes it from a legacy (version-1) entry array.
 */
struct qseecom_sg_list_buf_hdr_64bit {
	struct qseecom_sg_entry_64bit blank_entry;	/* must be all 0 */
	__u32 version;		/* sg list buf format version */
	__u64 new_buf_phys_addr;	/* PA of new buffer */
	__u32 nents_total;	/* Total number of SG entries */
} __attribute__ ((packed));
#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT	\
			sizeof(struct qseecom_sg_list_buf_hdr_64bit)
/* Crypto Engine (CE) pipe-pair info limits and use types. */
#define MAX_CE_PIPE_PAIR_PER_UNIT 3
#define INVALID_CE_INFO_UNIT_NUM 0xffffffff
#define CE_PIPE_PAIR_USE_TYPE_FDE 0	/* full-disk encryption */
#define CE_PIPE_PAIR_USE_TYPE_PFE 1	/* per-file encryption */
struct qseecom_ce_pipe_entry {
	int valid;
	unsigned int ce_num;
	unsigned int ce_pipe_pair;
};
/* Flag payload for QSEECOM_IOCTL_SET_ICE_INFO. */
struct qseecom_ice_data_t {
	int flag;
};
#define MAX_CE_INFO_HANDLE_SIZE 32
/* Request/response for the GET/FREE/QUERY_CE_PIPE_INFO ioctls. */
struct qseecom_ce_info_req {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int usage;
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
};
/* ICE key + salt payload for QSEECOM_IOCTL_FBE_CLEAR_KEY. */
struct qseecom_ice_key_data_t {
	__u8 key[ICE_KEY_SIZE];
	__u32 key_len;
	__u8 salt[ICE_SALT_SIZE];
	__u32 salt_len;
};
#define SG_ENTRY_SZ		sizeof(struct qseecom_sg_entry)
#define SG_ENTRY_SZ_64BIT	sizeof(struct qseecom_sg_entry_64bit)
/*
 * NOTE(review): forward declaration of a kernel-internal type in a UAPI
 * header; harmless to userspace but looks like a leftover — confirm intent.
 */
struct file;
/*
 * qseecom ioctl interface. Magic 0x97; command numbers are sparse on purpose
 * (gaps at 22, 25-29, 37-39 are historical/reserved) — do not renumber.
 */
#define QSEECOM_IOC_MAGIC    0x97
/* Listener (HLOS service) registration and command flow. */
#define QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 1, struct qseecom_register_listener_req)
#define QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
	_IO(QSEECOM_IOC_MAGIC, 2)
#define QSEECOM_IOCTL_SEND_CMD_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 3, struct qseecom_send_cmd_req)
#define QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 4, struct qseecom_send_modfd_cmd_req)
#define QSEECOM_IOCTL_RECEIVE_REQ \
	_IO(QSEECOM_IOC_MAGIC, 5)
#define QSEECOM_IOCTL_SEND_RESP_REQ \
	_IO(QSEECOM_IOC_MAGIC, 6)
/* Trusted-app lifecycle. */
#define QSEECOM_IOCTL_LOAD_APP_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 7, struct qseecom_load_img_req)
#define QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 8, struct qseecom_set_sb_mem_param_req)
#define QSEECOM_IOCTL_UNLOAD_APP_REQ \
	_IO(QSEECOM_IOC_MAGIC, 9)
#define QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 10, struct qseecom_qseos_version_req)
#define QSEECOM_IOCTL_PERF_ENABLE_REQ \
	_IO(QSEECOM_IOC_MAGIC, 11)
#define QSEECOM_IOCTL_PERF_DISABLE_REQ \
	_IO(QSEECOM_IOC_MAGIC, 12)
#define QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 13, struct qseecom_load_img_req)
#define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
	_IO(QSEECOM_IOC_MAGIC, 14)
#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
/* Storage-encryption key management. */
#define QSEECOM_IOCTL_CREATE_KEY_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
#define QSEECOM_IOCTL_WIPE_KEY_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
#define QSEECOM_IOCTL_SEND_MODFD_RESP \
	_IOWR(QSEECOM_IOC_MAGIC, 21, struct qseecom_send_modfd_listener_resp)
#define QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
#define QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 24, struct qseecom_update_key_userinfo_req)
/* GP-TEE (QTEEC) session interface. */
#define QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 30, struct qseecom_qteec_modfd_req)
#define QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 31, struct qseecom_qteec_req)
#define QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 32, struct qseecom_qteec_modfd_req)
#define QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 33, struct qseecom_qteec_modfd_req)
#define QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req)
/* 64-bit variants of the modfd commands. */
#define QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
	_IOWR(QSEECOM_IOC_MAGIC, 35, struct qseecom_send_modfd_cmd_req)
#define QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
	_IOWR(QSEECOM_IOC_MAGIC, 36, struct qseecom_send_modfd_listener_resp)
/* Crypto Engine pipe info and ICE key handling. */
#define QSEECOM_IOCTL_GET_CE_PIPE_INFO \
	_IOWR(QSEECOM_IOC_MAGIC, 40, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
	_IOWR(QSEECOM_IOC_MAGIC, 41, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
	_IOWR(QSEECOM_IOC_MAGIC, 42, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_SET_ICE_INFO \
	_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
#define QSEECOM_IOCTL_FBE_CLEAR_KEY \
	_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_ice_key_data_t)
#endif /* _QSEECOM_H_ */

View File

@@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_LINUX_QTI_VIRTIO_MEM_H
#define _UAPI_LINUX_QTI_VIRTIO_MEM_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define QTI_VIRTIO_MEM_IOC_MAGIC 'M'
#define QTI_VIRTIO_MEM_IOC_MAX_NAME_LEN 128
/*
 * struct qti_virtio_mem_ioc_hint_create_arg - create a named memory hint
 * @name - client-chosen label, at most QTI_VIRTIO_MEM_IOC_MAX_NAME_LEN bytes
 * @size - requested size in bytes (signed 64-bit)
 * @fd - filled in by the driver with an fd representing the hint
 * @reserved0/@reserved1 - must-be-zero padding for future extension
 */
struct qti_virtio_mem_ioc_hint_create_arg {
	char name[QTI_VIRTIO_MEM_IOC_MAX_NAME_LEN];
	__s64 size;
	__u32 fd;
	__u32 reserved0;
	__u64 reserved1;
};
#define QTI_VIRTIO_MEM_IOC_HINT_CREATE \
	_IOWR(QTI_VIRTIO_MEM_IOC_MAGIC, 0, \
	      struct qti_virtio_mem_ioc_hint_create_arg)
#endif /* _UAPI_LINUX_QTI_VIRTIO_MEM_H */

View File

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef LINUX_RANGE_SENSOR_H
#define LINUX_RANGE_SENSOR_H
#if defined(__linux__)
#include <linux/types.h>
#else
#include <stdint.h>
#include <sys/types.h>
#endif
/* The ToF range sensor reports a fixed 64-zone grid, one target per zone. */
#define NUMBER_OF_ZONE 64
#define NUMBER_OF_TARGET 1
/*
 * struct range_sensor_data_t - one measurement frame, per-zone arrays
 * @depth16 - per-zone depth values (16-bit; units/encoding per sensor spec —
 *            verify against the driver)
 * @dmax - per-zone maximum measurable distance
 * @peak_signal - per-zone peak signal strength
 * @glass_detection_flag - nonzero when a glass surface is detected — verify
 */
struct range_sensor_data_t {
	__u16 depth16[NUMBER_OF_ZONE];
	__u16 dmax[NUMBER_OF_ZONE];
	__u32 peak_signal[NUMBER_OF_ZONE];
	__u8 glass_detection_flag;
};
#endif

View File

@@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef LINUX_SLATECOM_INTERFACE_H
#define LINUX_SLATECOM_INTERFACE_H
#include <linux/types.h>
/* slatecom ioctl command numbers (paired with EXCHANGE_CODE below). */
#define SLATECOM_REG_READ  0
#define SLATECOM_AHB_READ  1
#define SLATECOM_AHB_WRITE 2
#define SLATECOM_SET_SPI_FREE  3
#define SLATECOM_SET_SPI_BUSY  4
#define SLATECOM_REG_WRITE  5
#define SLATECOM_MODEM_DOWN2_SLATE  6
#define SLATECOM_ADSP_DOWN2_SLATE  7
#define SLATECOM_SEND_IPC_CMD 8
#define SLATECOM_SEND_BOOT_CMD 9
#define EXCHANGE_CODE  'V'
/*
 * struct slate_ui_data - argument for every slatecom ioctl; packed, so field
 * offsets are fixed across compilers.
 * @write/@result - 64-bit user pointers passed as integers
 * @slate_address - target address on the Slate device
 * @cmd - operation code (see ipc_cmd / boot_cmd_info)
 * @num_of_words - transfer length in words
 * @buffer - user buffer for bulk data
 *
 * NOTE(review): "__user" is a kernel sparse annotation and is normally
 * stripped from exported UAPI headers; applied to a plain __u64 it is
 * meaningless even in-kernel — confirm header-export handling.
 */
struct slate_ui_data {
	__u64  __user write;
	__u64  __user result;
	__u32  slate_address;
	__u32  cmd;
	__u32  num_of_words;
	__u8 __user *buffer;
} __attribute__ ((packed));
/*
 * Power/error state notifications delivered to slatecom clients. Values are
 * ABI (implicitly numbered from 1); append only.
 */
enum slate_event_type {
	SLATE_BEFORE_POWER_DOWN = 1,
	SLATE_AFTER_POWER_DOWN,
	SLATE_BEFORE_POWER_UP,
	SLATE_AFTER_POWER_UP,
	MODEM_BEFORE_POWER_DOWN,
	MODEM_AFTER_POWER_UP,
	ADSP_BEFORE_POWER_DOWN,
	ADSP_AFTER_POWER_UP,
	TWM_SLATE_AFTER_POWER_UP,
	SLATE_DSP_ERROR,
	SLATE_DSP_READY,
	SLATE_BT_ERROR,
	SLATE_BT_READY,
	SLATE_SNS_ERROR,
	SLATE_SNS_READY,
	MODEM_AFTER_POWER_DOWN,
	MODEM_BEFORE_POWER_UP,
	ADSP_AFTER_POWER_DOWN,
	ADSP_BEFORE_POWER_UP,
};
/*
 * Device state transitions sent via STATE_TRANSITION IPC. TWM = tiny wearable
 * mode, DS = deep sleep, S2D presumably suspend-to-disk/deep — verify.
 */
enum device_state_transition {
	STATE_TWM_ENTER = 1,
	STATE_TWM_EXIT,
	STATE_DS_ENTER,
	STATE_DS_EXIT,
	STATE_S2D_ENTER,
	STATE_S2D_EXIT,
};
/* Debug knobs sent via DEBUG_CONFIG IPC. */
enum debug_config {
	ENABLE_PMIC_RTC,
	DISABLE_PMIC_RTC,
	ENABLE_QCLI,
	DISABLE_QCLI,
};
/* Sub-commands for SEND_IPC_CMD. */
enum ipc_cmd {
	STATE_TRANSITION,
	TIME_SYNC,
	DEBUG_CONFIG,
	GET_VERSION,
};
/* Sub-commands for SEND_BOOT_CMD. */
enum boot_cmd_info {
	SOFT_RESET,
	TWM_EXIT,
	AON_APP_RUNNING,
	LOAD,
	UNLOAD,
	SET_BOOT_MODE,
	GET_BOOT_MODE,
	CMD_SAVE_AON_DUMP,
	BOOT_STATUS,
};
/* Values reported for the BOOT_STATUS boot command. */
enum boot_status {
	SLATE_READY = 1,
	SLATE_UPDATE_START,
	SLATE_UPDATE_DONE,
	SLATE_BOOT_HOST,
	SLATE_BOOT_FLASH,
};
/*
 * slatecom ioctls — all take struct slate_ui_data.
 * NOTE(review): the _IOR/_IOW/_IOWR direction choices look inconsistent
 * (e.g. AHB_WRITE is _IOW but REG_WRITE is _IOWR; SET_SPI_* are _IOR yet
 * pass a command struct in). They are ABI now and cannot be changed, but the
 * directions should not be trusted as documentation of data flow.
 */
#define REG_READ \
	_IOWR(EXCHANGE_CODE, SLATECOM_REG_READ, \
	struct slate_ui_data)
#define AHB_READ \
	_IOWR(EXCHANGE_CODE, SLATECOM_AHB_READ, \
	struct slate_ui_data)
#define AHB_WRITE \
	_IOW(EXCHANGE_CODE, SLATECOM_AHB_WRITE, \
	struct slate_ui_data)
#define SET_SPI_FREE \
	_IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_FREE, \
	struct slate_ui_data)
#define SET_SPI_BUSY \
	_IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_BUSY, \
	struct slate_ui_data)
#define REG_WRITE \
	_IOWR(EXCHANGE_CODE, SLATECOM_REG_WRITE, \
	struct slate_ui_data)
#define SLATE_MODEM_DOWN2_SLATE_DONE \
	_IOWR(EXCHANGE_CODE, SLATECOM_MODEM_DOWN2_SLATE, \
	struct slate_ui_data)
#define SLATE_ADSP_DOWN2_SLATE_DONE \
	_IOWR(EXCHANGE_CODE, SLATECOM_ADSP_DOWN2_SLATE, \
	struct slate_ui_data)
#define SEND_IPC_CMD \
	_IOWR(EXCHANGE_CODE, SLATECOM_SEND_IPC_CMD, \
	struct slate_ui_data)
#define SEND_BOOT_CMD \
	_IOWR(EXCHANGE_CODE, SLATECOM_SEND_BOOT_CMD, \
	struct slate_ui_data)
#endif /* LINUX_SLATECOM_INTERFACE_H */

View File

@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __UAPI_LINUX_USB_CTRL_QTI_H
#define __UAPI_LINUX_USB_CTRL_QTI_H
#include <linux/types.h>
#include <linux/ioctl.h>
/* Maximum control-packet size exchanged over the QTI control channel. */
#define MAX_QTI_PKT_SIZE 2048
#define QTI_CTRL_IOCTL_MAGIC	'r'
#define QTI_CTRL_GET_LINE_STATE	_IOR(QTI_CTRL_IOCTL_MAGIC, 2, int)
#define QTI_CTRL_EP_LOOKUP _IOR(QTI_CTRL_IOCTL_MAGIC, 3, struct ep_info)
#define QTI_CTRL_MODEM_OFFLINE _IO(QTI_CTRL_IOCTL_MAGIC, 4)
#define QTI_CTRL_MODEM_ONLINE _IO(QTI_CTRL_IOCTL_MAGIC, 5)
#define QTI_CTRL_DATA_BUF_INFO \
	_IOR(QTI_CTRL_IOCTL_MAGIC, 6, struct data_buf_info)
/* GSI MBIM function ioctls. */
#define GSI_MBIM_IOCTL_MAGIC	'o'
#define GSI_MBIM_GET_NTB_SIZE	_IOR(GSI_MBIM_IOCTL_MAGIC, 2, __u32)
#define GSI_MBIM_GET_DATAGRAM_COUNT	_IOR(GSI_MBIM_IOCTL_MAGIC, 3, __u16)
#define GSI_MBIM_EP_LOOKUP _IOR(GSI_MBIM_IOCTL_MAGIC, 4, struct ep_info)
#define GSI_MBIM_GPS_USB_STATUS _IOR(GSI_MBIM_IOCTL_MAGIC, 5, int)
/* Transport type of a peripheral data endpoint. Values are ABI. */
enum peripheral_ep_type {
	DATA_EP_TYPE_RESERVED	= 0x0,
	DATA_EP_TYPE_HSIC	= 0x1,
	DATA_EP_TYPE_HSUSB	= 0x2,
	DATA_EP_TYPE_PCIE	= 0x3,
	DATA_EP_TYPE_EMBEDDED	= 0x4,
	DATA_EP_TYPE_BAM_DMUX	= 0x5,
};
struct peripheral_ep_info {
	enum peripheral_ep_type ep_type;
	__u32  peripheral_iface_id;
};
/* IPA consumer/producer pipe numbers for one endpoint. */
struct ipa_ep_pair {
	__u32 cons_pipe_num;
	__u32 prod_pipe_num;
};
/* Result of the EP_LOOKUP ioctls: peripheral endpoint + its IPA pipe pair. */
struct ep_info {
	struct peripheral_ep_info ph_ep_info;
	struct ipa_ep_pair ipa_ep_pair;
};
/* Result of QTI_CTRL_DATA_BUF_INFO: per-direction buffer sizing. */
struct data_buf_info {
	__u32 epout_buf_len;
	__u32 epout_total_buf_len;
	__u32 epin_buf_len;
	__u32 epin_total_buf_len;
};
#endif

View File

@@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_ADSPSLEEPMON_H__
#define __QCOM_ADSPSLEEPMON_H__
#include <linux/types.h>
/** Device name for ADSP Sleep monitor */
#define ADSPSLEEPMON_DEVICE_NAME "/dev/msm_adsp_sleepmon"
/** IOCTL for intimating audio activity */
#define ADSPSLEEPMON_IOCTL_AUDIO_ACTIVITY _IOWR('R', 1, struct adspsleepmon_ioctl_audio)
/** IOCTL to runtime disable or re-enable panic on ADSP activity anomaly detection */
#define ADSPSLEEPMON_IOCTL_CONFIGURE_PANIC _IOWR('R', 2, struct adspsleepmon_ioctl_panic)
/** Version used in Audio activity IOCTL */
#define ADSPSLEEPMON_IOCTL_AUDIO_VER_1 1
/** Version used in Panic config IOCTL */
#define ADSPSLEEPMON_IOCTL_CONFIG_PANIC_VER_1 1
/** Reserved fields in the Audio activity IOCTL structure */
#define ADSPSLEEPMON_IOCTL_AUDIO_NUM_RES 3
/** Commands accepted by ADSPSLEEPMON_IOCTL_AUDIO_ACTIVITY (values are ABI). */
enum adspsleepmon_ioctl_audio_cmd {
	ADSPSLEEPMON_AUDIO_ACTIVITY_START = 1,
	/**< Activity start of a non-LPI use case */
	ADSPSLEEPMON_AUDIO_ACTIVITY_STOP,
	/**< Activity stop of a non-LPI use case */
	ADSPSLEEPMON_AUDIO_ACTIVITY_LPI_START,
	/**< Activity start of a LPI use case */
	ADSPSLEEPMON_AUDIO_ACTIVITY_LPI_STOP,
	/**< Activity stop of a LPI use case */
	ADSPSLEEPMON_AUDIO_ACTIVITY_RESET,
	/**< Notify no ongoing activity (reset activity trackers) */
	ADSPSLEEPMON_AUDIO_ACTIVITY_MAX,
	/**< Max check for audio ioctl audio command */
};
/** Commands accepted by ADSPSLEEPMON_IOCTL_CONFIGURE_PANIC (values are ABI). */
enum adspsleepmon_ioctl_panic_cmd {
	ADSPSLEEPMON_DISABLE_PANIC_LPM = 1,
	/**< Disable panic on detecting ADSP LPM anomaly */
	ADSPSLEEPMON_DISABLE_PANIC_LPI,
	/**< Disable panic on detecting ADSP LPI anomaly */
	ADSPSLEEPMON_RESET_PANIC_LPM,
	/**< Reset panic on detecting ADSP LPM anomaly to default */
	ADSPSLEEPMON_RESET_PANIC_LPI,
	/**< Reset panic on detecting ADSP LPI anomaly to default */
	ADSPSLEEPMON_RESET_PANIC_MAX,
	/**< Max check for audio ioctl panic command */
};
/** @struct adspsleepmon_ioctl_audio
 * Structure to be passed in Audio activity IOCTL
 * (set version to ADSPSLEEPMON_IOCTL_AUDIO_VER_1; reserved fields zeroed).
 */
struct adspsleepmon_ioctl_audio {
	__u32 version;
	/**< Version of the interface */
	__u32 command;
	/**< One of the supported commands from adspsleepmon_ioctl_audio_cmd */
	__u32 reserved[ADSPSLEEPMON_IOCTL_AUDIO_NUM_RES];
	/**< Reserved fields for future expansion */
};
/** @struct adspsleepmon_ioctl_panic
 * Structure to be passed in the panic-configuration IOCTL
 * (set version to ADSPSLEEPMON_IOCTL_CONFIG_PANIC_VER_1).
 */
struct adspsleepmon_ioctl_panic {
	__u32 version;
	/**< version of the interface */
	__u32 command;
	/**< One of the supported commands from adspsleepmon_ioctl_panic_cmd */
};
#endif /* __QCOM_ADSPSLEEPMON_H__ */