/* SecurityExploits/Android/Qualcomm/NPU/npu_shell.h */
#ifndef NPU_SPLOIT_H
#define NPU_SPLOIT_H
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/types.h> /* __u64, used by struct dma_buf_sync */
#define QSEECOM_IOC_MAGIC 0x97
#define QSEECOM_IOCTL_RECEIVE_REQ _IO(QSEECOM_IOC_MAGIC, 5)
/* msm ION heap IDs */
enum ion_heap_ids {
	INVALID_HEAP_ID = -1,
	ION_CP_MM_HEAP_ID = 8,
	ION_SECURE_HEAP_ID = 9,
	ION_SECURE_DISPLAY_HEAP_ID = 10,
	ION_CP_MFC_HEAP_ID = 12,
	ION_SPSS_HEAP_ID = 13,            /* Secure Processor ION heap */
	ION_SECURE_CARVEOUT_HEAP_ID = 14,
	ION_CP_WB_HEAP_ID = 16,           /* 8660 only */
	ION_QSECOM_TA_HEAP_ID = 19,       /* CMA heap */
	ION_CAMERA_HEAP_ID = 20,          /* 8660 only */
	ION_SYSTEM_CONTIG_HEAP_ID = 21,
	ION_ADSP_HEAP_ID = 22,            /* CMA heap */
	ION_PIL1_HEAP_ID = 23,            /* Currently used for other PIL images */
	ION_SF_HEAP_ID = 24,
	ION_SYSTEM_HEAP_ID = 25,
	ION_PIL2_HEAP_ID = 26,            /* Currently used for modem firmware images (CMA heap) */
	ION_QSECOM_HEAP_ID = 27,          /* CMA heap */
	ION_AUDIO_HEAP_ID = 28,
	ION_MM_FIRMWARE_HEAP_ID = 29,
	ION_GOOGLE_HEAP_ID = 30,
	ION_HEAP_ID_RESERVED = 31         /* Bit reserved for the ION_FLAG_SECURE flag */
};
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_CHUNK,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device-specific heaps are
			       * always at the end of this enum */
	ION_NUM_HEAPS = 16,
};
#define ION_HEAP_SYSTEM_MASK ((1 << ION_HEAP_TYPE_SYSTEM))
#define ION_HEAP_SYSTEM_CONTIG_MASK ((1 << ION_HEAP_TYPE_SYSTEM_CONTIG))
#define ION_HEAP_CARVEOUT_MASK ((1 << ION_HEAP_TYPE_CARVEOUT))
#define ION_HEAP_TYPE_DMA_MASK ((1 << ION_HEAP_TYPE_DMA))
#define ION_FLAGS_CP_MASK 0x6FFE8000
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
#define ION_FLAG_CACHED 1
#define ION_FLAG_CACHED_NEEDS_SYNC 2
/*
 * Modern (post-4.12) ION allocation ABI: the kernel returns a dma-buf
 * file descriptor in @fd instead of an opaque ION handle.
 */
struct ion_allocation_data {
	size_t len;                /* requested size in bytes */
	unsigned int heap_id_mask; /* mask of heap IDs to allocate from */
	unsigned int flags;        /* ION_FLAG_* */
	uint32_t fd;               /* out: dma-buf fd for the allocation */
	uint32_t unused;
};
struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};
/*
 * Legacy ION handle structures. The original header referenced
 * struct ion_handle_data and struct ion_fd_data in the ioctl definitions
 * below without defining them; the definitions here follow the legacy
 * ION UAPI so that the _IOWR() macros compile.
 */
typedef int ion_user_handle_t;
struct ion_handle_data {
	ion_user_handle_t handle;
};
struct ion_fd_data {
	ion_user_handle_t handle;
	int fd;
};
#define ION_IOC_MAGIC 'I'
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
#define ION_BIT(nr) (1UL << (nr))
#define ION_HEAP(bit) ION_BIT(bit)
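/*
 * Illustrative sketch (not part of the original header): allocating a
 * system-heap buffer through the fd-based ION interface above and mapping
 * it into userspace. The /dev/ion path and the choice of
 * ION_SYSTEM_HEAP_ID are assumptions about the target device.
 * Returns the dma-buf fd, or -1 on failure.
 */
static inline int npu_ion_alloc_sketch(size_t size, void **va)
{
	struct ion_allocation_data alloc = {
		.len = size,
		.heap_id_mask = ION_HEAP(ION_SYSTEM_HEAP_ID),
		.flags = ION_FLAG_CACHED,
	};
	int ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0)
		return -1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0) {
		close(ion_fd);
		return -1;
	}
	close(ion_fd); /* the allocation lives on through the dma-buf fd */
	*va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   (int)alloc.fd, 0);
	if (*va == MAP_FAILED) {
		close((int)alloc.fd);
		*va = NULL;
		return -1;
	}
	return (int)alloc.fd;
}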
struct dma_buf_sync {
	__u64 flags;
};
#define DMA_BUF_SYNC_READ (1 << 0)
#define DMA_BUF_SYNC_WRITE (2 << 0)
#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START (0 << 2)
#define DMA_BUF_SYNC_END (1 << 2)
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
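/*
 * Illustrative sketch (not part of the original header): CPU access to a
 * cached dma-buf mapping should be bracketed by DMA_BUF_SYNC_START and
 * DMA_BUF_SYNC_END calls so CPU caches stay coherent with the device.
 */
static inline int npu_dma_buf_cpu_access_sketch(int dma_buf_fd, int for_write)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START |
			 (for_write ? DMA_BUF_SYNC_WRITE : DMA_BUF_SYNC_READ),
	};
	if (ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		return -1;
	/* ... read or write the mmap()ed buffer here ... */
	sync.flags = DMA_BUF_SYNC_END |
		     (for_write ? DMA_BUF_SYNC_WRITE : DMA_BUF_SYNC_READ);
	return ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}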
#define MSM_NPU_IOCTL_MAGIC 'n'
struct msm_npu_map_buf_ioctl {
	/* buffer ion handle (a dma-buf fd on recent msm kernels) */
	int32_t buf_ion_hdl;
	/* buffer size */
	uint32_t size;
	/* out: iommu mapped physical address */
	uint64_t npu_phys_addr;
};
#define MSM_NPU_MAP_BUF _IOWR(MSM_NPU_IOCTL_MAGIC, 2, struct msm_npu_map_buf_ioctl)
struct msm_npu_unmap_buf_ioctl {
	/* buffer ion handle */
	int32_t buf_ion_hdl;
	/* iommu mapped physical address */
	uint64_t npu_phys_addr;
};
#define MSM_NPU_UNMAP_BUF _IOWR(MSM_NPU_IOCTL_MAGIC, 3, struct msm_npu_unmap_buf_ioctl)
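/*
 * Illustrative sketch (not part of the original header): mapping a dma-buf
 * into the NPU's IOMMU and unmapping it again. The assumption that
 * buf_ion_hdl carries the dma-buf fd matches recent msm-npu drivers; the
 * npu_fd is a descriptor for the NPU device node.
 */
static inline uint64_t npu_map_buf_sketch(int npu_fd, int dma_buf_fd,
					  uint32_t size)
{
	struct msm_npu_map_buf_ioctl map = {
		.buf_ion_hdl = dma_buf_fd,
		.size = size,
	};
	if (ioctl(npu_fd, MSM_NPU_MAP_BUF, &map) < 0)
		return 0;
	return map.npu_phys_addr; /* IOMMU address chosen by the driver */
}

static inline int npu_unmap_buf_sketch(int npu_fd, int dma_buf_fd,
					uint64_t npu_addr)
{
	struct msm_npu_unmap_buf_ioctl unmap = {
		.buf_ion_hdl = dma_buf_fd,
		.npu_phys_addr = npu_addr,
	};
	return ioctl(npu_fd, MSM_NPU_UNMAP_BUF, &unmap);
}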
struct msm_npu_load_network_ioctl {
	/* buffer ion handle */
	int32_t buf_ion_hdl;
	/* physical address */
	uint64_t buf_phys_addr;
	/* buffer size */
	uint32_t buf_size;
	/* first block size */
	uint32_t first_block_size;
	/* reserved */
	uint32_t flags;
	/* out: network handle */
	uint32_t network_hdl;
	/* priority */
	uint32_t priority;
	/* perf mode */
	uint32_t perf_mode;
};
#define MSM_NPU_LOAD_NETWORK _IOWR(MSM_NPU_IOCTL_MAGIC, 4, struct msm_npu_load_network_ioctl)
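/*
 * Illustrative sketch (not part of the original header): loading a network
 * blob that was already ION-allocated and IOMMU-mapped. The driver fills in
 * network_hdl on success. Passing the full size as first_block_size is an
 * assumption; real callers take it from the network header.
 */
static inline int npu_load_network_sketch(int npu_fd, int dma_buf_fd,
					  uint64_t npu_addr, uint32_t size)
{
	struct msm_npu_load_network_ioctl load = {
		.buf_ion_hdl = dma_buf_fd,
		.buf_phys_addr = npu_addr,
		.buf_size = size,
		.first_block_size = size,
	};
	if (ioctl(npu_fd, MSM_NPU_LOAD_NETWORK, &load) < 0)
		return -1;
	return (int)load.network_hdl;
}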
#define PROP_PARAM_MAX_SIZE 8
struct msm_npu_property {
	uint32_t prop_id;
	uint32_t num_of_params;
	uint32_t network_hdl;
	uint32_t prop_param[PROP_PARAM_MAX_SIZE];
};
#define MSM_NPU_SET_PROP _IOW(MSM_NPU_IOCTL_MAGIC, 10, struct msm_npu_property)
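/*
 * Illustrative sketch (not part of the original header): setting a
 * single-parameter driver property, e.g. MSM_NPU_PROP_ID_FW_STATE
 * (defined further below).
 */
static inline int npu_set_prop_sketch(int npu_fd, uint32_t prop_id,
				      uint32_t value)
{
	struct msm_npu_property prop = {
		.prop_id = prop_id,
		.num_of_params = 1,
		.prop_param = { value },
	};
	return ioctl(npu_fd, MSM_NPU_SET_PROP, &prop);
}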
#define MSM_NPU_MAX_INPUT_LAYER_NUM 8
#define MSM_NPU_MAX_OUTPUT_LAYER_NUM 4
struct msm_npu_patch_info {
	/* chunk id */
	uint32_t chunk_id;
	/* instruction size in bytes */
	uint16_t instruction_size_in_bytes;
	/* variable size in bits */
	uint16_t variable_size_in_bits;
	/* shift value in bits */
	uint16_t shift_value_in_bits;
	/* location offset */
	uint32_t loc_offset;
};
struct msm_npu_layer {
	/* layer id */
	uint32_t layer_id;
	/* patch information */
	struct msm_npu_patch_info patch_info;
	/* buffer handle */
	int32_t buf_hdl;
	/* buffer size */
	uint32_t buf_size;
	/* physical address */
	uint64_t buf_phys_addr;
};
struct msm_npu_exec_network_ioctl {
	/* network handle */
	uint32_t network_hdl;
	/* input layer number */
	uint32_t input_layer_num;
	/* input layer info */
	struct msm_npu_layer input_layers[MSM_NPU_MAX_INPUT_LAYER_NUM];
	/* output layer number */
	uint32_t output_layer_num;
	/* output layer info */
	struct msm_npu_layer output_layers[MSM_NPU_MAX_OUTPUT_LAYER_NUM];
	/* patching is required */
	uint32_t patching_required;
	/* asynchronous execution */
	uint32_t async;
	/* reserved */
	uint32_t flags;
};
#define MSM_NPU_EXEC_NETWORK _IOWR(MSM_NPU_IOCTL_MAGIC, 6, struct msm_npu_exec_network_ioctl)
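/*
 * Illustrative sketch (not part of the original header): a minimal
 * synchronous execution request with no layer patching. Whether the
 * firmware accepts empty layer lists depends on the loaded network; this
 * only shows how the ioctl is issued.
 */
static inline int npu_exec_network_sketch(int npu_fd, uint32_t network_hdl)
{
	struct msm_npu_exec_network_ioctl exec = {
		.network_hdl = network_hdl,
		.patching_required = 0,
		.async = 0,
	};
	return ioctl(npu_fd, MSM_NPU_EXEC_NETWORK, &exec);
}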
#define MSM_NPU_PROP_ID_START 0x100
#define MSM_NPU_PROP_ID_FW_STATE (MSM_NPU_PROP_ID_START + 0)
struct msm_npu_load_network_ioctl_v2 {
	/* physical address */
	uint64_t buf_phys_addr;
	/* patch info(v2) for all input/output layers */
	uint64_t patch_info;
	/* buffer ion handle */
	int32_t buf_ion_hdl;
	/* buffer size */
	uint32_t buf_size;
	/* first block size */
	uint32_t first_block_size;
	/* load flags */
	uint32_t flags;
	/* network handle */
	uint32_t network_hdl;
	/* priority */
	uint32_t priority;
	/* perf mode */
	uint32_t perf_mode;
	/* number of layers in the network */
	uint32_t num_layers;
	/* number of layers to be patched */
	uint32_t patch_info_num;
	/* reserved */
	uint32_t reserved;
};
struct msm_npu_exec_network_ioctl_v2 {
	/* stats buffer to be filled with execution stats */
	uint64_t stats_buf_addr;
	/* patch buf info for both input and output layers */
	uint64_t patch_buf_info;
	/* network handle */
	uint32_t network_hdl;
	/* asynchronous execution */
	uint32_t async;
	/* execution flags */
	uint32_t flags;
	/* stats buf size allocated */
	uint32_t stats_buf_size;
	/* number of layers to be patched */
	uint32_t patch_buf_info_num;
	/* reserved */
	uint32_t reserved;
};
struct msm_npu_unload_network_ioctl {
	/* network handle */
	uint32_t network_hdl;
};
struct msm_npu_event_execute_done {
	uint32_t network_hdl;
	int32_t exec_result;
};
struct msm_npu_event_execute_v2_done {
	uint32_t network_hdl;
	int32_t exec_result;
	/* stats buf size filled */
	uint32_t stats_buf_size;
};
struct msm_npu_event_ssr {
	uint32_t network_hdl;
};
struct msm_npu_event {
	uint32_t type;
	union {
		struct msm_npu_event_execute_done exec_done;
		struct msm_npu_event_execute_v2_done exec_v2_done;
		struct msm_npu_event_ssr ssr;
		uint8_t data[128];
	} u;
	uint32_t reserved[4];
};
struct msm_npu_patch_buf_info {
	/* physical address to be patched */
	uint64_t buf_phys_addr;
	/* buffer id */
	uint32_t buf_id;
};
struct msm_npu_patch_info_v2 {
	/* patch value */
	uint32_t value;
	/* chunk id */
	uint32_t chunk_id;
	/* instruction size in bytes */
	uint32_t instruction_size_in_bytes;
	/* variable size in bits */
	uint32_t variable_size_in_bits;
	/* shift value in bits */
	uint32_t shift_value_in_bits;
	/* location offset */
	uint32_t loc_offset;
};
/* load network v2 */
#define MSM_NPU_LOAD_NETWORK_V2 \
	_IOWR(MSM_NPU_IOCTL_MAGIC, 7, struct msm_npu_load_network_ioctl_v2)
/* exec network v2 */
#define MSM_NPU_EXEC_NETWORK_V2 \
	_IOWR(MSM_NPU_IOCTL_MAGIC, 8, struct msm_npu_exec_network_ioctl_v2)
#define MSM_NPU_UNLOAD_NETWORK \
	_IOWR(MSM_NPU_IOCTL_MAGIC, 5, struct msm_npu_unload_network_ioctl)
#define MSM_NPU_RECEIVE_EVENT \
	_IOR(MSM_NPU_IOCTL_MAGIC, 9, struct msm_npu_event)
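/*
 * Illustrative sketch (not part of the original header): blocking until
 * the driver queues an event (e.g. execute-done or SSR) for this client.
 */
static inline int npu_receive_event_sketch(int npu_fd,
					   struct msm_npu_event *evt)
{
	memset(evt, 0, sizeof(*evt));
	return ioctl(npu_fd, MSM_NPU_RECEIVE_EVENT, evt);
}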
/*
 * Userspace mirrors of in-kernel object layouts (spinlock_t, list_head,
 * wait_queue_head, mutex and the npu driver's struct npu_client). These are
 * not UAPI: they replicate the kernel's field layout so the exploit can
 * compute offsets within, and build the contents of, a kernel npu_client
 * object. The exact layout depends on the kernel version and config.
 */
struct spinlock_t {
	uint16_t owner;
	uint16_t next;
};
struct list_head {
	uint64_t next, prev;
};
struct wait_queue_head {
	struct spinlock_t lock;
	struct list_head head;
};
typedef struct wait_queue_head wait_queue_head_t;
typedef int (*wait_queue_func_t)(void *wq_entry, unsigned mode, int flags, void *key);
struct wait_queue_entry {
	uint32_t flags;
	void *private;
	wait_queue_func_t func;
	struct list_head entry;
};
struct mutex {
	uint64_t owner;
	struct spinlock_t wait_lock;
	struct list_head wait_list;
};
struct npu_client {
	uint32_t *npu_dev;
	wait_queue_head_t wait;
	struct mutex list_lock;
	struct list_head evt_list;
	struct list_head mapped_buffer_list;
};
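/*
 * Illustrative sketch (not part of the original header): the mirrored
 * layouts above are chiefly useful for computing field offsets when
 * reading or forging a kernel npu_client object.
 */
static inline size_t npu_client_mapped_list_offset_sketch(void)
{
	return offsetof(struct npu_client, mapped_buffer_list);
}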
#endif /* NPU_SPLOIT_H */