search for: kallsyms_token_t

Displaying 8 results from an estimated 14 matches for "kallsyms_token_t".

2007 Apr 18
1
[PATCH] lguest32 kallsyms backtrace of guest.
...be_init(void); extern void lguest_iret(void); +/* For back tracing from the HV */ +extern const unsigned long kallsyms_addresses[] __attribute__((weak)); +extern const unsigned long kallsyms_num_syms __attribute__((weak)); +extern const u8 kallsyms_names[] __attribute__((weak)); +extern const u8 kallsyms_token_table[] __attribute__((weak)); +extern const u16 kallsyms_token_index[] __attribute__((weak)); +extern const unsigned long kallsyms_markers[] __attribute__((weak)); + struct lguest_data lguest_data = { .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF }, .noirq_start = (u32)lguest_noirq_start,...
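The __attribute__((weak)) externs let this file link whether or not the kernel was built with CONFIG_KALLSYMS; an unresolved weak symbol has address zero, which the backtrace code must test before dereferencing. As a minimal sketch (not the patch's code, and assuming the classic kallsyms compression scheme of that era), the three name tables are consumed like this:

#include <linux/types.h>

/* Weak references, as in the hunk above: address zero when the kernel
 * was built without CONFIG_KALLSYMS, so callers must check first. */
extern const u8 kallsyms_names[] __attribute__((weak));
extern const u8 kallsyms_token_table[] __attribute__((weak));
extern const u16 kallsyms_token_index[] __attribute__((weak));

/*
 * Expand the compressed name that starts at 'off' in kallsyms_names[]
 * into 'result'. Returns the offset of the next compressed name.
 */
static unsigned int expand_symbol(unsigned int off, char *result, size_t maxlen)
{
	int len = kallsyms_names[off++];	/* token count for this name */

	while (len--) {
		/* Each byte selects a NUL-terminated fragment of the token
		 * table via the 256-entry index. */
		const u8 *tok = &kallsyms_token_table[
				kallsyms_token_index[kallsyms_names[off++]]];

		while (*tok && maxlen > 1) {
			*result++ = *tok++;
			maxlen--;
		}
	}
	*result = '\0';
	return off;
}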
2017 Oct 11
0
[PATCH v1 15/27] compiler: Option to default to hidden symbols
...".rodata"))); +__attribute__((weak, section(".rodata"))) __default_visibility; extern const unsigned long kallsyms_relative_base -__attribute__((weak, section(".rodata"))); +__attribute__((weak, section(".rodata"))) __default_visibility; -extern const u8 kallsyms_token_table[] __weak; -extern const u16 kallsyms_token_index[] __weak; +extern const u8 kallsyms_token_table[] __weak __default_visibility; +extern const u16 kallsyms_token_index[] __weak __default_visibility; -extern const unsigned long kallsyms_markers[] __weak; +extern const unsigned long kallsyms_mar...
2007 Apr 18
0
[RFC/PATCH LGUEST X86_64 04/13] Useful debugging
...addresses = (unsigned long*)lhread_u64(vcpu, (u64)&data->kallsyms_addresses); + kstuff.num_syms = lhread_u64(vcpu, (u64)&data->kallsyms_num_syms); + kstuff.names = (u8*)lhread_u64(vcpu, (u64)&data->kallsyms_names); + kstuff.token_table = (u8*)lhread_u64(vcpu, (u64)&data->kallsyms_token_table); + kstuff.token_index = (u16*)lhread_u64(vcpu, (u64)&data->kallsyms_token_index); + kstuff.markers = (unsigned long*)lhread_u64(vcpu, (u64)&data->kallsyms_markers); + + if (!kstuff.addresses || !kstuff.num_syms || !kstuff.names || + !kstuff.token_table || !kstuff.token_index...
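Once the six guest pointers have been fetched (and checked for zero, since they come from weak guest symbols), the backtracer still has to map a guest RIP back to a symbol. A sketch of that step, with a hypothetical guest_kallsyms struct standing in for the patch's kstuff:

#include <linux/types.h>

/* Hypothetical host-side view of the guest tables; the patch's own
 * 'kstuff' struct plays this role. */
struct guest_kallsyms {
	unsigned long *addresses;	/* sorted ascending */
	unsigned long num_syms;
	u8 *names;
	u8 *token_table;
	u16 *token_index;
	unsigned long *markers;
};

/* Map a guest address to the nearest preceding symbol; returns the
 * symbol index, or -1 if the address lies below the first symbol. */
static long nearest_symbol(const struct guest_kallsyms *ks,
			   unsigned long addr, unsigned long *offset)
{
	unsigned long i, best = 0;

	if (!ks->num_syms || addr < ks->addresses[0])
		return -1;

	for (i = 1; i < ks->num_syms; i++) {
		if (ks->addresses[i] > addr)
			break;
		best = i;
	}
	*offset = addr - ks->addresses[best];
	return best;
}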
2007 Apr 18
1
[RFC/PATCH LGUEST X86_64 03/13] lguest64 core
...uct lguest_text_ptr code_stack[2]; +extern int acpi_disabled; +extern int acpi_ht; + +extern const unsigned long kallsyms_addresses[] __attribute__((weak)); +extern const unsigned long kallsyms_num_syms __attribute__((weak)); +extern const u8 kallsyms_names[] __attribute__((weak)); +extern const u8 kallsyms_token_table[] __attribute__((weak)); +extern const u16 kallsyms_token_index[] __attribute__((weak)); +extern const unsigned long kallsyms_markers[] __attribute__((weak)); + +static DEFINE_SPINLOCK(hcall_print_lock); +#define HCALL_BUFF_SIZ 1024 +static char hcall_buff[HCALL_BUFF_SIZ]; + +/* Set to true whe...
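The DEFINE_SPINLOCK/hcall_buff pair at the end of the hunk is the usual idiom for a single shared scratch buffer: every writer must hold the lock so concurrent guest hypercalls cannot interleave their output. A minimal sketch of how such a buffer is typically used (not the patch's exact code):

#include <stdarg.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

static DEFINE_SPINLOCK(hcall_print_lock);
#define HCALL_BUFF_SIZ 1024
static char hcall_buff[HCALL_BUFF_SIZ];

static void hcall_print(const char *fmt, ...)
{
	unsigned long flags;
	va_list args;

	/* IRQ-safe variant: the print path can be reached from any context. */
	spin_lock_irqsave(&hcall_print_lock, flags);

	va_start(args, fmt);
	vsnprintf(hcall_buff, HCALL_BUFF_SIZ, fmt, args);
	va_end(args);

	printk(KERN_INFO "lguest: %s\n", hcall_buff);

	spin_unlock_irqrestore(&hcall_print_lock, flags);
}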
2018 Mar 13
32
[PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v2: - Adapt patch to work post-KPTI and compiler changes - Redo all performance testing with latest configs and compilers - Simplify mov macro on PIE (MOVABS now) - Reduce GOT footprint - patch v1: - Simplify ftrace implementation. - Use gcc -mstack-protector-guard-reg=%gs with PIE when possible. - rfc v3: - Use --emit-relocs instead of -pie to reduce
2017 Oct 04
28
x86: PIE support and option to extend KASLR randomization
These patches make the changes necessary to build the kernel as a Position Independent Executable (PIE) on x86_64. A PIE kernel can be relocated below the top 2G of the virtual address space, which allows optionally extending the KASLR randomization range from 1G to 3G. Thanks a lot to Ard Biesheuvel & Kees Cook for their feedback on compiler changes, PIE support and KASLR in general. Thanks to
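The 2G limit comes from -mcmodel=kernel, which reaches globals through sign-extended 32-bit absolute relocations that can only encode the top 2G of the address space, whereas PIE code addresses its data %rip-relatively from anywhere. A hypothetical userspace demo (not part of the series) makes the difference visible:

/* demo.c — compile both ways and compare the relocations:
 *
 *   gcc -O2 -mcmodel=kernel -c demo.c && objdump -dr demo.o
 *       -> movq $kallsyms_num_syms, %rax with an R_X86_64_32S reloc,
 *          which only encodes addresses in the top 2G.
 *   gcc -O2 -fpie -c demo.c && objdump -dr demo.o
 *       -> leaq kallsyms_num_syms(%rip), %rax with R_X86_64_PC32,
 *          valid at any load address.
 */
unsigned long kallsyms_num_syms;

const void *addr_of_syms(void)
{
	return &kallsyms_num_syms;
}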
2018 May 23
33
[PATCH v3 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v3: - Updated message to describe the longer-term PIE goal. - Minor change on ftrace if condition. - Changed code using xchgq. - patch v2: - Adapt patch to work post-KPTI and compiler changes - Redo all performance testing with latest configs and compilers - Simplify mov macro on PIE (MOVABS now) - Reduce GOT footprint - patch v1: - Simplify ftrace
2017 Oct 11
32
[PATCH v1 00/27] x86: PIE support and option to extend KASLR randomization
Changes: - patch v1: - Simplify ftrace implementation. - Use gcc -mstack-protector-guard-reg=%gs with PIE when possible. - rfc v3: - Use --emit-relocs instead of -pie to reduce dynamic relocation space on mapped memory. It also simplifies the relocation process. - Move the start of the module section next to the kernel. Remove the need for -mcmodel=large on modules. Extends