Subject: vserver + grsec
From: Adam Osuchowski <Adam.Osuchowski@polsl.pl>
Date: Wed, 9 Jul 2014 12:55:54 +0200
Hi,

If you are interested in a vserver+grsec combo patch, attached is my own
against 3.13.11, combining the latest vserver (2.3.6.11) with the matching
grsec (3.0-201404182111).
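
For reference, it should apply from the top of a vanilla 3.13.11 tree with
the usual -p1 invocation; the file name below is just a placeholder for
whatever you save the attachment as:

    cd linux-3.13.11
    patch -p1 < ../3.13.11-vs2.3.6.11-grsec3.0-201404182111.diff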

Regards.


diff -ruNp linux-3.13.11/Documentation/dontdiff linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/dontdiff
--- linux-3.13.11/Documentation/dontdiff	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/dontdiff	2014-07-09 12:00:15.000000000 +0200
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,32 +101,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +165,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +182,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +206,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +217,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +227,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +242,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +257,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +269,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +287,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff -ruNp linux-3.13.11/Documentation/kernel-parameters.txt linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/kernel-parameters.txt
--- linux-3.13.11/Documentation/kernel-parameters.txt	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/kernel-parameters.txt	2014-07-09 12:00:15.000000000 +0200
@@ -1033,6 +1033,10 @@ bytes respectively. Such letter suffixes
 			Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 			Default: 1024
 
+	grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+			ignore grsecurity's /proc restrictions
+
+
 	hashdist=	[KNL,NUMA] Large hashes allocated during boot
 			are distributed across NUMA nodes.  Defaults on
 			for 64-bit NUMA, off otherwise.
@@ -2018,6 +2022,10 @@ bytes respectively. Such letter suffixes
 			noexec=on: enable non-executable mappings (default)
 			noexec=off: disable non-executable mappings
 
+	nopcid		[X86-64]
+			Disable PCID (Process-Context IDentifier) even if it
+			is supported by the processor.
+
 	nosmap		[X86]
 			Disable SMAP (Supervisor Mode Access Prevention)
 			even if it is supported by processor.
@@ -2285,6 +2293,25 @@ bytes respectively. Such letter suffixes
 			the specified number of seconds.  This is to be used if
 			your oopses keep scrolling off the screen.
 
+	pax_nouderef	[X86] disables UDEREF.  Most likely needed under certain
+			virtualization environments that don't cope well with the
+			expand down segment used by UDEREF on X86-32 or the frequent
+			page table updates on X86-64.
+
+	pax_sanitize_slab=
+			0/1 to disable/enable slab object sanitization (enabled by
+			default).
+
+	pax_softmode=	0/1 to disable/enable PaX softmode on boot already.
+
+	pax_extra_latent_entropy
+			Enable a very simple form of latent entropy extraction
+			from the first 4GB of memory as the bootmem allocator
+			passes the memory pages to the buddy allocator.
+
+	pax_weakuderef	[X86-64] enables the weaker but faster form of UDEREF
+			when the processor supports PCID.
+
 	pcbit=		[HW,ISDN]
 
 	pcd.		[PARIDE]
diff -ruNp linux-3.13.11/Documentation/vserver/debug.txt linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/vserver/debug.txt
--- linux-3.13.11/Documentation/vserver/debug.txt	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Documentation/vserver/debug.txt	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,154 @@
+
+debug_cvirt:
+
+ 2   4	"vx_map_tgid: %p/%llx: %d -> %d"
+	"vx_rmap_tgid: %p/%llx: %d -> %d"
+
+debug_dlim:
+
+ 0   1	"ALLOC (%p,#%d)%c inode (%d)"
+	"FREE  (%p,#%d)%c inode"
+ 1   2	"ALLOC (%p,#%d)%c %lld bytes (%d)"
+	"FREE  (%p,#%d)%c %lld bytes"
+ 2   4	"ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
+ 3   8	"ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
+	"ext3_has_free_blocks(%p): free=%lu, root=%lu"
+	"rcu_free_dl_info(%p)"
+ 4  10	"alloc_dl_info(%p,%d) = %p"
+	"dealloc_dl_info(%p)"
+	"get_dl_info(%p[#%d.%d])"
+	"put_dl_info(%p[#%d.%d])"
+ 5  20	"alloc_dl_info(%p,%d)*"
+ 6  40	"__hash_dl_info: %p[#%d]"
+	"__unhash_dl_info: %p[#%d]"
+ 7  80	"locate_dl_info(%p,#%d) = %p"
+
+debug_misc:
+
+ 0   1	"destroy_dqhash: %p [#0x%08x] c=%d"
+	"new_dqhash: %p [#0x%08x]"
+	"vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
+	"vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
+	"vroot_get_real_bdev not set"
+ 1   2	"cow_break_link(»%s«)"
+	"temp copy »%s«"
+ 2   4	"dentry_open(new): %p"
+	"dentry_open(old): %p"
+	"lookup_create(new): %p"
+	"old path »%s«"
+	"path_lookup(old): %d"
+	"vfs_create(new): %d"
+	"vfs_rename: %d"
+	"vfs_sendfile: %d"
+ 3   8	"fput(new_file=%p[#%d])"
+	"fput(old_file=%p[#%d])"
+ 4  10	"vx_info_kill(%p[#%d],%d,%d) = %d"
+	"vx_info_kill(%p[#%d],%d,%d)*"
+ 5  20	"vs_reboot(%p[#%d],%d)"
+ 6  40	"dropping task %p[#%u,%u] for %p[#%u,%u]"
+
+debug_net:
+
+ 2   4	"nx_addr_conflict(%p,%p) %d.%d,%d.%d"
+ 3   8	"inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
+	"inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
+ 4  10	"ip_route_connect(%p) %p,%p;%lx"
+ 5  20	"__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
+ 6  40	"sk,egf: %p [#%d] (from %d)"
+	"sk,egn: %p [#%d] (from %d)"
+	"sk,req: %p [#%d] (from %d)"
+	"sk: %p [#%d] (from %d)"
+	"tw: %p [#%d] (from %d)"
+ 7  80	"__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
+	"__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
+
+debug_nid:
+
+ 0   1	"__lookup_nx_info(#%u): %p[#%u]"
+	"alloc_nx_info(%d) = %p"
+	"create_nx_info(%d) (dynamic rejected)"
+	"create_nx_info(%d) = %p (already there)"
+	"create_nx_info(%d) = %p (new)"
+	"dealloc_nx_info(%p)"
+ 1   2	"alloc_nx_info(%d)*"
+	"create_nx_info(%d)*"
+ 2   4	"get_nx_info(%p[#%d.%d])"
+	"put_nx_info(%p[#%d.%d])"
+ 3   8	"claim_nx_info(%p[#%d.%d.%d]) %p"
+	"clr_nx_info(%p[#%d.%d])"
+	"init_nx_info(%p[#%d.%d])"
+	"release_nx_info(%p[#%d.%d.%d]) %p"
+	"set_nx_info(%p[#%d.%d])"
+ 4  10	"__hash_nx_info: %p[#%d]"
+	"__nx_dynamic_id: [#%d]"
+	"__unhash_nx_info: %p[#%d.%d.%d]"
+ 5  20	"moved task %p into nxi:%p[#%d]"
+	"nx_migrate_task(%p,%p[#%d.%d.%d])"
+	"task_get_nx_info(%p)"
+ 6  40	"nx_clear_persistent(%p[#%d])"
+
+debug_quota:
+
+ 0   1	"quota_sync_dqh(%p,%d) discard inode %p"
+ 1   2	"quota_sync_dqh(%p,%d)"
+	"sync_dquots(%p,%d)"
+	"sync_dquots_dqh(%p,%d)"
+ 3   8	"do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
+
+debug_switch:
+
+ 0   1	"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
+ 1   2	"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
+ 4  10	"%s: (%s %s) returned %s with %d"
+
+debug_tag:
+
+ 7  80	"dx_parse_tag(»%s«): %d:#%d"
+	"dx_propagate_tag(%p[#%lu.%d]): %d,%d"
+
+debug_xid:
+
+ 0   1	"__lookup_vx_info(#%u): %p[#%u]"
+	"alloc_vx_info(%d) = %p"
+	"alloc_vx_info(%d)*"
+	"create_vx_info(%d) (dynamic rejected)"
+	"create_vx_info(%d) = %p (already there)"
+	"create_vx_info(%d) = %p (new)"
+	"dealloc_vx_info(%p)"
+	"loc_vx_info(%d) = %p (found)"
+	"loc_vx_info(%d) = %p (new)"
+	"loc_vx_info(%d) = %p (not available)"
+ 1   2	"create_vx_info(%d)*"
+	"loc_vx_info(%d)*"
+ 2   4	"get_vx_info(%p[#%d.%d])"
+	"put_vx_info(%p[#%d.%d])"
+ 3   8	"claim_vx_info(%p[#%d.%d.%d]) %p"
+	"clr_vx_info(%p[#%d.%d])"
+	"init_vx_info(%p[#%d.%d])"
+	"release_vx_info(%p[#%d.%d.%d]) %p"
+	"set_vx_info(%p[#%d.%d])"
+ 4  10	"__hash_vx_info: %p[#%d]"
+	"__unhash_vx_info: %p[#%d.%d.%d]"
+	"__vx_dynamic_id: [#%d]"
+ 5  20	"enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
+	"leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
+	"moved task %p into vxi:%p[#%d]"
+	"task_get_vx_info(%p)"
+	"vx_migrate_task(%p,%p[#%d.%d])"
+ 6  40	"vx_clear_persistent(%p[#%d])"
+	"vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_init(%p[#%d],%p[#%d,%d,%d])"
+	"vx_set_persistent(%p[#%d])"
+	"vx_set_reaper(%p[#%d],%p[#%d,%d])"
+ 7  80	"vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
+
+
+debug_limit:
+
+ n 2^n	"vx_acc_cres[%5d,%s,%2d]: %5d%s"
+	"vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+
+ m 2^m	"vx_acc_page[%5d,%s,%2d]: %5d%s"
+	"vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
+	"vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
diff -ruNp linux-3.13.11/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Makefile
--- linux-3.13.11/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/Makefile	2014-07-09 12:00:31.000000000 +0200
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
 
 HOSTCC       = gcc
 HOSTCXX      = g++
-HOSTCFLAGS   = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS   = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS  += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
 
 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -311,9 +312,15 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),)	# make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ quiet=silent_
+endif
+else					# make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
   quiet=silent_
 endif
+endif
 
 export quiet Q KBUILD_VERBOSE
 
@@ -417,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
 # Rules shared between *config targets and build targets
 
 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 	$(Q)$(MAKE) $(build)=scripts/basic
 	$(Q)rm -f .tmp_quiet_recordmcount
 
@@ -579,6 +586,72 @@ else
 KBUILD_CFLAGS	+= -O2
 endif
 
+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+	$(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+	$(error Your gcc installation does not support plugins.  If the necessary headers for plugin support are missing, they should be installed.  On Debian, apt-get install gcc-<ver>-plugin-dev.  If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
+else
+	$(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+	$(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure.  PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile
 
 ifdef CONFIG_READABLE_ASM
@@ -619,7 +692,7 @@ endif
 
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS	+= -g
-KBUILD_AFLAGS	+= -gdwarf-2
+KBUILD_AFLAGS	+= -Wa,--gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -754,7 +827,7 @@ export mod_sign_cmd
 
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
 
 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -803,6 +876,8 @@ endif
 
 # The actual objects are generated when descending, 
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -812,7 +887,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs)
 # Error messages still appears in the original language
 
 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 	$(Q)$(MAKE) $(build)=$@
 
 define filechk_kernel.release
@@ -855,10 +930,13 @@ prepare1: prepare2 $(version_h) include/
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 	$(Q)$(MAKE) $(build)=.
 
 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0
 
 # Generate some files
@@ -966,6 +1044,8 @@ all: modules
 #	using awk while concatenating to the final file.
 
 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 	$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 	@$(kecho) '  Building modules, stage 2.';
@@ -981,7 +1061,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modu
 
 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts
 
 # Target to install modules
 PHONY += modules_install
@@ -1047,7 +1127,8 @@ MRPROPER_FILES += .config .config.old .v
 		  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 		  signing_key.priv signing_key.x509 x509.genkey		\
 		  extra_certificates signing_key.x509.keyid		\
-		  signing_key.x509.signer
+		  signing_key.x509.signer tools/gcc/size_overflow_hash.h \
+		  tools/gcc/randomize_layout_seed.h
 
 # clean - Delete most, but leave enough to build external modules
 #
@@ -1087,6 +1168,7 @@ distclean: mrproper
 		\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 		-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 		-o -name '.*.rej' \
+		-o -name '.*.rej' -o -name '*.so' \
 		-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 		-type f -print | xargs rm -f
 
@@ -1248,6 +1330,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 	$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
 
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 	@$(kecho) '  Building modules, stage 2.';
 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1387,17 +1471,21 @@ else
         target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif
 
-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 	$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1407,11 +1495,15 @@ endif
 	$(cmd_crmodverdir)
 	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 	$(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 	$(cmd_crmodverdir)
 	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 	$(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 	$(cmd_crmodverdir)
 	$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1)   \
 	$(build)=$(build-dir) $(@:.ko=.o)
diff -ruNp linux-3.13.11/arch/alpha/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/Kconfig
--- linux-3.13.11/arch/alpha/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -735,6 +735,8 @@ config DUMMY_CONSOLE
 	depends on VGA_HOSE
 	default y
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/alpha/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/atomic.h
--- linux-3.13.11/arch/alpha/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_posit
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
diff -ruNp linux-3.13.11/arch/alpha/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/cache.h
--- linux-3.13.11/arch/alpha/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H
 
+#include <linux/const.h>
 
 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES     64
 # define L1_CACHE_SHIFT     6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
    direct-mapped, physical.
 */
-# define L1_CACHE_BYTES     32
 # define L1_CACHE_SHIFT     5
 #endif
 
+#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES    L1_CACHE_BYTES
 
 #endif
diff -ruNp linux-3.13.11/arch/alpha/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/elf.h
--- linux-3.13.11/arch/alpha/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
 
 #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be 
    registered using atexit.  This provides a mean for the dynamic
    linker to call DT_FINI functions for shared libraries that have
diff -ruNp linux-3.13.11/arch/alpha/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/pgalloc.h
--- linux-3.13.11/arch/alpha/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/pgalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t
 	pgd_set(pgd, pmd);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void
diff -ruNp linux-3.13.11/arch/alpha/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/pgtable.h
--- linux-3.13.11/arch/alpha/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff -ruNp linux-3.13.11/arch/alpha/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/module.c
--- linux-3.13.11/arch/alpha/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
 
 	/* The small sections were sorted to the end of the segment.
 	   The following should definitely cover them.  */
-	gp = (u64)me->module_core + me->core_size - 0x8000;
+	gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 	got = sechdrs[me->arch.gotsecindex].sh_addr;
 
 	for (i = 0; i < n; i++) {
diff -ruNp linux-3.13.11/arch/alpha/kernel/osf_sys.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/osf_sys.c
--- linux-3.13.11/arch/alpha/kernel/osf_sys.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/osf_sys.c	2014-07-09 12:00:15.000000000 +0200
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct tim
    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
 
 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
-		         unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+		         unsigned long limit, unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 	info.flags = 0;
 	info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long a
 	info.high_limit = limit;
 	info.align_mask = 0;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp
 	   merely specific addresses, but regions of memory -- perhaps
 	   this feature should be incorporated into all ports?  */
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
-		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+		addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 		if (addr != (unsigned long) -ENOMEM)
 			return addr;
 	}
 
 	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
-	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
-					 len, limit);
+	addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 	if (addr != (unsigned long) -ENOMEM)
 		return addr;
 
 	/* Finally, try allocating in low memory.  */
-	addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+	addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
 
 	return addr;
 }
diff -ruNp linux-3.13.11/arch/alpha/kernel/systbls.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/systbls.S
--- linux-3.13.11/arch/alpha/kernel/systbls.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/systbls.S	2014-07-09 12:00:15.000000000 +0200
@@ -446,7 +446,7 @@ sys_call_table:
 	.quad sys_stat64			/* 425 */
 	.quad sys_lstat64
 	.quad sys_fstat64
-	.quad sys_ni_syscall			/* sys_vserver */
+	.quad sys_vserver			/* sys_vserver */
 	.quad sys_ni_syscall			/* sys_mbind */
 	.quad sys_ni_syscall			/* sys_get_mempolicy */
 	.quad sys_ni_syscall			/* sys_set_mempolicy */
diff -ruNp linux-3.13.11/arch/alpha/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/traps.c
--- linux-3.13.11/arch/alpha/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -175,7 +175,8 @@ die_if_kernel(char * str, struct pt_regs
 #ifdef CONFIG_SMP
 	printk("CPU %d ", hard_smp_processor_id());
 #endif
-	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
+	printk("%s(%d:#%u): %s %ld\n", current->comm,
+		task_pid_nr(current), current->xid, str, err);
 	dik_show_regs(regs, r9_15);
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	dik_show_trace((unsigned long *)(regs+1));
diff -ruNp linux-3.13.11/arch/alpha/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/mm/fault.c
--- linux-3.13.11/arch/alpha/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/alpha/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *
 	__reload_thread(pcb);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+	int err;
+
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int ldah, ldq, jmp;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+		    jmp == 0x6BFB0000U)
+		{
+			unsigned long r27, addr;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			err = get_user(r27, (unsigned long *)addr);
+			if (err)
+				break;
+
+			regs->r27 = r27;
+			regs->pc = r27;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #2 */
+		unsigned int ldah, lda, br;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(lda, (unsigned int *)(regs->pc+4));
+		err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
+		    (br & 0xFFE00000U) == 0xC3E00000U)
+		{
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation */
+		unsigned int br;
+
+		err = get_user(br, (unsigned int *)regs->pc);
+
+		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+			unsigned int br2, ldq, nop, jmp;
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			err = get_user(br2, (unsigned int *)addr);
+			err |= get_user(ldq, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8));
+			err |= get_user(jmp, (unsigned int *)(addr+12));
+			err |= get_user(resolver, (unsigned long *)(addr+16));
+
+			if (err)
+				break;
+
+			if (br2 == 0xC3600000U &&
+			    ldq == 0xA77B000CU &&
+			    nop == 0x47FF041FU &&
+			    jmp == 0x6B7B0000U)
+			{
+				regs->r28 = regs->pc+4;
+				regs->r27 = addr+16;
+				regs->pc = resolver;
+				return 3;
+			}
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -133,8 +251,29 @@ retry:
  good_area:
 	si_code = SEGV_ACCERR;
 	if (cause < 0) {
-		if (!(vma->vm_flags & VM_EXEC))
+		if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+				goto bad_area;
+
+			up_read(&mm->mmap_sem);
+			switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+			case 2:
+			case 3:
+				return;
+#endif
+
+			}
+			pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+			do_group_exit(SIGKILL);
+#else
 			goto bad_area;
+#endif
+
+		}
 	} else if (!cause) {
 		/* Allow reads even for write-only mappings */
 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff -ruNp linux-3.13.11/arch/arm/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/Kconfig
--- linux-3.13.11/arch/arm/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -1830,7 +1830,7 @@ config ALIGNMENT_TRAP
 
 config UACCESS_WITH_MEMCPY
 	bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
-	depends on MMU
+	depends on MMU && !PAX_MEMORY_UDEREF
 	default y if CPU_FEROCEON
 	help
 	  Implement faster copy_to_user and clear_user methods for CPU
@@ -2102,6 +2102,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
+	depends on !GRKERNSEC_KMEM
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -2267,6 +2268,8 @@ source "fs/Kconfig"
 
 source "arch/arm/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/arm/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/atomic.h
--- linux-3.13.11/arch/arm/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef __KERNEL__
 
+#define _ASM_EXTABLE(from, to)		\
+"	.pushsection __ex_table,\"a\"\n"\
+"	.align	3\n"			\
+"	.long	" #from ", " #to"\n"	\
+"	.popsection"
+
 /*
  * On ARM, ordinary assignment (str instruction) doesn't clear the local
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+	return v->counter;
+}
 #define atomic_set(v,i)	(((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+	v->counter = i;
+}
 
 #if __LINUX_ARM_ARCH__ >= 6
 
@@ -44,6 +62,36 @@ static inline void atomic_add(int i, ato
 
 	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_add\n"
+"1:	ldrex	%1, [%3]\n"
+"	adds	%0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	prefetchw(&v->counter);
+	__asm__ __volatile__("@ atomic_add_unchecked\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
@@ -62,6 +110,42 @@ static inline int atomic_add_return(int
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%1, [%3]\n"
+"	adds	%0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"	mov	%0, %1\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, ato
 
 	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_sub\n"
+"1:	ldrex	%1, [%3]\n"
+"	subs	%0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	prefetchw(&v->counter);
+	__asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1:	ldrex	%0, [%3]\n"
 "	sub	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
+"1:	ldrex	%1, [%3]\n"
+"	subs	%0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"	mov	%0, %1\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
 "	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_
 	return oldval;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+	unsigned long oldval, res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+		"ldrex	%1, [%3]\n"
+		"mov	%0, #0\n"
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -156,7 +306,17 @@ static inline int atomic_add_return(int
 
 	return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+	return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v)	(void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+	(void) atomic_add_return(i, v);
+}
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int
 	return val;
 }
 #define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+	(void) atomic_sub_return(i, v);
+}
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_
 	return ret;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+	return atomic_cmpxchg(v, old, new);
+}
+
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(at
 }
 
 #define atomic_inc(v)		atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+	atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v)		atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+	atomic_sub_unchecked(1, v);
+}
 
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v)    (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v)    (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
@@ -221,6 +410,14 @@ typedef struct {
 	long long counter;
 } atomic64_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+	long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
@@ -237,6 +434,19 @@ static inline long long atomic64_read(co
 	return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+	long long result;
+
+	__asm__ __volatile__("@ atomic64_read_unchecked\n"
+"	ldrd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64
 	: "r" (&v->counter), "r" (i)
 	);
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+	__asm__ __volatile__("@ atomic64_set_unchecked\n"
+"	strd	%2, %H2, [%1]"
+	: "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	);
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -259,6 +478,19 @@ static inline long long atomic64_read(co
 	return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+	long long result;
+
+	__asm__ __volatile__("@ atomic64_read_unchecked\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	long long tmp;
@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+	long long tmp;
+
+	prefetchw(&v->counter);
+	__asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
 #endif
 
 static inline void atomic64_add(long long i, atomic64_t *v)
@@ -284,6 +531,37 @@ static inline void atomic64_add(long lon
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	adds	%Q0, %Q0, %Q4\n"
+"	adcs	%R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+	long long result;
+	unsigned long tmp;
+
+	prefetchw(&v->counter);
+	__asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%Q0, %Q0, %Q4\n"
 "	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -303,6 +581,44 @@ static inline long long atomic64_add_ret
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	adds	%Q0, %Q0, %Q4\n"
+"	adcs	%R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"	mov	%0, %1\n"
+"	mov	%H0, %H1\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+	long long result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%Q0, %Q0, %Q4\n"
 "	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -325,6 +641,37 @@ static inline void atomic64_sub(long lon
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%Q0, %Q0, %Q4\n"
+"	sbcs	%R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
+{
+	long long result;
+	unsigned long tmp;
+
+	prefetchw(&v->counter);
+	__asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%Q0, %Q0, %Q4\n"
 "	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -344,16 +691,29 @@ static inline long long atomic64_sub_ret
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%Q0, %Q0, %Q4\n"
-"	sbc	%R0, %R0, %R4\n"
+"	sbcs	%R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"	mov	%0, %1\n"
+"	mov	%H0, %H1\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
 
@@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg
 	return oldval;
 }
 
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+					long long new)
+{
+	long long oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+		"ldrexd		%1, %H1, [%3]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %4\n"
+		"teqeq		%H1, %H4\n"
+		"strexdeq	%0, %5, %H5, [%3]"
+		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 	long long result;
@@ -406,20 +791,34 @@ static inline long long atomic64_xchg(at
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long result;
-	unsigned long tmp;
+	u64 tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%Q0, %Q0, #1\n"
-"	sbc	%R0, %R0, #0\n"
+"1:	ldrexd	%1, %H1, [%3]\n"
+"	subs	%Q0, %Q1, #1\n"
+"	sbcs	%R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"	mov	%Q0, %Q1\n"
+"	mov	%R0, %R1\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
 "	teq	%R0, #0\n"
-"	bmi	2f\n"
+"	bmi	4f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter)
 	: "cc");
@@ -442,13 +841,25 @@ static inline int atomic64_add_unless(at
 "	teq	%0, %5\n"
 "	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
-"	beq	2f\n"
+"	beq	4f\n"
 "	adds	%Q0, %Q0, %Q6\n"
-"	adc	%R0, %R0, %R6\n"
+"	adcs	%R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	bvc	3f\n"
+"2:	bkpt	0xf103\n"
+"3:\n"
+#endif
+
 "	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+	_ASM_EXTABLE(2b, 4b)
+#endif
+
 	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
@@ -461,10 +872,13 @@ static inline int atomic64_add_unless(at
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v)	atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v)	atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v)	atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
diff -ruNp linux-3.13.11/arch/arm/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cache.h
--- linux-3.13.11/arch/arm/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))
 
 #endif
diff -ruNp linux-3.13.11/arch/arm/include/asm/cacheflush.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cacheflush.h
--- linux-3.13.11/arch/arm/include/asm/cacheflush.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cacheflush.h	2014-07-09 12:00:15.000000000 +0200
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 	void (*dma_unmap_area)(const void *, size_t, int);
 
 	void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;
 
 /*
  * Select the calling method
diff -ruNp linux-3.13.11/arch/arm/include/asm/checksum.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/checksum.h
--- linux-3.13.11/arch/arm/include/asm/checksum.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/checksum.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+	__wsum ret;
+	pax_open_userland();
+	ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+	pax_close_userland();
+	return ret;
+}
+
+
 
 /*
  * 	Fold a partial checksum without adding pseudo headers
diff -ruNp linux-3.13.11/arch/arm/include/asm/cmpxchg.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cmpxchg.h
--- linux-3.13.11/arch/arm/include/asm/cmpxchg.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/cmpxchg.h	2014-07-09 12:00:15.000000000 +0200
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsig
 
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 #include <asm-generic/cmpxchg-local.h>
 
diff -ruNp linux-3.13.11/arch/arm/include/asm/domain.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/domain.h
--- linux-3.13.11/arch/arm/include/asm/domain.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/domain.h	2014-07-09 12:00:15.000000000 +0200
@@ -48,18 +48,37 @@
  * Domain types
  */
 #define DOMAIN_NOACCESS	0
-#define DOMAIN_CLIENT	1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT	1
+#define DOMAIN_KERNELCLIENT	1
 #define DOMAIN_MANAGER	3
+#define DOMAIN_VECTORS		DOMAIN_USER
+#else
+
+#ifdef CONFIG_PAX_KERNEXEC
+#define DOMAIN_MANAGER	1
+#define DOMAIN_KERNEXEC	3
 #else
 #define DOMAIN_MANAGER	1
 #endif
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT	0
+#define DOMAIN_UDEREF		1
+#define DOMAIN_VECTORS		DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT	1
+#define DOMAIN_VECTORS		DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT	1
+
+#endif
+
 #define domain_val(dom,type)	((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned v
 	isb();
 }
 
-#define modify_domain(dom,type)					\
-	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
-	} while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
diff -ruNp linux-3.13.11/arch/arm/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/elf.h
--- linux-3.13.11/arch/arm/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex)	elf_set_personality(&(ex))
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff -ruNp linux-3.13.11/arch/arm/include/asm/fncpy.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/fncpy.h
--- linux-3.13.11/arch/arm/include/asm/fncpy.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/fncpy.h	2014-07-09
12:00:15.000000000 +0200
@@ -81,7 +81,9 @@
 	BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||		\
 		(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1)));	\
 									\
+	pax_open_kernel();						\
 	memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);	\
+	pax_close_kernel();						\
 	flush_icache_range((unsigned long)(dest_buf),			\
 		(unsigned long)(dest_buf) + (size));			\
 									\
diff -ruNp linux-3.13.11/arch/arm/include/asm/futex.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/futex.h
--- linux-3.13.11/arch/arm/include/asm/futex.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/futex.h	2014-07-09
12:00:15.000000000 +0200
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	pax_open_userland();
+
 	smp_mb();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	: "cc", "memory");
 	smp_mb();
 
+	pax_close_userland();
+
 	*uval = val;
 	return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	pax_open_userland();
+
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
 
+	pax_close_userland();
+
 	*uval = val;
 	return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op,
 		return -EFAULT;
 
 	pagefault_disable();	/* implies preempt_disable() */
+	pax_open_userland();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op,
 		ret = -ENOSYS;
 	}
 
+	pax_close_userland();
 	pagefault_enable();	/* subsumes preempt_enable() */
 
 	if (!ret) {
diff -ruNp linux-3.13.11/arch/arm/include/asm/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/kmap_types.h
--- linux-3.13.11/arch/arm/include/asm/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/kmap_types.h	2014-07-09
12:00:15.000000000 +0200
@@ -4,6 +4,6 @@
 /*
  * This is the "bare minimum".  AIO seems to require this.
  */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17
 
 #endif
diff -ruNp linux-3.13.11/arch/arm/include/asm/mach/dma.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/mach/dma.h
--- linux-3.13.11/arch/arm/include/asm/mach/dma.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/mach/dma.h	2014-07-09
12:00:15.000000000 +0200
@@ -22,7 +22,7 @@ struct dma_ops {
 	int	(*residue)(unsigned int, dma_t *);		/* optional */
 	int	(*setspeed)(unsigned int, dma_t *, int);	/* optional */
 	const char *type;
-};
+} __do_const;
 
 struct dma_struct {
 	void		*addr;		/* single DMA address		*/
diff -ruNp linux-3.13.11/arch/arm/include/asm/mach/map.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/mach/map.h
--- linux-3.13.11/arch/arm/include/asm/mach/map.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/mach/map.h	2014-07-09
12:00:15.000000000 +0200
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN		6
 #define MT_LOW_VECTORS		7
 #define MT_HIGH_VECTORS		8
-#define MT_MEMORY		9
+#define MT_MEMORY_RWX		9
 #define MT_ROM			10
-#define MT_MEMORY_NONCACHED	11
+#define MT_MEMORY_NONCACHED_RX	11
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
 #define MT_MEMORY_SO		14
 #define MT_MEMORY_DMA_READY	15
+#define MT_MEMORY_RW		16
+#define MT_MEMORY_RX		17
+#define MT_MEMORY_NONCACHED_RW	18
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff -ruNp linux-3.13.11/arch/arm/include/asm/outercache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/outercache.h
--- linux-3.13.11/arch/arm/include/asm/outercache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/outercache.h	2014-07-09
12:00:15.000000000 +0200
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 	void (*set_debug)(unsigned long);
 	void (*resume)(void);
-};
+} __no_const;
 
 extern struct outer_cache_fns outer_cache;
 
diff -ruNp linux-3.13.11/arch/arm/include/asm/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/page.h
--- linux-3.13.11/arch/arm/include/asm/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/page.h	2014-07-09
12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 
 #else
 
+#include <linux/compiler.h>
 #include <asm/glue.h>
 
 /*
@@ -114,7 +115,7 @@ struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;
 
 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgalloc.h
--- linux-3.13.11/arch/arm/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgalloc.h	2014-07-09
12:00:15.000000000 +0200
@@ -17,6 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/system_info.h>
 
 #define check_pgt_cache()		do { } while (0)
 
@@ -43,6 +44,11 @@ static inline void pud_populate(struct m
 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_populate(mm, pud, pmd);
+}
+
 #else	/* !CONFIG_ARM_LPAE */
 
 /*
@@ -51,6 +57,7 @@ static inline void pud_populate(struct m
 #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd)		do { } while (0)
 #define pud_populate(mm,pmd,pte)	BUG()
+#define pud_populate_kernel(mm,pmd,pte)	BUG()
 
 #endif	/* CONFIG_ARM_LPAE */
 
@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_st
 	__free_page(pte);
 }
 
+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
+{
+#ifdef CONFIG_ARM_LPAE
+	pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
+	else
+		pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
+#endif
+	flush_pmd_entry(pmdp);
+}
+
 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 				  pmdval_t prot)
 {
@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
-	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
+	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgtable-2level-hwdef.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-2level-hwdef.h
--- linux-3.13.11/arch/arm/include/asm/pgtable-2level-hwdef.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-2level-hwdef.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,12 +20,15 @@
 #define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE		(_AT(pmdval_t, 1) << 0)
 #define PMD_TYPE_SECT		(_AT(pmdval_t, 2) << 0)
+#define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)		/* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
+
 /*
  *   - section
  */
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 0)		/* v7 */
 #define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
 #define PMD_SECT_XN		(_AT(pmdval_t, 1) << 4)		/* v6 */
@@ -37,6 +40,7 @@
 #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 17)	/* v6 */
 #define PMD_SECT_SUPER		(_AT(pmdval_t, 1) << 18)	/* v6 */
 #define PMD_SECT_AF		(_AT(pmdval_t, 0))
+#define PMD_SECT_RDONLY		(_AT(pmdval_t, 0))
 
 #define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0))
 #define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
@@ -66,6 +70,7 @@
  *   - extended small page/tiny page
  */
 #define PTE_EXT_XN		(_AT(pteval_t, 1) << 0)		/* v6 */
+#define PTE_EXT_PXN		(_AT(pteval_t, 1) << 2)		/* v7 */
 #define PTE_EXT_AP_MASK		(_AT(pteval_t, 3) << 4)
 #define PTE_EXT_AP0		(_AT(pteval_t, 1) << 4)
 #define PTE_EXT_AP1		(_AT(pteval_t, 2) << 4)
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgtable-2level.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-2level.h
--- linux-3.13.11/arch/arm/include/asm/pgtable-2level.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-2level.h	2014-07-09 12:00:15.000000000 +0200
@@ -126,6 +126,9 @@
 #define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
 #define L_PTE_NONE		(_AT(pteval_t, 1) << 11)
 
+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
+#define L_PTE_PXN		(_AT(pteval_t, 0))
+
 /*
  * These are the memory types, defined to be compatible with
  * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgtable-3level-hwdef.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-3level-hwdef.h
--- linux-3.13.11/arch/arm/include/asm/pgtable-3level-hwdef.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-3level-hwdef.h	2014-07-09 12:00:15.000000000 +0200
@@ -75,6 +75,7 @@
 #define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_EXT_PXN		(_AT(pteval_t, 1) << 53)	/* PXN */
 #define PTE_EXT_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 
 /*
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgtable-3level.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-3level.h
--- linux-3.13.11/arch/arm/include/asm/pgtable-3level.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable-3level.h	2014-07-09 12:00:15.000000000 +0200
@@ -82,6 +82,7 @@
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
 #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
+#define L_PTE_PXN		(_AT(pteval_t, 1) << 53)	/* PXN */
 #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
 #define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
@@ -95,6 +96,7 @@
 /*
  * To be used in assembly code with the upper page attributes.
  */
+#define L_PTE_PXN_HIGH		(1 << (53 - 32))
 #define L_PTE_XN_HIGH		(1 << (54 - 32))
 #define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
 
diff -ruNp linux-3.13.11/arch/arm/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable.h
--- linux-3.13.11/arch/arm/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/pgtable.h	2014-07-09
12:00:15.000000000 +0200
@@ -33,6 +33,9 @@
 #include <asm/pgtable-2level.h>
 #endif
 
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
@@ -48,6 +51,9 @@
 #define LIBRARY_TEXT_START	0x0c000000
 
 #ifndef __ASSEMBLY__
+extern pteval_t __supported_pte_mask;
+extern pmdval_t __supported_pmd_mask;
+
 extern void __pte_error(const char *file, int line, pte_t);
 extern void __pmd_error(const char *file, int line, pmd_t);
 extern void __pgd_error(const char *file, int line, pgd_t);
@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
 
+#define  __HAVE_ARCH_PAX_OPEN_KERNEL
+#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/domain.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+
+static inline int test_domain(int domain, int domaintype)
+{
+	return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
+}
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long pax_open_kernel(void) {
+#ifdef CONFIG_ARM_LPAE
+	/* TODO */
+#else
+	preempt_disable();
+	BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
+	modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
+#endif
+	return 0;
+}
+
+static inline unsigned long pax_close_kernel(void) {
+#ifdef CONFIG_ARM_LPAE
+	/* TODO */
+#else
+	BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
+	/* DOMAIN_MANAGER = "client" under KERNEXEC */
+	modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
+	preempt_enable_no_resched();
+#endif
+	return 0;
+}
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+
 /*
  * This is the lowest virtual address we can permit any user space
  * mapping to be mapped at.  This is particularly important for
@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file
 /*
  * The pgprot_* and protection_map entries will be fixed up in runtime
  * to include the cachable and bufferable bits based on memory policy,
- * as well as any architecture dependent bits like global/ASID and SMP
- * shared mapping bits.
+ * as well as any architecture dependent bits like global/ASID, PXN,
+ * and SMP shared mapping bits.
  */
 #define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
 
@@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
-		L_PTE_NONE | L_PTE_VALID;
+		L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
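
The pax_open_kernel()/pax_close_kernel() pair introduced above switches
DOMAIN_KERNEL to the DOMAIN_KERNEXEC (manager) type with preemption disabled,
and back to the client type afterwards; the rest of the patch uses it to
bracket single writes to otherwise read-only kernel memory (fncpy.h, fiq.c,
patch.c, traps.c).  A sketch of that pattern -- the helper and target below
are hypothetical, only the open/close calls come from the patch:

	/* Sketch only: the write-window pattern used throughout this patch. */
	#include <asm/pgtable.h>	/* pax_open_kernel()/pax_close_kernel() */

	static void update_ro_slot(unsigned int *slot, unsigned int val)
	{
		pax_open_kernel();	/* DOMAIN_KERNEL -> KERNEXEC, preempt off */
		*slot = val;		/* the one privileged write */
		pax_close_kernel();	/* back to client type, preempt restored */
	}
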
diff -ruNp linux-3.13.11/arch/arm/include/asm/psci.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/psci.h
--- linux-3.13.11/arch/arm/include/asm/psci.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/psci.h	2014-07-09
12:00:15.000000000 +0200
@@ -29,7 +29,7 @@ struct psci_operations {
 	int (*cpu_off)(struct psci_power_state state);
 	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
 	int (*migrate)(unsigned long cpuid);
-};
+} __no_const;
 
 extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
diff -ruNp linux-3.13.11/arch/arm/include/asm/smp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/smp.h
--- linux-3.13.11/arch/arm/include/asm/smp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/smp.h	2014-07-09
12:00:15.000000000 +0200
@@ -112,7 +112,7 @@ struct smp_operations {
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
-};
+} __no_const;
 
 /*
  * set platform specific SMP operations
diff -ruNp linux-3.13.11/arch/arm/include/asm/thread_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/thread_info.h
--- linux-3.13.11/arch/arm/include/asm/thread_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/thread_info.h	2014-07-09
12:00:15.000000000 +0200
@@ -88,9 +88,9 @@ struct thread_info {
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
+	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) |	\
+			  domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |	\
+			  domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT),	\
 	.restart_block	= {						\
 		.fn	= do_no_restart_syscall,			\
 	},								\
@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(stru
 #define TIF_SYSCALL_AUDIT	9
 #define TIF_SYSCALL_TRACEPOINT	10
 #define TIF_SECCOMP		11	/* seccomp syscall filtering active */
-#define TIF_NOHZ		12	/* in adaptive nohz mode */
+/* within 8 bits of TIF_SYSCALL_TRACE
+ *  to meet flexible second operand requirements
+ */
+#define TIF_GRSEC_SETXID	12
+#define TIF_NOHZ		13	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(stru
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
+#define _TIF_GRSEC_SETXID	(1 << TIF_GRSEC_SETXID)
 
 /* Checks for any syscall work in entry-common.S */
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
 
 /*
  * Change these and you break ASM code in entry-common.S
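
About the "flexible second operand" comment next to TIF_GRSEC_SETXID: an ARM
data-processing immediate is an 8-bit value rotated right by an even amount,
so _TIF_SYSCALL_WORK only stays encodable if all of its bits fit inside an
8-bit window -- hence keeping TIF_GRSEC_SETXID within 8 bits of
TIF_SYSCALL_TRACE.  A quick standalone check (illustration only;
TIF_SYSCALL_TRACE assumed to be bit 8 as in mainline):

	/* Sketch: test whether a constant fits ARM's rotated 8-bit immediate. */
	#include <stdio.h>
	#include <stdint.h>

	static int arm_immediate_ok(uint32_t v)
	{
		int rot;
		for (rot = 0; rot < 32; rot += 2) {
			uint32_t r = rot ? ((v << rot) | (v >> (32 - rot))) : v;
			if (r <= 0xffu)		/* undoing an even ROR leaves <= 8 bits */
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		uint32_t work = 0x1f00;		/* bits 8..12: TRACE..GRSEC_SETXID */
		printf("0x%08x encodable: %d\n", work, arm_immediate_ok(work));
		return 0;
	}
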
diff -ruNp linux-3.13.11/arch/arm/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/uaccess.h
--- linux-3.13.11/arch/arm/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/asm/uaccess.h	2014-07-09
12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <asm/domain.h>
 #include <asm/unified.h>
 #include <asm/compiler.h>
+#include <asm/pgtable.h>
 
 #if __LINUX_ARM_ARCH__ < 6
 #include <asm-generic/uaccess-unaligned.h>
@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
-	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
 }
 
 #define segment_eq(a,b)	((a) == (b))
 
+#define __HAVE_ARCH_PAX_OPEN_USERLAND
+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
+
+static inline void pax_open_userland(void)
+{
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (segment_eq(get_fs(), USER_DS)) {
+		BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
+		modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
+	}
+#endif
+
+}
+
+static inline void pax_close_userland(void)
+{
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (segment_eq(get_fs(), USER_DS)) {
+		BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
+		modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
+	}
+#endif
+
+}
+
 #define __addr_ok(addr) ({ \
 	unsigned long flag; \
 	__asm__("cmp %2, %0; movlo %0, #0" \
@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
 
 #define get_user(x,p)							\
 	({								\
+		int __e;						\
 		might_fault();						\
-		__get_user_check(x,p);					\
+		pax_open_userland();					\
+		__e = __get_user_check(x,p);				\
+		pax_close_userland();					\
+		__e;							\
 	 })
 
 extern int __put_user_1(void *, unsigned int);
@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned
 
 #define put_user(x,p)							\
 	({								\
+		int __e;						\
 		might_fault();						\
-		__put_user_check(x,p);					\
+		pax_open_userland();					\
+		__e = __put_user_check(x,p);				\
+		pax_close_userland();					\
+		__e;							\
 	 })
 
 #else /* CONFIG_MMU */
@@ -220,6 +256,7 @@ static inline void set_fs(mm_segment_t f
 
 #endif /* CONFIG_MMU */
 
+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
 #define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
 
 #define user_addr_max() \
@@ -237,13 +274,17 @@ static inline void set_fs(mm_segment_t f
 #define __get_user(x,ptr)						\
 ({									\
 	long __gu_err = 0;						\
+	pax_open_userland();						\
 	__get_user_err((x),(ptr),__gu_err);				\
+	pax_close_userland();						\
 	__gu_err;							\
 })
 
 #define __get_user_error(x,ptr,err)					\
 ({									\
+	pax_open_userland();						\
 	__get_user_err((x),(ptr),err);					\
+	pax_close_userland();						\
 	(void) 0;							\
 })
 
@@ -319,13 +360,17 @@ do {									\
 #define __put_user(x,ptr)						\
 ({									\
 	long __pu_err = 0;						\
+	pax_open_userland();						\
 	__put_user_err((x),(ptr),__pu_err);				\
+	pax_close_userland();						\
 	__pu_err;							\
 })
 
 #define __put_user_error(x,ptr,err)					\
 ({									\
+	pax_open_userland();						\
 	__put_user_err((x),(ptr),err);					\
+	pax_close_userland();						\
 	(void) 0;							\
 })
 
@@ -425,11 +470,44 @@ do {									\
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long ret;
+
+	check_object_size(to, n, false);
+	pax_open_userland();
+	ret = ___copy_from_user(to, from, n);
+	pax_close_userland();
+	return ret;
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned long ret;
+
+	check_object_size(from, n, true);
+	pax_open_userland();
+	ret = ___copy_to_user(to, from, n);
+	pax_close_userland();
+	return ret;
+}
+
 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
+{
+	unsigned long ret;
+	pax_open_userland();
+	ret = ___clear_user(addr, n);
+	pax_close_userland();
+	return ret;
+}
+
 #else
 #define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
@@ -438,6 +516,9 @@ extern unsigned long __must_check __clea
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	if (access_ok(VERIFY_READ, from, n))
 		n = __copy_from_user(to, from, n);
 	else /* security hole - plug it */
@@ -447,6 +528,9 @@ static inline unsigned long __must_check
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = __copy_to_user(to, from, n);
 	return n;
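
Summary of the uaccess.h changes above: the low-level __copy_*_user and
__clear_user entry points are renamed to ___copy_*_user/___clear_user and
wrapped in inlines that open/close userland around the access (and, for the
copies, run check_object_size() for the USERCOPY checks), while
copy_from_user()/copy_to_user() additionally reject sizes whose signed value
is negative before access_ok() is even consulted.  A small userspace
illustration of that sign guard -- sketch only, not kernel code:

	/* Sketch: the "(long)n < 0" guard catches size_t underflows before
	 * any copying is attempted. */
	#include <stdio.h>

	static unsigned long copy_guard(unsigned long n)
	{
		if ((long)n < 0)	/* same test the patch adds */
			return n;	/* "nothing copied", no memory touched */
		return 0;		/* pretend the copy succeeded */
	}

	int main(void)
	{
		unsigned long good = 16;
		unsigned long bad  = good - 32;	/* underflow -> huge value */
		printf("good: %lu left, bad: %lu left\n",
		       copy_guard(good), copy_guard(bad));
		return 0;
	}
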
diff -ruNp linux-3.13.11/arch/arm/include/uapi/asm/ptrace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/uapi/asm/ptrace.h
--- linux-3.13.11/arch/arm/include/uapi/asm/ptrace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/include/uapi/asm/ptrace.h	2014-07-09
12:00:15.000000000 +0200
@@ -92,7 +92,7 @@
  * ARMv7 groups of PSR bits
  */
 #define APSR_MASK	0xf80f0000	/* N, Z, C, V, Q and GE flags */
-#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
+#define PSR_ISET_MASK	0x01000020	/* ISA state (J, T) mask */
 #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
 #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
 
diff -ruNp linux-3.13.11/arch/arm/kernel/armksyms.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/armksyms.c
--- linux-3.13.11/arch/arm/kernel/armksyms.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/armksyms.c	2014-07-09
12:00:15.000000000 +0200
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
 
 	/* networking */
 EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_ipv6_magic);
 
@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff -ruNp linux-3.13.11/arch/arm/kernel/calls.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/calls.S
--- linux-3.13.11/arch/arm/kernel/calls.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/calls.S	2014-07-09
12:00:15.000000000 +0200
@@ -322,7 +322,7 @@
 /* 310 */	CALL(sys_request_key)
 		CALL(sys_keyctl)
 		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
-/* vserver */	CALL(sys_ni_syscall)
+		CALL(sys_vserver)
 		CALL(sys_ioprio_set)
 /* 315 */	CALL(sys_ioprio_get)
 		CALL(sys_inotify_init)
diff -ruNp linux-3.13.11/arch/arm/kernel/entry-armv.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-armv.S
--- linux-3.13.11/arch/arm/kernel/entry-armv.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-armv.S	2014-07-09
12:00:15.000000000 +0200
@@ -47,6 +47,87 @@
 9997:
 	.endm
 
+	.macro	pax_enter_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	@ make aligned space for saved DACR
+	sub	sp, sp, #8
+	@ save regs
+	stmdb	sp!, {r1, r2}
+	@ read DACR from cpu_domain into r1
+	mov	r2, sp
+	@ assume 8K pages, since we have to split the immediate in two
+	bic	r2, r2, #(0x1fc0)
+	bic	r2, r2, #(0x3f)
+	ldr	r1, [r2, #TI_CPU_DOMAIN]
+	@ store old DACR on stack 
+	str	r1, [sp, #8]
+#ifdef CONFIG_PAX_KERNEXEC
+	@ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+	bic	r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+	orr	r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	@ set current DOMAIN_USER to DOMAIN_NOACCESS
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
+#endif
+	@ write r1 to current_thread_info()->cpu_domain
+	str	r1, [r2, #TI_CPU_DOMAIN]
+	@ write r1 to DACR
+	mcr	p15, 0, r1, c3, c0, 0
+	@ instruction sync
+	instr_sync
+	@ restore regs
+	ldmia	sp!, {r1, r2}
+#endif
+	.endm
+
+	.macro	pax_open_userland
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	@ save regs
+	stmdb	sp!, {r0, r1}
+	@ read DACR from cpu_domain into r1
+	mov	r0, sp
+	@ assume 8K pages, since we have to split the immediate in two
+	bic	r0, r0, #(0x1fc0)
+	bic	r0, r0, #(0x3f)
+	ldr	r1, [r0, #TI_CPU_DOMAIN]
+	@ set current DOMAIN_USER to DOMAIN_CLIENT
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
+	orr	r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
+	@ write r1 to current_thread_info()->cpu_domain
+	str	r1, [r0, #TI_CPU_DOMAIN]
+	@ write r1 to DACR
+	mcr	p15, 0, r1, c3, c0, 0
+	@ instruction sync
+	instr_sync
+	@ restore regs
+	ldmia	sp!, {r0, r1}
+#endif
+	.endm
+
+	.macro	pax_close_userland
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	@ save regs
+	stmdb	sp!, {r0, r1}
+	@ read DACR from cpu_domain into r1
+	mov	r0, sp
+	@ assume 8K pages, since we have to split the immediate in two
+	bic	r0, r0, #(0x1fc0)
+	bic	r0, r0, #(0x3f)
+	ldr	r1, [r0, #TI_CPU_DOMAIN]
+	@ set current DOMAIN_USER to DOMAIN_NOACCESS
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
+	@ write r1 to current_thread_info()->cpu_domain
+	str	r1, [r0, #TI_CPU_DOMAIN]
+	@ write r1 to DACR
+	mcr	p15, 0, r1, c3, c0, 0
+	@ instruction sync
+	instr_sync
+	@ restore regs
+	ldmia	sp!, {r0, r1}
+#endif
+	.endm
+
 	.macro	pabt_helper
 	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
 #ifdef MULTI_PABORT
@@ -89,11 +170,15 @@
  * Invalid mode handlers
  */
 	.macro	inv_entry, reason
+
+	pax_enter_kernel
+
 	sub	sp, sp, #S_FRAME_SIZE
  ARM(	stmib	sp, {r1 - lr}		)
  THUMB(	stmia	sp, {r0 - r12}		)
  THUMB(	str	sp, [sp, #S_SP]		)
  THUMB(	str	lr, [sp, #S_LR]		)
+
 	mov	r1, #\reason
 	.endm
 
@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
 	.macro	svc_entry, stack_hole=0
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
+
+	pax_enter_kernel
+
 	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	@ offset sp by 8 as done in pax_enter_kernel
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
+#else
 	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#endif
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
 	.macro	usr_entry
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
+
+	pax_enter_kernel_user
+
 	sub	sp, sp, #S_FRAME_SIZE
  ARM(	stmib	sp, {r1 - r12}	)
  THUMB(	stmia	sp, {r0 - r12}	)
@@ -416,7 +513,9 @@ __und_usr:
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
 	bne	__und_usr_thumb
 	sub	r4, r2, #4			@ ARM instr at LR - 4
+	pax_open_userland
 1:	ldrt	r0, [r4]
+	pax_close_userland
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
 	@ r0 = 32-bit ARM instruction which caused the exception
@@ -450,10 +549,14 @@ __und_usr_thumb:
  */
 	.arch	armv6t2
 #endif
+	pax_open_userland
 2:	ldrht	r5, [r4]
+	pax_close_userland
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	pax_open_userland
 3:	ldrht	r0, [r2]
+	pax_close_userland
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -482,7 +585,8 @@ ENDPROC(__und_usr)
  */
 	.pushsection .fixup, "ax"
 	.align	2
-4:	mov	pc, r9
+4:	pax_close_userland
+	mov	pc, r9
 	.popsection
 	.pushsection __ex_table,"a"
 	.long	1b, 4b
@@ -692,7 +796,7 @@ ENTRY(__switch_to)
  THUMB(	str	lr, [ip], #4		   )
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
@@ -701,7 +805,7 @@ ENTRY(__switch_to)
 	ldr	r8, =__stack_chk_guard
 	ldr	r7, [r7, #TSK_STACK_CANARY]
 #endif
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
 	mov	r5, r0
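
A note on the entry-armv.S macros: they all recover thread_info from sp by
clearing the low bits, and the pair of bic instructions with 0x1fc0 and 0x3f
exists because 0x1fff (THREAD_SIZE - 1 for the 8 KiB stacks the macro
comments assume) is not representable as a single ARM immediate.  Equivalence
check, illustration only:

	/* Sketch: the two-step bic equals masking with ~(THREAD_SIZE - 1),
	 * assuming THREAD_SIZE == 0x2000. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t sp = 0xc7a13e54;	/* made-up kernel stack pointer */
		uint32_t a  = (sp & ~(uint32_t)0x1fc0) & ~(uint32_t)0x3f;
		uint32_t b  = sp & ~(uint32_t)0x1fff;
		printf("split: 0x%08x  single: 0x%08x  equal: %d\n", a, b, a == b);
		return 0;
	}
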
diff -ruNp linux-3.13.11/arch/arm/kernel/entry-common.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-common.S
--- linux-3.13.11/arch/arm/kernel/entry-common.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-common.S	2014-07-09
12:00:15.000000000 +0200
@@ -10,18 +10,46 @@
 
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/domain.h>
 #include <asm/unwind.h>
 
+#include "entry-header.S"
+
 #ifdef CONFIG_NEED_RET_TO_USER
 #include <mach/entry-macro.S>
 #else
 	.macro  arch_ret_to_user, tmp1, tmp2
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	@ save regs
+	stmdb	sp!, {r1, r2}
+        @ read DACR from cpu_domain into r1
+        mov     r2, sp
+        @ assume 8K pages, since we have to split the immediate in two
+        bic     r2, r2, #(0x1fc0)
+        bic     r2, r2, #(0x3f)
+        ldr     r1, [r2, #TI_CPU_DOMAIN]
+#ifdef CONFIG_PAX_KERNEXEC
+        @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+        bic     r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+        orr     r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+        @ set current DOMAIN_USER to DOMAIN_UDEREF
+        bic     r1, r1, #(domain_val(DOMAIN_USER, 3))
+        orr     r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
+#endif
+        @ write r1 to current_thread_info()->cpu_domain
+        str     r1, [r2, #TI_CPU_DOMAIN]
+        @ write r1 to DACR
+        mcr     p15, 0, r1, c3, c0, 0
+        @ instruction sync
+        instr_sync
+	@ restore regs
+	ldmia	sp!, {r1, r2}
+#endif
 	.endm
 #endif
 
-#include "entry-header.S"
-
-
 	.align	5
 /*
  * This is the fast syscall return path.  We do as little as
@@ -411,6 +439,12 @@ ENTRY(vector_swi)
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	/*
+	 * do this here to avoid a performance hit of wrapping the code above
+	 * that directly dereferences userland to parse the SWI instruction
+	 */
+	pax_enter_kernel_user
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
diff -ruNp linux-3.13.11/arch/arm/kernel/entry-header.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-header.S
--- linux-3.13.11/arch/arm/kernel/entry-header.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/entry-header.S	2014-07-09
12:00:15.000000000 +0200
@@ -184,6 +184,60 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 
+	.macro	pax_enter_kernel_user
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	@ save regs
+	stmdb	sp!, {r0, r1}
+	@ read DACR from cpu_domain into r1
+	mov	r0, sp
+	@ assume 8K pages, since we have to split the immediate in two
+	bic	r0, r0, #(0x1fc0)
+	bic	r0, r0, #(0x3f)
+	ldr	r1, [r0, #TI_CPU_DOMAIN]
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	@ set current DOMAIN_USER to DOMAIN_NOACCESS
+	bic	r1, r1, #(domain_val(DOMAIN_USER, 3))
+#endif
+#ifdef CONFIG_PAX_KERNEXEC
+	@ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
+	bic	r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
+	orr	r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+	@ write r1 to current_thread_info()->cpu_domain
+	str	r1, [r0, #TI_CPU_DOMAIN]
+	@ write r1 to DACR
+	mcr	p15, 0, r1, c3, c0, 0
+	@ instruction sync
+	instr_sync
+	@ restore regs
+	ldmia	sp!, {r0, r1}
+#endif
+	.endm
+
+	.macro  pax_exit_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	@ save regs
+	stmdb	sp!, {r0, r1}
+	@ read old DACR from stack into r1
+	ldr	r1, [sp, #(8 + S_SP)]
+	sub	r1, r1, #8
+	ldr	r1, [r1]
+
+	@ write r1 to current_thread_info()->cpu_domain
+	mov	r0, sp
+	@ assume 8K pages, since we have to split the immediate in two
+	bic	r0, r0, #(0x1fc0)
+	bic	r0, r0, #(0x3f)
+	str	r1, [r0, #TI_CPU_DOMAIN]
+	@ write r1 to DACR
+	mcr	p15, 0, r1, c3, c0, 0
+	@ instruction sync
+	instr_sync
+	@ restore regs
+	ldmia	sp!, {r0, r1}
+#endif
+	.endm
+
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
@@ -203,6 +257,9 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+
+	pax_exit_kernel
+
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
@@ -266,6 +323,9 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+
+	pax_exit_kernel
+
 	ldr	lr, [sp, #S_SP]			@ top of the stack
 	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
 	clrex					@ clear the exclusive monitor
diff -ruNp linux-3.13.11/arch/arm/kernel/fiq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/fiq.c
--- linux-3.13.11/arch/arm/kernel/fiq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/fiq.c	2014-07-09
12:00:15.000000000 +0200
@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsign
 	void *base = vectors_page;
 	unsigned offset = FIQ_OFFSET;
 
+	pax_open_kernel();
 	memcpy(base + offset, start, length);
+	pax_close_kernel();
+
 	if (!cache_is_vipt_nonaliasing())
 		flush_icache_range((unsigned long)base + offset, offset +
 				   length);
diff -ruNp linux-3.13.11/arch/arm/kernel/head.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/head.S
--- linux-3.13.11/arch/arm/kernel/head.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/head.S	2014-07-09
12:00:15.000000000 +0200
@@ -52,7 +52,9 @@
 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
 
 	.macro	pgtbl, rd, phys
-	add	\rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
+	mov	\rd, #TEXT_OFFSET
+	sub	\rd, #PG_DIR_SIZE
+	add	\rd, \rd, \phys
 	.endm
 
 /*
@@ -436,7 +438,7 @@ __enable_mmu:
 	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+		      domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
diff -ruNp linux-3.13.11/arch/arm/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/module.c
--- linux-3.13.11/arch/arm/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/module.c	2014-07-09
12:00:15.000000000 +0200
@@ -38,12 +38,39 @@
 #endif
 
 #ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
 {
+	if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
+		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				GFP_KERNEL, prot, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
+
+void *module_alloc(unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+	return __module_alloc(size, PAGE_KERNEL);
+#else
+	return __module_alloc(size, PAGE_KERNEL_EXEC);
+#endif
+
+}
+
+#ifdef CONFIG_PAX_KERNEXEC
+void module_free_exec(struct module *mod, void *module_region)
+{
+	module_free(mod, module_region);
+}
+EXPORT_SYMBOL(module_free_exec);
+
+void *module_alloc_exec(unsigned long size)
+{
+	return __module_alloc(size, PAGE_KERNEL_EXEC);
+}
+EXPORT_SYMBOL(module_alloc_exec);
+#endif
 #endif
 
 int
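
On the module.c hunk: with KERNEXEC enabled, module_alloc() now hands back
non-executable PAGE_KERNEL memory and a separate module_alloc_exec()/
module_free_exec() pair serves the executable mappings, and __module_alloc()
refuses zero-sized or oversized requests up front.  A hedged sketch of how a
caller could keep data and text apart -- the struct and helper are
hypothetical, only the allocators come from the hunk:

	/* Sketch only: separate RW data and RX text allocations for a module. */
	struct mod_layout {
		void *core_data;	/* module_alloc(): PAGE_KERNEL under KERNEXEC */
		void *core_text;	/* module_alloc_exec(): PAGE_KERNEL_EXEC */
	};

	static int alloc_mod_layout(struct mod_layout *l,
				    unsigned long dsize, unsigned long tsize)
	{
		l->core_data = module_alloc(dsize);
		l->core_text = module_alloc_exec(tsize);
		if (!l->core_data || !l->core_text)
			return -ENOMEM;	/* caller undoes with module_free{,_exec}() */
		return 0;
	}
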
diff -ruNp linux-3.13.11/arch/arm/kernel/patch.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/patch.c
--- linux-3.13.11/arch/arm/kernel/patch.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/patch.c	2014-07-09
12:00:15.000000000 +0200
@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr,
 	bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
 	int size;
 
+	pax_open_kernel();
 	if (thumb2 && __opcode_is_thumb16(insn)) {
 		*(u16 *)addr = __opcode_to_mem_thumb16(insn);
 		size = sizeof(u16);
@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr,
 		*(u32 *)addr = insn;
 		size = sizeof(u32);
 	}
+	pax_close_kernel();
 
 	flush_icache_range((uintptr_t)(addr),
 			   (uintptr_t)(addr) + size);
diff -ruNp linux-3.13.11/arch/arm/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/process.c
--- linux-3.13.11/arch/arm/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/process.c	2014-07-09
12:00:15.000000000 +0200
@@ -217,6 +217,7 @@ void machine_power_off(void)
 
 	if (pm_power_off)
 		pm_power_off();
+	BUG();
 }
 
 /*
@@ -230,7 +231,7 @@ void machine_power_off(void)
  * executing pre-reset code, and using RAM that the primary CPU's code wishes
  * to use. Implementing such co-ordination would be essentially impossible.
  */
-void machine_restart(char *cmd)
+__noreturn void machine_restart(char *cmd)
 {
 	local_irq_disable();
 	smp_send_stop();
@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
 
 	show_regs_print_info(KERN_DEFAULT);
 
-	print_symbol("PC is at %s\n", instruction_pointer(regs));
-	print_symbol("LR is at %s\n", regs->ARM_lr);
+	printk("PC is at %pA\n", (void *)instruction_pointer(regs));
+	printk("LR is at %pA\n", (void *)regs->ARM_lr);
 	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
 	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
 		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_stru
 	return 0;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
-}
-
 #ifdef CONFIG_MMU
 #ifdef CONFIG_KUSER_HELPERS
 /*
@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma =
 
 static int __init gate_vma_init(void)
 {
-	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+	gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 	return 0;
 }
 arch_initcall(gate_vma_init);
@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long add
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	return is_gate_vma(vma) ? "[vectors]" :
-		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
-		 "[sigpage]" : NULL;
+	return is_gate_vma(vma) ? "[vectors]" : NULL;
 }
 
-static struct page *signal_page;
-extern struct page *get_signal_page(void);
-
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret;
-
-	if (!signal_page)
-		signal_page = get_signal_page();
-	if (!signal_page)
-		return -ENOMEM;
 
 	down_write(&mm->mmap_sem);
-	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-	if (IS_ERR_VALUE(addr)) {
-		ret = addr;
-		goto up_fail;
-	}
-
-	ret = install_special_mapping(mm, addr, PAGE_SIZE,
-		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
-		&signal_page);
-
-	if (ret == 0)
-		mm->context.sigpage = addr;
-
- up_fail:
+	mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
 	up_write(&mm->mmap_sem);
-	return ret;
+	return 0;
 }
 #endif
diff -ruNp linux-3.13.11/arch/arm/kernel/psci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/psci.c
--- linux-3.13.11/arch/arm/kernel/psci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/psci.c	2014-07-09
12:00:15.000000000 +0200
@@ -24,7 +24,7 @@
 #include <asm/opcodes-virt.h>
 #include <asm/psci.h>
 
-struct psci_operations psci_ops;
+struct psci_operations psci_ops __read_only;
 
 static int (*invoke_psci_fn)(u32, u32, u32, u32);
 
diff -ruNp linux-3.13.11/arch/arm/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/ptrace.c
--- linux-3.13.11/arch/arm/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/ptrace.c	2014-07-09
12:00:15.000000000 +0200
@@ -929,10 +929,19 @@ static int tracehook_report_syscall(stru
 	return current_thread_info()->syscall;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
 	current_thread_info()->syscall = scno;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	/* Do the secure computing check first; failures should be fast. */
 	if (secure_computing(scno) == -1)
 		return -1;
diff -ruNp linux-3.13.11/arch/arm/kernel/setup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/setup.c
--- linux-3.13.11/arch/arm/kernel/setup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/setup.c	2014-07-09
12:00:15.000000000 +0200
@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
 unsigned int elf_hwcap __read_mostly;
 EXPORT_SYMBOL(elf_hwcap);
 
+pteval_t __supported_pte_mask __read_only;
+pmdval_t __supported_pmd_mask __read_only;
 
 #ifdef MULTI_CPU
-struct processor processor __read_mostly;
+struct processor processor __read_only;
 #endif
 #ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb __read_mostly;
+struct cpu_tlb_fns cpu_tlb __read_only;
 #endif
 #ifdef MULTI_USER
-struct cpu_user_fns cpu_user __read_mostly;
+struct cpu_user_fns cpu_user __read_only;
 #endif
 #ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache __read_mostly;
+struct cpu_cache_fns cpu_cache __read_only;
 #endif
 #ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache __read_mostly;
+struct outer_cache_fns outer_cache __read_only;
 EXPORT_SYMBOL(outer_cache);
 #endif
 
@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
 		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
-		    (mmfr0 & 0x000000f0) >= 0x00000030)
+		    (mmfr0 & 0x000000f0) >= 0x00000030) {
 			cpu_arch = CPU_ARCH_ARMv7;
-		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+			if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
+				__supported_pte_mask |= L_PTE_PXN;
+				__supported_pmd_mask |= PMD_PXNTABLE;
+			}
+		} else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
 			cpu_arch = CPU_ARCH_ARMv6;
 		else
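
The setup.c change above keys the new PXN support off the VMSA field (bits
[3:0], as I read it) of ID_MMFR0: the CPU is treated as ARMv7 when that field
or the next one reads >= 3, and L_PTE_PXN/PMD_PXNTABLE are only ORed into the
supported masks when the VMSA field reads 4 or 5.  Decoding sketch, mirroring
that logic on a made-up register value:

	/* Sketch: how the VMSA field of ID_MMFR0 gates the PXN bits.  The
	 * sample value is invented for illustration. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int mmfr0 = 0x10101105;	/* hypothetical ID_MMFR0 */
		unsigned int vmsa  = mmfr0 & 0xf;

		if (vmsa >= 3)
			printf("ARMv7 VMSA\n");
		if (vmsa == 4 || vmsa == 5)
			printf("PXN usable: enable L_PTE_PXN and PMD_PXNTABLE\n");
		return 0;
	}
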
diff -ruNp linux-3.13.11/arch/arm/kernel/signal.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/signal.c
--- linux-3.13.11/arch/arm/kernel/signal.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/signal.c	2014-07-09
12:00:15.000000000 +0200
@@ -23,8 +23,6 @@
 
 extern const unsigned long sigreturn_codes[7];
 
-static unsigned long signal_return_offset;
-
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struc
 			 * except when the MPU has protected the vectors
 			 * page from PL0
 			 */
-			retcode = mm->context.sigpage + signal_return_offset +
-				  (idx << 2) + thumb;
+			retcode = mm->context.sigpage + (idx << 2) + thumb;
 		} else
 #endif
 		{
@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, un
 	} while (thread_flags & _TIF_WORK_MASK);
 	return 0;
 }
-
-struct page *get_signal_page(void)
-{
-	unsigned long ptr;
-	unsigned offset;
-	struct page *page;
-	void *addr;
-
-	page = alloc_pages(GFP_KERNEL, 0);
-
-	if (!page)
-		return NULL;
-
-	addr = page_address(page);
-
-	/* Give the signal return code some randomness */
-	offset = 0x200 + (get_random_int() & 0x7fc);
-	signal_return_offset = offset;
-
-	/*
-	 * Copy signal return handlers into the vector page, and
-	 * set sigreturn to be a pointer to these.
-	 */
-	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
-
-	ptr = (unsigned long)addr + offset;
-	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
-
-	return page;
-}
diff -ruNp linux-3.13.11/arch/arm/kernel/smp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/smp.c
--- linux-3.13.11/arch/arm/kernel/smp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/smp.c	2014-07-09
12:00:15.000000000 +0200
@@ -73,7 +73,7 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
-static struct smp_operations smp_ops;
+static struct smp_operations smp_ops __read_only;
 
 void __init smp_set_ops(struct smp_operations *ops)
 {
diff -ruNp linux-3.13.11/arch/arm/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/traps.c
--- linux-3.13.11/arch/arm/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/traps.c	2014-07-09
12:00:15.000000000 +0200
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void
*)from);
+	printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void
*)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
@@ -247,8 +247,8 @@ static int __die(const char *str, int er
 
 	print_modules();
 	__show_regs(regs);
-	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
+	printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n",
+		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, end_of_stack(tsk));
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
+extern void gr_handle_kernel_exploit(void);
+
 static unsigned long oops_begin(void)
 {
 	int cpu;
@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
+
+	gr_handle_kernel_exploit();
+
 	if (signr)
 		do_exit(signr);
 }
@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struc
 			 * The user helper at 0xffff0fe0 must be used instead.
 			 * (see entry-armv.S for details)
 			 */
+			pax_open_kernel();
 			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+			pax_close_kernel();
 		}
 		return 0;
 
@@ -899,7 +906,11 @@ void __init early_trap_init(void *vector
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+
+#ifndef CONFIG_PAX_MEMORY_UDEREF
+	modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
+#endif
+
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
diff -ruNp linux-3.13.11/arch/arm/kernel/vmlinux.lds.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/vmlinux.lds.S
--- linux-3.13.11/arch/arm/kernel/vmlinux.lds.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kernel/vmlinux.lds.S	2014-07-09
12:00:15.000000000 +0200
@@ -8,7 +8,11 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
-	
+
+#ifdef CONFIG_PAX_KERNEXEC
+#include <asm/pgtable.h>
+#endif
+
 #define PROC_INFO							\
 	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
@@ -34,7 +38,7 @@
 #endif
 
 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
-	defined(CONFIG_GENERIC_BUG)
+	defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
 #define ARM_EXIT_KEEP(x)	x
 #define ARM_EXIT_DISCARD(x)
 #else
@@ -90,6 +94,11 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+
+#ifdef CONFIG_PAX_KERNEXEC
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -112,6 +121,8 @@ SECTIONS
 			ARM_CPU_KEEP(PROC_INFO)
 	}
 
+	_etext = .;			/* End of text section */
+
 	RO_DATA(PAGE_SIZE)
 
 	. = ALIGN(4);
@@ -142,7 +153,9 @@ SECTIONS
 
 	NOTES
 
-	_etext = .;			/* End of text and rodata section */
+#ifdef CONFIG_PAX_KERNEXEC
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
@@ -220,6 +233,11 @@ SECTIONS
 	. = PAGE_OFFSET + TEXT_OFFSET;
 #else
 	__init_end = .;
+
+#ifdef CONFIG_PAX_KERNEXEC
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
 	. = ALIGN(THREAD_SIZE);
 	__data_loc = .;
 #endif
diff -ruNp linux-3.13.11/arch/arm/kvm/arm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kvm/arm.c
--- linux-3.13.11/arch/arm/kvm/arm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/kvm/arm.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors
 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u8 kvm_next_vmid;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
@@ -397,7 +397,7 @@ void force_vm_exit(const cpumask_t *mask
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
 }
 
 /**
@@ -430,7 +430,7 @@ static void update_vttbr(struct kvm *kvm
 
 	/* First user of a new VMID generation? */
 	if (unlikely(kvm_next_vmid == 0)) {
-		atomic64_inc(&kvm_vmid_gen);
+		atomic64_inc_unchecked(&kvm_vmid_gen);
 		kvm_next_vmid = 1;
 
 		/*
@@ -447,7 +447,7 @@ static void update_vttbr(struct kvm *kvm
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
+	kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
 
diff -ruNp linux-3.13.11/arch/arm/lib/clear_user.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/clear_user.S
--- linux-3.13.11/arch/arm/lib/clear_user.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/clear_user.S	2014-07-09
12:00:15.000000000 +0200
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int ___clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(___clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(___clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .fixup,"ax"
diff -ruNp linux-3.13.11/arch/arm/lib/copy_from_user.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_from_user.S
--- linux-3.13.11/arch/arm/lib/copy_from_user.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_from_user.S	2014-07-09
12:00:15.000000000 +0200
@@ -16,7 +16,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t ___copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -84,11 +84,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(___copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(___copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff -ruNp linux-3.13.11/arch/arm/lib/copy_page.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_page.S
--- linux-3.13.11/arch/arm/lib/copy_page.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_page.S	2014-07-09
12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
  *  ASM optimised string functions
  */
 #include <linux/linkage.h>
+#include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
diff -ruNp linux-3.13.11/arch/arm/lib/copy_to_user.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_to_user.S
--- linux-3.13.11/arch/arm/lib/copy_to_user.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/copy_to_user.S	2014-07-09
12:00:15.000000000 +0200
@@ -16,7 +16,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t ___copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -88,11 +88,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(___copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(___copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .fixup,"ax"
diff -ruNp linux-3.13.11/arch/arm/lib/csumpartialcopyuser.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/csumpartialcopyuser.S
--- linux-3.13.11/arch/arm/lib/csumpartialcopyuser.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/csumpartialcopyuser.S	2014-07-09
12:00:15.000000000 +0200
@@ -57,8 +57,8 @@
  *  Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
  */
 
-#define FN_ENTRY	ENTRY(csum_partial_copy_from_user)
-#define FN_EXIT		ENDPROC(csum_partial_copy_from_user)
+#define FN_ENTRY	ENTRY(__csum_partial_copy_from_user)
+#define FN_EXIT		ENDPROC(__csum_partial_copy_from_user)
 
 #include "csumpartialcopygeneric.S"
 
diff -ruNp linux-3.13.11/arch/arm/lib/delay.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/delay.c
--- linux-3.13.11/arch/arm/lib/delay.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/delay.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,7 +28,7 @@
 /*
  * Default to the loop-based delay implementation.
  */
-struct arm_delay_ops arm_delay_ops = {
+struct arm_delay_ops arm_delay_ops __read_only = {
 	.delay		= __loop_delay,
 	.const_udelay	= __loop_const_udelay,
 	.udelay		= __loop_udelay,
diff -ruNp linux-3.13.11/arch/arm/lib/uaccess_with_memcpy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/uaccess_with_memcpy.c
--- linux-3.13.11/arch/arm/lib/uaccess_with_memcpy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/lib/uaccess_with_memcpy.c	2014-07-09 12:00:15.000000000 +0200
@@ -136,7 +136,7 @@ out:
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+___copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long ___clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
 	if (n < 64)
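
The extra leading underscore introduced above (___clear_user, ___copy_from_user, ___copy_to_user) frees the original double-underscore names so the patch can reintroduce them as checking wrappers in the uaccess headers (outside this excerpt). A minimal sketch of that idea, assuming the PaX check_object_size() helper; the names and exact signature here are illustrative, not copied from the patch:

	static inline unsigned long __must_check
	__copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		/* reject copies whose length exceeds the source object (USERCOPY) */
		check_object_size(from, n, true);
		/* then fall through to the raw, unchecked primitive renamed above */
		return ___copy_to_user(to, from, n);
	}
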
diff -ruNp linux-3.13.11/arch/arm/mach-kirkwood/common.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-kirkwood/common.c
--- linux-3.13.11/arch/arm/mach-kirkwood/common.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-kirkwood/common.c	2014-07-09 12:00:15.000000000 +0200
@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct c
 	clk_gate_ops.disable(hw);
 }
 
-static struct clk_ops clk_gate_fn_ops;
+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
+{
+	return clk_gate_ops.is_enabled(hw);
+}
+
+static struct clk_ops clk_gate_fn_ops = {
+	.enable = clk_gate_fn_enable,
+	.disable = clk_gate_fn_disable,
+	.is_enabled = clk_gate_fn_is_enabled,
+};
 
 static struct clk __init *clk_register_gate_fn(struct device *dev,
 		const char *name,
@@ -190,14 +199,6 @@ static struct clk __init *clk_register_g
 	gate_fn->fn_en = fn_en;
 	gate_fn->fn_dis = fn_dis;
 
-	/* ops is the gate ops, but with our enable/disable functions */
-	if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
-	    clk_gate_fn_ops.disable != clk_gate_fn_disable) {
-		clk_gate_fn_ops = clk_gate_ops;
-		clk_gate_fn_ops.enable = clk_gate_fn_enable;
-		clk_gate_fn_ops.disable = clk_gate_fn_disable;
-	}
-
 	clk = clk_register(dev, &gate_fn->gate.hw);
 
 	if (IS_ERR(clk))
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/board-n8x0.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/board-n8x0.c
--- linux-3.13.11/arch/arm/mach-omap2/board-n8x0.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/board-n8x0.c	2014-07-09 12:00:15.000000000 +0200
@@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struc
 }
 #endif
 
-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
 	.late_init = n8x0_menelaus_late_init,
 };
 
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/gpmc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/gpmc.c
--- linux-3.13.11/arch/arm/mach-omap2/gpmc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/gpmc.c	2014-07-09 12:00:15.000000000 +0200
@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
 };
 
 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
-static struct irq_chip gpmc_irq_chip;
 static int gpmc_irq_start;
 
 static struct resource	gpmc_mem_root;
@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_dat
 
 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
 
+static struct irq_chip gpmc_irq_chip = {
+	.name = "gpmc",
+	.irq_startup = gpmc_irq_noop_ret,
+	.irq_enable = gpmc_irq_enable,
+	.irq_disable = gpmc_irq_disable,
+	.irq_shutdown = gpmc_irq_noop,
+	.irq_ack = gpmc_irq_noop,
+	.irq_mask = gpmc_irq_noop,
+	.irq_unmask = gpmc_irq_noop,
+
+};
+
 static int gpmc_setup_irq(void)
 {
 	int i;
@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
 		return gpmc_irq_start;
 	}
 
-	gpmc_irq_chip.name = "gpmc";
-	gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
-	gpmc_irq_chip.irq_enable = gpmc_irq_enable;
-	gpmc_irq_chip.irq_disable = gpmc_irq_disable;
-	gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
-	gpmc_irq_chip.irq_ack = gpmc_irq_noop;
-	gpmc_irq_chip.irq_mask = gpmc_irq_noop;
-	gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
-
 	gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
 	gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
 
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap-mpuss-lowpower.c
--- linux-3.13.11/arch/arm/mach-omap2/omap-mpuss-lowpower.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap-mpuss-lowpower.c	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ struct cpu_pm_ops {
 	int (*finish_suspend)(unsigned long cpu_state);
 	void (*resume)(void);
 	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
-};
+} __no_const;
 
 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
 static struct powerdomain *mpuss_pd;
@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
 {}
 
-struct cpu_pm_ops omap_pm_ops = {
+static struct cpu_pm_ops omap_pm_ops __read_only = {
 	.finish_suspend		= default_finish_suspend,
 	.resume			= dummy_cpu_resume,
 	.scu_prepare		= dummy_scu_prepare,
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/omap-wakeupgen.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap-wakeupgen.c
--- linux-3.13.11/arch/arm/mach-omap2/omap-wakeupgen.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap-wakeupgen.c	2014-07-09 12:00:15.000000000 +0200
@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata irq_hotplug_notifier = {
+static struct notifier_block irq_hotplug_notifier = {
 	.notifier_call = irq_cpu_hotplug_notify,
 };
 
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/omap_device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_device.c
--- linux-3.13.11/arch/arm/mach-omap2/omap_device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_device.c	2014-07-09 12:00:15.000000000 +0200
@@ -508,7 +508,7 @@ void omap_device_delete(struct omap_devi
 struct platform_device __init *omap_device_build(const char *pdev_name,
 						 int pdev_id,
 						 struct omap_hwmod *oh,
-						 void *pdata, int pdata_len)
+						 const void *pdata, int pdata_len)
 {
 	struct omap_hwmod *ohs[] = { oh };
 
@@ -536,7 +536,7 @@ struct platform_device __init *omap_devi
 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
 						    int pdev_id,
 						    struct omap_hwmod **ohs,
-						    int oh_cnt, void *pdata,
+						    int oh_cnt, const void *pdata,
 						    int pdata_len)
 {
 	int ret = -ENOMEM;
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/omap_device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_device.h
--- linux-3.13.11/arch/arm/mach-omap2/omap_device.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_device.h	2014-07-09 12:00:15.000000000 +0200
@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_dev
 /* Core code interface */
 
 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
-					  struct omap_hwmod *oh, void *pdata,
+					  struct omap_hwmod *oh, const void *pdata,
 					  int pdata_len);
 
 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
 					 struct omap_hwmod **oh, int oh_cnt,
-					 void *pdata, int pdata_len);
+					 const void *pdata, int pdata_len);
 
 struct omap_device *omap_device_alloc(struct platform_device *pdev,
 				      struct omap_hwmod **ohs, int oh_cnt);
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/omap_hwmod.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_hwmod.c
--- linux-3.13.11/arch/arm/mach-omap2/omap_hwmod.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/omap_hwmod.c	2014-07-09 12:00:15.000000000 +0200
@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
 	int (*init_clkdm)(struct omap_hwmod *oh);
 	void (*update_context_lost)(struct omap_hwmod *oh);
 	int (*get_context_lost)(struct omap_hwmod *oh);
-};
+} __no_const;
 
 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
-static struct omap_hwmod_soc_ops soc_ops;
+static struct omap_hwmod_soc_ops soc_ops __read_only;
 
 /* omap_hwmod_list contains all registered struct omap_hwmods */
 static LIST_HEAD(omap_hwmod_list);
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/powerdomains43xx_data.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/powerdomains43xx_data.c
--- linux-3.13.11/arch/arm/mach-omap2/powerdomains43xx_data.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/powerdomains43xx_data.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <asm/pgtable.h>
 
 #include "powerdomain.h"
 
@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
 
 void __init am43xx_powerdomains_init(void)
 {
-	omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
+	pax_open_kernel();
+	*(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
+	pax_close_kernel();
 	pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
 	pwrdm_register_pwrdms(powerdomains_am43xx);
 	pwrdm_complete_init();
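
The pax_open_kernel()/pax_close_kernel() bracket above is the idiom this patch uses for one-off writes to data it has made read-only; the same *(void **)& cast appears wherever an ops table carries __read_only elsewhere in the diff. A hedged sketch of the pattern, assuming the PaX helpers and the __read_only attribute that the rest of the patch provides:

	struct foo_ops {
		int (*bar)(void);
	};

	static int default_bar(void) { return 0; }

	static struct foo_ops my_ops __read_only = { .bar = default_bar };

	static void __init override_bar(int (*new_bar)(void))
	{
		pax_open_kernel();			/* temporarily lift kernel write protection */
		*(void **)&my_ops.bar = new_bar;	/* write through a forced non-const lvalue */
		pax_close_kernel();			/* restore the read-only mapping */
	}
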
diff -ruNp linux-3.13.11/arch/arm/mach-omap2/wd_timer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/wd_timer.c
--- linux-3.13.11/arch/arm/mach-omap2/wd_timer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-omap2/wd_timer.c	2014-07-09 12:00:15.000000000 +0200
@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
 	struct omap_hwmod *oh;
 	char *oh_name = "wd_timer2";
 	char *dev_name = "omap_wdt";
-	struct omap_wd_timer_platform_data pdata;
+	static struct omap_wd_timer_platform_data pdata = {
+		.read_reset_sources = prm_read_reset_sources
+	};
 
 	if (!cpu_class_is_omap2() || of_have_populated_dt())
 		return 0;
@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
 		return -EINVAL;
 	}
 
-	pdata.read_reset_sources = prm_read_reset_sources;
-
 	pdev = omap_device_build(dev_name, id, oh, &pdata,
 				 sizeof(struct omap_wd_timer_platform_data));
 	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
diff -ruNp linux-3.13.11/arch/arm/mach-tegra/cpuidle-tegra20.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-tegra/cpuidle-tegra20.c
--- linux-3.13.11/arch/arm/mach-tegra/cpuidle-tegra20.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-tegra/cpuidle-tegra20.c	2014-07-09 12:00:15.000000000 +0200
@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(stru
 	bool entered_lp2 = false;
 
 	if (tegra_pending_sgi())
-		ACCESS_ONCE(abort_flag) = true;
+		ACCESS_ONCE_RW(abort_flag) = true;
 
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 
diff -ruNp linux-3.13.11/arch/arm/mach-ux500/setup.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-ux500/setup.h
--- linux-3.13.11/arch/arm/mach-ux500/setup.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mach-ux500/setup.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,13 +39,6 @@ extern void ux500_timer_init(void);
 	.type		= MT_DEVICE,		\
 }
 
-#define __MEM_DEV_DESC(x, sz)	{		\
-	.virtual	= IO_ADDRESS(x),	\
-	.pfn		= __phys_to_pfn(x),	\
-	.length		= sz,			\
-	.type		= MT_MEMORY,		\
-}
-
 extern struct smp_operations ux500_smp_ops;
 extern void ux500_cpu_die(unsigned int cpu);
 
diff -ruNp linux-3.13.11/arch/arm/mm/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/Kconfig
--- linux-3.13.11/arch/arm/mm/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -446,7 +446,7 @@ config CPU_32v5
 
 config CPU_32v6
 	bool
-	select CPU_USE_DOMAINS if CPU_V6 && MMU
+	select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
 
 config CPU_32v6K
@@ -601,6 +601,7 @@ config CPU_CP15_MPU
 
 config CPU_USE_DOMAINS
 	bool
+	depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
 	help
 	  This option enables or disables the use of domain switching
 	  via the set_fs() function.
@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
 config KUSER_HELPERS
 	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
 	default y
+	depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
 	help
 	  Warning: disabling this option may break user programs.
 
@@ -812,7 +814,7 @@ config KUSER_HELPERS
 	  See Documentation/arm/kernel_user_helpers.txt for details.
 
 	  However, the fixed address nature of these helpers can be used
-	  by ROP (return orientated programming) authors when creating
+	  by ROP (Return Oriented Programming) authors when creating
 	  exploits.
 
 	  If all of the binaries and libraries which run on your platform
diff -ruNp linux-3.13.11/arch/arm/mm/alignment.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/alignment.c
--- linux-3.13.11/arch/arm/mm/alignment.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/alignment.c	2014-07-09 12:00:15.000000000 +0200
@@ -212,10 +212,12 @@ union offset_union {
 #define __get16_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v, a = addr;		\
+		pax_open_userland();				\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val =  v << ((BE) ? 8 : 0);			\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val |= v << ((BE) ? 0 : 8);			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -229,6 +231,7 @@ union offset_union {
 #define __get32_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v, a = addr;		\
+		pax_open_userland();				\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val =  v << ((BE) ? 24 :  0);			\
 		__get8_unaligned_check(ins,v,a,err);		\
@@ -237,6 +240,7 @@ union offset_union {
 		val |= v << ((BE) ?  8 : 16);			\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val |= v << ((BE) ?  0 : 24);			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -250,6 +254,7 @@ union offset_union {
 #define __put16_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
+		pax_open_userland();				\
 		__asm__( FIRST_BYTE_16				\
 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
@@ -269,6 +274,7 @@ union offset_union {
 		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -282,6 +288,7 @@ union offset_union {
 #define __put32_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
+		pax_open_userland();				\
 		__asm__( FIRST_BYTE_32				\
 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
@@ -311,6 +318,7 @@ union offset_union {
 		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
diff -ruNp linux-3.13.11/arch/arm/mm/cache-l2x0.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/cache-l2x0.c
--- linux-3.13.11/arch/arm/mm/cache-l2x0.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/cache-l2x0.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,7 @@ struct l2x0_of_data {
 	void (*setup)(const struct device_node *, u32 *, u32 *);
 	void (*save)(void);
 	struct outer_cache_fns outer_cache;
-};
+} __do_const;
 
 static bool of_init = false;
 
diff -ruNp linux-3.13.11/arch/arm/mm/context.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/context.c
--- linux-3.13.11/arch/arm/mm/context.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/context.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
 static DEFINE_PER_CPU(atomic64_t, active_asids);
@@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
 	u64 asid = atomic64_read(&mm->context.id);
-	u64 generation = atomic64_read(&asid_generation);
+	u64 generation = atomic64_read_unchecked(&asid_generation);
 
 	if (asid != 0 && is_reserved_asid(asid)) {
 		/*
@@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct
 		 */
 		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
-			generation = atomic64_add_return(ASID_FIRST_VERSION,
+			generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
 			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
@@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_
 	cpu_set_reserved_ttbr0();
 
 	asid = atomic64_read(&mm->context.id);
-	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+	if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
 	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
 		goto switch_mm_fastpath;
 
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 	/* Check that our ASID belongs to the current generation. */
 	asid = atomic64_read(&mm->context.id);
-	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+	if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
 		asid = new_context(mm, cpu);
 		atomic64_set(&mm->context.id, asid);
 	}
diff -ruNp linux-3.13.11/arch/arm/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/fault.c
--- linux-3.13.11/arch/arm/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 #include "fault.h"
 
@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm,
 	if (fixup_exception(regs))
 		return;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (addr < TASK_SIZE) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+	}
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+	if ((fsr & FSR_WRITE) &&
+	    (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
+	     (MODULES_VADDR <= addr && addr < MODULES_END)))
+	{
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+	}
+#endif
+
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk,
 	}
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (fsr & FSR_LNX_PF) {
+		pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
+		do_group_exit(SIGKILL);
+	}
+#endif
+
 	tsk->thread.address = addr;
 	tsk->thread.error_code = fsr;
 	tsk->thread.trap_no = 14;
@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsign
 }
 #endif					/* CONFIG_MMU */
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (__force unsigned char __user *)pc+i))
+			printk(KERN_CONT "?? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+
+	printk(KERN_ERR "PAX: bytes at SP-4: ");
+	for (i = -1; i < 20; i++) {
+		unsigned long c;
+		if (get_user(c, (__force unsigned long __user *)sp+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08lx ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * First Level Translation Fault Handler
  *
@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigne
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
 	struct siginfo info;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (addr < TASK_SIZE && is_domain_fault(fsr)) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+		goto die;
+	}
+#endif
+
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+die:
 	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
 
@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsig
 	ifsr_info[nr].name = name;
 }
 
+asmlinkage int sys_sigreturn(struct pt_regs *regs);
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
+
 asmlinkage void __exception
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
 	struct siginfo info;
+	unsigned long pc = instruction_pointer(regs);
+
+	if (user_mode(regs)) {
+		unsigned long sigpage = current->mm->context.sigpage;
+
+		if (sigpage <= pc && pc < sigpage + 7*4) {
+			if (pc < sigpage + 3*4)
+				sys_sigreturn(regs);
+			else
+				sys_rt_sigreturn(regs);
+			return;
+		}
+		if (pc == 0xffff0f60UL) {
+			/*
+			 * PaX: __kuser_cmpxchg64 emulation
+			 */
+			// TODO
+			//regs->ARM_pc = regs->ARM_lr;
+			//return;
+		}
+		if (pc == 0xffff0fa0UL) {
+			/*
+			 * PaX: __kuser_memory_barrier emulation
+			 */
+			// dmb(); implied by the exception
+			regs->ARM_pc = regs->ARM_lr;
+			return;
+		}
+		if (pc == 0xffff0fc0UL) {
+			/*
+			 * PaX: __kuser_cmpxchg emulation
+			 */
+			// TODO
+			//long new;
+			//int op;
+
+			//op = FUTEX_OP_SET << 28;
+			//new = futex_atomic_op_inuser(op, regs->ARM_r2);
+			//regs->ARM_r0 = old != new;
+			//regs->ARM_pc = regs->ARM_lr;
+			//return;
+		}
+		if (pc == 0xffff0fe0UL) {
+			/*
+			 * PaX: __kuser_get_tls emulation
+			 */
+			regs->ARM_r0 = current_thread_info()->tp_value[0];
+			regs->ARM_pc = regs->ARM_lr;
+			return;
+		}
+	}
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+					pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+					pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
+		goto die;
+	}
+#endif
+
+#ifdef CONFIG_PAX_REFCOUNT
+	if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
+		unsigned int bkpt;
+
+		if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
+			current->thread.error_code = ifsr;
+			current->thread.trap_no = 0;
+			pax_report_refcount_overflow(regs);
+			fixup_exception(regs);
+			return;
+		}
+	}
+#endif
 
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+die:
 	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
diff -ruNp linux-3.13.11/arch/arm/mm/fault.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/fault.h
--- linux-3.13.11/arch/arm/mm/fault.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/fault.h	2014-07-09 12:00:15.000000000 +0200
@@ -3,6 +3,7 @@
 
 /*
  * Fault status register encodings.  We steal bit 31 for our own purposes.
+ * Set when the FSR value is from an instruction fault.
  */
 #define FSR_LNX_PF		(1 << 31)
 #define FSR_WRITE		(1 << 11)
@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fs
 }
 #endif
 
+/* valid for LPAE and !LPAE */
+static inline int is_xn_fault(unsigned int fsr)
+{
+	return ((fsr_fs(fsr) & 0x3c) == 0xc);
+}
+
+static inline int is_domain_fault(unsigned int fsr)
+{
+	return ((fsr_fs(fsr) & 0xD) == 0x9);
+}
+
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
 
diff -ruNp linux-3.13.11/arch/arm/mm/init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/init.c
--- linux-3.13.11/arch/arm/mm/init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/init.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,6 +30,8 @@
 #include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/system_info.h>
+#include <asm/cp15.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -682,7 +684,46 @@ void free_initmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+	unsigned long addr;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	int cpu_arch = cpu_architecture();
+	unsigned int cr = get_cr();
+
+	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+		/* make pages tables, etc before .text NX */
+		for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+			__section_update(pmd, addr, PMD_SECT_XN);
+		}
+		/* make init NX */
+		for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+			__section_update(pmd, addr, PMD_SECT_XN);
+		}
+		/* make kernel code/rodata RX */
+		for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+#ifdef CONFIG_ARM_LPAE
+			__section_update(pmd, addr, PMD_SECT_RDONLY);
+#else
+			__section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
+#endif
+		}
+	}
+#endif
 
+#ifdef CONFIG_HAVE_TCM
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
diff -ruNp linux-3.13.11/arch/arm/mm/ioremap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/ioremap.c
--- linux-3.13.11/arch/arm/mm/ioremap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/ioremap.c	2014-07-09 12:00:15.000000000 +0200
@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr
 	unsigned int mtype;
 
 	if (cached)
-		mtype = MT_MEMORY;
+		mtype = MT_MEMORY_RX;
 	else
-		mtype = MT_MEMORY_NONCACHED;
+		mtype = MT_MEMORY_NONCACHED_RX;
 
 	return __arm_ioremap_caller(phys_addr, size, mtype,
 			__builtin_return_address(0));
diff -ruNp linux-3.13.11/arch/arm/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/mmap.c
--- linux-3.13.11/arch/arm/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp
 	struct vm_area_struct *vma;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	/*
@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp
 	info.high_limit = TASK_SIZE;
 	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct fi
 	unsigned long addr = addr0;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	/*
@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct fi
 		return addr;
 	}
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		if (do_align)
@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct fi
 		else
 			addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct fi
 	info.high_limit = mm->mmap_base;
 	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_str
 {
 	unsigned long random_factor = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* 8 bits of randomness in 20 address space bits */
 	if ((current->flags & PF_RANDOMIZE) &&
 	    !(current->personality & ADDR_NO_RANDOMIZE))
@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_str
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
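
Note that each unbraced "if (!(mm->pax_flags & MF_PAX_RANDMMAP))" added above guards only the single statement that follows it: in the two area allocators it skips the caller's address hint, and in arch_pick_mmap_layout() it skips the stock 8-bit randomization that RANDMMAP replaces with its own delta_mmap/delta_stack shifts. The bottom-up path therefore reduces to roughly the shape below (a paraphrase of the hunk, not the patched source):

	if (!(mm->pax_flags & MF_PAX_RANDMMAP))	/* unbraced: covers only the hint check */
		if (addr) {
			addr = do_align ? COLOUR_ALIGN(addr, pgoff) : PAGE_ALIGN(addr);
			vma = find_vma(mm, addr);
			if (TASK_SIZE - len >= addr &&
			    check_heap_stack_gap(vma, addr, len, offset))
				return addr;	/* the hint is honoured only without RANDMMAP */
		}

	/* the remaining info fields are filled in as before */
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.threadstack_offset = offset;	/* consumed by the grsecurity gap check */
	return vm_unmapped_area(&info);
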
diff -ruNp linux-3.13.11/arch/arm/mm/mmu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/mmu.c
--- linux-3.13.11/arch/arm/mm/mmu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/mm/mmu.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,6 +38,22 @@
 #include "mm.h"
 #include "tcm.h"
 
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+void modify_domain(unsigned int dom, unsigned int type)
+{
+	struct thread_info *thread = current_thread_info();
+	unsigned int domain = thread->cpu_domain;
+	/*
+	 * DOMAIN_MANAGER might be defined to some other value,
+	 * use the arch-defined constant
+	 */
+	domain &= ~domain_val(dom, 3);
+	thread->cpu_domain = domain | domain_val(dom, type);
+	set_domain(thread->cpu_domain);
+}
+EXPORT_SYMBOL(modify_domain);
+#endif
+
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -230,11 +246,19 @@ __setup("noalign", noalign_setup);
 
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
 #define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
-static struct mem_type mem_types[] = {
+#ifdef CONFIG_PAX_KERNEXEC
+#define L_PTE_KERNEXEC		L_PTE_RDONLY
+#define PMD_SECT_KERNEXEC	PMD_SECT_RDONLY
+#else
+#define L_PTE_KERNEXEC		L_PTE_DIRTY
+#define PMD_SECT_KERNEXEC	PMD_SECT_AP_WRITE
+#endif
+
+static struct mem_type mem_types[] __read_only = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
@@ -266,16 +290,16 @@ static struct mem_type mem_types[] = {
 	[MT_UNCACHED] = {
 		.prot_pte	= PROT_PTE_DEVICE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 #ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 #endif
@@ -283,36 +307,54 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain	   = DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_RX] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+		.domain	   = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_NONCACHED_RW] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED_RX] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
+				L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_XN,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
@@ -322,10 +364,10 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED | L_PTE_XN,
+				L_PTE_MT_UNCACHED,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
-				PMD_SECT_UNCACHED | PMD_SECT_XN,
+				PMD_SECT_UNCACHED,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DMA_READY] = {
@@ -411,9 +453,35 @@ static void __init build_mem_type_table(
 			 * to prevent speculative instruction fetches.
 			 */
 			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
 			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
+
+			/* Mark other regions on ARMv6+ as execute-never */
+
+#ifdef CONFIG_PAX_KERNEXEC
+			mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
+			mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
+#ifndef CONFIG_ARM_LPAE
+			mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
+#endif
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
+			mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
+#endif
+
+			mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
 		}
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/*
@@ -475,6 +543,9 @@ static void __init build_mem_type_table(
 		 * from SVC mode and no access from userspace.
 		 */
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#ifdef CONFIG_PAX_KERNEXEC
+		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
@@ -492,11 +563,17 @@ static void __init build_mem_type_table(
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
 		}
 	}
 
@@ -507,15 +584,20 @@ static void __init build_mem_type_table(
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
+				PMD_SECT_BUFFERED;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
+				PMD_SECT_TEX(1);
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
 #ifdef CONFIG_ARM_LPAE
@@ -531,6 +613,8 @@ static void __init build_mem_type_table(
 	vecs_pgprot |= PTE_EXT_AF;
 #endif
 
+	user_pgprot |= __supported_pte_mask;
+
 	for (i = 0; i < 16; i++) {
 		pteval_t v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
@@ -548,10 +632,15 @@ static void __init build_mem_type_table(
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -1193,18 +1282,15 @@ void __init arm_mm_memblock_reserve(void
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
+
+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
+
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
-
-	/*
-	 * Allocate the vector page early.
-	 */
-	vectors = early_alloc(PAGE_SIZE * 2);
 
-	early_trap_init(vectors);
+	early_trap_init(&vectors);
 
 	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1244,7 +1330,7 @@ static void __init devicemaps_init(const
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 #ifdef CONFIG_KUSER_HELPERS
@@ -1316,8 +1402,39 @@ static void __init map_lowmem(void)
 		map.pfn = __phys_to_pfn(start);
 		map.virtual = __phys_to_virt(start);
 		map.length = end - start;
-		map.type = MT_MEMORY;
 
+#ifdef CONFIG_PAX_KERNEXEC
+		if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
+			struct map_desc kernel;
+			struct map_desc initmap;
+
+			/* when freeing initmem we will make this RW */
+			initmap.pfn = __phys_to_pfn(__pa(__init_begin));
+			initmap.virtual = (unsigned long)__init_begin;
+			initmap.length = _sdata - __init_begin;
+			initmap.type = MT_MEMORY_RWX;
+			create_mapping(&initmap);
+
+			/* when freeing initmem we will make this RX */
+			kernel.pfn = __phys_to_pfn(__pa(_stext));
+			kernel.virtual = (unsigned long)_stext;
+			kernel.length = __init_begin - _stext;
+			kernel.type = MT_MEMORY_RWX;
+			create_mapping(&kernel);
+
+			if (map.virtual < (unsigned long)_stext) {
+				map.length = (unsigned long)_stext - map.virtual;
+				map.type = MT_MEMORY_RWX;
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(__pa(_sdata));
+			map.virtual = (unsigned long)_sdata;
+			map.length = end - __pa(_sdata);
+		}
+#endif
+
+		map.type = MT_MEMORY_RW;
 		create_mapping(&map);
 	}
 }
diff -ruNp linux-3.13.11/arch/arm/plat-omap/sram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/plat-omap/sram.c
--- linux-3.13.11/arch/arm/plat-omap/sram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/plat-omap/sram.c	2014-07-09 12:00:15.000000000 +0200
@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long
 	 * Looks like we need to preserve some bootloader code at the
 	 * beginning of SRAM for jumping to flash for reboot to work...
 	 */
+	pax_open_kernel();
 	memset_io(omap_sram_base + omap_sram_skip, 0,
 		  omap_sram_size - omap_sram_skip);
+	pax_close_kernel();
 }
diff -ruNp linux-3.13.11/arch/arm/plat-samsung/include/plat/dma-ops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/plat-samsung/include/plat/dma-ops.h
--- linux-3.13.11/arch/arm/plat-samsung/include/plat/dma-ops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm/plat-samsung/include/plat/dma-ops.h	2014-07-09 12:00:15.000000000 +0200
@@ -47,7 +47,7 @@ struct samsung_dma_ops {
 	int (*started)(unsigned ch);
 	int (*flush)(unsigned ch);
 	int (*stop)(unsigned ch);
-};
+} __no_const;
 
 extern void *samsung_dmadev_get_ops(void);
 extern void *s3c_dma_get_ops(void);
diff -ruNp linux-3.13.11/arch/arm64/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm64/include/asm/uaccess.h
--- linux-3.13.11/arch/arm64/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/arm64/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t f
 	flag;								\
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 
 /*
diff -ruNp linux-3.13.11/arch/avr32/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/cache.h
--- linux-3.13.11/arch/avr32/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,8 +1,10 @@
 #ifndef __ASM_AVR32_CACHE_H
 #define __ASM_AVR32_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
diff -ruNp linux-3.13.11/arch/avr32/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/elf.h
--- linux-3.13.11/arch/avr32/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	0x00001000UL
+
+#define PAX_DELTA_MMAP_LEN	15
+#define PAX_DELTA_STACK_LEN	15
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
diff -ruNp linux-3.13.11/arch/avr32/include/asm/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/kmap_types.h
--- linux-3.13.11/arch/avr32/include/asm/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/include/asm/kmap_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -2,9 +2,9 @@
 #define __ASM_AVR32_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define KM_TYPE_NR 29
+# define KM_TYPE_NR 30
 #else
-# define KM_TYPE_NR 14
+# define KM_TYPE_NR 15
 #endif
 
 #endif /* __ASM_AVR32_KMAP_TYPES_H */
diff -ruNp linux-3.13.11/arch/avr32/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/mm/fault.c
--- linux-3.13.11/arch/avr32/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/avr32/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
 
 int exception_trace = 1;
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (unsigned char *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -176,6 +193,16 @@ bad_area:
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+			if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
+				pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
+				do_group_exit(SIGKILL);
+			}
+		}
+#endif
+
 		if (exception_trace && printk_ratelimit())
 			printk("%s%s[%d]: segfault at %08lx pc %08lx "
 			       "sp %08lx ecr %lu\n",
diff -ruNp linux-3.13.11/arch/blackfin/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/blackfin/include/asm/cache.h
--- linux-3.13.11/arch/blackfin/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/blackfin/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/const.h>
 #include <linux/linkage.h>	/* for asmlinkage */
 
 /*
@@ -14,7 +15,7 @@
  * Blackfin loads 32 bytes for cache
  */
 #define L1_CACHE_SHIFT	5
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
diff -ruNp linux-3.13.11/arch/cris/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/Kconfig
--- linux-3.13.11/arch/cris/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -561,6 +561,8 @@ source "fs/Kconfig"
 
 source "arch/cris/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/cris/include/arch-v10/arch/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/include/arch-v10/arch/cache.h
--- linux-3.13.11/arch/cris/include/arch-v10/arch/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/include/arch-v10/arch/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,8 +1,9 @@
 #ifndef _ASM_ARCH_CACHE_H
 #define _ASM_ARCH_CACHE_H
 
+#include <linux/const.h>
 /* Etrax 100LX have 32-byte cache-lines. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* _ASM_ARCH_CACHE_H */
diff -ruNp linux-3.13.11/arch/cris/include/arch-v32/arch/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/include/arch-v32/arch/cache.h
--- linux-3.13.11/arch/cris/include/arch-v32/arch/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/cris/include/arch-v32/arch/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,11 +1,12 @@
 #ifndef _ASM_CRIS_ARCH_CACHE_H
 #define _ASM_CRIS_ARCH_CACHE_H
 
+#include <linux/const.h>
 #include <arch/hwregs/dma.h>
 
 /* A cache-line is 32 bytes. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
diff -ruNp linux-3.13.11/arch/frv/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/atomic.h
--- linux-3.13.11/arch/frv/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
 #define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
diff -ruNp linux-3.13.11/arch/frv/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/cache.h
--- linux-3.13.11/arch/frv/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,10 +12,11 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
 
 /* bytes per L1 cache line */
 #define L1_CACHE_SHIFT		(CONFIG_FRV_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
 #define ____cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
diff -ruNp linux-3.13.11/arch/frv/include/asm/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/kmap_types.h
--- linux-3.13.11/arch/frv/include/asm/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/include/asm/kmap_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,6 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-#define KM_TYPE_NR 17
+#define KM_TYPE_NR 18
 
 #endif
diff -ruNp linux-3.13.11/arch/frv/mm/elf-fdpic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/mm/elf-fdpic.c
--- linux-3.13.11/arch/frv/mm/elf-fdpic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/frv/mm/elf-fdpic.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(str
 {
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			goto success;
 	}
 
@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(str
 	info.high_limit = (current->mm->start_stack - 0x00200000);
 	info.align_mask = 0;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		goto success;
diff -ruNp linux-3.13.11/arch/hexagon/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/hexagon/include/asm/cache.h
--- linux-3.13.11/arch/hexagon/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/hexagon/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -21,9 +21,11 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
+
 /* Bytes per L1 cache line */
-#define L1_CACHE_SHIFT		(5)
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __cacheline_aligned	__aligned(L1_CACHE_BYTES)
 #define ____cacheline_aligned	__aligned(L1_CACHE_BYTES)
diff -ruNp linux-3.13.11/arch/ia64/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/Kconfig
--- linux-3.13.11/arch/ia64/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
 config KEXEC
 	bool "kexec system call"
 	depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	depends on !GRKERNSEC_KMEM
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -641,6 +642,8 @@ source "fs/Kconfig"
 
 source "arch/ia64/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/ia64/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/atomic.h
--- linux-3.13.11/arch/ia64/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff -ruNp linux-3.13.11/arch/ia64/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/cache.h
--- linux-3.13.11/arch/ia64/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_CACHE_H
 #define _ASM_IA64_CACHE_H
 
+#include <linux/const.h>
 
 /*
  * Copyright (C) 1998-2000 Hewlett-Packard Co
@@ -9,7 +10,7 @@
 
 /* Bytes per L1 (data) cache line.  */
 #define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SMP
 # define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
diff -ruNp linux-3.13.11/arch/ia64/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/elf.h
--- linux-3.13.11/arch/ia64/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -42,6 +42,13 @@
  */
 #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
+
+#define PAX_DELTA_MMAP_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#define PAX_DELTA_STACK_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#endif
+
 #define PT_IA_64_UNWIND		0x70000001
 
 /* IA-64 relocations: */
diff -ruNp linux-3.13.11/arch/ia64/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/pgalloc.h
--- linux-3.13.11/arch/ia64/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/pgalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t
 	pgd_val(*pgd_entry) = __pa(pud);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+	pgd_populate(mm, pgd_entry, pud);
+}
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t
 	pud_val(*pud_entry) = __pa(pmd);
 }
 
+static inline void
+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+	pud_populate(mm, pud_entry, pmd);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
diff -ruNp linux-3.13.11/arch/ia64/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/pgtable.h
--- linux-3.13.11/arch/ia64/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,7 +12,7 @@
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-
+#include <linux/const.h>
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>
@@ -142,6 +142,17 @@
 #define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+# define PAGE_READONLY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+# define PAGE_COPY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+#endif
+
 #define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 #define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
diff -ruNp linux-3.13.11/arch/ia64/include/asm/spinlock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/spinlock.h
--- linux-3.13.11/arch/ia64/include/asm/spinlock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/spinlock.h	2014-07-09 12:00:15.000000000 +0200
@@ -71,7 +71,7 @@ static __always_inline void __ticket_spi
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
 }
 
 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
diff -ruNp linux-3.13.11/arch/ia64/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/uaccess.h
--- linux-3.13.11/arch/ia64/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -70,6 +70,7 @@
 	 && ((segment).seg == KERNEL_DS.seg						\
 	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
 })
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
 
 /*
@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	if (count > INT_MAX)
+		return count;
+
+	if (!__builtin_constant_p(count))
+		check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	if (count > INT_MAX)
+		return count;
+
+	if (!__builtin_constant_p(count))
+		check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void _
 ({											\
 	void __user *__cu_to = (to);							\
 	const void *__cu_from = (from);							\
-	long __cu_len = (n);								\
+	unsigned long __cu_len = (n);							\
 											\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
+	if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {		\
+		if (!__builtin_constant_p(n))						\
+			check_object_size(__cu_from, __cu_len, true);			\
 		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	}										\
 	__cu_len;									\
 })
 
@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void _
 ({											\
 	void *__cu_to = (to);								\
 	const void __user *__cu_from = (from);						\
-	long __cu_len = (n);								\
+	unsigned long __cu_len = (n);							\
 											\
 	__chk_user_ptr(__cu_from);							\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
+	if (__cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs())) {	\
+		if (!__builtin_constant_p(n))						\
+			check_object_size(__cu_to, __cu_len, false);			\
 		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
+	}										\
 	__cu_len;									\
 })
 
diff -ruNp linux-3.13.11/arch/ia64/kernel/entry.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/entry.S
--- linux-3.13.11/arch/ia64/kernel/entry.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/entry.S	2014-07-09 12:00:15.000000000 +0200
@@ -1706,7 +1706,7 @@ sys_call_table:
 	data8 sys_mq_notify
 	data8 sys_mq_getsetattr
 	data8 sys_kexec_load
-	data8 sys_ni_syscall			// reserved for vserver
+	data8 sys_vserver
 	data8 sys_waitid			// 1270
 	data8 sys_add_key
 	data8 sys_request_key
diff -ruNp linux-3.13.11/arch/ia64/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/module.c
--- linux-3.13.11/arch/ia64/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
 void
 module_free (struct module *mod, void *module_region)
 {
-	if (mod && mod->arch.init_unw_table &&
-	    module_region == mod->module_init) {
+	if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
 		unw_remove_unwind_table(mod->arch.init_unw_table);
 		mod->arch.init_unw_table = NULL;
 	}
@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
 }
 
 static inline int
+in_init_rx (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
+}
+
+static inline int
+in_init_rw (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
+}
+
+static inline int
 in_init (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_init < mod->init_size;
+	return in_init_rx(mod, addr) || in_init_rw(mod, addr);
+}
+
+static inline int
+in_core_rx (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
+}
+
+static inline int
+in_core_rw (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
 }
 
 static inline int
 in_core (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_core < mod->core_size;
+	return in_core_rx(mod, addr) || in_core_rw(mod, addr);
 }
 
 static inline int
@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_
 		break;
 
 	      case RV_BDREL:
-		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
+		if (in_init_rx(mod, val))
+			val -= (uint64_t) mod->module_init_rx;
+		else if (in_init_rw(mod, val))
+			val -= (uint64_t) mod->module_init_rw;
+		else if (in_core_rx(mod, val))
+			val -= (uint64_t) mod->module_core_rx;
+		else if (in_core_rw(mod, val))
+			val -= (uint64_t) mod->module_core_rw;
 		break;
 
 	      case RV_LTV:
@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
 		 *     addresses have been selected...
 		 */
 		uint64_t gp;
-		if (mod->core_size > MAX_LTOFF)
+		if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
 		else
-			gp = mod->core_size / 2;
-		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
+			gp = (mod->core_size_rx + mod->core_size_rw) / 2;
+		gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
diff -ruNp linux-3.13.11/arch/ia64/kernel/palinfo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/palinfo.c
--- linux-3.13.11/arch/ia64/kernel/palinfo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/palinfo.c	2014-07-09 12:00:15.000000000 +0200
@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct n
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
diff -ruNp linux-3.13.11/arch/ia64/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/ptrace.c
--- linux-3.13.11/arch/ia64/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
 #include <linux/regset.h>
 #include <linux/elf.h>
 #include <linux/tracehook.h>
+#include <linux/vs_base.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
diff -ruNp linux-3.13.11/arch/ia64/kernel/sys_ia64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/sys_ia64.c
--- linux-3.13.11/arch/ia64/kernel/sys_ia64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/sys_ia64.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *fil
 	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *fil
 	if (REGION_NUMBER(addr) == RGN_HPAGE)
 		addr = 0;
 #endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP)
+		addr = mm->free_area_cache;
+	else
+#endif
+
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *fil
 	info.high_limit = TASK_SIZE;
 	info.align_mask = align_mask;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
diff -ruNp linux-3.13.11/arch/ia64/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/traps.c
--- linux-3.13.11/arch/ia64/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re
 	put_cpu();
 
 	if (++die.lock_owner_depth < 3) {
-		printk("%s[%d]: %s %ld [%d]\n",
-		current->comm, task_pid_nr(current), str, err, ++die_counter);
+		printk("%s[%d:#%u]: %s %ld [%d]\n",
+			current->comm, task_pid_nr(current), current->xid,
+			str, err, ++die_counter);
 		if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
 	            != NOTIFY_STOP)
 			show_regs(regs);
@@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_
 			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
 				last.time = current_jiffies + 5 * HZ;
 				printk(KERN_WARNING
-		       			"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-		       			current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
+					"%s(%d:#%u): floating-point assist fault at ip %016lx, isr %016lx\n",
+					current->comm, task_pid_nr(current), current->xid,
+					regs->cr_iip + ia64_psr(regs)->ri, isr);
 			}
 		}
 	}
diff -ruNp linux-3.13.11/arch/ia64/kernel/vmlinux.lds.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/vmlinux.lds.S
--- linux-3.13.11/arch/ia64/kernel/vmlinux.lds.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/kernel/vmlinux.lds.S	2014-07-09 12:00:15.000000000 +0200
@@ -198,7 +198,7 @@ SECTIONS {
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
 	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
-	__phys_per_cpu_start = __per_cpu_load;
+	__phys_per_cpu_start = per_cpu_load;
 	/*
 	 * ensure percpu data fits
 	 * into percpu page size
diff -ruNp linux-3.13.11/arch/ia64/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/fault.c
--- linux-3.13.11/arch/ia64/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
 	return pte_present(pte);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 8; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 #	define VM_READ_BIT	0
 #	define VM_WRITE_BIT	1
 #	define VM_EXEC_BIT	2
@@ -151,8 +168,21 @@ retry:
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	if ((vma->vm_flags & mask) != mask)
+	if ((vma->vm_flags & mask) != mask) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
+				goto bad_area;
+
+			up_read(&mm->mmap_sem);
+			pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		goto bad_area;
+	}
 
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
diff -ruNp linux-3.13.11/arch/ia64/mm/hugetlbpage.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/hugetlbpage.c
--- linux-3.13.11/arch/ia64/mm/hugetlbpage.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/hugetlbpage.c	2014-07-09 12:00:15.000000000 +0200
@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(
 		unsigned long pgoff, unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(
 	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
 	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
diff -ruNp linux-3.13.11/arch/ia64/mm/init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/init.c
--- linux-3.13.11/arch/ia64/mm/init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/ia64/mm/init.c	2014-07-09 12:00:15.000000000 +0200
@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
+			vma->vm_flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+			if (current->mm->pax_flags & MF_PAX_MPROTECT)
+				vma->vm_flags &= ~VM_MAYEXEC;
+#endif
+
+		}
+#endif
+
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
diff -ruNp linux-3.13.11/arch/m32r/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/include/asm/cache.h
--- linux-3.13.11/arch/m32r/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,8 +1,10 @@
 #ifndef _ASM_M32R_CACHE_H
 #define _ASM_M32R_CACHE_H
 
+#include <linux/const.h>
+
 /* L1 cache line size */
 #define L1_CACHE_SHIFT		4
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif  /* _ASM_M32R_CACHE_H */
diff -ruNp linux-3.13.11/arch/m32r/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/kernel/traps.c
--- linux-3.13.11/arch/m32r/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -184,8 +184,9 @@ static void show_registers(struct pt_reg
 	} else {
 		printk("SPI: %08lx\n", sp);
 	}
-	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
-		current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
+	printk("Process %s (pid: %d:#%u, process nr: %d, stackpage=%08lx)",
+		current->comm, task_pid_nr(current), current->xid,
+		0xffff & i, 4096+(unsigned long)current);
 
 	/*
 	 * When in-kernel, we also print out the stack and code at the
diff -ruNp linux-3.13.11/arch/m32r/lib/usercopy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/lib/usercopy.c
--- linux-3.13.11/arch/m32r/lib/usercopy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m32r/lib/usercopy.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,9 @@
 unsigned long
 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	prefetch(from);
 	if (access_ok(VERIFY_WRITE, to, n))
 		__copy_user(to,from,n);
@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
 unsigned long
 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	prefetchw(to);
 	if (access_ok(VERIFY_READ, from, n))
 		__copy_user_zeroing(to,from,n);
diff -ruNp linux-3.13.11/arch/m68k/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m68k/Kconfig
--- linux-3.13.11/arch/m68k/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m68k/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -135,6 +135,8 @@ source "fs/Kconfig"
 
 source "arch/m68k/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/m68k/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m68k/include/asm/cache.h
--- linux-3.13.11/arch/m68k/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/m68k/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,9 +4,11 @@
 #ifndef __ARCH_M68K_CACHE_H
 #define __ARCH_M68K_CACHE_H
 
+#include <linux/const.h>
+
 /* bytes per L1 cache line */
 #define        L1_CACHE_SHIFT  4
-#define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
+#define        L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
diff -ruNp linux-3.13.11/arch/metag/mm/hugetlbpage.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/metag/mm/hugetlbpage.c
--- linux-3.13.11/arch/metag/mm/hugetlbpage.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/metag/mm/hugetlbpage.c	2014-07-09 12:00:15.000000000 +0200
@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsign
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & HUGEPT_MASK;
 	info.align_offset = 0;
+	info.threadstack_offset = 0;
 	return vm_unmapped_area(&info);
 }
 
diff -ruNp linux-3.13.11/arch/microblaze/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/microblaze/include/asm/cache.h
--- linux-3.13.11/arch/microblaze/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/microblaze/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -13,11 +13,12 @@
 #ifndef _ASM_MICROBLAZE_CACHE_H
 #define _ASM_MICROBLAZE_CACHE_H
 
+#include <linux/const.h>
 #include <asm/registers.h>
 
 #define L1_CACHE_SHIFT 5
 /* word-granular cache in microblaze */
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
diff -ruNp linux-3.13.11/arch/mips/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/Kconfig
--- linux-3.13.11/arch/mips/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -2268,6 +2268,7 @@ source "kernel/Kconfig.preempt"
 
 config KEXEC
 	bool "Kexec system call"
+	depends on !GRKERNSEC_KMEM
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -2539,6 +2540,8 @@ source "fs/Kconfig"
 
 source "arch/mips/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/mips/cavium-octeon/dma-octeon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/cavium-octeon/dma-octeon.c
--- linux-3.13.11/arch/mips/cavium-octeon/dma-octeon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/cavium-octeon/dma-octeon.c	2014-07-09 12:00:15.000000000 +0200
@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(str
 	if (dma_release_from_coherent(dev, order, vaddr))
 		return;
 
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
diff -ruNp linux-3.13.11/arch/mips/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/atomic.h
--- linux-3.13.11/arch/mips/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -21,15 +21,39 @@
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
 
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i)	  { (i) }
 
+#ifdef CONFIG_64BIT
+#define _ASM_EXTABLE(from, to)		\
+"	.section __ex_table,\"a\"\n"	\
+"	.dword	" #from ", " #to"\n"	\
+"	.previous\n"
+#else
+#define _ASM_EXTABLE(from, to)		\
+"	.section __ex_table,\"a\"\n"	\
+"	.word	" #from ", " #to"\n"	\
+"	.previous\n"
+#endif
+
 /*
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+	return (*(volatile const int *) &v->counter);
+}
+
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+	return (*(volatile const int *) &v->counter);
+}
 
 /*
  * atomic_set - set atomic variable
@@ -38,7 +62,15 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)		((v)->counter = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+	v->counter = i;
+}
 
 /*
  * atomic_add - add integer to atomic variable
@@ -47,7 +79,67 @@
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t * v)
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+	int temp;
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%0, %1		# atomic_add		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"2:	add	%0, %2					\n"
+#else
+		"	addu	%0, %2					\n"
+#endif
+		"	sc	%0, %1					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%0, %1		# atomic_add		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Exception on overflow. */
+		"2:	add	%0, %2					\n"
+#else
+		"	addu	%0, %2					\n"
+#endif
+		"	sc	%0, %1					\n"
+		"	beqz	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	add	%0, %1					\n"
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#else
+		"	addu	%0, %1					\n"
+#endif
+		: "+r" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+}
+
+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
 {
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i,
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t * v)
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+	int temp;
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%0, %1		# atomic64_sub		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Exception on overflow. */
+		"2:	sub	%0, %2					\n"
+#else
+		"	subu	%0, %2					\n"
+#endif
+		"	sc	%0, %1					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%0, %1		# atomic64_sub		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Exception on overflow. */
+		"2:	sub	%0, %2					\n"
+#else
+		"	subu	%0, %2					\n"
+#endif
+		"	sc	%0, %1					\n"
+		"	beqz	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	sub	%0, %1					\n"
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#else
+		"	subu	%0, %1					\n"
+#endif
+		: "+r" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+}
+
+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
 {
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i,
 /*
  * Same as above, but return the result value
  */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int result;
+	int temp;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%1, %2		# atomic_add_return	\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	add	%0, %1, %3				\n"
+#else
+		"	addu	%0, %1, %3				\n"
+#endif
+		"	sc	%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	b	4f					\n"
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	addu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%1, %2	# atomic_add_return		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	add	%0, %1, %3				\n"
+#else
+		"	addu	%0, %1, %3				\n"
+#endif
+		"	sc	%0, %2					\n"
+		"	bnez	%0, 4f					\n"
+		"	b	1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	addu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+		"	lw	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	add	%0, %2					\n"
+#else
+		"	addu	%0, %2					\n"
+#endif
+		"	sw	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Note: Dest reg is not modified on overflow */
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#endif
+		: "=&r" (result), "+m" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+
+	smp_llsc_mb();
+
+	return result;
+}
+
+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
 {
 	int result;
 
@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(
 	return result;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	int result;
+	int temp;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%1, %2		# atomic_sub_return	\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	sub	%0, %1, %3				\n"
+#else
+		"	subu	%0, %1, %3				\n"
+#endif
+		"	sc	%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	b	4f					\n"
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	subu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "Ir" (i), "m" (v->counter)
+		: "memory");
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	ll	%1, %2	# atomic_sub_return		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	sub	%0, %1, %3				\n"
+#else
+		"	subu	%0, %1, %3				\n"
+#endif
+		"	sc	%0, %2					\n"
+		"	bnez	%0, 4f					\n"
+		"	b	1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	subu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+		"	lw	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	sub	%0, %2					\n"
+#else
+		"	subu	%0, %2					\n"
+#endif
+		"	sw	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Note: Dest reg is not modified on overflow */
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#endif
+		: "=&r" (result), "+m" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+
+	smp_llsc_mb();
+
+	return result;
+}
+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
 {
 	int result;
 
@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
 {
 	int result;
 
@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_posi
 	return result;
 }
 
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
+					   int new)
+{
+	return cmpxchg(&(v->counter), old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
+
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+	return xchg(&(v->counter), new);
+}
 
 /**
  * __atomic_add_unless - add unless the number is a given value
@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unles
 
 #define atomic_dec_return(v) atomic_sub_return(1, (v))
 #define atomic_inc_return(v) atomic_add_return(1, (v))
+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_return_unchecked(1, v);
+}
 
 /*
  * atomic_sub_and_test - subtract value from variable and test result
@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unles
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_return_unchecked(1, v) == 0;
+}
 
 /*
  * atomic_dec_and_test - decrement by 1 and test
@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unles
  * Atomically increments @v by 1.
  */
 #define atomic_inc(v) atomic_add(1, (v))
+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+	atomic_add_unchecked(1, v);
+}
 
 /*
  * atomic_dec - decrement and test
@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unles
  * Atomically decrements @v by 1.
  */
 #define atomic_dec(v) atomic_sub(1, (v))
+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+	atomic_sub_unchecked(1, v);
+}
 
 /*
  * atomic_add_negative - add and test if negative
@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unles
  * @v: pointer of type atomic64_t
  *
  */
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+static inline long atomic64_read(const atomic64_t *v)
+{
+	return (*(volatile const long *) &v->counter);
+}
+
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+	return (*(volatile const long *) &v->counter);
+}
 
 /*
  * atomic64_set - set atomic variable
  * @v: pointer of type atomic64_t
  * @i: required value
  */
-#define atomic64_set(v, i)	((v)->counter = (i))
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+	v->counter = i;
+}
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+	v->counter = i;
+}
 
 /*
  * atomic64_add - add integer to atomic variable
@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unles
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
+static __inline__ void atomic64_add(long i, atomic64_t *v)
+{
+	long temp;
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%0, %1		# atomic64_add		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"2:	dadd	%0, %2					\n"
+#else
+		"	daddu	%0, %2					\n"
+#endif
+		"	scd	%0, %1					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%0, %1		# atomic64_add		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"2:	dadd	%0, %2					\n"
+#else
+		"	daddu	%0, %2					\n"
+#endif
+		"	scd	%0, %1					\n"
+		"	beqz	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	dadd	%0, %1					\n"
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#else
+		"	daddu	%0, %1					\n"
+#endif
+		: "+r" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+}
+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
 {
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
+static __inline__ void atomic64_sub(long i, atomic64_t *v)
+{
+	long temp;
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%0, %1		# atomic64_sub		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Exception on overflow. */
+		"2:	dsub	%0, %2					\n"
+#else
+		"	dsubu	%0, %2					\n"
+#endif
+		"	scd	%0, %1					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%0, %1		# atomic64_sub		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Exception on overflow. */
+		"2:	dsub	%0, %2					\n"
+#else
+		"	dsubu	%0, %2					\n"
+#endif
+		"	scd	%0, %1					\n"
+		"	beqz	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"3:							\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	dsub	%0, %1					\n"
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#else
+		"	dsubu	%0, %1					\n"
+#endif
+		: "+r" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+}
+
+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
 {
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long
 /*
  * Same as above, but return the result value
  */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+{
+	long result;
+	long temp;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%1, %2		# atomic64_add_return	\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	dadd	%0, %1, %3				\n"
+#else
+		"	daddu	%0, %1, %3				\n"
+#endif
+		"	scd	%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	b	4f					\n"
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	daddu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
+		: "Ir" (i));
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%1, %2	# atomic64_add_return		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	dadd	%0, %1, %3				\n"
+#else
+		"	daddu	%0, %1, %3				\n"
+#endif
+		"	scd	%0, %2					\n"
+		"	bnez	%0, 4f					\n"
+		"	b	1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	daddu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "Ir" (i), "m" (v->counter)
+		: "memory");
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+		"	ld	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	dadd	%0, %2					\n"
+#else
+		"	daddu	%0, %2					\n"
+#endif
+		"	sd	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Note: Dest reg is not modified on overflow */
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#endif
+		: "=&r" (result), "+m" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+
+	smp_llsc_mb();
+
+	return result;
+}
+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
 {
 	long result;
 
@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_retu
 	return result;
 }
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+{
+	long result;
+	long temp;
+
+	smp_mb__before_llsc();
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%1, %2		# atomic64_sub_return	\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	dsub	%0, %1, %3				\n"
+#else
+		"	dsubu	%0, %1, %3				\n"
+#endif
+		"	scd	%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	b	4f					\n"
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	dsubu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "Ir" (i), "m" (v->counter)
+		: "memory");
+	} else if (kernel_uses_llsc) {
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:	lld	%1, %2	# atomic64_sub_return		\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"2:	dsub	%0, %1, %3				\n"
+#else
+		"	dsubu	%0, %1, %3				\n"
+#endif
+		"	scd	%0, %2					\n"
+		"	bnez	%0, 4f					\n"
+		"	b	1b					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"	.set	noreorder				\n"
+		"3:	b	5f					\n"
+		"	move	%0, %1					\n"
+		"	.set	reorder					\n"
+		_ASM_EXTABLE(2b, 3b)
+#endif
+		"4:	dsubu	%0, %1, %3				\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		"5:							\n"
+#endif
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
+		: "Ir" (i), "m" (v->counter)
+		: "memory");
+	} else {
+		unsigned long flags;
+
+		raw_local_irq_save(flags);
+		__asm__ __volatile__(
+		"	ld	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+			/* Exception on overflow. */
+		"1:	dsub	%0, %2					\n"
+#else
+		"	dsubu	%0, %2					\n"
+#endif
+		"	sd	%0, %1					\n"
+#ifdef CONFIG_PAX_REFCOUNT
+		/* Note: Dest reg is not modified on overflow */
+		"2:							\n"
+		_ASM_EXTABLE(1b, 2b)
+#endif
+		: "=&r" (result), "+m" (v->counter) : "Ir" (i));
+		raw_local_irq_restore(flags);
+	}
+
+	smp_llsc_mb();
+
+	return result;
+}
+
+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
 {
 	long result;
 
@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_retu
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
 {
 	long result;
 
@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_p
 	return result;
 }
 
-#define atomic64_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
+					      long new)
+{
+	return cmpxchg(&(v->counter), old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+	return xchg(&v->counter, new);
+}
+
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
+{
+	return xchg(&(v->counter), new);
+}
 
 /**
  * atomic64_add_unless - add unless the number is a given value
@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unles
 
 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
 
 /*
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unles
  * other cases.
  */
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
 
 /*
  * atomic64_dec_and_test - decrement by 1 and test
@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unles
  * Atomically increments @v by 1.
  */
 #define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
 
 /*
  * atomic64_dec - decrement and test
@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unles
  * Atomically decrements @v by 1.
  */
 #define atomic64_dec(v) atomic64_sub(1, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
 
 /*
  * atomic64_add_negative - add and test if negative
diff -ruNp linux-3.13.11/arch/mips/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/cache.h
--- linux-3.13.11/arch/mips/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,10 +9,11 @@
 #ifndef _ASM_CACHE_H
 #define _ASM_CACHE_H
 
+#include <linux/const.h>
 #include <kmalloc.h>
 
 #define L1_CACHE_SHIFT		CONFIG_MIPS_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define SMP_CACHE_SHIFT		L1_CACHE_SHIFT
 #define SMP_CACHE_BYTES		L1_CACHE_BYTES
diff -ruNp linux-3.13.11/arch/mips/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/elf.h
--- linux-3.13.11/arch/mips/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -373,13 +373,16 @@ extern const char *__elf_platform;
 #define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
 #endif
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 				       int uses_interp);
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #endif /* _ASM_ELF_H */
diff -ruNp linux-3.13.11/arch/mips/include/asm/exec.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/exec.h
--- linux-3.13.11/arch/mips/include/asm/exec.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/exec.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,6 @@
 #ifndef _ASM_EXEC_H
 #define _ASM_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* _ASM_EXEC_H */
diff -ruNp linux-3.13.11/arch/mips/include/asm/ftrace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/ftrace.h
--- linux-3.13.11/arch/mips/include/asm/ftrace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/ftrace.h	2014-07-09 12:00:15.000000000 +0200
@@ -22,12 +22,12 @@ extern void _mcount(void);
 #define safe_load(load, src, dst, error)		\
 do {							\
 	asm volatile (					\
-		"1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
-		"   li %[" STR(error) "], 0\n"		\
+		"1: " load " %[dest], 0(%[source])\n"	\
+		"   li %[err], 0\n"			\
 		"2:\n"					\
 							\
 		".section .fixup, \"ax\"\n"		\
-		"3: li %[" STR(error) "], 1\n"		\
+		"3: li %[err], 1\n"			\
 		"   j 2b\n"				\
 		".previous\n"				\
 							\
@@ -35,8 +35,8 @@ do {							\
 		STR(PTR) "\t1b, 3b\n\t"			\
 		".previous\n"				\
 							\
-		: [dst] "=&r" (dst), [error] "=r" (error)\
-		: [src] "r" (src)			\
+		: [dest] "=&r" (dst), [err] "=r" (error)\
+		: [source] "r" (src)			\
 		: "memory"				\
 	);						\
 } while (0)
@@ -44,12 +44,12 @@ do {							\
 #define safe_store(store, src, dst, error)	\
 do {						\
 	asm volatile (				\
-		"1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
-		"   li %[" STR(error) "], 0\n"	\
+		"1: " store " %[source], 0(%[dest])\n"\
+		"   li %[err], 0\n"		\
 		"2:\n"				\
 						\
 		".section .fixup, \"ax\"\n"	\
-		"3: li %[" STR(error) "], 1\n"	\
+		"3: li %[err], 1\n"		\
 		"   j 2b\n"			\
 		".previous\n"			\
 						\
@@ -57,8 +57,8 @@ do {						\
 		STR(PTR) "\t1b, 3b\n\t"		\
 		".previous\n"			\
 						\
-		: [error] "=r" (error)		\
-		: [dst] "r" (dst), [src] "r" (src)\
+		: [err] "=r" (error)		\
+		: [dest] "r" (dst), [source] "r" (src)\
 		: "memory"			\
 	);					\
 } while (0)
diff -ruNp linux-3.13.11/arch/mips/include/asm/hw_irq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/hw_irq.h
--- linux-3.13.11/arch/mips/include/asm/hw_irq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/hw_irq.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,7 +10,7 @@
 
 #include <linux/atomic.h>
 
-extern atomic_t irq_err_count;
+extern atomic_unchecked_t irq_err_count;
 
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
diff -ruNp linux-3.13.11/arch/mips/include/asm/local.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/local.h
--- linux-3.13.11/arch/mips/include/asm/local.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/local.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,15 +12,25 @@ typedef struct
 	atomic_long_t a;
 } local_t;
 
+typedef struct {
+	atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)	atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)	atomic_long_read_unchecked(&(l)->a)
 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l, i)	atomic_long_set_unchecked(&(l)->a, (i))
 
 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
 #define local_inc(l)	atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l)	atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)	atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l)	atomic_long_dec_unchecked(&(l)->a)
 
 /*
  * Same as above, but return the result value
@@ -70,6 +80,51 @@ static __inline__ long local_add_return(
 	return result;
 }
 
+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
+{
+	unsigned long result;
+
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_add_return	\n"
+		"	addu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqzl	%0, 1b					\n"
+		"	addu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else if (kernel_uses_llsc) {
+		unsigned long temp;
+
+		__asm__ __volatile__(
+		"	.set	mips3					\n"
+		"1:"	__LL	"%1, %2		# local_add_return	\n"
+		"	addu	%0, %1, %3				\n"
+			__SC	"%0, %2					\n"
+		"	beqz	%0, 1b					\n"
+		"	addu	%0, %1, %3				\n"
+		"	.set	mips0					\n"
+		: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+		: "Ir" (i), "m" (l->a.counter)
+		: "memory");
+	} else {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		result = l->a.counter;
+		result += i;
+		l->a.counter = result;
+		local_irq_restore(flags);
+	}
+
+	return result;
+}
+
 static __inline__ long local_sub_return(long i, local_t * l)
 {
 	unsigned long result;
@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(
 
 #define local_cmpxchg(l, o, n) \
 	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
diff -ruNp linux-3.13.11/arch/mips/include/asm/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/page.h
--- linux-3.13.11/arch/mips/include/asm/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/page.h	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct pa
   #ifdef CONFIG_CPU_MIPS32
     typedef struct { unsigned long pte_low, pte_high; } pte_t;
     #define pte_val(x)	  ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-    #define __pte(x)	  ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+    #define __pte(x)	  ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
   #else
      typedef struct { unsigned long long pte; } pte_t;
      #define pte_val(x) ((x).pte)
diff -ruNp linux-3.13.11/arch/mips/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/pgalloc.h
--- linux-3.13.11/arch/mips/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/pgalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,11 @@ static inline void pud_populate(struct m
 {
 	set_pud(pud, __pud((unsigned long)pmd));
 }
+
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_populate(mm, pud, pmd);
+}
 #endif
 
 /*
diff -ruNp linux-3.13.11/arch/mips/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/pgtable.h
--- linux-3.13.11/arch/mips/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,9 @@
 #include <asm/io.h>
 #include <asm/pgtable-bits.h>
 
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
+
 struct mm_struct;
 struct vm_area_struct;
 
diff -ruNp linux-3.13.11/arch/mips/include/asm/smtc_proc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/smtc_proc.h
--- linux-3.13.11/arch/mips/include/asm/smtc_proc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/smtc_proc.h	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_sta
 
 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
 
-extern atomic_t smtc_fpu_recoveries;
+extern atomic_unchecked_t smtc_fpu_recoveries;
 
 #endif /* __ASM_SMTC_PROC_H */
diff -ruNp linux-3.13.11/arch/mips/include/asm/syscall.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/syscall.h
--- linux-3.13.11/arch/mips/include/asm/syscall.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/syscall.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,14 +39,14 @@ static inline unsigned long mips_get_sys
 
 #ifdef CONFIG_32BIT
 	case 4: case 5: case 6: case 7:
-		return get_user(*arg, (int *)usp + 4 * n);
+		return get_user(*arg, (int *)usp + n);
 #endif
 
 #ifdef CONFIG_64BIT
 	case 4: case 5: case 6: case 7:
 #ifdef CONFIG_MIPS32_O32
 		if (test_thread_flag(TIF_32BIT_REGS))
-			return get_user(*arg, (int *)usp + 4 * n);
+			return get_user(*arg, (int *)usp + n);
 		else
 #endif
 			*arg = regs->regs[4 + n];
@@ -83,11 +83,10 @@ static inline void syscall_get_arguments
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
-	unsigned long arg;
 	int ret;
 
 	while (n--)
-		ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+		ret |= mips_get_syscall_arg(args++, task, regs, i++);
 
 	/*
 	 * No way to communicate an error because this is a void function.
diff -ruNp linux-3.13.11/arch/mips/include/asm/thread_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/thread_info.h
--- linux-3.13.11/arch/mips/include/asm/thread_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/thread_info.h	2014-07-09 12:00:15.000000000 +0200
@@ -115,6 +115,8 @@ static inline struct thread_info *curren
 #define TIF_FPUBOUND		24	/* thread bound to FPU-full CPU set */
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACEPOINT	26	/* syscall tracepoint instrumentation */
+/* li takes a 32bit immediate */
+#define TIF_GRSEC_SETXID	29	/* update credentials on syscall entry/exit */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -132,13 +134,14 @@ static inline struct thread_info *curren
 #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_GRSEC_SETXID	(1<<TIF_GRSEC_SETXID)
 
 #define _TIF_WORK_SYSCALL_ENTRY	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\
-				 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT	(_TIF_NOHZ | _TIF_SYSCALL_TRACE |	\
-				 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		\
@@ -146,7 +149,7 @@ static inline struct thread_info *curren
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK |		\
 				 _TIF_WORK_SYSCALL_EXIT |		\
-				 _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
 
 /*
  * We stash processor id into a COP0 register to retrieve it fast
diff -ruNp linux-3.13.11/arch/mips/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/uaccess.h
--- linux-3.13.11/arch/mips/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -128,6 +128,7 @@ extern u64 __ua_limit;
 	__ok == 0;							\
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)					\
 	likely(__access_ok((addr), (size), __access_mask))
 
diff -ruNp linux-3.13.11/arch/mips/kernel/binfmt_elfn32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/binfmt_elfn32.c
--- linux-3.13.11/arch/mips/kernel/binfmt_elfn32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/binfmt_elfn32.c	2014-07-09 12:00:15.000000000 +0200
@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #include <asm/processor.h>
 #include <linux/module.h>
 #include <linux/elfcore.h>
diff -ruNp linux-3.13.11/arch/mips/kernel/binfmt_elfo32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/binfmt_elfo32.c
--- linux-3.13.11/arch/mips/kernel/binfmt_elfo32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/binfmt_elfo32.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
+
+#define PAX_DELTA_MMAP_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#define PAX_DELTA_STACK_LEN	(TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
+#endif
+
 #include <asm/processor.h>
 
 /*
diff -ruNp linux-3.13.11/arch/mips/kernel/ftrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/ftrace.c
--- linux-3.13.11/arch/mips/kernel/ftrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/ftrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned
 	safe_store_code(new_code1, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	ip += 4;
-	safe_store_code(new_code2, ip, faulted);
+	safe_store_code(new_code2, ip + 4, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	flush_icache_range(ip, ip + 8); /* original ip + 12 */
+	flush_icache_range(ip, ip + 8);
 	return 0;
 }
 #endif
diff -ruNp linux-3.13.11/arch/mips/kernel/i8259.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/i8259.c
--- linux-3.13.11/arch/mips/kernel/i8259.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/i8259.c	2014-07-09 12:00:15.000000000 +0200
@@ -205,7 +205,7 @@ spurious_8259A_irq:
 			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
 			spurious_irq_mask |= irqmask;
 		}
-		atomic_inc(&irq_err_count);
+		atomic_inc_unchecked(&irq_err_count);
 		/*
 		 * Theoretically we do not have to handle this IRQ,
 		 * but in Linux this does not cause problems and is
diff -ruNp linux-3.13.11/arch/mips/kernel/irq-gt641xx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/irq-gt641xx.c
--- linux-3.13.11/arch/mips/kernel/irq-gt641xx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/irq-gt641xx.c	2014-07-09 12:00:15.000000000 +0200
@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
 		}
 	}
 
-	atomic_inc(&irq_err_count);
+	atomic_inc_unchecked(&irq_err_count);
 }
 
 void __init gt641xx_irq_init(void)
diff -ruNp linux-3.13.11/arch/mips/kernel/irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/irq.c
--- linux-3.13.11/arch/mips/kernel/irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
 	printk("unexpected IRQ # %d\n", irq);
 }
 
-atomic_t irq_err_count;
+atomic_unchecked_t irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
 	return 0;
 }
 
 asmlinkage void spurious_interrupt(void)
 {
-	atomic_inc(&irq_err_count);
+	atomic_inc_unchecked(&irq_err_count);
 }
 
 void __init init_IRQ(void)
diff -ruNp linux-3.13.11/arch/mips/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/process.c
--- linux-3.13.11/arch/mips/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_stru
 out:
 	return pc;
 }
-
-/*
- * Don't forget that the stack pointer must be aligned on a 8 bytes
- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
- */
-unsigned long arch_align_stack(unsigned long sp)
-{
-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() & ~PAGE_MASK;
-
-	return sp & ALMASK;
-}
diff -ruNp linux-3.13.11/arch/mips/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/ptrace.c
--- linux-3.13.11/arch/mips/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,6 +30,7 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/ftrace.h>
+#include <linux/vs_base.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
@@ -398,6 +399,9 @@ long arch_ptrace(struct task_struct *chi
 	void __user *datavp = (void __user *) data;
 	unsigned long __user *datalp = (void __user *) data;
 
+	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+		goto out;
+
 	switch (request) {
 	/* when I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -652,6 +656,10 @@ long arch_ptrace(struct task_struct *chi
 	return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
@@ -668,6 +676,11 @@ asmlinkage void syscall_trace_enter(stru
 	    tracehook_report_syscall_entry(regs))
 		ret = -1;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->regs[2]);
 
diff -ruNp linux-3.13.11/arch/mips/kernel/reset.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/reset.c
--- linux-3.13.11/arch/mips/kernel/reset.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/reset.c	2014-07-09 12:00:15.000000000 +0200
@@ -13,6 +13,7 @@
 #include <linux/reboot.h>
 
 #include <asm/reboot.h>
+#include <asm/bug.h>
 
 /*
  * Urgs ...  Too many MIPS machines to handle this in a generic way.
@@ -29,16 +30,19 @@ void machine_restart(char *command)
 {
 	if (_machine_restart)
 		_machine_restart(command);
+	BUG();
 }
 
 void machine_halt(void)
 {
 	if (_machine_halt)
 		_machine_halt();
+	BUG();
 }
 
 void machine_power_off(void)
 {
 	if (pm_power_off)
 		pm_power_off();
+	BUG();
 }
diff -ruNp linux-3.13.11/arch/mips/kernel/scall32-o32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall32-o32.S
--- linux-3.13.11/arch/mips/kernel/scall32-o32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall32-o32.S	2014-07-09 12:00:15.000000000 +0200
@@ -491,7 +491,7 @@ EXPORT(sys_call_table)
 	PTR	sys_mq_timedreceive
 	PTR	sys_mq_notify			/* 4275 */
 	PTR	sys_mq_getsetattr
-	PTR	sys_ni_syscall			/* sys_vserver */
+	PTR	sys_vserver
 	PTR	sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key			/* 4280 */
diff -ruNp linux-3.13.11/arch/mips/kernel/scall64-64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-64.S
--- linux-3.13.11/arch/mips/kernel/scall64-64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-64.S	2014-07-09 12:00:15.000000000 +0200
@@ -352,7 +352,7 @@ EXPORT(sys_call_table)
 	PTR	sys_mq_timedreceive
 	PTR	sys_mq_notify
 	PTR	sys_mq_getsetattr		/* 5235 */
-	PTR	sys_ni_syscall			/* sys_vserver */
+	PTR	sys_vserver
 	PTR	sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key
diff -ruNp linux-3.13.11/arch/mips/kernel/scall64-n32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-n32.S
--- linux-3.13.11/arch/mips/kernel/scall64-n32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-n32.S	2014-07-09 12:00:15.000000000 +0200
@@ -345,7 +345,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_mq_timedreceive
 	PTR	compat_sys_mq_notify
 	PTR	compat_sys_mq_getsetattr
-	PTR	sys_ni_syscall			/* 6240, sys_vserver */
+	PTR	sys32_vserver			/* 6240 */
 	PTR	compat_sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key
diff -ruNp linux-3.13.11/arch/mips/kernel/scall64-o32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-o32.S
--- linux-3.13.11/arch/mips/kernel/scall64-o32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/scall64-o32.S	2014-07-09 12:00:15.000000000 +0200
@@ -469,7 +469,7 @@ EXPORT(sys32_call_table)
 	PTR	compat_sys_mq_timedreceive
 	PTR	compat_sys_mq_notify		/* 4275 */
 	PTR	compat_sys_mq_getsetattr
-	PTR	sys_ni_syscall			/* sys_vserver */
+	PTR	sys32_vserver
 	PTR	compat_sys_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key			/* 4280 */
diff -ruNp linux-3.13.11/arch/mips/kernel/smtc-proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/smtc-proc.c
--- linux-3.13.11/arch/mips/kernel/smtc-proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/smtc-proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
 
 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
 
-atomic_t smtc_fpu_recoveries;
+atomic_unchecked_t smtc_fpu_recoveries;
 
 static int smtc_proc_show(struct seq_file *m, void *v)
 {
@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_fil
 	for(i = 0; i < NR_CPUS; i++)
 		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
 	seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
-		   atomic_read(&smtc_fpu_recoveries));
+		   atomic_read_unchecked(&smtc_fpu_recoveries));
 	return 0;
 }
 
@@ -73,7 +73,7 @@ void init_smtc_stats(void)
 		smtc_cpu_stats[i].selfipis = 0;
 	}
 
-	atomic_set(&smtc_fpu_recoveries, 0);
+	atomic_set_unchecked(&smtc_fpu_recoveries, 0);
 
 	proc_create("smtc", 0444, NULL, &smtc_proc_fops);
 }
diff -ruNp linux-3.13.11/arch/mips/kernel/smtc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/smtc.c
--- linux-3.13.11/arch/mips/kernel/smtc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/smtc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
 	}
 	smtc_ipi_qdump();
 	printk("%d Recoveries of \"stolen\" FPU\n",
-	       atomic_read(&smtc_fpu_recoveries));
+	       atomic_read_unchecked(&smtc_fpu_recoveries));
 }
 
 
diff -ruNp linux-3.13.11/arch/mips/kernel/sync-r4k.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/sync-r4k.c
--- linux-3.13.11/arch/mips/kernel/sync-r4k.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/sync-r4k.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,8 +21,8 @@
 #include <asm/mipsregs.h>
 
 static atomic_t count_start_flag = ATOMIC_INIT(0);
-static atomic_t count_count_start = ATOMIC_INIT(0);
-static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
 static atomic_t count_reference = ATOMIC_INIT(0);
 
 #define COUNTON 100
@@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
 
 	for (i = 0; i < NR_LOOPS; i++) {
 		/* slaves loop on '!= 2' */
-		while (atomic_read(&count_count_start) != 1)
+		while (atomic_read_unchecked(&count_count_start) != 1)
 			mb();
-		atomic_set(&count_count_stop, 0);
+		atomic_set_unchecked(&count_count_stop, 0);
 		smp_wmb();
 
 		/* this lets the slaves write their count register */
-		atomic_inc(&count_count_start);
+		atomic_inc_unchecked(&count_count_start);
 
 		/*
 		 * Everyone initialises count in the last loop:
@@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
 		/*
 		 * Wait for all slaves to leave the synchronization point:
 		 */
-		while (atomic_read(&count_count_stop) != 1)
+		while (atomic_read_unchecked(&count_count_stop) != 1)
 			mb();
-		atomic_set(&count_count_start, 0);
+		atomic_set_unchecked(&count_count_start, 0);
 		smp_wmb();
-		atomic_inc(&count_count_stop);
+		atomic_inc_unchecked(&count_count_stop);
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
@@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
 	initcount = atomic_read(&count_reference);
 
 	for (i = 0; i < NR_LOOPS; i++) {
-		atomic_inc(&count_count_start);
-		while (atomic_read(&count_count_start) != 2)
+		atomic_inc_unchecked(&count_count_start);
+		while (atomic_read_unchecked(&count_count_start) != 2)
 			mb();
 
 		/*
@@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
 		if (i == NR_LOOPS-1)
 			write_c0_count(initcount);
 
-		atomic_inc(&count_count_stop);
-		while (atomic_read(&count_count_stop) != 2)
+		atomic_inc_unchecked(&count_count_stop);
+		while (atomic_read_unchecked(&count_count_stop) != 2)
 			mb();
 	}
 	/* Arrange for an interrupt in a short while */
diff -ruNp linux-3.13.11/arch/mips/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/traps.c
--- linux-3.13.11/arch/mips/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -334,9 +334,10 @@ void show_registers(struct pt_regs *regs
 
 	__show_regs(regs);
 	print_modules();
-	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
-	       current->comm, current->pid, current_thread_info(), current,
-	      field, current_thread_info()->tp_value);
+	printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n",
+		current->comm, task_pid_nr(current), current->xid,
+		current_thread_info(), current,
+		field, current_thread_info()->tp_value);
 	if (cpu_has_userlocal) {
 		unsigned long tls;
 
@@ -690,7 +691,18 @@ asmlinkage void do_ov(struct pt_regs *re
 	siginfo_t info;
 
 	prev_state = exception_enter();
-	die_if_kernel("Integer overflow", regs);
+	if (unlikely(!user_mode(regs))) {
+
+#ifdef CONFIG_PAX_REFCOUNT
+		if (fixup_exception(regs)) {
+			pax_report_refcount_overflow(regs);
+			exception_exit(prev_state);
+			return;
+		}
+#endif
+
+		die("Integer overflow", regs);
+	}
 
 	info.si_code = FPE_INTOVF;
 	info.si_signo = SIGFPE;
diff -ruNp linux-3.13.11/arch/mips/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/mm/fault.c
--- linux-3.13.11/arch/mips/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,23 @@
 #include <asm/highmem.h>		/* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -199,6 +216,14 @@ bad_area:
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
+			pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		tsk->thread.cp0_badvaddr = address;
 		tsk->thread.error_code = write;
 #if 0
diff -ruNp linux-3.13.11/arch/mips/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/mm/mmap.c
--- linux-3.13.11/arch/mips/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_a
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
 	int do_color_align;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	if (unlikely(len > TASK_SIZE))
@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_a
 		do_color_align = 1;
 
 	/* requesting a specific address */
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_a
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
 	info.length = len;
 	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 
 	if (dir == DOWN) {
 		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_str
 {
 	unsigned long random_factor = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (current->flags & PF_RANDOMIZE) {
 		random_factor = get_random_int();
 		random_factor = random_factor << PAGE_SHIFT;
@@ -157,38 +167,23 @@ void arch_pick_mmap_layout(struct mm_str
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-static inline unsigned long brk_rnd(void)
-{
-	unsigned long rnd = get_random_int();
-
-	rnd = rnd << PAGE_SHIFT;
-	/* 8MB for 32bit, 256MB for 64bit */
-	if (TASK_IS_32BIT_ADDR)
-		rnd = rnd & 0x7ffffful;
-	else
-		rnd = rnd & 0xffffffful;
 
-	return rnd;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long base = mm->brk;
-	unsigned long ret;
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
 
-	ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (ret < mm->brk)
-		return mm->brk;
-
-	return ret;
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
 }
 
 int __virt_addr_valid(const volatile void *kaddr)
diff -ruNp linux-3.13.11/arch/mips/pci/pci-octeon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/pci/pci-octeon.c
--- linux-3.13.11/arch/mips/pci/pci-octeon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/pci/pci-octeon.c	2014-07-09 12:00:15.000000000 +0200
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pc
 
 
 static struct pci_ops octeon_pci_ops = {
-	octeon_read_config,
-	octeon_write_config,
+	.read = octeon_read_config,
+	.write = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
diff -ruNp linux-3.13.11/arch/mips/pci/pcie-octeon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/pci/pcie-octeon.c
--- linux-3.13.11/arch/mips/pci/pcie-octeon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/pci/pcie-octeon.c	2014-07-09 12:00:15.000000000 +0200
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(str
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-	octeon_pcie0_read_config,
-	octeon_pcie0_write_config,
+	.read = octeon_pcie0_read_config,
+	.write = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-	octeon_pcie1_read_config,
-	octeon_pcie1_write_config,
+	.read = octeon_pcie1_read_config,
+	.write = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie
 };
 
 static struct pci_ops octeon_dummy_ops = {
-	octeon_dummy_read_config,
-	octeon_dummy_write_config,
+	.read = octeon_dummy_read_config,
+	.write = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
diff -ruNp linux-3.13.11/arch/mips/sgi-ip27/ip27-nmi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/sgi-ip27/ip27-nmi.c
--- linux-3.13.11/arch/mips/sgi-ip27/ip27-nmi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/sgi-ip27/ip27-nmi.c	2014-07-09 12:00:15.000000000 +0200
@@ -187,9 +187,9 @@ void
 cont_nmi_dump(void)
 {
 #ifndef REAL_NMI_SIGNAL
-	static atomic_t nmied_cpus = ATOMIC_INIT(0);
+	static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
 
-	atomic_inc(&nmied_cpus);
+	atomic_inc_unchecked(&nmied_cpus);
 #endif
 	/*
 	 * Only allow 1 cpu to proceed
@@ -233,7 +233,7 @@ cont_nmi_dump(void)
 		udelay(10000);
 	}
 #else
-	while (atomic_read(&nmied_cpus) != num_online_cpus());
+	while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
 #endif
 
 	/*
diff -ruNp linux-3.13.11/arch/mips/sni/rm200.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/sni/rm200.c
--- linux-3.13.11/arch/mips/sni/rm200.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/sni/rm200.c	2014-07-09 12:00:15.000000000 +0200
@@ -270,7 +270,7 @@ spurious_8259A_irq:
 			       "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
 			spurious_irq_mask |= irqmask;
 		}
-		atomic_inc(&irq_err_count);
+		atomic_inc_unchecked(&irq_err_count);
 		/*
 		 * Theoretically we do not have to handle this IRQ,
 		 * but in Linux this does not cause problems and is
diff -ruNp linux-3.13.11/arch/mips/vr41xx/common/icu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/vr41xx/common/icu.c
--- linux-3.13.11/arch/mips/vr41xx/common/icu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/vr41xx/common/icu.c	2014-07-09 12:00:15.000000000 +0200
@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
 
 	printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
 
-	atomic_inc(&irq_err_count);
+	atomic_inc_unchecked(&irq_err_count);
 
 	return -1;
 }
diff -ruNp linux-3.13.11/arch/mips/vr41xx/common/irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/vr41xx/common/irq.c
--- linux-3.13.11/arch/mips/vr41xx/common/irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mips/vr41xx/common/irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int ir
 	irq_cascade_t *cascade;
 
 	if (irq >= NR_IRQS) {
-		atomic_inc(&irq_err_count);
+		atomic_inc_unchecked(&irq_err_count);
 		return;
 	}
 
@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int ir
 		ret = cascade->get_irq(irq);
 		irq = ret;
 		if (ret < 0)
-			atomic_inc(&irq_err_count);
+			atomic_inc_unchecked(&irq_err_count);
 		else
 			irq_dispatch(irq);
 		if (!irqd_irq_disabled(idata) && chip->irq_unmask)
diff -ruNp linux-3.13.11/arch/mn10300/proc-mn103e010/include/proc/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mn10300/proc-mn103e010/include/proc/cache.h
--- linux-3.13.11/arch/mn10300/proc-mn103e010/include/proc/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mn10300/proc-mn103e010/include/proc/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,12 +11,14 @@
 #ifndef _ASM_PROC_CACHE_H
 #define _ASM_PROC_CACHE_H
 
+#include <linux/const.h>
+
 /* L1 cache */
 
 #define L1_CACHE_NWAYS		4	/* number of ways in caches */
 #define L1_CACHE_NENTRIES	256	/* number of entries in each way */
-#define L1_CACHE_BYTES		16	/* bytes per entry */
 #define L1_CACHE_SHIFT		4	/* shift for bytes per entry */
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)	/* bytes per entry */
 #define L1_CACHE_WAYDISP	0x1000	/* displacement of one way from the next */
 
 #define L1_CACHE_TAG_VALID	0x00000001	/* cache tag valid bit */
diff -ruNp linux-3.13.11/arch/mn10300/proc-mn2ws0050/include/proc/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
--- linux-3.13.11/arch/mn10300/proc-mn2ws0050/include/proc/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/mn10300/proc-mn2ws0050/include/proc/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,13 +16,15 @@
 #ifndef _ASM_PROC_CACHE_H
 #define _ASM_PROC_CACHE_H
 
+#include <linux/const.h>
+
 /*
  * L1 cache
  */
 #define L1_CACHE_NWAYS		4		/* number of ways in caches */
 #define L1_CACHE_NENTRIES	128		/* number of entries in each way */
-#define L1_CACHE_BYTES		32		/* bytes per entry */
 #define L1_CACHE_SHIFT		5		/* shift for bytes per entry */
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)	/* bytes per entry */
 #define L1_CACHE_WAYDISP	0x1000		/* distance from one way to the next */
 
 #define L1_CACHE_TAG_VALID	0x00000001	/* cache tag valid bit */
diff -ruNp linux-3.13.11/arch/openrisc/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/openrisc/include/asm/cache.h
--- linux-3.13.11/arch/openrisc/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/openrisc/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -19,11 +19,13 @@
 #ifndef __ASM_OPENRISC_CACHE_H
 #define __ASM_OPENRISC_CACHE_H
 
+#include <linux/const.h>
+
 /* FIXME: How can we replace these with values from the CPU...
  * they shouldn't be hard-coded!
  */
 
-#define L1_CACHE_BYTES 16
 #define L1_CACHE_SHIFT 4
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* __ASM_OPENRISC_CACHE_H */
diff -ruNp linux-3.13.11/arch/parisc/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/Kconfig
--- linux-3.13.11/arch/parisc/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -320,6 +320,8 @@ source "fs/Kconfig"
 
 source "arch/parisc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/parisc/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/atomic.h
--- linux-3.13.11/arch/parisc/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_posit
 	return dec;
 }
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 #endif /* !CONFIG_64BIT */
 
 
diff -ruNp linux-3.13.11/arch/parisc/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/cache.h
--- linux-3.13.11/arch/parisc/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,6 +5,7 @@
 #ifndef __ARCH_PARISC_CACHE_H
 #define __ARCH_PARISC_CACHE_H
 
+#include <linux/const.h>
 
 /*
  * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
@@ -15,13 +16,13 @@
  * just ruin performance.
  */
 #ifdef CONFIG_PA20
-#define L1_CACHE_BYTES 64
 #define L1_CACHE_SHIFT 6
 #else
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
 #endif
 
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
diff -ruNp linux-3.13.11/arch/parisc/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/elf.h
--- linux-3.13.11/arch/parisc/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -342,6 +342,13 @@ struct pt_regs;	/* forward declaration..
 
 #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	0x10000UL
+
+#define PAX_DELTA_MMAP_LEN	16
+#define PAX_DELTA_STACK_LEN	16
+#endif
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */
diff -ruNp linux-3.13.11/arch/parisc/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/pgalloc.h
--- linux-3.13.11/arch/parisc/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/pgalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -61,6 +61,11 @@ static inline void pgd_populate(struct m
 		        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
 }
 
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+	pgd_populate(mm, pgd, pmd);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_st
 #define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, x)			do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
+#define pgd_populate_kernel(mm, pmd, pte)	BUG()
 
 #endif
 
diff -ruNp linux-3.13.11/arch/parisc/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/pgtable.h
--- linux-3.13.11/arch/parisc/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_
 #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
diff -ruNp linux-3.13.11/arch/parisc/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/uaccess.h
--- linux-3.13.11/arch/parisc/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -246,10 +246,10 @@ static inline unsigned long __must_check
                                           const void __user *from,
                                           unsigned long n)
 {
-        int sz = __compiletime_object_size(to);
+        size_t sz = __compiletime_object_size(to);
         int ret = -EFAULT;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
                 ret = __copy_from_user(to, from, n);
         else
                 copy_from_user_overflow();
diff -ruNp linux-3.13.11/arch/parisc/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/module.c
--- linux-3.13.11/arch/parisc/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,16 +98,38 @@
 
 /* three functions to determine where in the module core
  * or init pieces the location is */
+static inline int in_init_rx(struct module *me, void *loc)
+{
+	return (loc >= me->module_init_rx &&
+		loc < (me->module_init_rx + me->init_size_rx));
+}
+
+static inline int in_init_rw(struct module *me, void *loc)
+{
+	return (loc >= me->module_init_rw &&
+		loc < (me->module_init_rw + me->init_size_rw));
+}
+
 static inline int in_init(struct module *me, void *loc)
 {
-	return (loc >= me->module_init &&
-		loc <= (me->module_init + me->init_size));
+	return in_init_rx(me, loc) || in_init_rw(me, loc);
+}
+
+static inline int in_core_rx(struct module *me, void *loc)
+{
+	return (loc >= me->module_core_rx &&
+		loc < (me->module_core_rx + me->core_size_rx));
+}
+
+static inline int in_core_rw(struct module *me, void *loc)
+{
+	return (loc >= me->module_core_rw &&
+		loc < (me->module_core_rw + me->core_size_rw));
 }
 
 static inline int in_core(struct module *me, void *loc)
 {
-	return (loc >= me->module_core &&
-		loc <= (me->module_core + me->core_size));
+	return in_core_rx(me, loc) || in_core_rw(me, loc);
 }
 
 static inline int in_local(struct module *me, void *loc)
@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_
 	}
 
 	/* align things a bit */
-	me->core_size = ALIGN(me->core_size, 16);
-	me->arch.got_offset = me->core_size;
-	me->core_size += gots * sizeof(struct got_entry);
-
-	me->core_size = ALIGN(me->core_size, 16);
-	me->arch.fdesc_offset = me->core_size;
-	me->core_size += fdescs * sizeof(Elf_Fdesc);
+	me->core_size_rw = ALIGN(me->core_size_rw, 16);
+	me->arch.got_offset = me->core_size_rw;
+	me->core_size_rw += gots * sizeof(struct got_entry);
+
+	me->core_size_rw = ALIGN(me->core_size_rw, 16);
+	me->arch.fdesc_offset = me->core_size_rw;
+	me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
 
 	me->arch.got_max = gots;
 	me->arch.fdesc_max = fdescs;
@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module
 
 	BUG_ON(value == 0);
 
-	got = me->module_core + me->arch.got_offset;
+	got = me->module_core_rw + me->arch.got_offset;
 	for (i = 0; got[i].addr; i++)
 		if (got[i].addr == value)
 			goto out;
@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module
 #ifdef CONFIG_64BIT
 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
 {
-	Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
+	Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
 
 	if (!value) {
 		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module
 
 	/* Create new one */
 	fdesc->addr = value;
-	fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+	fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
 	return (Elf_Addr)fdesc;
 }
 #endif /* CONFIG_64BIT */
@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
 
 	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
 	end = table + sechdrs[me->arch.unwind_section].sh_size;
-	gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+	gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
 
 	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
 	       me->arch.unwind_section, table, end, gp);
diff -ruNp linux-3.13.11/arch/parisc/kernel/sys_parisc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/sys_parisc.c
--- linux-3.13.11/arch/parisc/kernel/sys_parisc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/sys_parisc.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,9 +33,11 @@
 #include <linux/utsname.h>
 #include <linux/personality.h>
 
-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+static unsigned long get_unshared_area(unsigned long addr, unsigned long len,
+					unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, NULL, flags);
 
 	info.flags = 0;
 	info.length = len;
@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(u
 	info.high_limit = TASK_SIZE;
 	info.align_mask = 0;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -69,9 +72,10 @@ static unsigned long shared_align_offset
 }
 
 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff)
+		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 	info.flags = 0;
 	info.length = len;
@@ -79,6 +83,7 @@ static unsigned long get_shared_area(str
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & (SHMLBA - 1);
 	info.align_offset = shared_align_offset(filp, pgoff);
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(str
 			return -EINVAL;
 		return addr;
 	}
-	if (!addr)
+	if (!addr) {
 		addr = TASK_UNMAPPED_BASE;
 
+#ifdef CONFIG_PAX_RANDMMAP
+		if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+			addr += current->mm->delta_mmap;
+#endif
+
+	}
+
 	if (filp || (flags & MAP_SHARED))
-		addr = get_shared_area(filp, addr, len, pgoff);
+		addr = get_shared_area(filp, addr, len, pgoff, flags);
 	else
-		addr = get_unshared_area(addr, len);
+		addr = get_unshared_area(addr, len, flags);
 
 	return addr;
 }
diff -ruNp linux-3.13.11/arch/parisc/kernel/syscall_table.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/syscall_table.S
--- linux-3.13.11/arch/parisc/kernel/syscall_table.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/syscall_table.S	2014-07-09 12:00:15.000000000 +0200
@@ -358,7 +358,7 @@
 	ENTRY_COMP(mbind)		/* 260 */
 	ENTRY_COMP(get_mempolicy)
 	ENTRY_COMP(set_mempolicy)
-	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
+	ENTRY_DIFF(vserver)
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)		/* 265 */
 	ENTRY_SAME(keyctl)
diff -ruNp linux-3.13.11/arch/parisc/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/traps.c
--- linux-3.13.11/arch/parisc/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -229,8 +229,9 @@ void die_if_kernel(char *str, struct pt_
 		if (err == 0)
 			return; /* STFU */
 
-		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
-			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
+		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n",
+			current->comm, task_pid_nr(current), current->xid,
+			str, err, regs->iaoq[0]);
 #ifdef PRINT_USER_FAULTS
 		/* XXX for debugging only */
 		show_regs(regs);
@@ -263,8 +264,8 @@ void die_if_kernel(char *str, struct pt_
 		pdc_console_restart();
 	
 	if (err)
-		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
-			current->comm, task_pid_nr(current), str, err);
+		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n",
+			current->comm, task_pid_nr(current), current->xid, str, err);
 
 	/* Wot's wrong wif bein' racy? */
 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
@@ -722,9 +723,7 @@ void notrace handle_interruption(int cod
 
 			down_read(&current->mm->mmap_sem);
 			vma = find_vma(current->mm,regs->iaoq[0]);
-			if (vma && (regs->iaoq[0] >= vma->vm_start)
-				&& (vma->vm_flags & VM_EXEC)) {
-
+			if (vma && (regs->iaoq[0] >= vma->vm_start)) {
 				fault_address = regs->iaoq[0];
 				fault_space = regs->iasq[0];
 
diff -ruNp linux-3.13.11/arch/parisc/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/mm/fault.c
--- linux-3.13.11/arch/parisc/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/parisc/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/unistd.h>
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
 static unsigned long
 parisc_acctyp(unsigned long code, unsigned int inst)
 {
-	if (code == 6 || code == 16)
+	if (code == 6 || code == 7 || code == 16)
 	    return VM_EXEC;
 
 	switch (inst & 0xf0000000) {
@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
 			}
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when rt_sigreturn trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+	int err;
+
+	do { /* PaX: unpatched PLT emulation */
+		unsigned int bl, depwi;
+
+		err = get_user(bl, (unsigned int *)instruction_pointer(regs));
+		err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
+
+		if (err)
+			break;
+
+		if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
+			unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
+
+			err = get_user(ldw, (unsigned int *)addr);
+			err |= get_user(bv, (unsigned int *)(addr+4));
+			err |= get_user(ldw2, (unsigned int *)(addr+8));
+
+			if (err)
+				break;
+
+			if (ldw == 0x0E801096U &&
+			    bv == 0xEAC0C000U &&
+			    ldw2 == 0x0E881095U)
+			{
+				unsigned int resolver, map;
+
+				err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
+				err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
+				if (err)
+					break;
+
+				regs->gr[20] = instruction_pointer(regs)+8;
+				regs->gr[21] = map;
+				regs->gr[22] = resolver;
+				regs->iaoq[0] = resolver | 3UL;
+				regs->iaoq[1] = regs->iaoq[0] + 4;
+				return 3;
+			}
+		}
+	} while (0);
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+
+#ifndef CONFIG_PAX_EMUSIGRT
+	if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
+		return 1;
+#endif
+
+	do { /* PaX: rt_sigreturn emulation */
+		unsigned int ldi1, ldi2, bel, nop;
+
+		err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
+		err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
+		err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
+		err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
+
+		if (err)
+			break;
+
+		if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
+		    ldi2 == 0x3414015AU &&
+		    bel == 0xE4008200U &&
+		    nop == 0x08000240U)
+		{
+			regs->gr[25] = (ldi1 & 2) >> 1;
+			regs->gr[20] = __NR_rt_sigreturn;
+			regs->gr[31] = regs->iaoq[1] + 16;
+			regs->sr[0] = regs->iasq[1];
+			regs->iaoq[0] = 0x100UL;
+			regs->iaoq[1] = regs->iaoq[0] + 4;
+			regs->iasq[0] = regs->sr[2];
+			regs->iasq[1] = regs->sr[2];
+			return 2;
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fix;
@@ -210,8 +321,33 @@ retry:
 
 good_area:
 
-	if ((vma->vm_flags & acc_type) != acc_type)
+	if ((vma->vm_flags & acc_type) != acc_type) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
+		    (address & ~3UL) == instruction_pointer(regs))
+		{
+			up_read(&mm->mmap_sem);
+			switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+			case 3:
+				return;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+			case 2:
+				return;
+#endif
+
+			}
+			pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		goto bad_area;
+	}
 
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
@@ -272,8 +408,9 @@ bad_area:
 
 #ifdef PRINT_USER_FAULTS
 		printk(KERN_DEBUG "\n");
-		printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
-		    task_pid_nr(tsk), tsk->comm, code, address);
+		printk(KERN_DEBUG "do_page_fault() pid=%d:#%u "
+		    "command='%s' type=%lu address=0x%08lx\n",
+		    task_pid_nr(tsk), tsk->xid, tsk->comm, code, address);
 		if (vma) {
 			printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
 					vma->vm_start, vma->vm_end);
diff -ruNp linux-3.13.11/arch/powerpc/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/Kconfig
--- linux-3.13.11/arch/powerpc/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -382,6 +382,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 config KEXEC
 	bool "kexec system call"
 	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+	depends on !GRKERNSEC_KMEM
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -1028,6 +1029,8 @@ source "lib/Kconfig"
 
 source "arch/powerpc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 config KEYS_COMPAT
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/atomic.h
--- linux-3.13.11/arch/powerpc/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_
 	return t1;
 }
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 #endif /* __powerpc64__ */
 
 #endif /* __KERNEL__ */
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/cache.h
--- linux-3.13.11/arch/powerpc/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -3,6 +3,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/const.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -22,7 +23,7 @@
 #define L1_CACHE_SHIFT		7
 #endif
 
-#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define	L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define	SMP_CACHE_BYTES		L1_CACHE_BYTES
 
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/elf.h
--- linux-3.13.11/arch/powerpc/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -28,8 +28,19 @@
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE		(randomize_et_dyn(0x20000000))
+#define ELF_ET_DYN_BASE		(0x20000000)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(0x10000000UL)
+
+#ifdef __powerpc64__
+#define PAX_DELTA_MMAP_LEN	(is_32bit_task() ? 16 : 28)
+#define PAX_DELTA_STACK_LEN	(is_32bit_task() ? 16 : 28)
+#else
+#define PAX_DELTA_MMAP_LEN	15
+#define PAX_DELTA_STACK_LEN	15
+#endif
+#endif
 
 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
 
@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(s
 	(0x7ff >> (PAGE_SHIFT - 12)) : \
 	(0x3ffff >> (PAGE_SHIFT - 12)))
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
-
 #ifdef CONFIG_SPU_BASE
 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
 #define NT_SPU		1
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/exec.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/exec.h
--- linux-3.13.11/arch/powerpc/include/asm/exec.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/exec.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,6 @@
 #ifndef _ASM_POWERPC_EXEC_H
 #define _ASM_POWERPC_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* _ASM_POWERPC_EXEC_H */
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/kmap_types.h
--- linux-3.13.11/arch/powerpc/include/asm/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/kmap_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,7 +10,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_KMAP_TYPES_H */
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/local.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/local.h
--- linux-3.13.11/arch/powerpc/include/asm/local.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/local.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,15 +9,26 @@ typedef struct
 	atomic_long_t a;
 } local_t;
 
+typedef struct
+{
+	atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)	atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)	atomic_long_read_unchecked(&(l)->a)
 #define local_set(l,i)	atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l,i)	atomic_long_set_unchecked(&(l)->a, (i))
 
 #define local_add(i,l)	atomic_long_add((i),(&(l)->a))
+#define local_add_unchecked(i,l)	atomic_long_add_unchecked((i),(&(l)->a))
 #define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
+#define local_sub_unchecked(i,l)	atomic_long_sub_unchecked((i),(&(l)->a))
 #define local_inc(l)	atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l)	atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)	atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l)	atomic_long_dec_unchecked(&(l)->a)
 
 static __inline__ long local_add_return(long a, local_t *l)
 {
@@ -35,6 +46,7 @@ static __inline__ long local_add_return(
 
 	return t;
 }
+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
 
 #define local_add_negative(a, l)	(local_add_return((a), (l)) < 0)
 
@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(
 
 	return t;
 }
+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
 
 static __inline__ long local_inc_return(local_t *l)
 {
@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(
 
 #define local_cmpxchg(l, o, n) \
 	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+	(cmpxchg_local(&((l)->a.counter), (o), (n)))
 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
 
 /**
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/mman.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/mman.h
--- linux-3.13.11/arch/powerpc/include/asm/mman.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/mman.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm
 }
 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
 {
 	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
 }
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/page.h
--- linux-3.13.11/arch/powerpc/include/asm/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/page.h	2014-07-09 12:00:15.000000000 +0200
@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
  * and needs to be executable.  This means the whole heap ends
  * up being executable.
  */
-#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS32 \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
 #endif
 
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
+
 #ifndef CONFIG_PPC_BOOK3S_64
 /*
  * Use the top bit of the higher-level page table entries to indicate whether
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/page_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/page_64.h
--- linux-3.13.11/arch/powerpc/include/asm/page_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/page_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -153,15 +153,18 @@ do {						\
  * stack by default, so in the absence of a PT_GNU_STACK program header
  * we turn execute permission off.
  */
-#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
-					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS32 \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#ifndef CONFIG_PAX_PAGEEXEC
 #define VM_STACK_DEFAULT_FLAGS \
 	(is_32bit_task() ? \
 	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
+#endif
 
 #include <asm-generic/getorder.h>
 
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/pgalloc-64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pgalloc-64.h
--- linux-3.13.11/arch/powerpc/include/asm/pgalloc-64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pgalloc-64.h	2014-07-09 12:00:15.000000000 +0200
@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_st
 #ifndef CONFIG_PPC_64K_PAGES
 
 #define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
+#define pgd_populate_kernel(MM, PGD, PUD)	pgd_populate((MM), (PGD), (PUD))
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -71,6 +72,11 @@ static inline void pud_populate(struct m
 	pud_set(pud, (unsigned long)pmd);
 }
 
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	pud_populate(mm, pud, pmd);
+}
+
 #define pmd_populate(mm, pmd, pte_page) \
 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_ta
 #endif
 
 #define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)
+#define pud_populate_kernel(mm, pud, pmd)	pud_populate((mm), (pud), (pmd))
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pgtable.h
--- linux-3.13.11/arch/powerpc/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,7 @@
 #define _ASM_POWERPC_PGTABLE_H
 #ifdef __KERNEL__
 
+#include <linux/const.h>
 #ifndef __ASSEMBLY__
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/pte-hash32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pte-hash32.h
--- linux-3.13.11/arch/powerpc/include/asm/pte-hash32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/pte-hash32.h	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
 #define _PAGE_FILE	0x004	/* when !present: nonlinear file mapping */
 #define _PAGE_USER	0x004	/* usermode access allowed */
 #define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
+#define _PAGE_EXEC	_PAGE_GUARDED
 #define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
 #define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/reg.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/reg.h
--- linux-3.13.11/arch/powerpc/include/asm/reg.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/reg.h	2014-07-09 12:00:15.000000000 +0200
@@ -239,6 +239,7 @@
 #define SPRN_DBCR	0x136	/* e300 Data Breakpoint Control Reg */
 #define SPRN_DSISR	0x012	/* Data Storage Interrupt Status Register */
 #define   DSISR_NOHPTE		0x40000000	/* no translation found */
+#define   DSISR_GUARDED		0x10000000	/* fetch from guarded storage */
 #define   DSISR_PROTFAULT	0x08000000	/* protection fault */
 #define   DSISR_ISSTORE		0x02000000	/* access was a store */
 #define   DSISR_DABRMATCH	0x00400000	/* hit data breakpoint */
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/smp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/smp.h
--- linux-3.13.11/arch/powerpc/include/asm/smp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/smp.h	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ struct smp_ops_t {
 	int   (*cpu_disable)(void);
 	void  (*cpu_die)(unsigned int nr);
 	int   (*cpu_bootable)(unsigned int nr);
-};
+} __no_const;
 
 extern void smp_send_debugger_break(void);
 extern void start_secondary_resume(void);
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/thread_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/thread_info.h
--- linux-3.13.11/arch/powerpc/include/asm/thread_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/thread_info.h	2014-07-09 12:00:15.000000000 +0200
@@ -91,7 +91,6 @@ static inline struct thread_info *curren
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_32BIT		4	/* 32 bit binary */
-#define TIF_PERFMON_WORK	5	/* work for pfm_handle_work() */
 #define TIF_PERFMON_CTXSW	6	/* perfmon needs ctxsw calls */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
@@ -108,6 +107,9 @@ static inline struct thread_info *curren
 #if defined(CONFIG_PPC64)
 #define TIF_ELF2ABI		18	/* function descriptors must die! */
 #endif
+#define TIF_PERFMON_WORK	19	/* work for pfm_handle_work() */
+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
+#define TIF_GRSEC_SETXID	5	/* update credentials on syscall entry/exit */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -127,9 +129,10 @@ static inline struct thread_info *curren
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
+#define _TIF_GRSEC_SETXID	(1<<TIF_GRSEC_SETXID)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
-				 _TIF_NOHZ)
+				 _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
diff -ruNp linux-3.13.11/arch/powerpc/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/uaccess.h
--- linux-3.13.11/arch/powerpc/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -58,6 +58,7 @@
 
 #endif
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size)		\
 	(__chk_user_ptr(addr),			\
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
@@ -318,52 +319,6 @@ do {								\
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-static inline unsigned long copy_from_user(void *to,
-		const void __user *from, unsigned long n)
-{
-	unsigned long over;
-
-	if (access_ok(VERIFY_READ, from, n))
-		return __copy_tofrom_user((__force void __user *)to, from, n);
-	if ((unsigned long)from < TASK_SIZE) {
-		over = (unsigned long)from + n - TASK_SIZE;
-		return __copy_tofrom_user((__force void __user *)to, from,
-				n - over) + over;
-	}
-	return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
-		const void *from, unsigned long n)
-{
-	unsigned long over;
-
-	if (access_ok(VERIFY_WRITE, to, n))
-		return __copy_tofrom_user(to, (__force void __user *)from, n);
-	if ((unsigned long)to < TASK_SIZE) {
-		over = (unsigned long)to + n - TASK_SIZE;
-		return __copy_tofrom_user(to, (__force void __user *)from,
-				n - over) + over;
-	}
-	return n;
-}
-
-#else /* __powerpc64__ */
-
-#define __copy_in_user(to, from, size) \
-	__copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-				    unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-				  unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-				  unsigned long n);
-
-#endif /* __powerpc64__ */
-
 static inline unsigned long __copy_from_user_inatomic(void *to,
 		const void __user *from, unsigned long n)
 {
@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_
 		if (ret == 0)
 			return 0;
 	}
+
+	if (!__builtin_constant_p(n))
+		check_object_size(to, n, false);
+
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_us
 		if (ret == 0)
 			return 0;
 	}
+
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
+
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_us
 	return __copy_to_user_inatomic(to, from, size);
 }
 
+#ifndef __powerpc64__
+
+static inline unsigned long __must_check copy_from_user(void *to,
+		const void __user *from, unsigned long n)
+{
+	unsigned long over;
+
+	if ((long)n < 0)
+		return n;
+
+	if (access_ok(VERIFY_READ, from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
+		return __copy_tofrom_user((__force void __user *)to, from, n);
+	}
+	if ((unsigned long)from < TASK_SIZE) {
+		over = (unsigned long)from + n - TASK_SIZE;
+		if (!__builtin_constant_p(n - over))
+			check_object_size(to, n - over, false);
+		return __copy_tofrom_user((__force void __user *)to, from,
+				n - over) + over;
+	}
+	return n;
+}
+
+static inline unsigned long __must_check copy_to_user(void __user *to,
+		const void *from, unsigned long n)
+{
+	unsigned long over;
+
+	if ((long)n < 0)
+		return n;
+
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
+		return __copy_tofrom_user(to, (__force void __user *)from, n);
+	}
+	if ((unsigned long)to < TASK_SIZE) {
+		over = (unsigned long)to + n - TASK_SIZE;
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n - over, true);
+		return __copy_tofrom_user(to, (__force void __user *)from,
+				n - over) + over;
+	}
+	return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+	__copy_tofrom_user((to), (from), (size))
+
+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if ((long)n < 0 || n > INT_MAX)
+		return n;
+
+	if (!__builtin_constant_p(n))
+		check_object_size(to, n, false);
+
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		n = __copy_from_user(to, from, n);
+	else
+		memset(to, 0, n);
+	return n;
+}
+
+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if ((long)n < 0 || n > INT_MAX)
+		return n;
+
+	if (likely(access_ok(VERIFY_WRITE, to, n))) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
+		n = __copy_to_user(to, from, n);
+	}
+	return n;
+}
+
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+				  unsigned long n);
+
+#endif /* __powerpc64__ */
+
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
diff -ruNp linux-3.13.11/arch/powerpc/include/uapi/asm/unistd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/uapi/asm/unistd.h
--- linux-3.13.11/arch/powerpc/include/uapi/asm/unistd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/include/uapi/asm/unistd.h	2014-07-09 12:00:15.000000000 +0200
@@ -275,7 +275,7 @@
 #endif
 #define __NR_rtas		255
 #define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
+#define __NR_vserver		257
 #define __NR_migrate_pages	258
 #define __NR_mbind		259
 #define __NR_get_mempolicy	260
diff -ruNp linux-3.13.11/arch/powerpc/kernel/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/Makefile
--- linux-3.13.11/arch/powerpc/kernel/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -26,6 +26,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-
 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
 endif
 
+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
+
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   process.o systbl.o idle.o \
diff -ruNp linux-3.13.11/arch/powerpc/kernel/exceptions-64e.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/exceptions-64e.S
--- linux-3.13.11/arch/powerpc/kernel/exceptions-64e.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/exceptions-64e.S	2014-07-09 12:00:15.000000000 +0200
@@ -759,6 +759,7 @@ storage_fault_common:
 	std	r14,_DAR(r1)
 	std	r15,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.save_nvgprs
 	mr	r4,r14
 	mr	r5,r15
 	ld	r14,PACA_EXGEN+EX_R14(r13)
@@ -767,8 +768,7 @@ storage_fault_common:
 	cmpdi	r3,0
 	bne-	1f
 	b	.ret_from_except_lite
-1:	bl	.save_nvgprs
-	mr	r5,r3
+1:	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
 	bl	.bad_page_fault
diff -ruNp linux-3.13.11/arch/powerpc/kernel/exceptions-64s.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/exceptions-64s.S
--- linux-3.13.11/arch/powerpc/kernel/exceptions-64s.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/exceptions-64s.S	2014-07-09 12:00:15.000000000 +0200
@@ -1390,10 +1390,10 @@ handle_page_fault:
 11:	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.save_nvgprs
 	bl	.do_page_fault
 	cmpdi	r3,0
 	beq+	12f
-	bl	.save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lwz	r4,_DAR(r1)
diff -ruNp linux-3.13.11/arch/powerpc/kernel/module_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/module_32.c
--- linux-3.13.11/arch/powerpc/kernel/module_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/module_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr
 			me->arch.core_plt_section = i;
 	}
 	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
-		printk("Module doesn't contain .plt or .init.plt sections.\n");
+		printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
 		return -ENOEXEC;
 	}
 
@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *locati
 
 	DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
 	/* Init, or core PLT? */
-	if (location >= mod->module_core
-	    && location < mod->module_core + mod->core_size)
+	if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
+	    (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
-	else
+	else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
+		 (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
+	else {
+		printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
+		return ~0UL;
+	}
 
 	/* Find this entry, or if that fails, the next avail. entry */
 	while (entry->jump[0]) {
@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechd
 	}
 #ifdef CONFIG_DYNAMIC_FTRACE
 	module->arch.tramp =
-		do_plt_call(module->module_core,
+		do_plt_call(module->module_core_rx,
 			    (unsigned long)ftrace_caller,
 			    sechdrs, module);
 #endif
diff -ruNp linux-3.13.11/arch/powerpc/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/process.c
--- linux-3.13.11/arch/powerpc/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -888,8 +888,8 @@ void show_regs(struct pt_regs * regs)
 	 * Lookup NIP late so we have the best change of getting the
 	 * above info out without failing
 	 */
-	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
-	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
+	printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
+	printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
 #endif
 	show_stack(current, (unsigned long *) regs->gpr[1]);
 	if (!user_mode(regs))
@@ -1385,10 +1385,10 @@ void show_stack(struct task_struct *tsk,
 		newsp = stack[0];
 		ip = stack[STACK_FRAME_LR_SAVE];
 		if (!firstframe || ip != lr) {
-			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
+			printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
-				printk(" (%pS)",
+				printk(" (%pA)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
 			}
@@ -1408,7 +1408,7 @@ void show_stack(struct task_struct *tsk,
 			struct pt_regs *regs = (struct pt_regs *)
 				(sp + STACK_FRAME_OVERHEAD);
 			lr = regs->link;
-			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
+			printk("--- Exception: %lx at %pA\n    LR = %pA\n",
 			       regs->trap, (void *)regs->nip, (void *)lr);
 			firstframe = 1;
 		}
@@ -1444,58 +1444,3 @@ void notrace __ppc64_runlatch_off(void)
 	mtspr(SPRN_CTRLT, ctrl);
 }
 #endif /* CONFIG_PPC64 */
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() & ~PAGE_MASK;
-	return sp & ~0xf;
-}
-
-static inline unsigned long brk_rnd(void)
-{
-        unsigned long rnd = 0;
-
-	/* 8MB for 32bit, 1GB for 64bit */
-	if (is_32bit_task())
-		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
-	else
-		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
-
-	return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long base = mm->brk;
-	unsigned long ret;
-
-#ifdef CONFIG_PPC_STD_MMU_64
-	/*
-	 * If we are using 1TB segments and we are allowed to randomise
-	 * the heap, we can put it above 1TB so it is backed by a 1TB
-	 * segment. Otherwise the heap will be in the bottom 1TB
-	 * which always uses 256MB segments and this may result in a
-	 * performance penalty.
-	 */
-	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
-		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
-	ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (ret < mm->brk)
-		return mm->brk;
-
-	return ret;
-}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (ret < base)
-		return base;
-
-	return ret;
-}
diff -ruNp linux-3.13.11/arch/powerpc/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/ptrace.c
--- linux-3.13.11/arch/powerpc/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *chi
 	return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_re
 
 	secure_computing_strict(regs->gpr[0]);
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs))
 		/*
@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_re
 {
 	int step;
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff -ruNp linux-3.13.11/arch/powerpc/kernel/signal_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/signal_32.c
--- linux-3.13.11/arch/powerpc/kernel/signal_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/signal_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -1004,7 +1004,7 @@ int handle_rt_signal32(unsigned long sig
 	/* Save user registers on the stack */
 	frame = &rt_sf->uc.uc_mcontext;
 	addr = frame;
-	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
+	if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
 		sigret = 0;
 		tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
 	} else {
diff -ruNp linux-3.13.11/arch/powerpc/kernel/signal_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/signal_64.c
--- linux-3.13.11/arch/powerpc/kernel/signal_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/signal_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct
 #endif
 
 	/* Set up to return from userspace. */
-	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
+	if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
 		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
 	} else {
 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
diff -ruNp linux-3.13.11/arch/powerpc/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/traps.c
--- linux-3.13.11/arch/powerpc/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begi
 	return flags;
 }
 
+extern void gr_handle_kernel_exploit(void);
+
 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
 			       int signr)
 {
@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
+
+	gr_handle_kernel_exploit();
+
 	do_exit(signr);
 }
 
@@ -1272,8 +1277,9 @@ void nonrecoverable_exception(struct pt_
 
 void trace_syscall(struct pt_regs *regs)
 {
-	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
-	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
+	printk("Task: %p(%d:#%u), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
+	       current, task_pid_nr(current), current->xid,
+	       regs->nip, regs->link, regs->gpr[0],
 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
 }
 
diff -ruNp linux-3.13.11/arch/powerpc/kernel/vdso.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/vdso.c
--- linux-3.13.11/arch/powerpc/kernel/vdso.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kernel/vdso.c	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,7 @@
 #include <asm/vdso.h>
 #include <asm/vdso_datapage.h>
 #include <asm/setup.h>
+#include <asm/mman.h>
 
 #undef DEBUG
 
@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct l
 	vdso_base = VDSO32_MBASE;
 #endif
 
-	current->mm->context.vdso_base = 0;
+	current->mm->context.vdso_base = ~0UL;
 
 	/* vDSO has a problem and was disabled, just don't "enable" it for the
 	 * process
@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct l
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
 				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
-				      0, 0);
+				      0, MAP_PRIVATE | MAP_EXECUTABLE);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
 		goto fail_mmapsem;
diff -ruNp linux-3.13.11/arch/powerpc/kvm/powerpc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kvm/powerpc.c
--- linux-3.13.11/arch/powerpc/kvm/powerpc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/kvm/powerpc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1141,7 +1141,7 @@ void kvmppc_init_lpid(unsigned long nr_l
 }
 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
 
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
 	return 0;
 }
diff -ruNp linux-3.13.11/arch/powerpc/lib/usercopy_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/lib/usercopy_64.c
--- linux-3.13.11/arch/powerpc/lib/usercopy_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/lib/usercopy_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,22 +9,6 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>
 
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
-		n = __copy_to_user(to, from, n);
-	return n;
-}
-
 unsigned long copy_in_user(void __user *to, const void __user *from,
 			   unsigned long n)
 {
@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
 	return n;
 }
 
-EXPORT_SYMBOL(copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_in_user);
 
diff -ruNp linux-3.13.11/arch/powerpc/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/fault.c
--- linux-3.13.11/arch/powerpc/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,6 +33,10 @@
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
 #include <linux/context_tracking.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
+#include <linux/unistd.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -69,6 +73,33 @@ static inline int notify_page_fault(stru
 }
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->nip = fault address)
+ *
+ * returns 1 when task should be killed
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int __user *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * Check whether the instruction at regs->nip is a store using
  * an update addressing form which will update r1.
@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_re
 	 * indicate errors in DSISR but can validly be set in SRR1.
 	 */
 	if (trap == 0x400)
-		error_code &= 0x48200000;
+		error_code &= 0x58200000;
 	else
 		is_write = error_code & DSISR_ISSTORE;
 #else
@@ -378,7 +409,7 @@ good_area:
          * "undefined".  Of those that can be set, this is the only
          * one which seems bad.
          */
-	if (error_code & 0x10000000)
+	if (error_code & DSISR_GUARDED)
                 /* Guarded storage error. */
 		goto bad_area;
 #endif /* CONFIG_8xx */
@@ -393,7 +424,7 @@ good_area:
 		 * processors use the same I/D cache coherency mechanism
 		 * as embedded.
 		 */
-		if (error_code & DSISR_PROTFAULT)
+		if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
 			goto bad_area;
 #endif /* CONFIG_PPC_STD_MMU */
 
@@ -483,6 +514,23 @@ bad_area:
 bad_area_nosemaphore:
 	/* User mode accesses cause a SIGSEGV */
 	if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+#ifdef CONFIG_PPC_STD_MMU
+			if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
+#else
+			if (is_exec && regs->nip == address) {
+#endif
+				switch (pax_handle_fetch_fault(regs)) {
+				}
+
+				pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
+				do_group_exit(SIGKILL);
+			}
+		}
+#endif
+
 		_exception(SIGSEGV, regs, code, address);
 		goto bail;
 	}
diff -ruNp linux-3.13.11/arch/powerpc/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/mmap.c
--- linux-3.13.11/arch/powerpc/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
 	return sysctl_legacy_va_layout;
 }
 
-static unsigned long mmap_rnd(void)
+static unsigned long mmap_rnd(struct mm_struct *mm)
 {
 	unsigned long rnd = 0;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (current->flags & PF_RANDOMIZE) {
 		/* 8MB for 32bit, 1GB for 64bit */
 		if (is_32bit_task())
@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
 	return rnd << PAGE_SHIFT;
 }
 
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(struct mm_struct *mm)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 
@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(vo
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
 }
 
 /*
@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_str
 	 */
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
-		mm->mmap_base = mmap_base();
+		mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
diff -ruNp linux-3.13.11/arch/powerpc/mm/slice.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/slice.c
--- linux-3.13.11/arch/powerpc/mm/slice.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/mm/slice.c	2014-07-09 12:00:15.000000000 +0200
@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return check_heap_stack_gap(vma, addr, len, 0);
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bot
 	info.align_offset = 0;
 
 	addr = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP)
+		addr += mm->delta_mmap;
+#endif
+
 	while (addr < TASK_SIZE) {
 		info.low_limit = addr;
 		if (!slice_scan_available(addr, available, 1, &addr))
@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(un
 	if (fixed && addr > (mm->task_size - len))
 		return -EINVAL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
+		addr = 0;
+#endif
+
 	/* If hint, make sure it matches our alignment restrictions */
 	if (!fixed && addr) {
 		addr = _ALIGN_UP(addr, 1ul << pshift);
diff -ruNp linux-3.13.11/arch/powerpc/platforms/cell/celleb_scc_pciex.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/platforms/cell/celleb_scc_pciex.c
--- linux-3.13.11/arch/powerpc/platforms/cell/celleb_scc_pciex.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/platforms/cell/celleb_scc_pciex.c	2014-07-09 12:00:15.000000000 +0200
@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct
 }
 
 static struct pci_ops scc_pciex_pci_ops = {
-	scc_pciex_read_config,
-	scc_pciex_write_config,
+	.read = scc_pciex_read_config,
+	.write = scc_pciex_write_config,
 };
 
 static void pciex_clear_intr_all(unsigned int __iomem *base)
diff -ruNp linux-3.13.11/arch/powerpc/platforms/cell/spufs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/platforms/cell/spufs/file.c
--- linux-3.13.11/arch/powerpc/platforms/cell/spufs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/powerpc/platforms/cell/spufs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_stru
 	return VM_FAULT_NOPAGE;
 }
 
-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
 				unsigned long address,
-				void *buf, int len, int write)
+				void *buf, size_t len, int write)
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long offset = address - vma->vm_start;
diff -ruNp linux-3.13.11/arch/s390/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/Kconfig
--- linux-3.13.11/arch/s390/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -650,6 +650,8 @@ source "fs/Kconfig"
 
 source "arch/s390/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/s390/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/atomic.h
--- linux-3.13.11/arch/s390/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
diff -ruNp linux-3.13.11/arch/s390/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/cache.h
--- linux-3.13.11/arch/s390/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,8 +9,10 @@
 #ifndef __ARCH_S390_CACHE_H
 #define __ARCH_S390_CACHE_H
 
-#define L1_CACHE_BYTES     256
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT     8
+#define L1_CACHE_BYTES     (_AC(1,UL) << L1_CACHE_SHIFT)
 #define NET_SKB_PAD	   32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff -ruNp linux-3.13.11/arch/s390/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/elf.h
--- linux-3.13.11/arch/s390/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE		(randomize_et_dyn(STACK_TOP / 3 * 2))
+#define ELF_ET_DYN_BASE		(STACK_TOP / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
+
+#define PAX_DELTA_MMAP_LEN	(test_thread_flag(TIF_31BIT) ? 15 : 26)
+#define PAX_DELTA_STACK_LEN	(test_thread_flag(TIF_31BIT) ? 15 : 26)
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -222,9 +228,6 @@ struct linux_binprm;
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
 
 #endif
diff -ruNp linux-3.13.11/arch/s390/include/asm/exec.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/exec.h
--- linux-3.13.11/arch/s390/include/asm/exec.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/exec.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,6 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 
 #endif /* __ASM_EXEC_H */
diff -ruNp linux-3.13.11/arch/s390/include/asm/tlb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/tlb.h
--- linux-3.13.11/arch/s390/include/asm/tlb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/tlb.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
diff -ruNp linux-3.13.11/arch/s390/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/uaccess.h
--- linux-3.13.11/arch/s390/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned lo
 	__range_ok((unsigned long)(addr), (size));	\
 })
 
+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
 #define access_ok(type, addr, size) __access_ok(addr, size)
 
 /*
@@ -245,6 +246,10 @@ static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
+
+	if ((long)n < 0)
+		return n;
+
 	return __copy_to_user(to, from, n);
 }
 
@@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	return uaccess.copy_from_user(n, from, to);
 }
 
@@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user()
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	unsigned int sz = __compiletime_object_size(to);
+	size_t sz = __compiletime_object_size(to);
 
 	might_fault();
-	if (unlikely(sz != -1 && sz < n)) {
+
+	if ((long)n < 0)
+		return n;
+
+	if (unlikely(sz != (size_t)-1 && sz < n)) {
 		copy_from_user_overflow();
 		return n;
 	}
diff -ruNp linux-3.13.11/arch/s390/include/uapi/asm/unistd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/uapi/asm/unistd.h
--- linux-3.13.11/arch/s390/include/uapi/asm/unistd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/include/uapi/asm/unistd.h	2014-07-09 12:00:15.000000000 +0200
@@ -200,7 +200,7 @@
 #define __NR_clock_gettime	(__NR_timer_create+6)
 #define __NR_clock_getres	(__NR_timer_create+7)
 #define __NR_clock_nanosleep	(__NR_timer_create+8)
-/* Number 263 is reserved for vserver */
+#define __NR_vserver		263
 #define __NR_statfs64		265
 #define __NR_fstatfs64		266
 #define __NR_remap_file_pages	267
diff -ruNp linux-3.13.11/arch/s390/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/module.c
--- linux-3.13.11/arch/s390/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *
 
 	/* Increase core size by size of got & plt and set start
 	   offsets for got and plt. */
-	me->core_size = ALIGN(me->core_size, 4);
-	me->arch.got_offset = me->core_size;
-	me->core_size += me->arch.got_size;
-	me->arch.plt_offset = me->core_size;
-	me->core_size += me->arch.plt_size;
+	me->core_size_rw = ALIGN(me->core_size_rw, 4);
+	me->arch.got_offset = me->core_size_rw;
+	me->core_size_rw += me->arch.got_size;
+	me->arch.plt_offset = me->core_size_rx;
+	me->core_size_rx += me->arch.plt_size;
 	return 0;
 }
 
@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, El
 		if (info->got_initialized == 0) {
 			Elf_Addr *gotent;
 
-			gotent = me->module_core + me->arch.got_offset +
+			gotent = me->module_core_rw + me->arch.got_offset +
 				info->got_offset;
 			*gotent = val;
 			info->got_initialized = 1;
@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, El
 			rc = apply_rela_bits(loc, val, 0, 64, 0);
 		else if (r_type == R_390_GOTENT ||
 			 r_type == R_390_GOTPLTENT) {
-			val += (Elf_Addr) me->module_core - loc;
+			val += (Elf_Addr) me->module_core_rw - loc;
 			rc = apply_rela_bits(loc, val, 1, 32, 1);
 		}
 		break;
@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, El
 	case R_390_PLTOFF64:	/* 16 bit offset from GOT to PLT. */
 		if (info->plt_initialized == 0) {
 			unsigned int *ip;
-			ip = me->module_core + me->arch.plt_offset +
+			ip = me->module_core_rx + me->arch.plt_offset +
 				info->plt_offset;
 #ifndef CONFIG_64BIT
 			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, El
 			       val - loc + 0xffffUL < 0x1ffffeUL) ||
 			      (r_type == R_390_PLT32DBL &&
 			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
-				val = (Elf_Addr) me->module_core +
+				val = (Elf_Addr) me->module_core_rx +
 					me->arch.plt_offset +
 					info->plt_offset;
 			val += rela->r_addend - loc;
@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, El
 	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
 	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
 		val = val + rela->r_addend -
-			((Elf_Addr) me->module_core + me->arch.got_offset);
+			((Elf_Addr) me->module_core_rw + me->arch.got_offset);
 		if (r_type == R_390_GOTOFF16)
 			rc = apply_rela_bits(loc, val, 0, 16, 0);
 		else if (r_type == R_390_GOTOFF32)
@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, El
 		break;
 	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
 	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
-		val = (Elf_Addr) me->module_core + me->arch.got_offset +
+		val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
 			rela->r_addend - loc;
 		if (r_type == R_390_GOTPC)
 			rc = apply_rela_bits(loc, val, 1, 32, 0);
diff -ruNp linux-3.13.11/arch/s390/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/process.c
--- linux-3.13.11/arch/s390/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -242,39 +242,3 @@ unsigned long get_wchan(struct task_stru
 	}
 	return 0;
 }
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() & ~PAGE_MASK;
-	return sp & ~0xf;
-}
-
-static inline unsigned long brk_rnd(void)
-{
-	/* 8MB for 32bit, 1GB for 64bit */
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-	else
-		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
-
-	if (ret < mm->brk)
-		return mm->brk;
-	return ret;
-}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
-	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
-
-	if (!(current->flags & PF_RANDOMIZE))
-		return base;
-	if (ret < base)
-		return base;
-	return ret;
-}
diff -ruNp linux-3.13.11/arch/s390/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/ptrace.c
--- linux-3.13.11/arch/s390/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
 #include <linux/tracehook.h>
 #include <linux/seccomp.h>
 #include <linux/compat.h>
+#include <linux/vs_base.h>
 #include <trace/syscall.h>
 #include <asm/segment.h>
 #include <asm/page.h>
diff -ruNp linux-3.13.11/arch/s390/kernel/syscalls.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/syscalls.S
--- linux-3.13.11/arch/s390/kernel/syscalls.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/kernel/syscalls.S	2014-07-09 12:00:15.000000000 +0200
@@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett
 SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)	/* 260 */
 SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
 SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
-NI_SYSCALL							/* reserved for vserver */
+SYSCALL(sys_vserver,sys_vserver,sys32_vserver)
 SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
 SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
 SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
diff -ruNp linux-3.13.11/arch/s390/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/mm/mmap.c
--- linux-3.13.11/arch/s390/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/s390/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_str
 	 */
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mmap_base_legacy();
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_str
 	 */
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mmap_base_legacy();
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = s390_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
 	}
 }
diff -ruNp linux-3.13.11/arch/score/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/include/asm/cache.h
--- linux-3.13.11/arch/score/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,7 +1,9 @@
 #ifndef _ASM_SCORE_CACHE_H
 #define _ASM_SCORE_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT		4
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* _ASM_SCORE_CACHE_H */
diff -ruNp linux-3.13.11/arch/score/include/asm/exec.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/include/asm/exec.h
--- linux-3.13.11/arch/score/include/asm/exec.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/include/asm/exec.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,6 @@
 #ifndef _ASM_SCORE_EXEC_H
 #define _ASM_SCORE_EXEC_H
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) (x)
 
 #endif /* _ASM_SCORE_EXEC_H */
diff -ruNp linux-3.13.11/arch/score/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/kernel/process.c
--- linux-3.13.11/arch/score/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/score/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_stru
 
 	return task_pt_regs(task)->cp0_epc;
 }
-
-unsigned long arch_align_stack(unsigned long sp)
-{
-	return sp;
-}
diff -ruNp linux-3.13.11/arch/sh/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/Kconfig
--- linux-3.13.11/arch/sh/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -927,6 +927,8 @@ source "fs/Kconfig"
 
 source "arch/sh/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/sh/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/include/asm/cache.h
--- linux-3.13.11/arch/sh/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,10 +9,11 @@
 #define __ASM_SH_CACHE_H
 #ifdef __KERNEL__
 
+#include <linux/const.h>
 #include <linux/init.h>
 #include <cpu/cache.h>
 
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
diff -ruNp linux-3.13.11/arch/sh/kernel/irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/kernel/irq.c
--- linux-3.13.11/arch/sh/kernel/irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/kernel/irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/ftrace.h>
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
+// #include <linux/vs_context.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>
diff -ruNp linux-3.13.11/arch/sh/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/mm/mmap.c
--- linux-3.13.11/arch/sh/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sh/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(str
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	int do_colour_align;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(str
 	if (filp || (flags & MAP_SHARED))
 		do_colour_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_colour_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(str
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
 	info.flags = 0;
 	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
+	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
 	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct fi
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct fi
 	if (filp || (flags & MAP_SHARED))
 		do_colour_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		if (do_colour_align)
@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct fi
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct fi
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = TASK_SIZE;
 		addr = vm_unmapped_area(&info);
 	}
diff -ruNp linux-3.13.11/arch/sparc/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/Kconfig
--- linux-3.13.11/arch/sparc/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -553,6 +553,8 @@ source "fs/Kconfig"
 
 source "arch/sparc/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/sparc/include/asm/atomic_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/atomic_64.h
--- linux-3.13.11/arch/sparc/include/asm/atomic_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/atomic_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,18 +14,40 @@
 #define ATOMIC64_INIT(i)	{ (i) }
 
 #define atomic_read(v)		(*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+	return v->counter;
+}
 #define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+	return v->counter;
+}
 
 #define atomic_set(v, i)	(((v)->counter) = i)
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+	v->counter = i;
+}
 #define atomic64_set(v, i)	(((v)->counter) = i)
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+	v->counter = i;
+}
 
 extern void atomic_add(int, atomic_t *);
+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
 extern void atomic64_add(long, atomic64_t *);
+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
 extern void atomic_sub(int, atomic_t *);
+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
 extern void atomic64_sub(long, atomic64_t *);
+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
 
 extern int atomic_add_ret(int, atomic_t *);
+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
 extern long atomic64_add_ret(long, atomic64_t *);
+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
 extern int atomic_sub_ret(int, atomic_t *);
 extern long atomic64_sub_ret(long, atomic64_t *);
 
@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
 
 #define atomic_inc_return(v) atomic_add_ret(1, v)
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_ret_unchecked(1, v);
+}
 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+	return atomic64_add_ret_unchecked(1, v);
+}
 
 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
 
 #define atomic_add_return(i, v) atomic_add_ret(i, v)
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+	return atomic_add_ret_unchecked(i, v);
+}
 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+{
+	return atomic64_add_ret_unchecked(i, v);
+}
 
 /*
  * atomic_inc_and_test - increment and test
@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_inc_return_unchecked(v) == 0;
+}
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomi
 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
 
 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+	atomic_add_unchecked(1, v);
+}
 #define atomic64_inc(v) atomic64_add(1, v)
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
+	atomic64_add_unchecked(1, v);
+}
 
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+	atomic_sub_unchecked(1, v);
+}
 #define atomic64_dec(v) atomic64_sub(1, v)
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
+	atomic64_sub_unchecked(1, v);
+}
 
 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
+	int c, old, new;
 	c = atomic_read(v);
 	for (;;) {
-		if (unlikely(c == (u)))
+		if (unlikely(c == u))
 			break;
-		old = atomic_cmpxchg((v), c, c + (a));
+
+		asm volatile("addcc %2, %0, %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+			     "tvs %%icc, 6\n"
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a)
+			     : "cc");
+
+		old = atomic_cmpxchg(v, c, new);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(at
 #define atomic64_cmpxchg(v, o, n) \
 	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
+{
+	return xchg(&v->counter, new);
+}
 
 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
+	long c, old, new;
 	c = atomic64_read(v);
 	for (;;) {
-		if (unlikely(c == (u)))
+		if (unlikely(c == u))
 			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
+
+		asm volatile("addcc %2, %0, %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+			     "tvs %%xcc, 6\n"
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a)
+			     : "cc");
+
+		old = atomic64_cmpxchg(v, c, new);
 		if (likely(old == c))
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c != u;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff -ruNp linux-3.13.11/arch/sparc/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/cache.h
--- linux-3.13.11/arch/sparc/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,10 +7,12 @@
 #ifndef _SPARC_CACHE_H
 #define _SPARC_CACHE_H
 
+#include <linux/const.h>
+
 #define ARCH_SLAB_MINALIGN	__alignof__(unsigned long long)
 
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES 32
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SPARC32
 #define SMP_CACHE_BYTES_SHIFT 5
diff -ruNp linux-3.13.11/arch/sparc/include/asm/elf_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/elf_32.h
--- linux-3.13.11/arch/sparc/include/asm/elf_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/elf_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -114,6 +114,13 @@ typedef struct {
 
 #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	0x10000UL
+
+#define PAX_DELTA_MMAP_LEN	16
+#define PAX_DELTA_STACK_LEN	16
+#endif
+
 /* This yields a mask that user programs can use to figure out what
    instruction set this cpu supports.  This can NOT be done in userspace
    on Sparc.  */
diff -ruNp linux-3.13.11/arch/sparc/include/asm/elf_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/elf_64.h
--- linux-3.13.11/arch/sparc/include/asm/elf_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/elf_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -189,6 +189,13 @@ typedef struct {
 #define ELF_ET_DYN_BASE		0x0000010000000000UL
 #define COMPAT_ELF_ET_DYN_BASE	0x0000000070000000UL
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
+
+#define PAX_DELTA_MMAP_LEN	(test_thread_flag(TIF_32BIT) ? 14 : 28)
+#define PAX_DELTA_STACK_LEN	(test_thread_flag(TIF_32BIT) ? 15 : 29)
+#endif
+
 extern unsigned long sparc64_elf_hwcap;
 #define ELF_HWCAP	sparc64_elf_hwcap
 
diff -ruNp linux-3.13.11/arch/sparc/include/asm/pgalloc_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgalloc_32.h
--- linux-3.13.11/arch/sparc/include/asm/pgalloc_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgalloc_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp,
 }
 
 #define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
+#define pgd_populate_kernel(MM, PGD, PMD)      pgd_populate((MM), (PGD), (PMD))
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
 				   unsigned long address)
diff -ruNp linux-3.13.11/arch/sparc/include/asm/pgalloc_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgalloc_64.h
--- linux-3.13.11/arch/sparc/include/asm/pgalloc_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgalloc_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_st
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
+#define pud_populate_kernel(MM, PUD, PMD)	pud_populate((MM), (PUD), (PMD))
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
diff -ruNp linux-3.13.11/arch/sparc/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtable.h
--- linux-3.13.11/arch/sparc/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,4 +5,8 @@
 #else
 #include <asm/pgtable_32.h>
 #endif
+
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
+
 #endif
diff -ruNp linux-3.13.11/arch/sparc/include/asm/pgtable_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtable_32.h
--- linux-3.13.11/arch/sparc/include/asm/pgtable_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtable_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void
 #define PAGE_SHARED	SRMMU_PAGE_SHARED
 #define PAGE_COPY	SRMMU_PAGE_COPY
 #define PAGE_READONLY	SRMMU_PAGE_RDONLY
+#define PAGE_SHARED_NOEXEC	SRMMU_PAGE_SHARED_NOEXEC
+#define PAGE_COPY_NOEXEC	SRMMU_PAGE_COPY_NOEXEC
+#define PAGE_READONLY_NOEXEC	SRMMU_PAGE_RDONLY_NOEXEC
 #define PAGE_KERNEL	SRMMU_PAGE_KERNEL
 
 /* Top-level page directory - dummy used by init-mm.
@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
 
 /*         xwr */
 #define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
+#define __P001  PAGE_READONLY_NOEXEC
+#define __P010  PAGE_COPY_NOEXEC
+#define __P011  PAGE_COPY_NOEXEC
 #define __P100  PAGE_READONLY
 #define __P101  PAGE_READONLY
 #define __P110  PAGE_COPY
 #define __P111  PAGE_COPY
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
+#define __S001	PAGE_READONLY_NOEXEC
+#define __S010	PAGE_SHARED_NOEXEC
+#define __S011	PAGE_SHARED_NOEXEC
 #define __S100	PAGE_READONLY
 #define __S101	PAGE_READONLY
 #define __S110	PAGE_SHARED
diff -ruNp linux-3.13.11/arch/sparc/include/asm/pgtsrmmu.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtsrmmu.h
--- linux-3.13.11/arch/sparc/include/asm/pgtsrmmu.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/pgtsrmmu.h	2014-07-09 12:00:15.000000000 +0200
@@ -115,6 +115,11 @@
 				    SRMMU_EXEC | SRMMU_REF)
 #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
 				    SRMMU_EXEC | SRMMU_REF)
+
+#define SRMMU_PAGE_SHARED_NOEXEC	__pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
+#define SRMMU_PAGE_COPY_NOEXEC		__pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
+#define SRMMU_PAGE_RDONLY_NOEXEC	__pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
+
 #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
 				    SRMMU_DIRTY | SRMMU_REF)
 
diff -ruNp linux-3.13.11/arch/sparc/include/asm/spinlock_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/spinlock_64.h
--- linux-3.13.11/arch/sparc/include/asm/spinlock_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/spinlock_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__ (
 "1:	ldsw		[%2], %0\n"
 "	brlz,pn		%0, 2f\n"
-"4:	 add		%0, 1, %1\n"
+"4:	 addcc		%0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	tvs		%%icc, 6\n"
+#endif
+
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
 "	bne,pn		%%icc, 1b\n"
@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
 "	.previous"
 	: "=&r" (tmp1), "=&r" (tmp2)
 	: "r" (lock)
-	: "memory");
+	: "memory", "cc");
 }
 
-static int inline arch_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
 "1:	ldsw		[%2], %0\n"
 "	brlz,a,pn	%0, 2f\n"
 "	 mov		0, %0\n"
-"	add		%0, 1, %1\n"
+"	addcc		%0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	tvs		%%icc, 6\n"
+#endif
+
 "	cas		[%2], %0, %1\n"
 "	cmp		%0, %1\n"
 "	bne,pn		%%icc, 1b\n"
@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
 	return tmp1;
 }
 
-static void inline arch_read_unlock(arch_rwlock_t *lock)
+static inline void arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
 "1:	lduw	[%2], %0\n"
-"	sub	%0, 1, %1\n"
+"	subcc	%0, 1, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"	tvs	%%icc, 6\n"
+#endif
+
 "	cas	[%2], %0, %1\n"
 "	cmp	%0, %1\n"
 "	bne,pn	%%xcc, 1b\n"
@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
 	: "memory");
 }
 
-static void inline arch_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
 	: "memory");
 }
 
-static void inline arch_write_unlock(arch_rwlock_t *lock)
+static inline void arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
 	: "memory");
 }
 
-static int inline arch_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
diff -ruNp linux-3.13.11/arch/sparc/include/asm/thread_info_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/thread_info_32.h
--- linux-3.13.11/arch/sparc/include/asm/thread_info_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/thread_info_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -49,6 +49,8 @@ struct thread_info {
 	unsigned long		w_saved;
 
 	struct restart_block	restart_block;
+
+	unsigned long		lowest_stack;
 };
 
 /*
diff -ruNp linux-3.13.11/arch/sparc/include/asm/thread_info_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/thread_info_64.h
--- linux-3.13.11/arch/sparc/include/asm/thread_info_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/thread_info_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,6 +63,8 @@ struct thread_info {
 	struct pt_regs		*kern_una_regs;
 	unsigned int		kern_una_insn;
 
+	unsigned long		lowest_stack;
+
 	unsigned long		fpregs[0] __attribute__ ((aligned(64)));
 };
 
@@ -188,12 +190,13 @@ register struct thread_info *current_thr
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 /* flag bit 4 is available */
 #define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
-/* flag bit 6 is available */
+#define TIF_GRSEC_SETXID	6	/* update credentials on syscall entry/exit */
 #define TIF_32BIT		7	/* 32-bit binary */
 #define TIF_NOHZ		8	/* in adaptive nohz mode */
 #define TIF_SECCOMP		9	/* secure computing */
 #define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
+
 /* NOTE: Thread flags >= 12 should be ones we have no interest
  *       in using in assembly, else we can't use the mask as
  *       an immediate value in instructions such as andcc.
@@ -213,12 +216,18 @@ register struct thread_info *current_thr
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+#define _TIF_GRSEC_SETXID	(1<<TIF_GRSEC_SETXID)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				 _TIF_DO_NOTIFY_RESUME_MASK | \
 				 _TIF_NEED_RESCHED)
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
 
+#define _TIF_WORK_SYSCALL		\
+	(_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
+	 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
+
+
 /*
  * Thread-synchronous status.
  *
diff -ruNp linux-3.13.11/arch/sparc/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess.h
--- linux-3.13.11/arch/sparc/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,5 +1,6 @@
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
diff -ruNp linux-3.13.11/arch/sparc/include/asm/uaccess_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess_32.h
--- linux-3.13.11/arch/sparc/include/asm/uaccess_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) to, n))
+	if ((long)n < 0)
+		return n;
+
+	if (n && __access_ok((unsigned long) to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
+
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) from, n))
+	if ((long)n < 0)
+		return n;
+
+	if (n && __access_ok((unsigned long) from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	return __copy_user((__force void __user *) to, from, n);
 }
 
diff -ruNp linux-3.13.11/arch/sparc/include/asm/uaccess_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess_64.h
--- linux-3.13.11/arch/sparc/include/asm/uaccess_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/asm/uaccess_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kernel.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
 #include <asm-generic/uaccess-unaligned.h>
@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixu
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
-	unsigned long ret = ___copy_from_user(to, from, size);
+	unsigned long ret;
 
+	if ((long)size < 0 || size > INT_MAX)
+		return size;
+
+	if (!__builtin_constant_p(size))
+		check_object_size(to, size, false);
+
+	ret = ___copy_from_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 
@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
-	unsigned long ret = ___copy_to_user(to, from, size);
+	unsigned long ret;
+
+	if ((long)size < 0 || size > INT_MAX)
+		return size;
+
+	if (!__builtin_constant_p(size))
+		check_object_size(from, size, true);
 
+	ret = ___copy_to_user(to, from, size);
 	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
diff -ruNp linux-3.13.11/arch/sparc/include/uapi/asm/unistd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/uapi/asm/unistd.h
--- linux-3.13.11/arch/sparc/include/uapi/asm/unistd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/include/uapi/asm/unistd.h	2014-07-09 12:00:15.000000000 +0200
@@ -332,7 +332,7 @@
 #define __NR_timer_getoverrun	264
 #define __NR_timer_delete	265
 #define __NR_timer_create	266
-/* #define __NR_vserver		267 Reserved for VSERVER */
+#define __NR_vserver		267
 #define __NR_io_setup		268
 #define __NR_io_destroy		269
 #define __NR_io_submit		270
diff -ruNp linux-3.13.11/arch/sparc/kernel/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/Makefile
--- linux-3.13.11/arch/sparc/kernel/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -4,7 +4,7 @@
 #
 
 asflags-y := -ansi
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 extra-y     := head_$(BITS).o
 
diff -ruNp linux-3.13.11/arch/sparc/kernel/process_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/process_32.c
--- linux-3.13.11/arch/sparc/kernel/process_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/process_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
 
         printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
 	       r->psr, r->pc, r->npc, r->y, print_tainted());
-	printk("PC: <%pS>\n", (void *) r->pc);
+	printk("PC: <%pA>\n", (void *) r->pc);
 	printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
 	       r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
 	       r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
 	printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
 	       r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
 	       r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
-	printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
+	printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
 
 	printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
 	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk,
 		rw = (struct reg_window32 *) fp;
 		pc = rw->ins[7];
 		printk("[%08lx : ", pc);
-		printk("%pS ] ", (void *) pc);
+		printk("%pA ] ", (void *) pc);
 		fp = rw->ins[6];
 	} while (++count < 16);
 	printk("\n");
diff -ruNp linux-3.13.11/arch/sparc/kernel/process_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/process_64.c
--- linux-3.13.11/arch/sparc/kernel/process_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/process_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_reg
 	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
 	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
 	if (regs->tstate & TSTATE_PRIV)
-		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
+		printk("I7: <%pA>\n", (void *) rwk->ins[7]);
 }
 
 void show_regs(struct pt_regs *regs)
@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
 
 	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
 	       regs->tpc, regs->tnpc, regs->y, print_tainted());
-	printk("TPC: <%pS>\n", (void *) regs->tpc);
+	printk("TPC: <%pA>\n", (void *) regs->tpc);
 	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
 	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
 	       regs->u_regs[3]);
@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
 	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
 	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
 	       regs->u_regs[15]);
-	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
+	printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
 	show_regwindow(regs);
 	show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
 }
@@ -272,7 +272,7 @@ void arch_trigger_all_cpu_backtrace(void
 		       ((tp && tp->task) ? tp->task->pid : -1));
 
 		if (gp->tstate & TSTATE_PRIV) {
-			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
+			printk("             TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
 			       (void *) gp->tpc,
 			       (void *) gp->o7,
 			       (void *) gp->i7,
diff -ruNp linux-3.13.11/arch/sparc/kernel/prom_common.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/prom_common.c
--- linux-3.13.11/arch/sparc/kernel/prom_common.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/prom_common.c	2014-07-09 12:00:15.000000000 +0200
@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(p
 
 unsigned int prom_early_allocated __initdata;
 
-static struct of_pdt_ops prom_sparc_ops __initdata = {
+static struct of_pdt_ops prom_sparc_ops __initconst = {
 	.nextprop = prom_common_nextprop,
 	.getproplen = prom_getproplen,
 	.getproperty = prom_getproperty,
diff -ruNp linux-3.13.11/arch/sparc/kernel/ptrace_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/ptrace_64.c
--- linux-3.13.11/arch/sparc/kernel/ptrace_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/ptrace_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *chi
 	return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
 {
 	int ret = 0;
@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struc
 	if (test_thread_flag(TIF_NOHZ))
 		user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		ret = tracehook_report_syscall_entry(regs);
 
@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(stru
 	if (test_thread_flag(TIF_NOHZ))
 		user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff -ruNp linux-3.13.11/arch/sparc/kernel/smp_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/smp_64.c
--- linux-3.13.11/arch/sparc/kernel/smp_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/smp_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_
 extern unsigned long xcall_flush_dcache_page_spitfire;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-extern atomic_t dcpage_flushes;
-extern atomic_t dcpage_flushes_xcall;
+extern atomic_unchecked_t dcpage_flushes;
+extern atomic_unchecked_t dcpage_flushes_xcall;
 #endif
 
 static inline void __local_flush_dcache_page(struct page *page)
@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct p
 		return;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-	atomic_inc(&dcpage_flushes);
+	atomic_inc_unchecked(&dcpage_flushes);
 #endif
 
 	this_cpu = get_cpu();
@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct p
 			xcall_deliver(data0, __pa(pg_addr),
 				      (u64) pg_addr, cpumask_of(cpu));
 #ifdef CONFIG_DEBUG_DCFLUSH
-			atomic_inc(&dcpage_flushes_xcall);
+			atomic_inc_unchecked(&dcpage_flushes_xcall);
 #endif
 		}
 	}
@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_str
 	preempt_disable();
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-	atomic_inc(&dcpage_flushes);
+	atomic_inc_unchecked(&dcpage_flushes);
 #endif
 	data0 = 0;
 	pg_addr = page_address(page);
@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_str
 		xcall_deliver(data0, __pa(pg_addr),
 			      (u64) pg_addr, cpu_online_mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
-		atomic_inc(&dcpage_flushes_xcall);
+		atomic_inc_unchecked(&dcpage_flushes_xcall);
 #endif
 	}
 	__local_flush_dcache_page(page);
diff -ruNp linux-3.13.11/arch/sparc/kernel/sys_sparc_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/sys_sparc_32.c
--- linux-3.13.11/arch/sparc/kernel/sys_sparc_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/sys_sparc_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(str
 	if (len > TASK_SIZE - PAGE_SIZE)
 		return -ENOMEM;
 	if (!addr)
-		addr = TASK_UNMAPPED_BASE;
+		addr = current->mm->mmap_base;
 
 	info.flags = 0;
 	info.length = len;
diff -ruNp linux-3.13.11/arch/sparc/kernel/sys_sparc_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/sys_sparc_64.c
--- linux-3.13.11/arch/sparc/kernel/sys_sparc_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/sys_sparc_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(str
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) &&
+		if ((filp || (flags & MAP_SHARED)) &&
 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
 			return -EINVAL;
 		return addr;
@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(str
 	if (filp || (flags & MAP_SHARED))
 		do_color_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_color_align)
 			addr = COLOR_ALIGN(addr, pgoff);
@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(str
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
 	info.flags = 0;
 	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
+	info.low_limit = mm->mmap_base;
 	info.high_limit = min(task_size, VA_EXCLUDE_START);
 	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
 		VM_BUG_ON(addr != -ENOMEM);
 		info.low_limit = VA_EXCLUDE_END;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = task_size;
 		addr = vm_unmapped_area(&info);
 	}
@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct fi
 	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes.  */
@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) &&
+		if ((filp || (flags & MAP_SHARED)) &&
 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
 			return -EINVAL;
 		return addr;
@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct fi
 	if (filp || (flags & MAP_SHARED))
 		do_color_align = 1;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct fi
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct fi
 	info.high_limit = mm->mmap_base;
 	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct fi
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = STACK_TOP32;
 		addr = vm_unmapped_area(&info);
 	}
@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struc
 EXPORT_SYMBOL(get_fb_unmapped_area);
 
 /* Essentially the same as PowerPC.  */
-static unsigned long mmap_rnd(void)
+static unsigned long mmap_rnd(struct mm_struct *mm)
 {
 	unsigned long rnd = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (current->flags & PF_RANDOMIZE) {
 		unsigned long val = get_random_int();
 		if (test_thread_flag(TIF_32BIT))
@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
 
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-	unsigned long random_factor = mmap_rnd();
+	unsigned long random_factor = mmap_rnd(mm);
 	unsigned long gap;
 
 	/*
@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_str
 	    gap == RLIM_INFINITY ||
 	    sysctl_legacy_va_layout) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		/* We know it's 32-bit */
@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_str
 			gap = (task_size / 6 * 5);
 
 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
diff -ruNp linux-3.13.11/arch/sparc/kernel/syscalls.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/syscalls.S
--- linux-3.13.11/arch/sparc/kernel/syscalls.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/syscalls.S	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
 #endif
 	.align	32
 1:	ldx	[%g6 + TI_FLAGS], %l5
-	andcc	%l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+	andcc	%l5, _TIF_WORK_SYSCALL, %g0
 	be,pt	%icc, rtrap
 	 nop
 	call	syscall_trace_leave
@@ -184,7 +184,7 @@ linux_sparc_syscall32:
 
 	srl	%i3, 0, %o3				! IEU0
 	srl	%i2, 0, %o2				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+	andcc	%l0, _TIF_WORK_SYSCALL, %g0
 	bne,pn	%icc, linux_syscall_trace32		! CTI
 	 mov	%i0, %l5				! IEU1
 5:	call	%l7					! CTI	Group brk forced
@@ -208,7 +208,7 @@ linux_sparc_syscall:
 
 	mov	%i3, %o3				! IEU1
 	mov	%i4, %o4				! IEU0	Group
-	andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+	andcc	%l0, _TIF_WORK_SYSCALL, %g0
 	bne,pn	%icc, linux_syscall_trace		! CTI	Group
 	 mov	%i0, %l5				! IEU0
 2:	call	%l7					! CTI	Group brk forced
@@ -223,7 +223,7 @@ ret_sys_call:
 
 	cmp	%o0, -ERESTART_RESTARTBLOCK
 	bgeu,pn	%xcc, 1f
-	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
+	 andcc	%l0, _TIF_WORK_SYSCALL, %g0
 	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
 
 2:
diff -ruNp linux-3.13.11/arch/sparc/kernel/systbls_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/systbls_32.S
--- linux-3.13.11/arch/sparc/kernel/systbls_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/systbls_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -70,7 +70,7 @@ sys_call_table:
 /*250*/	.long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
 /*255*/	.long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
 /*260*/	.long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-/*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
+/*265*/	.long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
 /*270*/	.long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
 /*275*/	.long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
 /*280*/	.long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
diff -ruNp linux-3.13.11/arch/sparc/kernel/systbls_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/systbls_64.S
--- linux-3.13.11/arch/sparc/kernel/systbls_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/systbls_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -71,7 +71,7 @@ sys_call_table32:
 /*250*/	.word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
-	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+	.word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
 /*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
 	.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
 /*280*/	.word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
@@ -149,7 +149,7 @@ sys_call_table:
 /*250*/	.word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
 	.word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
 /*260*/	.word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+	.word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
 /*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
 	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
 /*280*/	.word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
diff -ruNp linux-3.13.11/arch/sparc/kernel/traps_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/traps_32.c
--- linux-3.13.11/arch/sparc/kernel/traps_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/traps_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
 
+extern void gr_handle_kernel_exploit(void);
+
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
 	static int die_counter;
@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
 		      count++ < 30				&&
                       (((unsigned long) rw) >= PAGE_OFFSET)	&&
 		      !(((unsigned long) rw) & 0x7)) {
-			printk("Caller[%08lx]: %pS\n", rw->ins[7],
+			printk("Caller[%08lx]: %pA\n", rw->ins[7],
 			       (void *) rw->ins[7]);
 			rw = (struct reg_window32 *)rw->ins[6];
 		}
 	}
 	printk("Instruction DUMP:");
 	instruction_dump ((unsigned long *) regs->pc);
-	if(regs->psr & PSR_PS)
+	if(regs->psr & PSR_PS) {
+		gr_handle_kernel_exploit();
 		do_exit(SIGKILL);
+	}
 	do_exit(SIGSEGV);
 }
 
diff -ruNp linux-3.13.11/arch/sparc/kernel/traps_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/traps_64.c
--- linux-3.13.11/arch/sparc/kernel/traps_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/traps_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_
 		       i + 1,
 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
-		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
+		printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
 	}
 }
 
@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long
 
 	lvl -= 0x100;
 	if (regs->tstate & TSTATE_PRIV) {
+
+#ifdef CONFIG_PAX_REFCOUNT
+		if (lvl == 6)
+			pax_report_refcount_overflow(regs);
+#endif
+
 		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
 		die_if_kernel(buffer, regs);
 	}
@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
 	char buffer[32];
-	
+
 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
 		return;
 
+#ifdef CONFIG_PAX_REFCOUNT
+	if (lvl == 6)
+		pax_report_refcount_overflow(regs);
+#endif
+
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 
 	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt
 	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
 	printk("%s" "ERROR(%d): ",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
-	printk("TPC<%pS>\n", (void *) regs->tpc);
+	printk("TPC<%pA>\n", (void *) regs->tpc);
 	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
 		       smp_processor_id(),
 		       (type & 0x1) ? 'I' : 'D',
 		       regs->tpc);
-		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
+		printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
 		panic("Irrecoverable Cheetah+ parity error.");
 	}
 
@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type,
 	       smp_processor_id(),
 	       (type & 0x1) ? 'I' : 'D',
 	       regs->tpc);
-	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
+	printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
 }
 
 struct sun4v_error_entry {
@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
 /*0x38*/u64		reserved_5;
 };
 
-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
 
 static const char *sun4v_err_type_to_str(u8 type)
 {
@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(cons
 }
 
 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-			    int cpu, const char *pfx, atomic_t *ocnt)
+			    int cpu, const char *pfx, atomic_unchecked_t *ocnt)
 {
 	u64 *raw_ptr = (u64 *) ent;
 	u32 attrs;
@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_re
 
 	show_regs(regs);
 
-	if ((cnt = atomic_read(ocnt)) != 0) {
-		atomic_set(ocnt, 0);
+	if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
+		atomic_set_unchecked(ocnt, 0);
 		wmb();
 		printk("%s: Queue overflowed %d times.\n",
 		       pfx, cnt);
@@ -2046,7 +2057,7 @@ out:
  */
 void sun4v_resum_overflow(struct pt_regs *regs)
 {
-	atomic_inc(&sun4v_resum_oflow_cnt);
+	atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
 }
 
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_r
 	/* XXX Actually even this can make not that much sense.  Perhaps
 	 * XXX we should just pull the plug and panic directly from here?
 	 */
-	atomic_inc(&sun4v_nonresum_oflow_cnt);
+	atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
 }
 
 unsigned long sun4v_err_itlb_vaddr;
@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_r
 
 	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
 	       regs->tpc, tl);
-	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
+	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
 	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
+	printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
 	       (void *) regs->u_regs[UREG_I7]);
 	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
 	       "pte[%lx] error[%lx]\n",
@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_r
 
 	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
 	       regs->tpc, tl);
-	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
+	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
 	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
+	printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
 	       (void *) regs->u_regs[UREG_I7]);
 	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
 	       "pte[%lx] error[%lx]\n",
@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk,
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
 
-		printk(" [%016lx] %pS\n", pc, (void *) pc);
+		printk(" [%016lx] %pA\n", pc, (void *) pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
 			int index = tsk->curr_ret_stack;
 			if (tsk->ret_stack && index >= graph) {
 				pc = tsk->ret_stack[index - graph].ret;
-				printk(" [%016lx] %pS\n", pc, (void *) pc);
+				printk(" [%016lx] %pA\n", pc, (void *) pc);
 				graph++;
 			}
 		}
@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_
 	return (struct reg_window *) (fp + STACK_BIAS);
 }
 
+extern void gr_handle_kernel_exploit(void);
+
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
 	static int die_counter;
@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_
 		while (rw &&
 		       count++ < 30 &&
 		       kstack_valid(tp, (unsigned long) rw)) {
-			printk("Caller[%016lx]: %pS\n", rw->ins[7],
+			printk("Caller[%016lx]: %pA\n", rw->ins[7],
 			       (void *) rw->ins[7]);
 
 			rw = kernel_stack_up(rw);
@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_
 		}
 		user_instruction_dump ((unsigned int __user *) regs->tpc);
 	}
-	if (regs->tstate & TSTATE_PRIV)
+	if (regs->tstate & TSTATE_PRIV) {
+		gr_handle_kernel_exploit();
 		do_exit(SIGKILL);
+	}
 	do_exit(SIGSEGV);
 }
 EXPORT_SYMBOL(die_if_kernel);
diff -ruNp linux-3.13.11/arch/sparc/kernel/unaligned_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/unaligned_64.c
--- linux-3.13.11/arch/sparc/kernel/unaligned_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/kernel/unaligned_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs
 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 
 	if (__ratelimit(&ratelimit)) {
-		printk("Kernel unaligned access at TPC[%lx] %pS\n",
+		printk("Kernel unaligned access at TPC[%lx] %pA\n",
 		       regs->tpc, (void *) regs->tpc);
 	}
 }
diff -ruNp linux-3.13.11/arch/sparc/lib/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/Makefile
--- linux-3.13.11/arch/sparc/lib/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -2,7 +2,7 @@
 #
 
 asflags-y := -ansi -DST_DIV0=0x02
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 lib-$(CONFIG_SPARC32) += ashrdi3.o
 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
diff -ruNp linux-3.13.11/arch/sparc/lib/atomic_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/atomic_64.S
--- linux-3.13.11/arch/sparc/lib/atomic_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/atomic_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -17,7 +17,12 @@
 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
+	addcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%icc, 6
+#endif
+
 	cas	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic_add)
 
+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	add	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic_add_unchecked)
+
 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
+	subcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%icc, 6
+#endif
+
 	cas	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic_sub)
 
+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	sub	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic_sub_unchecked)
+
 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
+	addcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%icc, 6
+#endif
+
 	cas	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic_add_ret)
 
+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	lduw	[%o1], %g1
+	addcc	%g1, %o0, %g7
+	cas	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%icc, 2f
+	 add	%g7, %o0, %g7
+	sra	%g7, 0, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic_add_ret_unchecked)
+
 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
+	subcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%icc, 6
+#endif
+
 	cas	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
+	addcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%xcc, 6
+#endif
+
 	casx	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment,
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic64_add)
 
+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	addcc	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_add_unchecked)
+
 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
+	subcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%xcc, 6
+#endif
+
 	casx	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement,
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic64_sub)
 
+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	subcc	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 nop
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_sub_unchecked)
+
 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
+	addcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%xcc, 6
+#endif
+
 	casx	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increme
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
 ENDPROC(atomic64_add_ret)
 
+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
+	BACKOFF_SETUP(%o2)
+1:	ldx	[%o1], %g1
+	addcc	%g1, %o0, %g7
+	casx	[%o1], %g1, %g7
+	cmp	%g1, %g7
+	bne,pn	%xcc, 2f
+	 add	%g7, %o0, %g7
+	mov	%g7, %o0
+	retl
+	 nop
+2:	BACKOFF_SPIN(%o2, %o3, 1b)
+ENDPROC(atomic64_add_ret_unchecked)
+
 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
+	subcc	%g1, %o0, %g7
+
+#ifdef CONFIG_PAX_REFCOUNT
+	tvs	%xcc, 6
+#endif
+
 	casx	[%o1], %g1, %g7
 	cmp	%g1, %g7
 	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
diff -ruNp linux-3.13.11/arch/sparc/lib/ksyms.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/ksyms.c
--- linux-3.13.11/arch/sparc/lib/ksyms.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/lib/ksyms.c	2014-07-09 12:00:15.000000000 +0200
@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
 
 /* Atomic counter implementation. */
 EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_unchecked);
 EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_add_ret_unchecked);
 EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_unchecked);
 EXPORT_SYMBOL(atomic_sub_ret);
 EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_unchecked);
 EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
 EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_unchecked);
 EXPORT_SYMBOL(atomic64_sub_ret);
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
diff -ruNp linux-3.13.11/arch/sparc/mm/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/Makefile
--- linux-3.13.11/arch/sparc/mm/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -2,7 +2,7 @@
 #
 
 asflags-y := -ansi
-ccflags-y := -Werror
+#ccflags-y := -Werror
 
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
diff -ruNp linux-3.13.11/arch/sparc/mm/fault_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/fault_32.c
--- linux-3.13.11/arch/sparc/mm/fault_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/fault_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,9 @@
 #include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(str
 	return safe_compute_effective_address(regs, insn);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+#ifdef CONFIG_PAX_DLRESOLVE
+static void pax_emuplt_close(struct vm_area_struct *vma)
+{
+	vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	unsigned int *kaddr;
+
+	vmf->page = alloc_page(GFP_HIGHUSER);
+	if (!vmf->page)
+		return VM_FAULT_OOM;
+
+	kaddr = kmap(vmf->page);
+	memset(kaddr, 0, PAGE_SIZE);
+	kaddr[0] = 0x9DE3BFA8U; /* save */
+	flush_dcache_page(vmf->page);
+	kunmap(vmf->page);
+	return VM_FAULT_MAJOR;
+}
+
+static const struct vm_operations_struct pax_vm_ops = {
+	.close = pax_emuplt_close,
+	.fault = pax_emuplt_fault
+};
+
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	vma->vm_mm = current->mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	vma->vm_ops = &pax_vm_ops;
+
+	ret = insert_vm_struct(current->mm, vma);
+	if (ret)
+		return ret;
+
+	++current->mm->total_vm;
+	return 0;
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+	int err;
+
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int sethi1, sethi2, jmpl;
+
+		err = get_user(sethi1, (unsigned int *)regs->pc);
+		err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
+		err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
+		{
+			unsigned int addr;
+
+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+			addr = regs->u_regs[UREG_G1];
+			addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #2 */
+		unsigned int ba;
+
+		err = get_user(ba, (unsigned int *)regs->pc);
+
+		if (err)
+			break;
+
+		if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+			unsigned int addr;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
+			else
+				addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #3 */
+		unsigned int sethi, bajmpl, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->pc);
+		err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
+		err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned int addr;
+
+			addr = (sethi & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] = addr;
+			if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
+				addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+			else
+				addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+			regs->pc = addr;
+			regs->npc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 1 */
+		unsigned int sethi, ba, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->pc);
+		err |= get_user(ba, (unsigned int *)(regs->pc+4));
+		err |= get_user(nop, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned int addr, save, call;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
+			else
+				addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
+
+			err = get_user(save, (unsigned int *)addr);
+			err |= get_user(call, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8));
+			if (err)
+				break;
+
+#ifdef CONFIG_PAX_DLRESOLVE
+			if (save == 0x9DE3BFA8U &&
+			    (call & 0xC0000000U) == 0x40000000U &&
+			    nop == 0x01000000U)
+			{
+				struct vm_area_struct *vma;
+				unsigned long call_dl_resolve;
+
+				down_read(&current->mm->mmap_sem);
+				call_dl_resolve = current->mm->call_dl_resolve;
+				up_read(&current->mm->mmap_sem);
+				if (likely(call_dl_resolve))
+					goto emulate;
+
+				vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+				down_write(&current->mm->mmap_sem);
+				if (current->mm->call_dl_resolve) {
+					call_dl_resolve = current->mm->call_dl_resolve;
+					up_write(&current->mm->mmap_sem);
+					if (vma)
+						kmem_cache_free(vm_area_cachep, vma);
+					goto emulate;
+				}
+
+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+					up_write(&current->mm->mmap_sem);
+					if (vma)
+						kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				if (pax_insert_vma(vma, call_dl_resolve)) {
+					up_write(&current->mm->mmap_sem);
+					kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				current->mm->call_dl_resolve = call_dl_resolve;
+				up_write(&current->mm->mmap_sem);
+
+emulate:
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->pc = call_dl_resolve;
+				regs->npc = addr+4;
+				return 3;
+			}
+#endif
+
+			/* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
+			if ((save & 0xFFC00000U) == 0x05000000U &&
+			    (call & 0xFFFFE000U) == 0x85C0A000U &&
+			    nop == 0x01000000U)
+			{
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->u_regs[UREG_G2] = addr + 4;
+				addr = (save & 0x003FFFFFU) << 10;
+				addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
+				regs->pc = addr;
+				regs->npc = addr+4;
+				return 3;
+			}
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 2 */
+		unsigned int save, call, nop;
+
+		err = get_user(save, (unsigned int *)(regs->pc-4));
+		err |= get_user(call, (unsigned int *)regs->pc);
+		err |= get_user(nop, (unsigned int *)(regs->pc+4));
+		if (err)
+			break;
+
+		if (save == 0x9DE3BFA8U &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
+
+			regs->u_regs[UREG_RETPC] = regs->pc;
+			regs->pc = dl_resolve;
+			regs->npc = dl_resolve+4;
+			return 3;
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 8; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
 				      int text_fault)
 {
@@ -229,6 +503,24 @@ good_area:
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
+			up_read(&mm->mmap_sem);
+			switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+			case 2:
+			case 3:
+				return;
+#endif
+
+			}
+			pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		/* Allow reads even for write-only mappings */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
diff -ruNp linux-3.13.11/arch/sparc/mm/fault_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/fault_64.c
--- linux-3.13.11/arch/sparc/mm/fault_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/fault_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,9 @@
 #include <linux/kdebug.h>
 #include <linux/percpu.h>
 #include <linux/context_tracking.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(stru
 	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 	       regs->tpc);
 	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
-	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
+	printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
 	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
 	dump_stack();
 	unhandled_fault(regs->tpc, current, regs);
@@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32b
 	show_regs(regs);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+#ifdef CONFIG_PAX_DLRESOLVE
+static void pax_emuplt_close(struct vm_area_struct *vma)
+{
+	vma->vm_mm->call_dl_resolve = 0UL;
+}
+
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	unsigned int *kaddr;
+
+	vmf->page = alloc_page(GFP_HIGHUSER);
+	if (!vmf->page)
+		return VM_FAULT_OOM;
+
+	kaddr = kmap(vmf->page);
+	memset(kaddr, 0, PAGE_SIZE);
+	kaddr[0] = 0x9DE3BFA8U; /* save */
+	flush_dcache_page(vmf->page);
+	kunmap(vmf->page);
+	return VM_FAULT_MAJOR;
+}
+
+static const struct vm_operations_struct pax_vm_ops = {
+	.close = pax_emuplt_close,
+	.fault = pax_emuplt_fault
+};
+
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	vma->vm_mm = current->mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	vma->vm_ops = &pax_vm_ops;
+
+	ret = insert_vm_struct(current->mm, vma);
+	if (ret)
+		return ret;
+
+	++current->mm->total_vm;
+	return 0;
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->tpc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+	int err;
+
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int sethi1, sethi2, jmpl;
+
+		err = get_user(sethi1, (unsigned int *)regs->tpc);
+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
+			addr = regs->u_regs[UREG_G1];
+			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #2 */
+		unsigned int ba;
+
+		err = get_user(ba, (unsigned int *)regs->tpc);
+
+		if (err)
+			break;
+
+		if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
+			unsigned long addr;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
+			else
+				addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #3 */
+		unsigned int sethi, bajmpl, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			addr = (sethi & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] = addr;
+			if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
+				addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+			else
+				addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #4 */
+		unsigned int sethi, mov1, call, mov2;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
+		err |= get_user(call, (unsigned int *)(regs->tpc+8));
+		err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    mov1 == 0x8210000FU &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    mov2 == 0x9E100001U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
+			addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #5 */
+		unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
+		err |= get_user(or1, (unsigned int *)(regs->tpc+12));
+		err |= get_user(or2, (unsigned int *)(regs->tpc+16));
+		err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
+		err |= get_user(nop, (unsigned int *)(regs->tpc+28));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    (sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+		    (or1 & 0xFFFFE000U) == 0x82106000U &&
+		    (or2 & 0xFFFFE000U) == 0x8A116000U &&
+		    sllx == 0x83287020U &&
+		    jmpl == 0x81C04005U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
+			regs->u_regs[UREG_G1] <<= 32;
+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #6 */
+		unsigned int sethi, sethi1, sethi2, sllx, or,  jmpl, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
+		err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
+		err |= get_user(or, (unsigned int *)(regs->tpc+16));
+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
+		err |= get_user(nop, (unsigned int *)(regs->tpc+24));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    (sethi1 & 0xFFC00000U) == 0x03000000U &&
+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+		    sllx == 0x83287020U &&
+		    (or & 0xFFFFE000U) == 0x8A116000U &&
+		    jmpl == 0x81C04005U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] <<= 32;
+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation step 1 */
+		unsigned int sethi, ba, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+			unsigned int save, call;
+			unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
+
+			if ((ba & 0xFFC00000U) == 0x30800000U)
+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
+			else
+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			err = get_user(save, (unsigned int *)addr);
+			err |= get_user(call, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8));
+			if (err)
+				break;
+
+#ifdef CONFIG_PAX_DLRESOLVE
+			if (save == 0x9DE3BFA8U &&
+			    (call & 0xC0000000U) == 0x40000000U &&
+			    nop == 0x01000000U)
+			{
+				struct vm_area_struct *vma;
+				unsigned long call_dl_resolve;
+
+				down_read(&current->mm->mmap_sem);
+				call_dl_resolve = current->mm->call_dl_resolve;
+				up_read(&current->mm->mmap_sem);
+				if (likely(call_dl_resolve))
+					goto emulate;
+
+				vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+				down_write(&current->mm->mmap_sem);
+				if (current->mm->call_dl_resolve) {
+					call_dl_resolve = current->mm->call_dl_resolve;
+					up_write(&current->mm->mmap_sem);
+					if (vma)
+						kmem_cache_free(vm_area_cachep, vma);
+					goto emulate;
+				}
+
+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
+					up_write(&current->mm->mmap_sem);
+					if (vma)
+						kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				if (pax_insert_vma(vma, call_dl_resolve)) {
+					up_write(&current->mm->mmap_sem);
+					kmem_cache_free(vm_area_cachep, vma);
+					return 1;
+				}
+
+				current->mm->call_dl_resolve = call_dl_resolve;
+				up_write(&current->mm->mmap_sem);
+
+emulate:
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->tpc = call_dl_resolve;
+				regs->tnpc = addr+4;
+				return 3;
+			}
+#endif
+
+			/* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
+			if ((save & 0xFFC00000U) == 0x05000000U &&
+			    (call & 0xFFFFE000U) == 0x85C0A000U &&
+			    nop == 0x01000000U)
+			{
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->u_regs[UREG_G2] = addr + 4;
+				addr = (save & 0x003FFFFFU) << 10;
+				addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
+
+				if (test_thread_flag(TIF_32BIT))
+					addr &= 0xFFFFFFFFUL;
+
+				regs->tpc = addr;
+				regs->tnpc = addr+4;
+				return 3;
+			}
+
+			/* PaX: 64-bit PLT stub */
+			err = get_user(sethi1, (unsigned int *)addr);
+			err |= get_user(sethi2, (unsigned int *)(addr+4));
+			err |= get_user(or1, (unsigned int *)(addr+8));
+			err |= get_user(or2, (unsigned int *)(addr+12));
+			err |= get_user(sllx, (unsigned int *)(addr+16));
+			err |= get_user(add, (unsigned int *)(addr+20));
+			err |= get_user(jmpl, (unsigned int *)(addr+24));
+			err |= get_user(nop, (unsigned int *)(addr+28));
+			if (err)
+				break;
+
+			if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
+			    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
+			    (or1 & 0xFFFFE000U) == 0x88112000U &&
+			    (or2 & 0xFFFFE000U) == 0x8A116000U &&
+			    sllx == 0x89293020U &&
+			    add == 0x8A010005U &&
+			    jmpl == 0x89C14000U &&
+			    nop == 0x01000000U)
+			{
+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
+				regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
+				regs->u_regs[UREG_G4] <<= 32;
+				regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
+				regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
+				regs->u_regs[UREG_G4] = addr + 24;
+				addr = regs->u_regs[UREG_G5];
+				regs->tpc = addr;
+				regs->tnpc = addr+4;
+				return 3;
+			}
+		}
+	} while (0);
+
+#ifdef CONFIG_PAX_DLRESOLVE
+	do { /* PaX: unpatched PLT emulation step 2 */
+		unsigned int save, call, nop;
+
+		err = get_user(save, (unsigned int *)(regs->tpc-4));
+		err |= get_user(call, (unsigned int *)regs->tpc);
+		err |= get_user(nop, (unsigned int *)(regs->tpc+4));
+		if (err)
+			break;
+
+		if (save == 0x9DE3BFA8U &&
+		    (call & 0xC0000000U) == 0x40000000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				dl_resolve &= 0xFFFFFFFFUL;
+
+			regs->u_regs[UREG_RETPC] = regs->tpc;
+			regs->tpc = dl_resolve;
+			regs->tnpc = dl_resolve+4;
+			return 3;
+		}
+	} while (0);
+#endif
+
+	do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
+		unsigned int sethi, ba, nop;
+
+		err = get_user(sethi, (unsigned int *)regs->tpc);
+		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
+
+		if (err)
+			break;
+
+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
+		    (ba & 0xFFF00000U) == 0x30600000U &&
+		    nop == 0x01000000U)
+		{
+			unsigned long addr;
+
+			addr = (sethi & 0x003FFFFFU) << 10;
+			regs->u_regs[UREG_G1] = addr;
+			addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
+
+			if (test_thread_flag(TIF_32BIT))
+				addr &= 0xFFFFFFFFUL;
+
+			regs->tpc = addr;
+			regs->tnpc = addr+4;
+			return 2;
+		}
+	} while (0);
+
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 8; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -344,6 +807,29 @@ retry:
 	if (!vma)
 		goto bad_area;
 
+#ifdef CONFIG_PAX_PAGEEXEC
+	/* PaX: detect ITLB misses on non-exec pages */
+	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
+	    !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
+	{
+		if (address != regs->tpc)
+			goto good_area;
+
+		up_read(&mm->mmap_sem);
+		switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+		case 2:
+		case 3:
+			return;
+#endif
+
+		}
+		pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
+		do_group_exit(SIGKILL);
+	}
+#endif
+
 	/* Pure DTLB misses do not tell us whether the fault causing
 	 * load/store/atomic was a write or not, it only says that there
 	 * was no match.  So in such a case we (carefully) read the
diff -ruNp linux-3.13.11/arch/sparc/mm/hugetlbpage.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/hugetlbpage.c
--- linux-3.13.11/arch/sparc/mm/hugetlbpage.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/hugetlbpage.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,7 +26,8 @@ static unsigned long hugetlb_get_unmappe
 							unsigned long addr,
 							unsigned long len,
 							unsigned long pgoff,
-							unsigned long flags)
+							unsigned long flags,
+							unsigned long offset)
 {
 	unsigned long task_size = TASK_SIZE;
 	struct vm_unmapped_area_info info;
@@ -36,15 +37,22 @@ static unsigned long hugetlb_get_unmappe
 
 	info.flags = 0;
 	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
+	info.low_limit = mm->mmap_base;
 	info.high_limit = min(task_size, VA_EXCLUDE_START);
 	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
 		VM_BUG_ON(addr != -ENOMEM);
 		info.low_limit = VA_EXCLUDE_END;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = task_size;
 		addr = vm_unmapped_area(&info);
 	}
@@ -56,7 +64,8 @@ static unsigned long
 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long len,
 				  const unsigned long pgoff,
-				  const unsigned long flags)
+				  const unsigned long flags,
+				  const unsigned long offset)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
@@ -71,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct
 	info.high_limit = mm->mmap_base;
 	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -83,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = STACK_TOP32;
 		addr = vm_unmapped_area(&info);
 	}
@@ -97,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *f
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long task_size = TASK_SIZE;
+	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
@@ -112,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *f
 		return addr;
 	}
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		addr = ALIGN(addr, HPAGE_SIZE);
 		vma = find_vma(mm, addr);
-		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
 		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-				pgoff, flags);
+				pgoff, flags, offset);
 	else
 		return hugetlb_get_unmapped_area_topdown(file, addr, len,
-				pgoff, flags);
+				pgoff, flags, offset);
 }
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
diff -ruNp linux-3.13.11/arch/sparc/mm/init_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/init_64.c
--- linux-3.13.11/arch/sparc/mm/init_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/sparc/mm/init_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context _
 int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
-atomic_t dcpage_flushes = ATOMIC_INIT(0);
+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
 #ifdef CONFIG_SMP
-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 #endif
 #endif
 
@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struc
 {
 	BUG_ON(tlb_type == hypervisor);
 #ifdef CONFIG_DEBUG_DCFLUSH
-	atomic_inc(&dcpage_flushes);
+	atomic_inc_unchecked(&dcpage_flushes);
 #endif
 
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 	seq_printf(m, "DCPageFlushes\t: %d\n",
-		   atomic_read(&dcpage_flushes));
+		   atomic_read_unchecked(&dcpage_flushes));
 #ifdef CONFIG_SMP
 	seq_printf(m, "DCPageFlushesXC\t: %d\n",
-		   atomic_read(&dcpage_flushes_xcall));
+		   atomic_read_unchecked(&dcpage_flushes_xcall));
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_DEBUG_DCFLUSH */
 }
diff -ruNp linux-3.13.11/arch/tile/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/Kconfig
--- linux-3.13.11/arch/tile/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
 
 config KEXEC
 	bool "kexec system call"
+	depends on !GRKERNSEC_KMEM
 	---help---
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff -ruNp linux-3.13.11/arch/tile/include/asm/atomic_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/atomic_64.h
--- linux-3.13.11/arch/tile/include/asm/atomic_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/atomic_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(a
 
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
diff -ruNp linux-3.13.11/arch/tile/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/cache.h
--- linux-3.13.11/arch/tile/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,11 +15,12 @@
 #ifndef _ASM_TILE_CACHE_H
 #define _ASM_TILE_CACHE_H
 
+#include <linux/const.h>
 #include <arch/chip.h>
 
 /* bytes per L1 data cache line */
 #define L1_CACHE_SHIFT		CHIP_L1D_LOG_LINE_SIZE()
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 /* bytes per L2 cache line */
 #define L2_CACHE_SHIFT		CHIP_L2_LOG_LINE_SIZE()
diff -ruNp linux-3.13.11/arch/tile/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/uaccess.h
--- linux-3.13.11/arch/tile/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -414,9 +414,9 @@ static inline unsigned long __must_check
 					  const void __user *from,
 					  unsigned long n)
 {
-	int sz = __compiletime_object_size(to);
+	size_t sz = __compiletime_object_size(to);
 
-	if (likely(sz == -1 || sz >= n))
+	if (likely(sz == (size_t)-1 || sz >= n))
 		n = _copy_from_user(to, from, n);
 	else
 		copy_from_user_overflow();
diff -ruNp linux-3.13.11/arch/tile/mm/hugetlbpage.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/mm/hugetlbpage.c
--- linux-3.13.11/arch/tile/mm/hugetlbpage.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/tile/mm/hugetlbpage.c	2014-07-09 12:00:15.000000000 +0200
@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmappe
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
+	info.threadstack_offset = 0;
 	return vm_unmapped_area(&info);
 }
 
@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmappe
 	info.high_limit = current->mm->mmap_base;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
+	info.threadstack_offset = 0;
 	addr = vm_unmapped_area(&info);
 
 	/*
diff -ruNp linux-3.13.11/arch/um/Kconfig.rest linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/Kconfig.rest
--- linux-3.13.11/arch/um/Kconfig.rest	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/Kconfig.rest	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,8 @@ source "arch/um/Kconfig.net"
 
 source "fs/Kconfig"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/um/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/Makefile
--- linux-3.13.11/arch/um/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINE
 	$(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
 	$(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
 
+ifdef CONSTIFY_PLUGIN
+USER_CFLAGS	+= -fplugin-arg-constify_plugin-no-constify
+endif
+
 #This will adjust *FLAGS accordingly to the platform.
 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
 
diff -ruNp linux-3.13.11/arch/um/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/cache.h
--- linux-3.13.11/arch/um/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,7 @@
 #ifndef __UM_CACHE_H
 #define __UM_CACHE_H
 
+#include <linux/const.h>
 
 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
 # define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
@@ -12,6 +13,6 @@
 # define L1_CACHE_SHIFT		5
 #endif
 
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif
diff -ruNp linux-3.13.11/arch/um/include/asm/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/kmap_types.h
--- linux-3.13.11/arch/um/include/asm/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/kmap_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,6 @@
 
 /* No more #include "asm/arch/kmap_types.h" ! */
 
-#define KM_TYPE_NR 14
+#define KM_TYPE_NR 15
 
 #endif
diff -ruNp linux-3.13.11/arch/um/include/asm/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/page.h
--- linux-3.13.11/arch/um/include/asm/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/page.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,9 @@
 #define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+#define ktla_ktva(addr)			(addr)
+#define ktva_ktla(addr)			(addr)
+
 #ifndef __ASSEMBLY__
 
 struct page;
diff -ruNp linux-3.13.11/arch/um/include/asm/pgtable-3level.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/pgtable-3level.h
--- linux-3.13.11/arch/um/include/asm/pgtable-3level.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/include/asm/pgtable-3level.h	2014-07-09 12:00:15.000000000 +0200
@@ -58,6 +58,7 @@
 #define pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
 #define pud_populate(mm, pud, pmd) \
 	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
 
 #ifdef CONFIG_64BIT
 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
diff -ruNp linux-3.13.11/arch/um/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/kernel/process.c
--- linux-3.13.11/arch/um/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/um/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -356,22 +356,6 @@ int singlestepping(void * t)
 	return 2;
 }
 
-/*
- * Only x86 and x86_64 have an arch_align_stack().
- * All other arches have "#define arch_align_stack(x) (x)"
- * in their asm/system.h
- * As this is included in UML from asm-um/system-generic.h,
- * we can use it to behave as the subarch does.
- */
-#ifndef arch_align_stack
-unsigned long arch_align_stack(unsigned long sp)
-{
-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() % 8192;
-	return sp & ~0xf;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long stack_page, sp, ip;
diff -ruNp linux-3.13.11/arch/unicore32/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/unicore32/include/asm/cache.h
--- linux-3.13.11/arch/unicore32/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/unicore32/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,8 +12,10 @@
 #ifndef __UNICORE_CACHE_H__
 #define __UNICORE_CACHE_H__
 
-#define L1_CACHE_SHIFT		(5)
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#include <linux/const.h>
+
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
diff -ruNp linux-3.13.11/arch/x86/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig
--- linux-3.13.11/arch/x86/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -249,7 +249,7 @@ config X86_HT
 
 config X86_32_LAZY_GS
 	def_bool y
-	depends on X86_32 && !CC_STACKPROTECTOR
+	depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
 
 config ARCH_HWEIGHT_CFLAGS
 	string
@@ -602,6 +602,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 menuconfig HYPERVISOR_GUEST
 	bool "Linux guest support"
+	depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
 	---help---
 	  Say Y here to enable options for running Linux under various hyper-
 	  visors. This option enables basic hypervisor detection and platform
@@ -1127,7 +1128,7 @@ choice
 
 config NOHIGHMEM
 	bool "off"
-	depends on !X86_NUMAQ
+	depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
 	---help---
 	  Linux can use up to 64 Gigabytes of physical memory on x86 systems.
 	  However, the address space of 32-bit x86 processors is only 4
@@ -1164,7 +1165,7 @@ config NOHIGHMEM
 
 config HIGHMEM4G
 	bool "4GB"
-	depends on !X86_NUMAQ
+	depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
 	---help---
 	  Select this if you have a 32-bit processor and between 1 and 4
 	  gigabytes of physical RAM.
@@ -1217,7 +1218,7 @@ config PAGE_OFFSET
 	hex
 	default 0xB0000000 if VMSPLIT_3G_OPT
 	default 0x80000000 if VMSPLIT_2G
-	default 0x78000000 if VMSPLIT_2G_OPT
+	default 0x70000000 if VMSPLIT_2G_OPT
 	default 0x40000000 if VMSPLIT_1G
 	default 0xC0000000
 	depends on X86_32
@@ -1619,6 +1620,7 @@ config SECCOMP
 
 config CC_STACKPROTECTOR
 	bool "Enable -fstack-protector buffer overflow detection"
+	depends on X86_64 || !PAX_MEMORY_UDEREF
 	---help---
 	  This option turns on the -fstack-protector GCC feature. This
 	  feature puts, at the beginning of functions, a canary value on
@@ -1637,6 +1639,7 @@ source kernel/Kconfig.hz
 
 config KEXEC
 	bool "kexec system call"
+	depends on !GRKERNSEC_KMEM
 	---help---
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -1738,6 +1741,8 @@ config X86_NEED_RELOCS
 config PHYSICAL_ALIGN
 	hex "Alignment value to which kernel should be aligned"
 	default "0x1000000"
+	range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
+	range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
 	range 0x2000 0x1000000 if X86_32
 	range 0x200000 0x1000000 if X86_64
 	---help---
@@ -1817,9 +1822,10 @@ config DEBUG_HOTPLUG_CPU0
 	  If unsure, say N.
 
 config COMPAT_VDSO
-	def_bool y
+	def_bool n
 	prompt "Compat VDSO support"
 	depends on X86_32 || IA32_EMULATION
+	depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
 	---help---
 	  Map the 32-bit VDSO to the predictable old-style address too.
 
@@ -2403,6 +2409,8 @@ source "fs/Kconfig"
 
 source "arch/x86/Kconfig.debug"
 
+source "kernel/vserver/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff -ruNp linux-3.13.11/arch/x86/Kconfig.cpu linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig.cpu
--- linux-3.13.11/arch/x86/Kconfig.cpu	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig.cpu	2014-07-09 12:00:15.000000000 +0200
@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
 
 config X86_F00F_BUG
 	def_bool y
-	depends on M586MMX || M586TSC || M586 || M486
+	depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
 
 config X86_INVD_BUG
 	def_bool y
@@ -327,7 +327,7 @@ config X86_INVD_BUG
 
 config X86_ALIGNMENT_16
 	def_bool y
-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
 
 config X86_INTEL_USERCOPY
 	def_bool y
@@ -373,7 +373,7 @@ config X86_CMPXCHG64
 # generates cmov.
 config X86_CMOV
 	def_bool y
-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+	depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
 
 config X86_MINIMUM_CPU_FAMILY
 	int
diff -ruNp linux-3.13.11/arch/x86/Kconfig.debug linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig.debug
--- linux-3.13.11/arch/x86/Kconfig.debug	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Kconfig.debug	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ config X86_PTDUMP
 config DEBUG_RODATA
 	bool "Write protect kernel read-only data structures"
 	default y
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && BROKEN
 	---help---
 	  Mark the kernel read-only data as write-protected in the pagetables,
 	  in order to catch accidental (and incorrect) writes to such const
@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
 
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
-	depends on MODULES
+	depends on MODULES && BROKEN
 	---help---
 	  This option helps catch unintended modifications to loadable
 	  kernel module's text and read-only data. It also prevents execution
diff -ruNp linux-3.13.11/arch/x86/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Makefile
--- linux-3.13.11/arch/x86/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include $(srctree)/arch/x86/Makefile_32.cpu
         KBUILD_CFLAGS += $(cflags-y)
-
-        # temporary until string.h is fixed
-        KBUILD_CFLAGS += -ffreestanding
 else
         BITS := 64
         UTS_MACHINE := x86_64
         CHECKFLAGS += -D__x86_64__ -m64
 
+        biarch := $(call cc-option,-m64)
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
@@ -89,6 +87,9 @@ else
         KBUILD_CFLAGS += -maccumulate-outgoing-args
 endif
 
+# temporary until string.h is fixed
+KBUILD_CFLAGS += -ffreestanding
+
 ifdef CONFIG_CC_STACKPROTECTOR
 	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
         ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
@@ -247,3 +248,12 @@ define archhelp
   echo  '                  FDINITRD=file initrd for the booted kernel'
   echo  '  kvmconfig	- Enable additional options for guest kernel support'
 endef
+
+define OLD_LD
+
+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
+*** Please upgrade your binutils to 2.18 or newer
+endef
+
+archprepare:
+	$(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
diff -ruNp linux-3.13.11/arch/x86/boot/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/Makefile
--- linux-3.13.11/arch/x86/boot/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -65,6 +65,9 @@ KBUILD_CFLAGS	:= $(USERINCLUDE) -m32 -g
 		   $(call cc-option, -fno-unit-at-a-time)) \
 		   $(call cc-option, -fno-stack-protector) \
 		   $(call cc-option, -mpreferred-stack-boundary=2)
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS	+= -fplugin-arg-constify_plugin-no-constify
+endif
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 
diff -ruNp linux-3.13.11/arch/x86/boot/bitops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/bitops.h
--- linux-3.13.11/arch/x86/boot/bitops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/bitops.h	2014-07-09 12:00:15.000000000 +0200
@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
 	u8 v;
 	const u32 *p = (const u32 *)addr;
 
-	asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+	asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
 	return v;
 }
 
@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
 
 static inline void set_bit(int nr, void *addr)
 {
-	asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+	asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
 }
 
 #endif /* BOOT_BITOPS_H */
diff -ruNp linux-3.13.11/arch/x86/boot/boot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/boot.h
--- linux-3.13.11/arch/x86/boot/boot.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/boot.h	2014-07-09 12:00:15.000000000 +0200
@@ -85,7 +85,7 @@ static inline void io_delay(void)
 static inline u16 ds(void)
 {
 	u16 seg;
-	asm("movw %%ds,%0" : "=rm" (seg));
+	asm volatile("movw %%ds,%0" : "=rm" (seg));
 	return seg;
 }
 
@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
 static inline int memcmp(const void *s1, const void *s2, size_t len)
 {
 	u8 diff;
-	asm("repe; cmpsb; setnz %0"
+	asm volatile("repe; cmpsb; setnz %0"
 	    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
 	return diff;
 }
diff -ruNp linux-3.13.11/arch/x86/boot/compressed/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/Makefile
--- linux-3.13.11/arch/x86/boot/compressed/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += -mno-mmx -mno-sse
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
+endif
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
diff -ruNp linux-3.13.11/arch/x86/boot/compressed/efi_stub_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/efi_stub_32.S
--- linux-3.13.11/arch/x86/boot/compressed/efi_stub_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/efi_stub_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
 	 * parameter 2, ..., param n. To make things easy, we save the return
 	 * address of efi_call_phys in a global variable.
 	 */
-	popl	%ecx
-	movl	%ecx, saved_return_addr(%edx)
-	/* get the function pointer into ECX*/
-	popl	%ecx
-	movl	%ecx, efi_rt_function_ptr(%edx)
+	popl	saved_return_addr(%edx)
+	popl	efi_rt_function_ptr(%edx)
 
 	/*
 	 * 3. Call the physical function.
 	 */
-	call	*%ecx
+	call	*efi_rt_function_ptr(%edx)
 
 	/*
 	 * 4. Balance the stack. And because EAX contain the return value,
@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
 1:	popl	%edx
 	subl	$1b, %edx
 
-	movl	efi_rt_function_ptr(%edx), %ecx
-	pushl	%ecx
+	pushl	efi_rt_function_ptr(%edx)
 
 	/*
 	 * 10. Push the saved return address onto the stack and return.
 	 */
-	movl	saved_return_addr(%edx), %ecx
-	pushl	%ecx
-	ret
+	jmpl	*saved_return_addr(%edx)
 ENDPROC(efi_call_phys)
 .previous
 
diff -ruNp linux-3.13.11/arch/x86/boot/compressed/head_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/head_32.S
--- linux-3.13.11/arch/x86/boot/compressed/head_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/head_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -118,7 +118,7 @@ preferred_addr:
 	notl	%eax
 	andl    %eax, %ebx
 #else
-	movl	$LOAD_PHYSICAL_ADDR, %ebx
+	movl	$____LOAD_PHYSICAL_ADDR, %ebx
 #endif
 
 	/* Target address to relocate to for decompression */
diff -ruNp linux-3.13.11/arch/x86/boot/compressed/head_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/head_64.S
--- linux-3.13.11/arch/x86/boot/compressed/head_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/head_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ ENTRY(startup_32)
 	notl	%eax
 	andl	%eax, %ebx
 #else
-	movl	$LOAD_PHYSICAL_ADDR, %ebx
+	movl	$____LOAD_PHYSICAL_ADDR, %ebx
 #endif
 
 	/* Target address to relocate to for decompression */
@@ -270,7 +270,7 @@ preferred_addr:
 	notq	%rax
 	andq	%rax, %rbp
 #else
-	movq	$LOAD_PHYSICAL_ADDR, %rbp
+	movq	$____LOAD_PHYSICAL_ADDR, %rbp
 #endif
 
 	/* Target address to relocate to for decompression */
@@ -362,8 +362,8 @@ gdt:
 	.long	gdt
 	.word	0
 	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
+	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
+	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
 	.quad	0x0080890000000000	/* TS descriptor */
 	.quad   0x0000000000000000	/* TS continued */
 gdt_end:
diff -ruNp linux-3.13.11/arch/x86/boot/compressed/misc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/misc.c
--- linux-3.13.11/arch/x86/boot/compressed/misc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/compressed/misc.c	2014-07-09 12:00:15.000000000 +0200
@@ -224,7 +224,7 @@ void __putstr(const char *s)
 
 void *memset(void *s, int c, size_t n)
 {
-	int i;
+	size_t i;
 	char *ss = s;
 
 	for (i = 0; i < n; i++)
@@ -283,7 +283,7 @@ static void handle_relocations(void *out
 	 * Calculate the delta between where vmlinux was linked to load
 	 * and where it was actually loaded.
 	 */
-	delta = min_addr - LOAD_PHYSICAL_ADDR;
+	delta = min_addr - ____LOAD_PHYSICAL_ADDR;
 	if (!delta) {
 		debug_putstr("No relocation needed... ");
 		return;
@@ -353,7 +353,7 @@ static void parse_elf(void *output)
 	Elf32_Ehdr ehdr;
 	Elf32_Phdr *phdrs, *phdr;
 #endif
-	void *dest;
+	void *dest, *prev;
 	int i;
 
 	memcpy(&ehdr, output, sizeof(ehdr));
@@ -380,13 +380,16 @@ static void parse_elf(void *output)
 		case PT_LOAD:
 #ifdef CONFIG_RELOCATABLE
 			dest = output;
-			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
+			dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
 #else
 			dest = (void *)(phdr->p_paddr);
 #endif
 			memcpy(dest,
 			       output + phdr->p_offset,
 			       phdr->p_filesz);
+			if (i)
+				memset(prev, 0xff, dest - prev);
+			prev = dest + phdr->p_filesz;
 			break;
 		default: /* Ignore other PT_* */ break;
 		}
@@ -432,7 +435,7 @@ asmlinkage void decompress_kernel(void *
 		error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
-	if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
+	if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
 		error("Wrong destination address");
 #endif
 
diff -ruNp linux-3.13.11/arch/x86/boot/cpucheck.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/cpucheck.c
--- linux-3.13.11/arch/x86/boot/cpucheck.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/cpucheck.c	2014-07-09 12:00:15.000000000 +0200
@@ -74,7 +74,7 @@ static int has_fpu(void)
 	u16 fcw = -1, fsw = -1;
 	u32 cr0;
 
-	asm("movl %%cr0,%0" : "=r" (cr0));
+	asm volatile("movl %%cr0,%0" : "=r" (cr0));
 	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
 		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
 		asm volatile("movl %0,%%cr0" : : "r" (cr0));
@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
 {
 	u32 f0, f1;
 
-	asm("pushfl ; "
+	asm volatile("pushfl ; "
 	    "pushfl ; "
 	    "popl %0 ; "
 	    "movl %0,%1 ; "
@@ -115,7 +115,7 @@ static void get_flags(void)
 		set_bit(X86_FEATURE_FPU, cpu.flags);
 
 	if (has_eflag(X86_EFLAGS_ID)) {
-		asm("cpuid"
+		asm volatile("cpuid"
 		    : "=a" (max_intel_level),
 		      "=b" (cpu_vendor[0]),
 		      "=d" (cpu_vendor[1]),
@@ -124,7 +124,7 @@ static void get_flags(void)
 
 		if (max_intel_level >= 0x00000001 &&
 		    max_intel_level <= 0x0000ffff) {
-			asm("cpuid"
+			asm volatile("cpuid"
 			    : "=a" (tfms),
 			      "=c" (cpu.flags[4]),
 			      "=d" (cpu.flags[0])
@@ -136,7 +136,7 @@ static void get_flags(void)
 				cpu.model += ((tfms >> 16) & 0xf) << 4;
 		}
 
-		asm("cpuid"
+		asm volatile("cpuid"
 		    : "=a" (max_amd_level)
 		    : "a" (0x80000000)
 		    : "ebx", "ecx", "edx");
@@ -144,7 +144,7 @@ static void get_flags(void)
 		if (max_amd_level >= 0x80000001 &&
 		    max_amd_level <= 0x8000ffff) {
 			u32 eax = 0x80000001;
-			asm("cpuid"
+			asm volatile("cpuid"
 			    : "+a" (eax),
 			      "=c" (cpu.flags[6]),
 			      "=d" (cpu.flags[1])
@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
 		u32 ecx = MSR_K7_HWCR;
 		u32 eax, edx;
 
-		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
 		eax &= ~(1 << 15);
-		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
 		get_flags();	/* Make sure it really did something */
 		err = check_flags();
@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
 		u32 ecx = MSR_VIA_FCR;
 		u32 eax, edx;
 
-		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
 		eax |= (1<<1)|(1<<7);
-		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
 		set_bit(X86_FEATURE_CX8, cpu.flags);
 		err = check_flags();
@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
 		u32 eax, edx;
 		u32 level = 1;
 
-		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-		asm("cpuid"
+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+		asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+		asm volatile("cpuid"
 		    : "+a" (level), "=d" (cpu.flags[0])
 		    : : "ecx", "ebx");
-		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
 		err = check_flags();
 	}
diff -ruNp linux-3.13.11/arch/x86/boot/header.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/header.S
--- linux-3.13.11/arch/x86/boot/header.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/header.S	2014-07-09 12:00:15.000000000 +0200
@@ -409,10 +409,14 @@ setup_data:		.quad 0			# 64-bit physical
 						# single linked list of
 						# struct setup_data
 
-pref_address:		.quad LOAD_PHYSICAL_ADDR	# preferred load addr
+pref_address:		.quad ____LOAD_PHYSICAL_ADDR	# preferred load addr
 
 #define ZO_INIT_SIZE	(ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define VO_INIT_SIZE	(VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
+#else
 #define VO_INIT_SIZE	(VO__end - VO__text)
+#endif
 #if ZO_INIT_SIZE > VO_INIT_SIZE
 #define INIT_SIZE ZO_INIT_SIZE
 #else
diff -ruNp linux-3.13.11/arch/x86/boot/memory.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/memory.c
--- linux-3.13.11/arch/x86/boot/memory.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/memory.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,7 +19,7 @@
 
 static int detect_memory_e820(void)
 {
-	int count = 0;
+	unsigned int count = 0;
 	struct biosregs ireg, oreg;
 	struct e820entry *desc = boot_params.e820_map;
 	static struct e820entry buf; /* static so it is zeroed */
diff -ruNp linux-3.13.11/arch/x86/boot/video-vesa.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/video-vesa.c
--- linux-3.13.11/arch/x86/boot/video-vesa.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/video-vesa.c	2014-07-09 12:00:15.000000000 +0200
@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
 
 	boot_params.screen_info.vesapm_seg = oreg.es;
 	boot_params.screen_info.vesapm_off = oreg.di;
+	boot_params.screen_info.vesapm_size = oreg.cx;
 }
 
 /*
diff -ruNp linux-3.13.11/arch/x86/boot/video.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/video.c
--- linux-3.13.11/arch/x86/boot/video.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/boot/video.c	2014-07-09 12:00:15.000000000 +0200
@@ -96,7 +96,7 @@ static void store_mode_params(void)
 static unsigned int get_entry(void)
 {
 	char entry_buf[4];
-	int i, len = 0;
+	unsigned int i, len = 0;
 	int key;
 	unsigned int v;
 
diff -ruNp linux-3.13.11/arch/x86/crypto/aes-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/aes-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/aes-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/aes-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,8 @@
  * including this sentence is retained in full.
  */
 
+#include <asm/alternative-asm.h>
+
 .extern crypto_ft_tab
 .extern crypto_it_tab
 .extern crypto_fl_tab
@@ -70,6 +72,8 @@
 	je	B192;			\
 	leaq	32(r9),r9;
 
+#define ret	pax_force_retaddr; ret
+
 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
 	movq	r1,r2;			\
 	movq	r3,r4;			\
diff -ruNp linux-3.13.11/arch/x86/crypto/aesni-intel_asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/aesni-intel_asm.S
--- linux-3.13.11/arch/x86/crypto/aesni-intel_asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/aesni-intel_asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,7 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>
 
 #ifdef __x86_64__
 .data
@@ -205,7 +206,7 @@ enc:        .octa 0x2
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -214,8 +215,8 @@ enc:        .octa 0x2
 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
 	mov	   arg7, %r10           # %r10 = AAD
-	mov	   arg8, %r12           # %r12 = aadLen
-	mov	   %r12, %r11
+	mov	   arg8, %r15           # %r15 = aadLen
+	mov	   %r15, %r11
 	pxor	   %xmm\i, %xmm\i
 _get_AAD_loop\num_initial_blocks\operation:
 	movd	   (%r10), \TMP1
@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operati
 	psrldq	   $4, %xmm\i
 	pxor	   \TMP1, %xmm\i
 	add	   $4, %r10
-	sub	   $4, %r12
+	sub	   $4, %r15
 	jne	   _get_AAD_loop\num_initial_blocks\operation
 	cmp	   $16, %r11
 	je	   _get_AAD_loop2_done\num_initial_blocks\operation
-	mov	   $16, %r12
+	mov	   $16, %r15
 _get_AAD_loop2\num_initial_blocks\operation:
 	psrldq	   $4, %xmm\i
-	sub	   $4, %r12
-	cmp	   %r11, %r12
+	sub	   $4, %r15
+	cmp	   %r11, %r15
 	jne	   _get_AAD_loop2\num_initial_blocks\operation
 _get_AAD_loop2_done\num_initial_blocks\operation:
         movdqa     SHUF_MASK(%rip), %xmm14
@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\
 * num_initial_blocks = b mod 4
 * encrypt the initial num_initial_blocks blocks and apply ghash on
 * the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
 * are clobbered
 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
 */
@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\
 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
 	mov	   arg7, %r10           # %r10 = AAD
-	mov	   arg8, %r12           # %r12 = aadLen
-	mov	   %r12, %r11
+	mov	   arg8, %r15           # %r15 = aadLen
+	mov	   %r15, %r11
 	pxor	   %xmm\i, %xmm\i
 _get_AAD_loop\num_initial_blocks\operation:
 	movd	   (%r10), \TMP1
@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operati
 	psrldq	   $4, %xmm\i
 	pxor	   \TMP1, %xmm\i
 	add	   $4, %r10
-	sub	   $4, %r12
+	sub	   $4, %r15
 	jne	   _get_AAD_loop\num_initial_blocks\operation
 	cmp	   $16, %r11
 	je	   _get_AAD_loop2_done\num_initial_blocks\operation
-	mov	   $16, %r12
+	mov	   $16, %r15
 _get_AAD_loop2\num_initial_blocks\operation:
 	psrldq	   $4, %xmm\i
-	sub	   $4, %r12
-	cmp	   %r11, %r12
+	sub	   $4, %r15
+	cmp	   %r11, %r15
 	jne	   _get_AAD_loop2\num_initial_blocks\operation
 _get_AAD_loop2_done\num_initial_blocks\operation:
         movdqa     SHUF_MASK(%rip), %xmm14
@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
 *
 *****************************************************************************/
 ENTRY(aesni_gcm_dec)
-	push	%r12
+	push	%r15
 	push	%r13
 	push	%r14
 	mov	%rsp, %r14
@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
 */
 	sub	$VARIABLE_OFFSET, %rsp
 	and	$~63, %rsp                        # align rsp to 64 bytes
-	mov	%arg6, %r12
-	movdqu	(%r12), %xmm13			  # %xmm13 = HashKey
+	mov	%arg6, %r15
+	movdqu	(%r15), %xmm13			  # %xmm13 = HashKey
         movdqa  SHUF_MASK(%rip), %xmm2
 	PSHUFB_XMM %xmm2, %xmm13
 
@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
 	movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
 	mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
 	and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
-	mov %r13, %r12
-	and $(3<<4), %r12
+	mov %r13, %r15
+	and $(3<<4), %r15
 	jz _initial_num_blocks_is_0_decrypt
-	cmp $(2<<4), %r12
+	cmp $(2<<4), %r15
 	jb _initial_num_blocks_is_1_decrypt
 	je _initial_num_blocks_is_2_decrypt
 _initial_num_blocks_is_3_decrypt:
@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
 	sub $16, %r11
 	add %r13, %r11
 	movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
-	lea SHIFT_MASK+16(%rip), %r12
-	sub %r13, %r12
+	lea SHIFT_MASK+16(%rip), %r15
+	sub %r13, %r15
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
 # (%r13 is the number of bytes in plaintext mod 16)
-	movdqu (%r12), %xmm2           # get the appropriate shuffle mask
+	movdqu (%r15), %xmm2           # get the appropriate shuffle mask
 	PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 butes
 
 	movdqa  %xmm1, %xmm2
 	pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
-	movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+	movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
 	# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
 	pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
 	pand    %xmm1, %xmm2
@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
 	sub	$1, %r13
 	jne	_less_than_8_bytes_left_decrypt
 _multiple_of_16_bytes_decrypt:
-	mov	arg8, %r12		  # %r13 = aadLen (number of bytes)
-	shl	$3, %r12		  # convert into number of bits
-	movd	%r12d, %xmm15		  # len(A) in %xmm15
+	mov	arg8, %r15		  # %r13 = aadLen (number of bytes)
+	shl	$3, %r15		  # convert into number of bits
+	movd	%r15d, %xmm15		  # len(A) in %xmm15
 	shl	$3, %arg4		  # len(C) in bits (*128)
 	MOVQ_R64_XMM	%arg4, %xmm1
 	pslldq	$8, %xmm15		  # %xmm15 = len(A)||0x0000000000000000
@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
 	mov	%r14, %rsp
 	pop	%r14
 	pop	%r13
-	pop	%r12
+	pop	%r15
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_gcm_dec)
 
@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
 * poly = x^128 + x^127 + x^126 + x^121 + 1
 ***************************************************************************/
 ENTRY(aesni_gcm_enc)
-	push	%r12
+	push	%r15
 	push	%r13
 	push	%r14
 	mov	%rsp, %r14
@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
 #
 	sub	$VARIABLE_OFFSET, %rsp
 	and	$~63, %rsp
-	mov	%arg6, %r12
-	movdqu	(%r12), %xmm13
+	mov	%arg6, %r15
+	movdqu	(%r15), %xmm13
         movdqa  SHUF_MASK(%rip), %xmm2
 	PSHUFB_XMM %xmm2, %xmm13
 
@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
 	movdqa	%xmm13, HashKey(%rsp)
 	mov	%arg4, %r13            # %xmm13 holds HashKey<<1 (mod poly)
 	and	$-16, %r13
-	mov	%r13, %r12
+	mov	%r13, %r15
 
         # Encrypt first few blocks
 
-	and	$(3<<4), %r12
+	and	$(3<<4), %r15
 	jz	_initial_num_blocks_is_0_encrypt
-	cmp	$(2<<4), %r12
+	cmp	$(2<<4), %r15
 	jb	_initial_num_blocks_is_1_encrypt
 	je	_initial_num_blocks_is_2_encrypt
 _initial_num_blocks_is_3_encrypt:
@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
 	sub $16, %r11
 	add %r13, %r11
 	movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
-	lea SHIFT_MASK+16(%rip), %r12
-	sub %r13, %r12
+	lea SHIFT_MASK+16(%rip), %r15
+	sub %r13, %r15
 	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
 	# (%r13 is the number of bytes in plaintext mod 16)
-	movdqu	(%r12), %xmm2           # get the appropriate shuffle mask
+	movdqu	(%r15), %xmm2           # get the appropriate shuffle mask
 	PSHUFB_XMM	%xmm2, %xmm1            # shift right 16-r13 byte
 	pxor	%xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
-	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	movdqu	ALL_F-SHIFT_MASK(%r15), %xmm1
 	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
 	pand	%xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
         movdqa SHUF_MASK(%rip), %xmm10
@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
 	sub $1, %r13
 	jne _less_than_8_bytes_left_encrypt
 _multiple_of_16_bytes_encrypt:
-	mov	arg8, %r12    # %r12 = addLen (number of bytes)
-	shl	$3, %r12
-	movd	%r12d, %xmm15       # len(A) in %xmm15
+	mov	arg8, %r15    # %r15 = addLen (number of bytes)
+	shl	$3, %r15
+	movd	%r15d, %xmm15       # len(A) in %xmm15
 	shl	$3, %arg4               # len(C) in bits (*128)
 	MOVQ_R64_XMM	%arg4, %xmm1
 	pslldq	$8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
 	mov	%r14, %rsp
 	pop	%r14
 	pop	%r13
-	pop	%r12
+	pop	%r15
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_gcm_enc)
 
@@ -1722,6 +1725,7 @@ _key_expansion_256a:
 	pxor %xmm1, %xmm0
 	movaps %xmm0, (TKEYP)
 	add $0x10, TKEYP
+	pax_force_retaddr
 	ret
 ENDPROC(_key_expansion_128)
 ENDPROC(_key_expansion_256a)
@@ -1748,6 +1752,7 @@ _key_expansion_192a:
 	shufps $0b01001110, %xmm2, %xmm1
 	movaps %xmm1, 0x10(TKEYP)
 	add $0x20, TKEYP
+	pax_force_retaddr
 	ret
 ENDPROC(_key_expansion_192a)
 
@@ -1768,6 +1773,7 @@ _key_expansion_192b:
 
 	movaps %xmm0, (TKEYP)
 	add $0x10, TKEYP
+	pax_force_retaddr
 	ret
 ENDPROC(_key_expansion_192b)
 
@@ -1781,6 +1787,7 @@ _key_expansion_256b:
 	pxor %xmm1, %xmm2
 	movaps %xmm2, (TKEYP)
 	add $0x10, TKEYP
+	pax_force_retaddr
 	ret
 ENDPROC(_key_expansion_256b)
 
@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
 #ifndef __x86_64__
 	popl KEYP
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_set_key)
 
@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
 	popl KLEN
 	popl KEYP
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_enc)
 
@@ -1974,6 +1983,7 @@ _aesni_enc1:
 	AESENC KEY STATE
 	movaps 0x70(TKEYP), KEY
 	AESENCLAST KEY STATE
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_enc1)
 
@@ -2083,6 +2093,7 @@ _aesni_enc4:
 	AESENCLAST KEY STATE2
 	AESENCLAST KEY STATE3
 	AESENCLAST KEY STATE4
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_enc4)
 
@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
 	popl KLEN
 	popl KEYP
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_dec)
 
@@ -2164,6 +2176,7 @@ _aesni_dec1:
 	AESDEC KEY STATE
 	movaps 0x70(TKEYP), KEY
 	AESDECLAST KEY STATE
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_dec1)
 
@@ -2273,6 +2286,7 @@ _aesni_dec4:
 	AESDECLAST KEY STATE2
 	AESDECLAST KEY STATE3
 	AESDECLAST KEY STATE4
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_dec4)
 
@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
 	popl KEYP
 	popl LEN
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_ecb_enc)
 
@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
 	popl KEYP
 	popl LEN
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_ecb_dec)
 
@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
 	popl LEN
 	popl IVP
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_cbc_enc)
 
@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
 	popl LEN
 	popl IVP
 #endif
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_cbc_dec)
 
@@ -2550,6 +2568,7 @@ _aesni_inc_init:
 	mov $1, TCTR_LOW
 	MOVQ_R64_XMM TCTR_LOW INC
 	MOVQ_R64_XMM CTR TCTR_LOW
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_inc_init)
 
@@ -2579,6 +2598,7 @@ _aesni_inc:
 .Linc_low:
 	movaps CTR, IV
 	PSHUFB_XMM BSWAP_MASK IV
+	pax_force_retaddr
 	ret
 ENDPROC(_aesni_inc)
 
@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
 .Lctr_enc_ret:
 	movups IV, (IVP)
 .Lctr_enc_just_ret:
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_ctr_enc)
 
@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu STATE4, 0x70(OUTP)
 
+	pax_force_retaddr
 	ret
 ENDPROC(aesni_xts_crypt8)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/blowfish-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/blowfish-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/blowfish-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/blowfish-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "blowfish-x86_64-asm.S"
 .text
@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
 	jnz .L__enc_xor;
 
 	write_block();
+	pax_force_retaddr
 	ret;
 .L__enc_xor:
 	xor_block();
+	pax_force_retaddr
 	ret;
 ENDPROC(__blowfish_enc_blk)
 
@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
 
 	movq %r11, %rbp;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(blowfish_dec_blk)
 
@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
 
 	popq %rbx;
 	popq %rbp;
+	pax_force_retaddr
 	ret;
 
 .L__enc_xor4:
@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
 
 	popq %rbx;
 	popq %rbp;
+	pax_force_retaddr
 	ret;
 ENDPROC(__blowfish_enc_blk_4way)
 
@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
 	popq %rbx;
 	popq %rbp;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(blowfish_dec_blk_4way)
diff -ruNp linux-3.13.11/arch/x86/crypto/camellia-aesni-avx-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-aesni-avx-asm_64.S
--- linux-3.13.11/arch/x86/crypto/camellia-aesni-avx-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-aesni-avx-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_
 	roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
 		  %rcx, (%r9));
+	pax_force_retaddr
 	ret;
 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_
 	roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
 		  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
 		  %rax, (%r9));
+	pax_force_retaddr
 	ret;
 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
@@ -780,6 +783,7 @@ __camellia_enc_blk16:
 		    %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
 		    %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
 
+	pax_force_retaddr
 	ret;
 
 .align 8
@@ -865,6 +869,7 @@ __camellia_dec_blk16:
 		    %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
 		    %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
 
+	pax_force_retaddr
 	ret;
 
 .align 8
@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
 		     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 		     %xmm8, %rsi);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ecb_enc_16way)
 
@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
 		     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 		     %xmm8, %rsi);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ecb_dec_16way)
 
@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
 		     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 		     %xmm8, %rsi);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_cbc_dec_16way)
 
@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
 		     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 		     %xmm8, %rsi);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ctr_16way)
 
@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
 		     %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
 		     %xmm8, %rsi);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_xts_crypt_16way)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/camellia-aesni-avx2-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
--- linux-3.13.11/arch/x86/crypto/camellia-aesni-avx2-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-aesni-avx2-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_
 	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
 		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
 		  %rcx, (%r9));
+	pax_force_retaddr
 	ret;
 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_
 	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
 		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
 		  %rax, (%r9));
+	pax_force_retaddr
 	ret;
 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
@@ -820,6 +823,7 @@ __camellia_enc_blk32:
 		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
 		    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
 
+	pax_force_retaddr
 	ret;
 
 .align 8
@@ -905,6 +909,7 @@ __camellia_dec_blk32:
 		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
 		    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
 
+	pax_force_retaddr
 	ret;
 
 .align 8
@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ecb_enc_32way)
 
@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ecb_dec_32way)
 
@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_cbc_dec_32way)
 
@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_ctr_32way)
 
@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_xts_crypt_32way)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/camellia-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/camellia-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/camellia-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "camellia-x86_64-asm_64.S"
 .text
@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
 	enc_outunpack(mov, RT1);
 
 	movq RRBP, %rbp;
+	pax_force_retaddr
 	ret;
 
 .L__enc_xor:
 	enc_outunpack(xor, RT1);
 
 	movq RRBP, %rbp;
+	pax_force_retaddr
 	ret;
 ENDPROC(__camellia_enc_blk)
 
@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
 	dec_outunpack();
 
 	movq RRBP, %rbp;
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_dec_blk)
 
@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
 
 	movq RRBP, %rbp;
 	popq %rbx;
+	pax_force_retaddr
 	ret;
 
 .L__enc2_xor:
@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
 
 	movq RRBP, %rbp;
 	popq %rbx;
+	pax_force_retaddr
 	ret;
 ENDPROC(__camellia_enc_blk_2way)
 
@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
 
 	movq RRBP, %rbp;
 	movq RXOR, %rbx;
+	pax_force_retaddr
 	ret;
 ENDPROC(camellia_dec_blk_2way)
diff -ruNp linux-3.13.11/arch/x86/crypto/cast5-avx-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/cast5-avx-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/cast5-avx-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "cast5-avx-x86_64-asm_64.S"
 
@@ -281,6 +282,7 @@ __cast5_enc_blk16:
 	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__cast5_enc_blk16)
 
@@ -352,6 +354,7 @@ __cast5_dec_blk16:
 	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
 	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
+	pax_force_retaddr
 	ret;
 
 .L__skip_dec:
@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
 	vmovdqu RR4, (6*4*4)(%r11);
 	vmovdqu RL4, (7*4*4)(%r11);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast5_ecb_enc_16way)
 
@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
 	vmovdqu RR4, (6*4*4)(%r11);
 	vmovdqu RL4, (7*4*4)(%r11);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast5_ecb_dec_16way)
 
@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
 	 *	%rdx: src
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	vmovdqu (0*16)(%rdx), RL1;
 	vmovdqu (1*16)(%rdx), RR1;
@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
 	call __cast5_dec_blk16;
 
 	/* xor with src */
-	vmovq (%r12), RX;
+	vmovq (%r14), RX;
 	vpshufd $0x4f, RX, RX;
 	vpxor RX, RR1, RR1;
-	vpxor 0*16+8(%r12), RL1, RL1;
-	vpxor 1*16+8(%r12), RR2, RR2;
-	vpxor 2*16+8(%r12), RL2, RL2;
-	vpxor 3*16+8(%r12), RR3, RR3;
-	vpxor 4*16+8(%r12), RL3, RL3;
-	vpxor 5*16+8(%r12), RR4, RR4;
-	vpxor 6*16+8(%r12), RL4, RL4;
+	vpxor 0*16+8(%r14), RL1, RL1;
+	vpxor 1*16+8(%r14), RR2, RR2;
+	vpxor 2*16+8(%r14), RL2, RL2;
+	vpxor 3*16+8(%r14), RR3, RR3;
+	vpxor 4*16+8(%r14), RL3, RL3;
+	vpxor 5*16+8(%r14), RR4, RR4;
+	vpxor 6*16+8(%r14), RL4, RL4;
 
 	vmovdqu RR1, (0*16)(%r11);
 	vmovdqu RL1, (1*16)(%r11);
@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
 	vmovdqu RR4, (6*16)(%r11);
 	vmovdqu RL4, (7*16)(%r11);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast5_cbc_dec_16way)
 
@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
 	 *	%rcx: iv (big endian, 64bit)
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	vpcmpeqd RTMP, RTMP, RTMP;
 	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
 	call __cast5_enc_blk16;
 
 	/* dst = src ^ iv */
-	vpxor (0*16)(%r12), RR1, RR1;
-	vpxor (1*16)(%r12), RL1, RL1;
-	vpxor (2*16)(%r12), RR2, RR2;
-	vpxor (3*16)(%r12), RL2, RL2;
-	vpxor (4*16)(%r12), RR3, RR3;
-	vpxor (5*16)(%r12), RL3, RL3;
-	vpxor (6*16)(%r12), RR4, RR4;
-	vpxor (7*16)(%r12), RL4, RL4;
+	vpxor (0*16)(%r14), RR1, RR1;
+	vpxor (1*16)(%r14), RL1, RL1;
+	vpxor (2*16)(%r14), RR2, RR2;
+	vpxor (3*16)(%r14), RL2, RL2;
+	vpxor (4*16)(%r14), RR3, RR3;
+	vpxor (5*16)(%r14), RL3, RL3;
+	vpxor (6*16)(%r14), RR4, RR4;
+	vpxor (7*16)(%r14), RL4, RL4;
 	vmovdqu RR1, (0*16)(%r11);
 	vmovdqu RL1, (1*16)(%r11);
 	vmovdqu RR2, (2*16)(%r11);
@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
 	vmovdqu RR4, (6*16)(%r11);
 	vmovdqu RL4, (7*16)(%r11);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast5_ctr_16way)
diff -ruNp linux-3.13.11/arch/x86/crypto/cast6-avx-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/cast6-avx-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/cast6-avx-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "cast6-avx-x86_64-asm_64.S"
@@ -295,6 +296,7 @@ __cast6_enc_blk8:
 	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__cast6_enc_blk8)
 
@@ -340,6 +342,7 @@ __cast6_dec_blk8:
 	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__cast6_dec_blk8)
 
@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
 
 	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_ecb_enc_8way)
 
@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
 
 	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_ecb_dec_8way)
 
@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
 	 *	%rdx: src
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
 	call __cast6_dec_blk8;
 
-	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+	store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_cbc_dec_8way)
 
@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
 	 *	%rcx: iv (little endian, 128bit)
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
 		      RD2, RX, RKR, RKM);
 
 	call __cast6_enc_blk8;
 
-	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+	store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_ctr_8way)
 
@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_xts_enc_8way)
 
@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(cast6_xts_dec_8way)
diff -ruNp linux-3.13.11/arch/x86/crypto/crc32c-pcl-intel-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
--- linux-3.13.11/arch/x86/crypto/crc32c-pcl-intel-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/crc32c-pcl-intel-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -312,6 +313,7 @@ do_return:
 	popq    %rsi
 	popq    %rdi
 	popq    %rbx
+	pax_force_retaddr
         ret
 
         ################################################################
diff -ruNp linux-3.13.11/arch/x86/crypto/ghash-clmulni-intel_asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/ghash-clmulni-intel_asm.S
--- linux-3.13.11/arch/x86/crypto/ghash-clmulni-intel_asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/ghash-clmulni-intel_asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
+#include <asm/alternative-asm.h>
 
 .data
 
@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
 	psrlq $1, T2
 	pxor T2, T1
 	pxor T1, DATA
+	pax_force_retaddr
 	ret
 ENDPROC(__clmul_gf128mul_ble)
 
@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
 	call __clmul_gf128mul_ble
 	PSHUFB_XMM BSWAP DATA
 	movups DATA, (%rdi)
+	pax_force_retaddr
 	ret
 ENDPROC(clmul_ghash_mul)
 
@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
 	PSHUFB_XMM BSWAP DATA
 	movups DATA, (%rdi)
 .Lupdate_just_ret:
+	pax_force_retaddr
 	ret
 ENDPROC(clmul_ghash_update)
diff -ruNp linux-3.13.11/arch/x86/crypto/salsa20-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/salsa20-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/salsa20-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/salsa20-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -1,4 +1,5 @@
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 # enter salsa20_encrypt_bytes
 ENTRY(salsa20_encrypt_bytes)
@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
 	add	%r11,%rsp
 	mov	%rdi,%rax
 	mov	%rsi,%rdx
+	pax_force_retaddr
 	ret
 #   bytesatleast65:
 ._bytesatleast65:
@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
 	add	%r11,%rsp
 	mov	%rdi,%rax
 	mov	%rsi,%rdx
+	pax_force_retaddr
 	ret
 ENDPROC(salsa20_keysetup)
 
@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
 	add	%r11,%rsp
 	mov	%rdi,%rax
 	mov	%rsi,%rdx
+	pax_force_retaddr
 	ret
 ENDPROC(salsa20_ivsetup)
diff -ruNp linux-3.13.11/arch/x86/crypto/serpent-avx-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/serpent-avx-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-avx-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "serpent-avx-x86_64-asm_64.S"
@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
 	write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__serpent_enc_blk8_avx)
 
@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
 	write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__serpent_dec_blk8_avx)
 
@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
 
 	store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ecb_enc_8way_avx)
 
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
 
 	store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ecb_dec_8way_avx)
 
@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
 
 	store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_cbc_dec_8way_avx)
 
@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
 
 	store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ctr_8way_avx)
 
@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_xts_enc_8way_avx)
 
@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_xts_dec_8way_avx)
diff -ruNp linux-3.13.11/arch/x86/crypto/serpent-avx2-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-avx2-asm_64.S
--- linux-3.13.11/arch/x86/crypto/serpent-avx2-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-avx2-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx2.S"
 
 .file "serpent-avx2-asm_64.S"
@@ -610,6 +611,7 @@ __serpent_enc_blk16:
 	write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__serpent_enc_blk16)
 
@@ -664,6 +666,7 @@ __serpent_dec_blk16:
 	write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__serpent_dec_blk16)
 
@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ecb_enc_16way)
 
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ecb_dec_16way)
 
@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_cbc_dec_16way)
 
@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_ctr_16way)
 
@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_xts_enc_16way)
 
@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
 
 	vzeroupper;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_xts_dec_16way)
diff -ruNp linux-3.13.11/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "serpent-sse2-x86_64-asm_64.S"
 .text
@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
 	write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 
 .L__enc_xor8:
 	xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
 	xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__serpent_enc_blk_8way)
 
@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
 	write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
 	write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(serpent_dec_blk_8way)
diff -ruNp linux-3.13.11/arch/x86/crypto/sha1_ssse3_asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha1_ssse3_asm.S
--- linux-3.13.11/arch/x86/crypto/sha1_ssse3_asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha1_ssse3_asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define CTX	%rdi	// arg1
 #define BUF	%rsi	// arg2
@@ -75,9 +76,9 @@
 
 	push	%rbx
 	push	%rbp
-	push	%r12
+	push	%r14
 
-	mov	%rsp, %r12
+	mov	%rsp, %r14
 	sub	$64, %rsp		# allocate workspace
 	and	$~15, %rsp		# align stack
 
@@ -99,11 +100,12 @@
 	xor	%rax, %rax
 	rep stosq
 
-	mov	%r12, %rsp		# deallocate workspace
+	mov	%r14, %rsp		# deallocate workspace
 
-	pop	%r12
+	pop	%r14
 	pop	%rbp
 	pop	%rbx
+	pax_force_retaddr
 	ret
 
 	ENDPROC(\name)
diff -ruNp linux-3.13.11/arch/x86/crypto/sha256-avx-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-avx-asm.S
--- linux-3.13.11/arch/x86/crypto/sha256-avx-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-avx-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -49,6 +49,7 @@
 
 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define    VMOVDQ vmovdqu
@@ -460,6 +461,7 @@ done_hash:
 	popq    %r13
 	popq    %rbp
 	popq    %rbx
+	pax_force_retaddr
 	ret
 ENDPROC(sha256_transform_avx)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/sha256-avx2-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-avx2-asm.S
--- linux-3.13.11/arch/x86/crypto/sha256-avx2-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-avx2-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -50,6 +50,7 @@
 
 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define	VMOVDQ vmovdqu
@@ -720,6 +721,7 @@ done_hash:
 	popq	%r12
 	popq	%rbp
 	popq	%rbx
+	pax_force_retaddr
 	ret
 ENDPROC(sha256_transform_rorx)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/sha256-ssse3-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-ssse3-asm.S
--- linux-3.13.11/arch/x86/crypto/sha256-ssse3-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha256-ssse3-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -47,6 +47,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 ## assume buffers not aligned
 #define    MOVDQ movdqu
@@ -471,6 +472,7 @@ done_hash:
 	popq    %rbp
 	popq    %rbx
 
+	pax_force_retaddr
 	ret
 ENDPROC(sha256_transform_ssse3)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/sha512-avx-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-avx-asm.S
--- linux-3.13.11/arch/x86/crypto/sha512-avx-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-avx-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -49,6 +49,7 @@
 
 #ifdef CONFIG_AS_AVX
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -364,6 +365,7 @@ updateblock:
 	mov	frame_RSPSAVE(%rsp), %rsp
 
 nowork:
+	pax_force_retaddr
 	ret
 ENDPROC(sha512_transform_avx)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/sha512-avx2-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-avx2-asm.S
--- linux-3.13.11/arch/x86/crypto/sha512-avx2-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-avx2-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -51,6 +51,7 @@
 
 #ifdef CONFIG_AS_AVX2
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -678,6 +679,7 @@ done_hash:
 
 	# Restore Stack Pointer
 	mov	frame_RSPSAVE(%rsp), %rsp
+	pax_force_retaddr
 	ret
 ENDPROC(sha512_transform_rorx)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/sha512-ssse3-asm.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-ssse3-asm.S
--- linux-3.13.11/arch/x86/crypto/sha512-ssse3-asm.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/sha512-ssse3-asm.S	2014-07-09 12:00:15.000000000 +0200
@@ -48,6 +48,7 @@
 ########################################################################
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .text
 
@@ -363,6 +364,7 @@ updateblock:
 	mov	frame_RSPSAVE(%rsp), %rsp
 
 nowork:
+	pax_force_retaddr
 	ret
 ENDPROC(sha512_transform_ssse3)
 
diff -ruNp linux-3.13.11/arch/x86/crypto/twofish-avx-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/twofish-avx-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-avx-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include "glue_helper-asm-avx.S"
 
 .file "twofish-avx-x86_64-asm_64.S"
@@ -284,6 +285,7 @@ __twofish_enc_blk8:
 	outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
 	outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__twofish_enc_blk8)
 
@@ -324,6 +326,7 @@ __twofish_dec_blk8:
 	outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
 	outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(__twofish_dec_blk8)
 
@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
 
 	store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_ecb_enc_8way)
 
@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
 
 	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_ecb_dec_8way)
 
@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
 	 *	%rdx: src
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
 	call __twofish_dec_blk8;
 
-	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+	store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_cbc_dec_8way)
 
@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
 	 *	%rcx: iv (little endian, 128bit)
 	 */
 
-	pushq %r12;
+	pushq %r14;
 
 	movq %rsi, %r11;
-	movq %rdx, %r12;
+	movq %rdx, %r14;
 
 	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
 		      RD2, RX0, RX1, RY0);
 
 	call __twofish_enc_blk8;
 
-	store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+	store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
-	popq %r12;
+	popq %r14;
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_ctr_8way)
 
@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_xts_enc_8way)
 
@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
 	/* dst <= regs xor IVs(in dst) */
 	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
 
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_xts_dec_8way)
diff -ruNp linux-3.13.11/arch/x86/crypto/twofish-x86_64-asm_64-3way.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
--- linux-3.13.11/arch/x86/crypto/twofish-x86_64-asm_64-3way.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-x86_64-asm_64-3way.S	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 .file "twofish-x86_64-asm-3way.S"
 .text
@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
 	popq %r13;
 	popq %r14;
 	popq %r15;
+	pax_force_retaddr
 	ret;
 
 .L__enc_xor3:
@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
 	popq %r13;
 	popq %r14;
 	popq %r15;
+	pax_force_retaddr
 	ret;
 ENDPROC(__twofish_enc_blk_3way)
 
@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
 	popq %r13;
 	popq %r14;
 	popq %r15;
+	pax_force_retaddr
 	ret;
 ENDPROC(twofish_dec_blk_3way)
diff -ruNp linux-3.13.11/arch/x86/crypto/twofish-x86_64-asm_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-x86_64-asm_64.S
--- linux-3.13.11/arch/x86/crypto/twofish-x86_64-asm_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/crypto/twofish-x86_64-asm_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/alternative-asm.h>
 
 #define a_offset	0
 #define b_offset	4
@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
 
 	popq	R1
 	movq	$1,%rax
+	pax_force_retaddr
 	ret
 ENDPROC(twofish_enc_blk)
 
@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
 
 	popq	R1
 	movq	$1,%rax
+	pax_force_retaddr
 	ret
 ENDPROC(twofish_dec_blk)
diff -ruNp linux-3.13.11/arch/x86/ia32/ia32_aout.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32_aout.c
--- linux-3.13.11/arch/x86/ia32/ia32_aout.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32_aout.c	2014-07-09 12:00:15.000000000 +0200
@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredum
 	unsigned long dump_start, dump_size;
 	struct user32 dump;
 
+	memset(&dump, 0, sizeof(dump));
+
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	has_dumped = 1;
diff -ruNp linux-3.13.11/arch/x86/ia32/ia32_signal.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32_signal.c
--- linux-3.13.11/arch/x86/ia32/ia32_signal.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32_signal.c	2014-07-09 12:00:15.000000000 +0200
@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
 	if (__get_user(set.sig[0], &frame->sc.oldmask)
 	    || (_COMPAT_NSIG_WORDS > 1
 		&& __copy_from_user((((char *) &set.sig) + 4),
-				    &frame->extramask,
+				    frame->extramask,
 				    sizeof(frame->extramask))))
 		goto badframe;
 
@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct
 	sp -= frame_size;
 	/* Align the stack pointer according to the i386 ABI,
 	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-	sp = ((sp + 4) & -16ul) - 4;
+	sp = ((sp - 12) & -16ul) - 4;
 	return (void __user *) sp;
 }
 
@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksi
 			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
 						 sigreturn);
 		else
-			restorer = &frame->retcode;
+			restorer = frame->retcode;
 	}
 
 	put_user_try {
@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksi
 		 * These are actually not used anymore, but left because some
 		 * gdb versions depend on them as a marker.
 		 */
-		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	if (err)
@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct
 		0xb8,
 		__NR_ia32_rt_sigreturn,
 		0x80cd,
-		0,
+		0
 	};
 
 	frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct
 
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
+		else if (current->mm->context.vdso)
+			/* Return stub is in 32bit vsyscall page */
+			restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
 		else
-			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-						 rt_sigreturn);
+			restorer = frame->retcode;
 		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
 
 		/*
 		 * Not actually used anymore, but left because some gdb
 		 * versions need it.
 		 */
-		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
+		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 
 	err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff -ruNp linux-3.13.11/arch/x86/ia32/ia32entry.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32entry.S
--- linux-3.13.11/arch/x86/ia32/ia32entry.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/ia32entry.S	2014-07-09 12:00:15.000000000 +0200
@@ -15,8 +15,10 @@
 #include <asm/irqflags.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
+#include <asm/alternative-asm.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -62,12 +64,12 @@
 	 */
 	.macro LOAD_ARGS32 offset, _r9=0
 	.if \_r9
-	movl \offset+16(%rsp),%r9d
+	movl \offset+R9(%rsp),%r9d
 	.endif
-	movl \offset+40(%rsp),%ecx
-	movl \offset+48(%rsp),%edx
-	movl \offset+56(%rsp),%esi
-	movl \offset+64(%rsp),%edi
+	movl \offset+RCX(%rsp),%ecx
+	movl \offset+RDX(%rsp),%edx
+	movl \offset+RSI(%rsp),%esi
+	movl \offset+RDI(%rsp),%edi
 	movl %eax,%eax			/* zero extension */
 	.endm
 	
@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
 ENDPROC(native_irq_enable_sysexit)
 #endif
 
+	.macro pax_enter_kernel_user
+	pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	call pax_enter_kernel_user
+#endif
+	.endm
+
+	.macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+	pushq %rax
+	pushq %r11
+	call pax_randomize_kstack
+	popq %r11
+	popq %rax
+#endif
+	.endm
+
+	.macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+	call pax_erase_kstack
+#endif
+	.endm
+
 /*
  * 32bit SYSENTER instruction entry.
  *
@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
 	CFI_REGISTER	rsp,rbp
 	SWAPGS_UNSAFE_STACK
 	movq	PER_CPU_VAR(kernel_stack), %rsp
-	addq	$(KERNEL_STACK_OFFSET),%rsp
-	/*
-	 * No need to follow this irqs on/off section: the syscall
-	 * disabled irqs, here we enable it straight after entry:
-	 */
-	ENABLE_INTERRUPTS(CLBR_NONE)
  	movl	%ebp,%ebp		/* zero extension */
 	pushq_cfi $__USER32_DS
 	/*CFI_REL_OFFSET ss,0*/
@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
 	CFI_REL_OFFSET rsp,0
 	pushfq_cfi
 	/*CFI_REL_OFFSET rflags,0*/
-	movl	TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
-	CFI_REGISTER rip,r10
+	orl	$X86_EFLAGS_IF,(%rsp)
+	GET_THREAD_INFO(%r11)
+	movl	TI_sysenter_return(%r11), %r11d
+	CFI_REGISTER rip,r11
 	pushq_cfi $__USER32_CS
 	/*CFI_REL_OFFSET cs,0*/
 	movl	%eax, %eax
-	pushq_cfi %r10
+	pushq_cfi %r11
 	CFI_REL_OFFSET rip,0
 	pushq_cfi %rax
 	cld
 	SAVE_ARGS 0,1,0
+	pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs, here we enable it straight after entry:
+	 */
+	ENABLE_INTERRUPTS(CLBR_NONE)
  	/* no need to do an access_ok check here because rbp has been
  	   32bit zero extended */ 
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	addq	pax_user_shadow_base,%rbp
+	ASM_PAX_OPEN_USERLAND
+#endif
+
 	ASM_STAC
 1:	movl	(%rbp),%ebp
 	_ASM_EXTABLE(1b,ia32_badarg)
 	ASM_CLAC
-	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	ASM_PAX_CLOSE_USERLAND
+#endif
+
+	GET_THREAD_INFO(%r11)
+	orl    $TS_COMPAT,TI_status(%r11)
+	testl  $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
 	CFI_REMEMBER_STATE
 	jnz  sysenter_tracesys
 	cmpq	$(IA32_NR_syscalls-1),%rax
@@ -162,15 +209,18 @@ sysenter_do_call:
 sysenter_dispatch:
 	call	*ia32_sys_call_table(,%rax,8)
 	movq	%rax,RAX-ARGOFFSET(%rsp)
+	GET_THREAD_INFO(%r11)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	$_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl	$_TIF_ALLWORK_MASK,TI_flags(%r11)
 	jnz	sysexit_audit
 sysexit_from_sys_call:
-	andl    $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	pax_exit_kernel_user
+	pax_erase_kstack
+	andl    $~TS_COMPAT,TI_status(%r11)
 	/* clear IF, that popfq doesn't enable interrupts early */
-	andl  $~0x200,EFLAGS-R11(%rsp) 
-	movl	RIP-R11(%rsp),%edx		/* User %eip */
+	andl  $~X86_EFLAGS_IF,EFLAGS(%rsp) 
+	movl	RIP(%rsp),%edx		/* User %eip */
 	CFI_REGISTER rip,rdx
 	RESTORE_ARGS 0,24,0,0,0,0
 	xorq	%r8,%r8
@@ -193,6 +243,9 @@ sysexit_from_sys_call:
 	movl %eax,%esi			/* 2nd arg: syscall number */
 	movl $AUDIT_ARCH_I386,%edi	/* 1st arg: audit arch */
 	call __audit_syscall_entry
+
+	pax_erase_kstack
+
 	movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
 	cmpq $(IA32_NR_syscalls-1),%rax
 	ja ia32_badsys
@@ -204,7 +257,7 @@ sysexit_from_sys_call:
 	.endm
 
 	.macro auditsys_exit exit
-	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
 	jnz ia32_ret_from_sys_call
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -215,11 +268,12 @@ sysexit_from_sys_call:
 1:	setbe %al		/* 1 if error, 0 if not */
 	movzbl %al,%edi		/* zero-extend that into %edi */
 	call __audit_syscall_exit
+	GET_THREAD_INFO(%r11)
 	movq RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */
 	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl %edi,TI_flags(%r11)
 	jz \exit
 	CLEAR_RREGS -ARGOFFSET
 	jmp int_with_check
@@ -237,7 +291,7 @@ sysexit_audit:
 
 sysenter_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
 	jz	sysenter_auditsys
 #endif
 	SAVE_REST
@@ -249,6 +303,9 @@ sysenter_tracesys:
 	RESTORE_REST
 	cmpq	$(IA32_NR_syscalls-1),%rax
 	ja	int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+	pax_erase_kstack
+
 	jmp	sysenter_do_call
 	CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
 	CFI_STARTPROC32	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
+	CFI_DEF_CFA	rsp,0
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
 	movl	%esp,%r8d
 	CFI_REGISTER	rsp,r8
 	movq	PER_CPU_VAR(kernel_stack),%rsp
+	SAVE_ARGS 8*6,0,0
+	pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
 	/*
 	 * No need to follow this irqs on/off section: the syscall
 	 * disabled irqs and here we enable it straight after entry:
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_ARGS 8,0,0
 	movl 	%eax,%eax	/* zero extension */
 	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq	%rcx,RIP-ARGOFFSET(%rsp)
@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
 	/* no need to do an access_ok check here because r8 has been
 	   32bit zero extended */ 
 	/* hardware stack frame is complete now */	
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	ASM_PAX_OPEN_USERLAND
+	movq	pax_user_shadow_base,%r8
+	addq	RSP-ARGOFFSET(%rsp),%r8
+#endif
+
 	ASM_STAC
 1:	movl	(%r8),%r9d
 	_ASM_EXTABLE(1b,ia32_badarg)
 	ASM_CLAC
-	orl     $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	testl   $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	ASM_PAX_CLOSE_USERLAND
+#endif
+
+	GET_THREAD_INFO(%r11)
+	orl   $TS_COMPAT,TI_status(%r11)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
 	CFI_REMEMBER_STATE
 	jnz   cstar_tracesys
 	cmpq $IA32_NR_syscalls-1,%rax
@@ -319,13 +395,16 @@ cstar_do_call:
 cstar_dispatch:
 	call *ia32_sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
+	GET_THREAD_INFO(%r11)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
 	jnz sysretl_audit
 sysretl_from_sys_call:
-	andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+	pax_exit_kernel_user
+	pax_erase_kstack
+	andl $~TS_COMPAT,TI_status(%r11)
+	RESTORE_ARGS 0,-ORIG_RAX,0,0,0
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
 	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
@@ -352,7 +431,7 @@ sysretl_audit:
 
 cstar_tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
 	jz cstar_auditsys
 #endif
 	xchgl %r9d,%ebp
@@ -366,11 +445,19 @@ cstar_tracesys:
 	xchgl %ebp,%r9d
 	cmpq $(IA32_NR_syscalls-1),%rax
 	ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+
+	pax_erase_kstack
+
 	jmp cstar_do_call
 END(ia32_cstar_target)
 				
 ia32_badarg:
 	ASM_CLAC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	ASM_PAX_CLOSE_USERLAND
+#endif
+
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
 	CFI_ENDPROC
@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
 	CFI_REL_OFFSET	rip,RIP-RIP
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	SWAPGS
-	/*
-	 * No need to follow this irqs on/off section: the syscall
-	 * disabled irqs and here we enable it straight after entry:
-	 */
-	ENABLE_INTERRUPTS(CLBR_NONE)
 	movl %eax,%eax
 	pushq_cfi %rax
 	cld
 	/* note the registers are not zero extended to the sf.
 	   this could be a problem. */
 	SAVE_ARGS 0,1,0
-	orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable it straight after entry:
+	 */
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	GET_THREAD_INFO(%r11)
+	orl   $TS_COMPAT,TI_status(%r11)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
 	jnz ia32_tracesys
 	cmpq $(IA32_NR_syscalls-1),%rax
 	ja ia32_badsys
@@ -442,6 +536,9 @@ ia32_tracesys:
 	RESTORE_REST
 	cmpq $(IA32_NR_syscalls-1),%rax
 	ja  int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
+
+	pax_erase_kstack
+
 	jmp ia32_do_call
 END(ia32_syscall)
 
diff -ruNp linux-3.13.11/arch/x86/ia32/sys_ia32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/sys_ia32.c
--- linux-3.13.11/arch/x86/ia32/sys_ia32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/ia32/sys_ia32.c	2014-07-09 12:00:15.000000000 +0200
@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsign
  */
 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 {
-	typeof(ubuf->st_uid) uid = 0;
-	typeof(ubuf->st_gid) gid = 0;
+	typeof(((struct stat64 *)0)->st_uid) uid = 0;
+	typeof(((struct stat64 *)0)->st_gid) gid = 0;
 	SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
 	SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
 	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
diff -ruNp linux-3.13.11/arch/x86/include/asm/alternative-asm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/alternative-asm.h
--- linux-3.13.11/arch/x86/include/asm/alternative-asm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/alternative-asm.h	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,45 @@
 	.endm
 #endif
 
+#ifdef KERNEXEC_PLUGIN
+	.macro pax_force_retaddr_bts rip=0
+	btsq $63,\rip(%rsp)
+	.endm
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
+	.macro pax_force_retaddr rip=0, reload=0
+	btsq $63,\rip(%rsp)
+	.endm
+	.macro pax_force_fptr ptr
+	btsq $63,\ptr
+	.endm
+	.macro pax_set_fptr_mask
+	.endm
+#endif
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	.macro pax_force_retaddr rip=0, reload=0
+	.if \reload
+	pax_set_fptr_mask
+	.endif
+	orq %r12,\rip(%rsp)
+	.endm
+	.macro pax_force_fptr ptr
+	orq %r12,\ptr
+	.endm
+	.macro pax_set_fptr_mask
+	movabs $0x8000000000000000,%r12
+	.endm
+#endif
+#else
+	.macro pax_force_retaddr rip=0, reload=0
+	.endm
+	.macro pax_force_fptr ptr
+	.endm
+	.macro pax_force_retaddr_bts rip=0
+	.endm
+	.macro pax_set_fptr_mask
+	.endm
+#endif
+
 .macro altinstruction_entry orig alt feature orig_len alt_len
 	.long \orig - .
 	.long \alt - .
diff -ruNp linux-3.13.11/arch/x86/include/asm/alternative.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/alternative.h
--- linux-3.13.11/arch/x86/include/asm/alternative.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/alternative.h	2014-07-09 12:00:15.000000000 +0200
@@ -106,7 +106,7 @@ static inline int alternatives_text_rese
 	".pushsection .discard,\"aw\",@progbits\n"			\
 	DISCARD_ENTRY(1)						\
 	".popsection\n"							\
-	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	".pushsection .altinstr_replacement, \"a\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
 	".popsection"
 
@@ -120,7 +120,7 @@ static inline int alternatives_text_rese
 	DISCARD_ENTRY(1)						\
 	DISCARD_ENTRY(2)						\
 	".popsection\n"							\
-	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	".pushsection .altinstr_replacement, \"a\"\n"			\
 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
 	".popsection"
diff -ruNp linux-3.13.11/arch/x86/include/asm/apic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/apic.h
--- linux-3.13.11/arch/x86/include/asm/apic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/apic.h	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
diff -ruNp linux-3.13.11/arch/x86/include/asm/apm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/apm.h
--- linux-3.13.11/arch/x86/include/asm/apm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/apm.h	2014-07-09 12:00:15.000000000 +0200
@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
 	__asm__ __volatile__(APM_DO_ZERO_SEGS
 		"pushl %%edi\n\t"
 		"pushl %%ebp\n\t"
-		"lcall *%%cs:apm_bios_entry\n\t"
+		"lcall *%%ss:apm_bios_entry\n\t"
 		"setc %%al\n\t"
 		"popl %%ebp\n\t"
 		"popl %%edi\n\t"
@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
 	__asm__ __volatile__(APM_DO_ZERO_SEGS
 		"pushl %%edi\n\t"
 		"pushl %%ebp\n\t"
-		"lcall *%%cs:apm_bios_entry\n\t"
+		"lcall *%%ss:apm_bios_entry\n\t"
 		"setc %%bl\n\t"
 		"popl %%ebp\n\t"
 		"popl %%edi\n\t"
diff -ruNp linux-3.13.11/arch/x86/include/asm/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic.h
--- linux-3.13.11/arch/x86/include/asm/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -23,7 +23,18 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	return (*(volatile const int *)&(v)->counter);
+}
+
+/**
+ * atomic_read_unchecked - read atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+	return (*(volatile const int *)&(v)->counter);
 }
 
 /**
@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *
 }
 
 /**
+ * atomic_set_unchecked - set atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+	v->counter = i;
+}
+
+/**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
+	asm volatile(LOCK_PREFIX "addl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "subl %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+/**
+ * atomic_add_unchecked - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0\n"
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
@@ -61,7 +106,29 @@ static inline void atomic_add(int i, ato
  */
 static inline void atomic_sub(int i, atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "subl %1,%0"
+	asm volatile(LOCK_PREFIX "subl %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "addl %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+/**
+ * atomic_sub_unchecked - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "subl %1,%0\n"
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, ato
  */
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl",  v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(in
  */
 static inline void atomic_inc(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "incl %0"
+	asm volatile(LOCK_PREFIX "incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "decl %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter));
+}
+
+/**
+ * atomic_inc_unchecked - increment atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "incl %0\n"
 		     : "+m" (v->counter));
 }
 
@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *
  */
 static inline void atomic_dec(atomic_t *v)
 {
-	asm volatile(LOCK_PREFIX "decl %0"
+	asm volatile(LOCK_PREFIX "decl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "incl %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_unchecked - decrement atomic variable
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "decl %0\n"
 		     : "+m" (v->counter));
 }
 
@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *
  */
 static inline int atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
 
 /**
@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(at
  */
 static inline int atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
+}
+
+/**
+ * atomic_inc_and_test_unchecked - increment and test
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+	GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
 
 /**
@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(at
  */
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -153,6 +273,18 @@ static inline int atomic_add_negative(in
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
+	return i + xadd_check_overflow(&v->counter, i);
+}
+
+/**
+ * atomic_add_return_unchecked - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
 	return i + xadd(&v->counter, i);
 }
 
@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int
 }
 
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+	return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *
 	return xchg(&v->counter, new);
 }
 
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
+	int c, old, new;
 	c = atomic_read(v);
 	for (;;) {
-		if (unlikely(c == (u)))
+		if (unlikely(c == u))
 			break;
-		old = atomic_cmpxchg((v), c, c + (a));
+
+		asm volatile("addl %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+			     "jno 0f\n"
+			     "subl %2,%0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
+		old = atomic_cmpxchg(v, c, new);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(at
 }
 
 /**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of probable
+ * value of the atomic. This helps processor to not read the memory
+ * before doing the atomic read/modify/write cycle, lowering
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+	int val, c = hint, new;
+
+	/* sanity test, should be removed by compiler if hint is a constant */
+	if (!hint)
+		return __atomic_add_unless(v, 1, 0);
+
+	do {
+		asm volatile("incl %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+			     "jno 0f\n"
+			     "decl %0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c));
+
+		val = atomic_cmpxchg(v, c, new);
+		if (val == c)
+			return 1;
+		c = val;
+	} while (c);
+
+	return 0;
+}
+
+/**
  * atomic_inc_short - increment of a short integer
  * @v: pointer to type int
  *
@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsign
 #endif
 
 /* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "andl %1,%0"
+		     : "+m" (v->counter)
+		     : "r" (~(mask))
+		     : "memory");
+}
+
+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "andl %1,%0"
+		     : "+m" (v->counter)
+		     : "r" (~(mask))
+		     : "memory");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "orl %1,%0"
+		     : "+m" (v->counter)
+		     : "r" (mask)
+		     : "memory");
+}
+
+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "orl %1,%0"
+		     : "+m" (v->counter)
+		     : "r" (mask)
+		     : "memory");
+}
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
diff -ruNp linux-3.13.11/arch/x86/include/asm/atomic64_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic64_32.h
--- linux-3.13.11/arch/x86/include/asm/atomic64_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic64_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,14 @@ typedef struct {
 	u64 __aligned(8) counter;
 } atomic64_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(val)	{ (val) }
 
 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
@@ -37,21 +45,31 @@ typedef struct {
 	ATOMIC64_DECL_ONE(sym##_386)
 
 ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(add_unchecked_386);
 ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(sub_unchecked_386);
 ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(inc_unchecked_386);
 ATOMIC64_DECL_ONE(dec_386);
+ATOMIC64_DECL_ONE(dec_unchecked_386);
 #endif
 
 #define alternative_atomic64(f, out, in...) \
 	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
 
 ATOMIC64_DECL(read);
+ATOMIC64_DECL(read_unchecked);
 ATOMIC64_DECL(set);
+ATOMIC64_DECL(set_unchecked);
 ATOMIC64_DECL(xchg);
 ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(add_return_unchecked);
 ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(sub_return_unchecked);
 ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(inc_return_unchecked);
 ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_return_unchecked);
 ATOMIC64_DECL(dec_if_positive);
 ATOMIC64_DECL(inc_not_zero);
 ATOMIC64_DECL(add_unless);
@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg
 }
 
 /**
+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
+ * @p: pointer to type atomic64_unchecked_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
+{
+	return cmpxchg64(&v->counter, o, n);
+}
+
+/**
  * atomic64_xchg - xchg atomic64 variable
  * @v: pointer to type atomic64_t
  * @n: value to assign
@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64
 }
 
 /**
+ * atomic64_set_unchecked - set atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ * @n: value to assign
+ *
+ * Atomically sets the value of @v to @n.
+ */
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+	unsigned high = (unsigned)(i >> 32);
+	unsigned low = (unsigned)i;
+	alternative_atomic64(set, /* no output */,
+			     "S" (v), "b" (low), "c" (high)
+			     : "eax", "edx", "memory");
+}
+
+/**
  * atomic64_read - read atomic64 variable
  * @v: pointer to type atomic64_t
  *
@@ -125,6 +174,19 @@ static inline long long atomic64_read(co
  }
 
 /**
+ * atomic64_read_unchecked - read atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically reads the value of @v and returns it.
+ */
+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+	long long r;
+	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+	return r;
+ }
+
+/**
  * atomic64_add_return - add and return
  * @i: integer value to add
  * @v: pointer to type atomic64_t
@@ -139,6 +201,21 @@ static inline long long atomic64_add_ret
 	return i;
 }
 
+/**
+ * atomic64_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + *@v
+ */
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+	alternative_atomic64(add_return_unchecked,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
+	return i;
+}
+
 /*
  * Other variants with different arithmetic operators:
  */
@@ -158,6 +235,14 @@ static inline long long atomic64_inc_ret
 	return a;
 }
 
+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+	long long a;
+	alternative_atomic64(inc_return_unchecked, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
+	return a;
+}
+
 static inline long long atomic64_dec_return(atomic64_t *v)
 {
 	long long a;
@@ -179,6 +264,21 @@ static inline long long atomic64_add(lon
 			       ASM_OUTPUT2("+A" (i), "+c" (v)),
 			       ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
+}
+
+/**
+ * atomic64_add_unchecked - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+	__alternative_atomic64(add_unchecked, add_return_unchecked,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
+			       ASM_NO_INPUT_CLOBBER("memory"));
+	return i;
 }
 
 /**
diff -ruNp linux-3.13.11/arch/x86/include/asm/atomic64_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic64_64.h
--- linux-3.13.11/arch/x86/include/asm/atomic64_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/atomic64_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -18,7 +18,19 @@
  */
 static inline long atomic64_read(const atomic64_t *v)
 {
-	return (*(volatile long *)&(v)->counter);
+	return (*(volatile const long *)&(v)->counter);
+}
+
+/**
+ * atomic64_read_unchecked - read atomic64 variable
+ * @v: pointer of type atomic64_unchecked_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+	return (*(volatile const long *)&(v)->counter);
 }
 
 /**
@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
 }
 
 /**
+ * atomic64_set_unchecked - set atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
+{
+	v->counter = i;
+}
+
+/**
  * atomic64_add - add integer to atomic64 variable
  * @i: integer value to add
  * @v: pointer to type atomic64_t
@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
  */
 static inline void atomic64_add(long i, atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "addq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "subq %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "=m" (v->counter)
+		     : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_add_unchecked - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
+{
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
@@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
  */
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
-	asm volatile(LOCK_PREFIX "subq %1,%0"
+	asm volatile(LOCK_PREFIX "subq %1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "addq %1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "=m" (v->counter)
+		     : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_unchecked - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "subq %1,%0\n"
 		     : "=m" (v->counter)
 		     : "er" (i), "m" (v->counter));
 }
@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i,
  */
 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
 }
 
 /**
@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(
  */
 static inline void atomic64_inc(atomic64_t *v)
 {
+	asm volatile(LOCK_PREFIX "incq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "decq %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+/**
+ * atomic64_inc_unchecked - increment atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
 	asm volatile(LOCK_PREFIX "incq %0"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64
  */
 static inline void atomic64_dec(atomic64_t *v)
 {
-	asm volatile(LOCK_PREFIX "decq %0"
+	asm volatile(LOCK_PREFIX "decq %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX "incq %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec_unchecked - decrement atomic64 variable
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
+	asm volatile(LOCK_PREFIX "decq %0\n"
 		     : "=m" (v->counter)
 		     : "m" (v->counter));
 }
@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64
  */
 static inline int atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
 }
 
 /**
@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(
  */
 static inline int atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
 }
 
 /**
@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(
  */
 static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq",  v->counter, "er", i, "%0", "s");
 }
 
 /**
@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
+	return i + xadd_check_overflow(&v->counter, i);
+}
+
+/**
+ * atomic64_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+{
 	return i + xadd(&v->counter, i);
 }
 
@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(l
 }
 
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+{
+	return atomic64_add_return_unchecked(1, v);
+}
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atom
 	return cmpxchg(&v->counter, old, new);
 }
 
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
 	return xchg(&v->counter, new);
@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic6
  */
 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
+	long c, old, new;
 	c = atomic64_read(v);
 	for (;;) {
-		if (unlikely(c == (u)))
+		if (unlikely(c == u))
 			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
+
+		asm volatile("add %2,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+			     "jno 0f\n"
+			     "sub %2,%0\n"
+			     "int $4\n0:\n"
+			     _ASM_EXTABLE(0b, 0b)
+#endif
+
+			     : "=r" (new)
+			     : "0" (c), "ir" (a));
+
+		old = atomic64_cmpxchg(v, c, new);
 		if (likely(old == c))
 			break;
 		c = old;
 	}
-	return c != (u);
+	return c != u;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff -ruNp linux-3.13.11/arch/x86/include/asm/bitops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/bitops.h
--- linux-3.13.11/arch/x86/include/asm/bitops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/bitops.h	2014-07-09 12:00:15.000000000 +0200
@@ -49,7 +49,7 @@
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)			(1 << ((nr) & 7))
 
 /**
@@ -205,7 +205,7 @@ static inline void change_bit(long nr, v
  */
 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+	GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(lon
  */
 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+	GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
 
 /**
@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(
  */
 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+	GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
 
 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
@@ -345,7 +345,7 @@ static int test_bit(int nr, const volati
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsign
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
 {
 	asm("rep; bsf %1,%0"
 		: "=r" (word)
@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
 {
 	asm("bsr %1,%0"
 	    : "=r" (word)
@@ -436,7 +436,7 @@ static inline int ffs(int x)
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static inline int fls(int x)
+static inline int __intentional_overflow(-1) fls(int x)
 {
 	int r;
 
@@ -478,7 +478,7 @@ static inline int fls(int x)
  * at position 64.
  */
 #ifdef CONFIG_X86_64
-static __always_inline int fls64(__u64 x)
+static __always_inline long fls64(__u64 x)
 {
 	int bitpos = -1;
 	/*
diff -ruNp linux-3.13.11/arch/x86/include/asm/boot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/boot.h
--- linux-3.13.11/arch/x86/include/asm/boot.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/boot.h	2014-07-09 12:00:15.000000000 +0200
@@ -6,10 +6,15 @@
 #include <uapi/asm/boot.h>
 
 /* Physical address where kernel should be loaded. */
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
 				+ (CONFIG_PHYSICAL_ALIGN - 1)) \
 				& ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifndef __ASSEMBLY__
+extern unsigned char __LOAD_PHYSICAL_ADDR[];
+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
+#endif
+
 /* Minimum kernel alignment, as a power of two */
 #ifdef CONFIG_X86_64
 #define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
diff -ruNp linux-3.13.11/arch/x86/include/asm/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cache.h
--- linux-3.13.11/arch/x86/include/asm/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,12 +5,13 @@
 
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__((__section__(".data..read_only")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
 
 #ifdef CONFIG_X86_VSMP
 #ifdef CONFIG_SMP
diff -ruNp linux-3.13.11/arch/x86/include/asm/cacheflush.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cacheflush.h
--- linux-3.13.11/arch/x86/include/asm/cacheflush.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cacheflush.h	2014-07-09 12:00:15.000000000 +0200
@@ -27,7 +27,7 @@ static inline unsigned long get_page_mem
 	unsigned long pg_flags = pg->flags & _PGMT_MASK;
 
 	if (pg_flags == _PGMT_DEFAULT)
-		return -1;
+		return ~0UL;
 	else if (pg_flags == _PGMT_WC)
 		return _PAGE_CACHE_WC;
 	else if (pg_flags == _PGMT_UC_MINUS)
diff -ruNp linux-3.13.11/arch/x86/include/asm/calling.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/calling.h
--- linux-3.13.11/arch/x86/include/asm/calling.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/calling.h	2014-07-09 12:00:15.000000000 +0200
@@ -82,103 +82,113 @@ For 32-bit we have the following convent
 #define RSP		152
 #define SS		160
 
-#define ARGOFFSET	R11
-#define SWFRAME		ORIG_RAX
+#define ARGOFFSET	R15
 
 	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
-	subq  $9*8+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
-	movq_cfi rdi, 8*8
-	movq_cfi rsi, 7*8
-	movq_cfi rdx, 6*8
+	subq  $ORIG_RAX-ARGOFFSET+\addskip, %rsp
+	CFI_ADJUST_CFA_OFFSET	ORIG_RAX-ARGOFFSET+\addskip
+	movq_cfi rdi, RDI
+	movq_cfi rsi, RSI
+	movq_cfi rdx, RDX
 
 	.if \save_rcx
-	movq_cfi rcx, 5*8
+	movq_cfi rcx, RCX
 	.endif
 
-	movq_cfi rax, 4*8
+	movq_cfi rax, RAX
 
 	.if \save_r891011
-	movq_cfi r8,  3*8
-	movq_cfi r9,  2*8
-	movq_cfi r10, 1*8
-	movq_cfi r11, 0*8
+	movq_cfi r8,  R8
+	movq_cfi r9,  R9
+	movq_cfi r10, R10
+	movq_cfi r11, R11
 	.endif
 
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	movq_cfi r12, R12
+#endif
+
 	.endm
 
-#define ARG_SKIP	(9*8)
+#define ARG_SKIP	ORIG_RAX
 
 	.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
 			    rstor_r8910=1, rstor_rdx=1
+
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	movq_cfi_restore R12, r12
+#endif
+
 	.if \rstor_r11
-	movq_cfi_restore 0*8, r11
+	movq_cfi_restore R11, r11
 	.endif
 
 	.if \rstor_r8910
-	movq_cfi_restore 1*8, r10
-	movq_cfi_restore 2*8, r9
-	movq_cfi_restore 3*8, r8
+	movq_cfi_restore R10, r10
+	movq_cfi_restore R9, r9
+	movq_cfi_restore R8, r8
 	.endif
 
 	.if \rstor_rax
-	movq_cfi_restore 4*8, rax
+	movq_cfi_restore RAX, rax
 	.endif
 
 	.if \rstor_rcx
-	movq_cfi_restore 5*8, rcx
+	movq_cfi_restore RCX, rcx
 	.endif
 
 	.if \rstor_rdx
-	movq_cfi_restore 6*8, rdx
+	movq_cfi_restore RDX, rdx
 	.endif
 
-	movq_cfi_restore 7*8, rsi
-	movq_cfi_restore 8*8, rdi
+	movq_cfi_restore RSI, rsi
+	movq_cfi_restore RDI, rdi
 
-	.if ARG_SKIP+\addskip > 0
-	addq $ARG_SKIP+\addskip, %rsp
-	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
+	.if ORIG_RAX+\addskip > 0
+	addq $ORIG_RAX+\addskip, %rsp
+	CFI_ADJUST_CFA_OFFSET	-(ORIG_RAX+\addskip)
 	.endif
 	.endm
 
-	.macro LOAD_ARGS offset, skiprax=0
-	movq \offset(%rsp),    %r11
-	movq \offset+8(%rsp),  %r10
-	movq \offset+16(%rsp), %r9
-	movq \offset+24(%rsp), %r8
-	movq \offset+40(%rsp), %rcx
-	movq \offset+48(%rsp), %rdx
-	movq \offset+56(%rsp), %rsi
-	movq \offset+64(%rsp), %rdi
+	.macro LOAD_ARGS skiprax=0
+	movq R11(%rsp),    %r11
+	movq R10(%rsp),  %r10
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RCX(%rsp), %rcx
+	movq RDX(%rsp), %rdx
+	movq RSI(%rsp), %rsi
+	movq RDI(%rsp), %rdi
 	.if \skiprax
 	.else
-	movq \offset+72(%rsp), %rax
+	movq RAX(%rsp), %rax
 	.endif
 	.endm
 
-#define REST_SKIP	(6*8)
-
 	.macro SAVE_REST
-	subq $REST_SKIP, %rsp
-	CFI_ADJUST_CFA_OFFSET	REST_SKIP
-	movq_cfi rbx, 5*8
-	movq_cfi rbp, 4*8
-	movq_cfi r12, 3*8
-	movq_cfi r13, 2*8
-	movq_cfi r14, 1*8
-	movq_cfi r15, 0*8
+	movq_cfi rbx, RBX
+	movq_cfi rbp, RBP
+
+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	movq_cfi r12, R12
+#endif
+
+	movq_cfi r13, R13
+	movq_cfi r14, R14
+	movq_cfi r15, R15
 	.endm
 
 	.macro RESTORE_REST
-	movq_cfi_restore 0*8, r15
-	movq_cfi_restore 1*8, r14
-	movq_cfi_restore 2*8, r13
-	movq_cfi_restore 3*8, r12
-	movq_cfi_restore 4*8, rbp
-	movq_cfi_restore 5*8, rbx
-	addq $REST_SKIP, %rsp
-	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
+	movq_cfi_restore R15, r15
+	movq_cfi_restore R14, r14
+	movq_cfi_restore R13, r13
+
+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	movq_cfi_restore R12, r12
+#endif
+
+	movq_cfi_restore RBP, rbp
+	movq_cfi_restore RBX, rbx
 	.endm
 
 	.macro SAVE_ALL
diff -ruNp linux-3.13.11/arch/x86/include/asm/checksum_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/checksum_32.h
--- linux-3.13.11/arch/x86/include/asm/checksum_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/checksum_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
 					    int len, __wsum sum,
 					    int *src_err_ptr, int *dst_err_ptr);
 
+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
+						  int len, __wsum sum,
+						  int *src_err_ptr, int *dst_err_ptr);
+
+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
+						  int len, __wsum sum,
+						  int *src_err_ptr, int *dst_err_ptr);
+
 /*
  *	Note: when you get a NULL pointer exception here this means someone
  *	passed in an incorrect kernel address to one of these functions.
@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_f
 
 	might_sleep();
 	stac();
-	ret = csum_partial_copy_generic((__force void *)src, dst,
+	ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
 					len, sum, err_ptr, NULL);
 	clac();
 
@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_us
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, dst, len)) {
 		stac();
-		ret = csum_partial_copy_generic(src, (__force void *)dst,
+		ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
 						len, sum, NULL, err_ptr);
 		clac();
 		return ret;
diff -ruNp linux-3.13.11/arch/x86/include/asm/cmpxchg.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cmpxchg.h
--- linux-3.13.11/arch/x86/include/asm/cmpxchg.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cmpxchg.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __xadd_check_overflow_wrong_size(void)
+	__compiletime_error("Bad argument size for xadd_check_overflow");
 extern void __add_wrong_size(void)
 	__compiletime_error("Bad argument size for add");
+extern void __add_check_overflow_wrong_size(void)
+	__compiletime_error("Bad argument size for add_check_overflow");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
 		__ret;							\
 	})
 
+#define __xchg_op_check_overflow(ptr, arg, op, lock)			\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      "jno 0f\n"			\
+				      "mov %0,%1\n"			\
+				      "int $4\n0:\n"			\
+				      _ASM_EXTABLE(0b, 0b)		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      "jno 0f\n"			\
+				      "mov %0,%1\n"			\
+				      "int $4\n0:\n"			\
+				      _ASM_EXTABLE(0b, 0b)		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _check_overflow_wrong_size();	\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
 #define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
 #define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
 
+#define __xadd_check_overflow(ptr, inc, lock)	__xchg_op_check_overflow((ptr), (inc), xadd, lock)
+#define xadd_check_overflow(ptr, inc)		__xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
+
 #define __add(ptr, inc, lock)						\
 	({								\
 	        __typeof__ (*(ptr)) __ret = (inc);			\
diff -ruNp linux-3.13.11/arch/x86/include/asm/compat.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/compat.h
--- linux-3.13.11/arch/x86/include/asm/compat.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/compat.h	2014-07-09 12:00:15.000000000 +0200
@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4)))
 typedef u32		compat_uint_t;
 typedef u32		compat_ulong_t;
 typedef u64 __attribute__((aligned(4))) compat_u64;
-typedef u32		compat_uptr_t;
+typedef u32		__user compat_uptr_t;
 
 struct compat_timespec {
 	compat_time_t	tv_sec;
diff -ruNp linux-3.13.11/arch/x86/include/asm/cpufeature.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cpufeature.h
--- linux-3.13.11/arch/x86/include/asm/cpufeature.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/cpufeature.h	2014-07-09 12:00:15.000000000 +0200
@@ -203,7 +203,7 @@
 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
-
+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@ -211,7 +211,7 @@
 #define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_SMEP	(9*32+ 7) /* Supervisor Mode Execution Prevention */
 #define X86_FEATURE_BMI2	(9*32+ 8) /* 2nd group bit manipulation extensions */
 #define X86_FEATURE_ERMS	(9*32+ 9) /* Enhanced REP MOVSB/STOSB */
 #define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
@@ -353,6 +353,7 @@ extern const char * const x86_power_flag
 #undef  cpu_has_centaur_mcr
 #define cpu_has_centaur_mcr	0
 
+#define cpu_has_pcid		boot_cpu_has(X86_FEATURE_PCID)
 #endif /* CONFIG_X86_64 */
 
 #if __GNUC__ >= 4
@@ -405,7 +406,8 @@ static __always_inline __pure bool __sta
 
 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
 	t_warn:
-		warn_pre_alternatives();
+		if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
+			warn_pre_alternatives();
 		return false;
 #endif
 
@@ -425,7 +427,7 @@ static __always_inline __pure bool __sta
 			     ".section .discard,\"aw\",@progbits\n"
 			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
 			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
+			     ".section .altinstr_replacement,\"a\"\n"
 			     "3: movb $1,%0\n"
 			     "4:\n"
 			     ".previous\n"
@@ -462,7 +464,7 @@ static __always_inline __pure bool _stat
 			 " .byte 2b - 1b\n"		/* src len */
 			 " .byte 4f - 3f\n"		/* repl len */
 			 ".previous\n"
-			 ".section .altinstr_replacement,\"ax\"\n"
+			 ".section .altinstr_replacement,\"a\"\n"
 			 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
 			 "4:\n"
 			 ".previous\n"
@@ -495,7 +497,7 @@ static __always_inline __pure bool _stat
 			     ".section .discard,\"aw\",@progbits\n"
 			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
 			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
+			     ".section .altinstr_replacement,\"a\"\n"
 			     "3: movb $0,%0\n"
 			     "4:\n"
 			     ".previous\n"
@@ -509,7 +511,7 @@ static __always_inline __pure bool _stat
 			     ".section .discard,\"aw\",@progbits\n"
 			     " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
 			     ".previous\n"
-			     ".section .altinstr_replacement,\"ax\"\n"
+			     ".section .altinstr_replacement,\"a\"\n"
 			     "5: movb $1,%0\n"
 			     "6:\n"
 			     ".previous\n"
diff -ruNp linux-3.13.11/arch/x86/include/asm/desc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/desc.h
--- linux-3.13.11/arch/x86/include/asm/desc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/desc.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,7 @@
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_
 
 	desc->type		= (info->read_exec_only ^ 1) << 1;
 	desc->type	       |= info->contents << 2;
+	desc->type	       |= info->seg_not_present ^ 1;
 
 	desc->s			= 1;
 	desc->dpl		= 0x3;
@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_
 }
 
 extern struct desc_ptr idt_descr;
-extern gate_desc idt_table[];
-extern struct desc_ptr debug_idt_descr;
-extern gate_desc debug_idt_table[];
-
-struct gdt_page {
-	struct desc_struct gdt[GDT_ENTRIES];
-} __attribute__((aligned(PAGE_SIZE)));
-
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
+extern gate_desc idt_table[IDT_ENTRIES];
+extern const struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[IDT_ENTRIES];
 
+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
-	return per_cpu(gdt_page, cpu).gdt;
+	return cpu_gdt_table[cpu];
 }
 
 #ifdef CONFIG_X86_64
@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *
 			     unsigned long base, unsigned dpl, unsigned flags,
 			     unsigned short seg)
 {
-	gate->a = (seg << 16) | (base & 0xffff);
-	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+	gate->gate.offset_low	= base;
+	gate->gate.seg		= seg;
+	gate->gate.reserved	= 0;
+	gate->gate.type		= type;
+	gate->gate.s		= 0;
+	gate->gate.dpl		= dpl;
+	gate->gate.p		= 1;
+	gate->gate.offset_high	= base >> 16;
 }
 
 #endif
@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(str
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
 {
+	pax_open_kernel();
 	memcpy(&idt[entry], gate, sizeof(*gate));
+	pax_close_kernel();
 }
 
 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
 {
+	pax_open_kernel();
 	memcpy(&ldt[entry], desc, 8);
+	pax_close_kernel();
 }
 
 static inline void
@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struc
 	default:	size = sizeof(*gdt);		break;
 	}
 
+	pax_open_kernel();
 	memcpy(&gdt[entry], desc, size);
+	pax_close_kernel();
 }
 
 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
@@ -210,7 +219,9 @@ static inline void native_set_ldt(const
 
 static inline void native_load_tr_desc(void)
 {
+	pax_open_kernel();
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+	pax_close_kernel();
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
@@ -247,8 +258,10 @@ static inline void native_load_tls(struc
 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 	unsigned int i;
 
+	pax_open_kernel();
 	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+	pax_close_kernel();
 }
 
 #define _LDT_empty(info)				\
@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t
 	preempt_enable();
 }
 
-static inline unsigned long get_desc_base(const struct desc_struct *desc)
+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
 {
 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
 }
@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct
 }
 
 #ifdef CONFIG_X86_64
-static inline void set_nmi_gate(int gate, void *addr)
+static inline void set_nmi_gate(int gate, const void *addr)
 {
 	gate_desc s;
 
@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate
 #endif
 
 #ifdef CONFIG_TRACING
-extern struct desc_ptr trace_idt_descr;
-extern gate_desc trace_idt_table[];
+extern const struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[IDT_ENTRIES];
 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
 {
 	write_idt_entry(trace_idt_table, entry, gate);
 }
 
-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
 				   unsigned dpl, unsigned ist, unsigned seg)
 {
 	gate_desc s;
@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry
 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
 #endif
 
-static inline void _set_gate(int gate, unsigned type, void *addr,
+static inline void _set_gate(int gate, unsigned type, const void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
 	gate_desc s;
@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, u
 #define set_intr_gate(n, addr)						\
 	do {								\
 		BUG_ON((unsigned)n > 0xFF);				\
-		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
+		_set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0,	\
 			  __KERNEL_CS);					\
-		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+		_trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
 				0, 0, __KERNEL_CS);			\
 	} while (0)
 
@@ -401,19 +414,19 @@ static inline void alloc_system_vector(i
 /*
  * This routine sets up an interrupt gate at directory privilege level 3.
  */
-static inline void set_system_intr_gate(unsigned int n, void *addr)
+static inline void set_system_intr_gate(unsigned int n, const void *addr)
 {
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
 }
 
-static inline void set_system_trap_gate(unsigned int n, void *addr)
+static inline void set_system_trap_gate(unsigned int n, const void *addr)
 {
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
 }
 
-static inline void set_trap_gate(unsigned int n, void *addr)
+static inline void set_trap_gate(unsigned int n, const void *addr)
 {
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigne
 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
 {
 	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
+	_set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
 }
 
-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
 {
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
 }
 
-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
 {
 	BUG_ON((unsigned)n > 0xFF);
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
@@ -503,4 +516,17 @@ static inline void load_current_idt(void
 	else
 		load_idt((const struct desc_ptr *)&idt_descr);
 }
+
+#ifdef CONFIG_X86_32
+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
+{
+	struct desc_struct d;
+
+	if (likely(limit))
+		limit = (limit - 1UL) >> PAGE_SHIFT;
+	pack_descriptor(&d, base, limit, 0xFB, 0xC);
+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
+}
+#endif
+
 #endif /* _ASM_X86_DESC_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/desc_defs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/desc_defs.h
--- linux-3.13.11/arch/x86/include/asm/desc_defs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/desc_defs.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,12 @@ struct desc_struct {
 			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
 			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
 		};
+		struct {
+			u16 offset_low;
+			u16 seg;
+			unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
+			unsigned offset_high: 16;
+		} gate;
 	};
 } __attribute__((packed));
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/div64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/div64.h
--- linux-3.13.11/arch/x86/include/asm/div64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/div64.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,7 +39,7 @@
 	__mod;							\
 })
 
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	union {
 		u64 v64;
diff -ruNp linux-3.13.11/arch/x86/include/asm/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/elf.h
--- linux-3.13.11/arch/x86/include/asm/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -243,7 +243,25 @@ extern int force_personality32;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
+#ifdef CONFIG_PAX_SEGMEXEC
+#define ELF_ET_DYN_BASE		((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
+#else
 #define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+#ifdef CONFIG_X86_32
+#define PAX_ELF_ET_DYN_BASE	0x10000000UL
+
+#define PAX_DELTA_MMAP_LEN	(current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
+#define PAX_DELTA_STACK_LEN	(current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
+#else
+#define PAX_ELF_ET_DYN_BASE	0x400000UL
+
+#define PAX_DELTA_MMAP_LEN	((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
+#define PAX_DELTA_STACK_LEN	((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
+#endif
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
@@ -296,16 +314,12 @@ do {									\
 
 #define ARCH_DLINFO							\
 do {									\
-	if (vdso_enabled)						\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
-			    (unsigned long)current->mm->context.vdso);	\
+	NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);	\
 } while (0)
 
 #define ARCH_DLINFO_X32							\
 do {									\
-	if (vdso_enabled)						\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
-			    (unsigned long)current->mm->context.vdso);	\
+	NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);	\
 } while (0)
 
 #define AT_SYSINFO		32
@@ -320,7 +334,7 @@ else									\
 
 #endif /* !CONFIG_X86_32 */
 
-#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
+#define VDSO_CURRENT_BASE	(current->mm->context.vdso)
 
 #define VDSO_ENTRY							\
 	((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(st
 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
 #define compat_arch_setup_additional_pages	syscall32_setup_pages
 
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 /*
  * True on X86_32 or when emulating IA32 on X86_64
  */
diff -ruNp linux-3.13.11/arch/x86/include/asm/emergency-restart.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/emergency-restart.h
--- linux-3.13.11/arch/x86/include/asm/emergency-restart.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/emergency-restart.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,6 @@
 #ifndef _ASM_X86_EMERGENCY_RESTART_H
 #define _ASM_X86_EMERGENCY_RESTART_H
 
-extern void machine_emergency_restart(void);
+extern void machine_emergency_restart(void) __noreturn;
 
 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/floppy.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/floppy.h
--- linux-3.13.11/arch/x86/include/asm/floppy.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/floppy.h	2014-07-09 12:00:15.000000000 +0200
@@ -229,18 +229,18 @@ static struct fd_routine_l {
 	int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
 } fd_routine[] = {
 	{
-		request_dma,
-		free_dma,
-		get_dma_residue,
-		dma_mem_alloc,
-		hard_dma_setup
+		._request_dma = request_dma,
+		._free_dma = free_dma,
+		._get_dma_residue = get_dma_residue,
+		._dma_mem_alloc = dma_mem_alloc,
+		._dma_setup = hard_dma_setup
 	},
 	{
-		vdma_request_dma,
-		vdma_nop,
-		vdma_get_dma_residue,
-		vdma_mem_alloc,
-		vdma_dma_setup
+		._request_dma = vdma_request_dma,
+		._free_dma = vdma_nop,
+		._get_dma_residue = vdma_get_dma_residue,
+		._dma_mem_alloc = vdma_mem_alloc,
+		._dma_setup = vdma_dma_setup
 	}
 };
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/fpu-internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/fpu-internal.h
--- linux-3.13.11/arch/x86/include/asm/fpu-internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/fpu-internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(s
 #define user_insn(insn, output, input...)				\
 ({									\
 	int err;							\
+	pax_open_userland();						\
 	asm volatile(ASM_STAC "\n"					\
-		     "1:" #insn "\n\t"					\
+		     "1:"						\
+		     __copyuser_seg					\
+		     #insn "\n\t"					\
 		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:  movl $-1,%[err]\n"				\
@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(s
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : [err] "=r" (err), output				\
 		     : "0"(0), input);					\
+	pax_close_userland();						\
 	err;								\
 })
 
@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(s
 			"fnclex\n\t"
 			"emms\n\t"
 			"fildl %P[addr]"	/* set F?P to defined value */
-			: : [addr] "m" (tsk->thread.fpu.has_fpu));
+			: : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
 	}
 
 	return fpu_restore_checking(&tsk->thread.fpu);
diff -ruNp linux-3.13.11/arch/x86/include/asm/futex.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/futex.h
--- linux-3.13.11/arch/x86/include/asm/futex.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/futex.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,7 @@
 #include <asm/smap.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
+	typecheck(u32 __user *, uaddr);				\
 	asm volatile("\t" ASM_STAC "\n"				\
 		     "1:\t" insn "\n"				\
 		     "2:\t" ASM_CLAC "\n"			\
@@ -20,15 +21,16 @@
 		     "\tjmp\t2b\n"				\
 		     "\t.previous\n"				\
 		     _ASM_EXTABLE(1b, 3b)			\
-		     : "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
+		     : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))	\
 		     : "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
+	typecheck(u32 __user *, uaddr);				\
 	asm volatile("\t" ASM_STAC "\n"				\
 		     "1:\tmovl	%2, %0\n"			\
 		     "\tmovl\t%0, %3\n"				\
 		     "\t" insn "\n"				\
-		     "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"	\
+		     "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n"	\
 		     "\tjnz\t1b\n"				\
 		     "3:\t" ASM_CLAC "\n"			\
 		     "\t.section .fixup,\"ax\"\n"		\
@@ -38,7 +40,7 @@
 		     _ASM_EXTABLE(1b, 4b)			\
 		     _ASM_EXTABLE(2b, 4b)			\
 		     : "=&a" (oldval), "=&r" (ret),		\
-		       "+m" (*uaddr), "=&r" (tem)		\
+		       "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem)	\
 		     : "r" (oparg), "i" (-EFAULT), "1" (0))
 
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser
 
 	pagefault_disable();
 
+	pax_open_userland();
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+		__futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+		__futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
 				   uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser
 	default:
 		ret = -ENOSYS;
 	}
+	pax_close_userland();
 
 	pagefault_enable();
 
@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_i
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	pax_open_userland();
 	asm volatile("\t" ASM_STAC "\n"
-		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
 		     "2:\t" ASM_CLAC "\n"
 		     "\t.section .fixup, \"ax\"\n"
 		     "3:\tmov     %3, %0\n"
 		     "\tjmp     2b\n"
 		     "\t.previous\n"
 		     _ASM_EXTABLE(1b, 3b)
-		     : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		     : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
 		     : "i" (-EFAULT), "r" (newval), "1" (oldval)
 		     : "memory"
 	);
+	pax_close_userland();
 
 	*uval = oldval;
 	return ret;
diff -ruNp linux-3.13.11/arch/x86/include/asm/hw_irq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/hw_irq.h
--- linux-3.13.11/arch/x86/include/asm/hw_irq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/hw_irq.h	2014-07-09 12:00:15.000000000 +0200
@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
 extern void enable_IO_APIC(void);
 
 /* Statistics */
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
+extern atomic_unchecked_t irq_err_count;
+extern atomic_unchecked_t irq_mis_count;
 
 /* EISA */
 extern void eisa_set_level_irq(unsigned int irq);
diff -ruNp linux-3.13.11/arch/x86/include/asm/i8259.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/i8259.h
--- linux-3.13.11/arch/x86/include/asm/i8259.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/i8259.h	2014-07-09 12:00:15.000000000 +0200
@@ -62,7 +62,7 @@ struct legacy_pic {
 	void (*init)(int auto_eoi);
 	int (*irq_pending)(unsigned int irq);
 	void (*make_irq)(unsigned int irq);
-};
+} __do_const;
 
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
diff -ruNp linux-3.13.11/arch/x86/include/asm/io.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/io.h
--- linux-3.13.11/arch/x86/include/asm/io.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/io.h	2014-07-09 12:00:15.000000000 +0200
@@ -51,12 +51,12 @@ static inline void name(type val, volati
 "m" (*(volatile type __force *)addr) barrier); }
 
 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
 
 build_mmio_read(__readb, "b", unsigned char, "=q", )
-build_mmio_read(__readw, "w", unsigned short, "=r", )
-build_mmio_read(__readl, "l", unsigned int, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
 
 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(reso
 	return ioremap_nocache(offset, size);
 }
 
-extern void iounmap(volatile void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
 
 extern void set_iounmap_nonlazy(void);
 
@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
 
 #include <linux/vmalloc.h>
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
+{
+	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+	return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
+}
+
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
diff -ruNp linux-3.13.11/arch/x86/include/asm/irqflags.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/irqflags.h
--- linux-3.13.11/arch/x86/include/asm/irqflags.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/irqflags.h	2014-07-09 12:00:15.000000000 +0200
@@ -141,6 +141,11 @@ static inline notrace unsigned long arch
 	sti;					\
 	sysexit
 
+#define GET_CR0_INTO_RDI		mov %cr0, %rdi
+#define SET_RDI_INTO_CR0		mov %rdi, %cr0
+#define GET_CR3_INTO_RDI		mov %cr3, %rdi
+#define SET_RDI_INTO_CR3		mov %rdi, %cr3
+
 #else
 #define INTERRUPT_RETURN		iret
 #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
diff -ruNp linux-3.13.11/arch/x86/include/asm/kprobes.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/kprobes.h
--- linux-3.13.11/arch/x86/include/asm/kprobes.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/kprobes.h	2014-07-09 12:00:15.000000000 +0200
@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
-#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)					       \
-	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-			      THREAD_SIZE - (unsigned long)(ADDR)))    \
-	 ? (MAX_STACK_SIZE)					       \
-	 : (((unsigned long)current_thread_info()) +		       \
-	    THREAD_SIZE - (unsigned long)(ADDR)))
+#define MAX_STACK_SIZE 64UL
+#define MIN_STACK_SIZE(ADDR)	min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
 
 #define flush_insn_slot(p)	do { } while (0)
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/local.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/local.h
--- linux-3.13.11/arch/x86/include/asm/local.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/local.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,33 +10,97 @@ typedef struct {
 	atomic_long_t a;
 } local_t;
 
+typedef struct {
+	atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)	atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)	atomic_long_read_unchecked(&(l)->a)
 #define local_set(l, i)	atomic_long_set(&(l)->a, (i))
+#define local_set_unchecked(l, i)	atomic_long_set_unchecked(&(l)->a, (i))
 
 static inline void local_inc(local_t *l)
 {
-	asm volatile(_ASM_INC "%0"
+	asm volatile(_ASM_INC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     _ASM_DEC "%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (l->a.counter));
+}
+
+static inline void local_inc_unchecked(local_unchecked_t *l)
+{
+	asm volatile(_ASM_INC "%0\n"
 		     : "+m" (l->a.counter));
 }
 
 static inline void local_dec(local_t *l)
 {
-	asm volatile(_ASM_DEC "%0"
+	asm volatile(_ASM_DEC "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     _ASM_INC "%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (l->a.counter));
+}
+
+static inline void local_dec_unchecked(local_unchecked_t *l)
+{
+	asm volatile(_ASM_DEC "%0\n"
 		     : "+m" (l->a.counter));
 }
 
 static inline void local_add(long i, local_t *l)
 {
-	asm volatile(_ASM_ADD "%1,%0"
+	asm volatile(_ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     _ASM_SUB "%1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
+}
+
+static inline void local_add_unchecked(long i, local_unchecked_t *l)
+{
+	asm volatile(_ASM_ADD "%1,%0\n"
 		     : "+m" (l->a.counter)
 		     : "ir" (i));
 }
 
 static inline void local_sub(long i, local_t *l)
 {
-	asm volatile(_ASM_SUB "%1,%0"
+	asm volatile(_ASM_SUB "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     _ASM_ADD "%1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+m" (l->a.counter)
+		     : "ir" (i));
+}
+
+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
+{
+	asm volatile(_ASM_SUB "%1,%0\n"
 		     : "+m" (l->a.counter)
 		     : "ir" (i));
 }
@@ -52,7 +116,7 @@ static inline void local_sub(long i, loc
  */
 static inline int local_sub_and_test(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+	GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
 }
 
 /**
@@ -65,7 +129,7 @@ static inline int local_sub_and_test(lon
  */
 static inline int local_dec_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+	GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -78,7 +142,7 @@ static inline int local_dec_and_test(loc
  */
 static inline int local_inc_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+	GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
 }
 
 /**
@@ -92,7 +156,7 @@ static inline int local_inc_and_test(loc
  */
 static inline int local_add_negative(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+	GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
 }
 
 /**
@@ -105,6 +169,30 @@ static inline int local_add_negative(lon
 static inline long local_add_return(long i, local_t *l)
 {
 	long __i = i;
+	asm volatile(_ASM_XADD "%0, %1\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     _ASM_MOV "%0,%1\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
+		     : "+r" (i), "+m" (l->a.counter)
+		     : : "memory");
+	return i + __i;
+}
+
+/**
+ * local_add_return_unchecked - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_unchecked_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
+{
+	long __i = i;
 	asm volatile(_ASM_XADD "%0, %1;"
 		     : "+r" (i), "+m" (l->a.counter)
 		     : : "memory");
@@ -121,6 +209,8 @@ static inline long local_sub_return(long
 
 #define local_cmpxchg(l, o, n) \
 	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_cmpxchg_unchecked(l, o, n) \
+	(cmpxchg_local(&((l)->a.counter), (o), (n)))
 /* Always has a lock prefix */
 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/mman.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mman.h
--- linux-3.13.11/arch/x86/include/asm/mman.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mman.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,15 @@
+#ifndef _X86_MMAN_H
+#define _X86_MMAN_H
+
+#include <uapi/asm/mman.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_32
+#define arch_mmap_check	i386_mmap_check
+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
+#endif
+#endif
+#endif
+
+#endif /* X86_MMAN_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/mmu.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mmu.h
--- linux-3.13.11/arch/x86/include/asm/mmu.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mmu.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,7 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
+	struct desc_struct *ldt;
 	int size;
 
 #ifdef CONFIG_X86_64
@@ -18,7 +18,19 @@ typedef struct {
 #endif
 
 	struct mutex lock;
-	void *vdso;
+	unsigned long vdso;
+
+#ifdef CONFIG_X86_32
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+	unsigned long user_cs_base;
+	unsigned long user_cs_limit;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+	cpumask_t cpu_user_cs_mask;
+#endif
+
+#endif
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_SMP
diff -ruNp linux-3.13.11/arch/x86/include/asm/mmu_context.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mmu_context.h
--- linux-3.13.11/arch/x86/include/asm/mmu_context.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/mmu_context.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *m
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	if (!(static_cpu_has(X86_FEATURE_PCID))) {
+		unsigned int i;
+		pgd_t *pgd;
+
+		pax_open_kernel();
+		pgd = get_cpu_pgd(smp_processor_id(), kernel);
+		for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
+			set_pgd_batched(pgd+i, native_make_pgd(0));
+		pax_close_kernel();
+	}
+#endif
+
 #ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_s
 			     struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+	int tlbstate = TLBSTATE_OK;
+#endif
 
 	if (likely(prev != next)) {
 #ifdef CONFIG_SMP
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+		tlbstate = this_cpu_read(cpu_tlbstate.state);
+#endif
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		this_cpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		if (static_cpu_has(X86_FEATURE_PCID))
+			__clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+		else
+#endif
+
+		__clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
+		__shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
+		pax_close_kernel();
+		BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		if (static_cpu_has(X86_FEATURE_PCID)) {
+			if (static_cpu_has(X86_FEATURE_INVPCID)) {
+				u64 descriptor[2];
+				descriptor[0] = PCID_USER;
+				asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+				if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
+					descriptor[0] = PCID_KERNEL;
+					asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+				}
+			} else {
+				write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+				if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
+					write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+				else
+					write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+			}
+		} else
+#endif
+
+			load_cr3(get_cpu_pgd(cpu, kernel));
+#else
 		load_cr3(next->pgd);
+#endif
 
 		/* Stop flush ipis for the previous mm */
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_s
 		/* Load the LDT, if the LDT is different: */
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_LDT_nolock(&next->context);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+		if (!(__supported_pte_mask & _PAGE_NX)) {
+			smp_mb__before_clear_bit();
+			cpu_clear(cpu, prev->context.cpu_user_cs_mask);
+			smp_mb__after_clear_bit();
+			cpu_set(cpu, next->context.cpu_user_cs_mask);
+		}
+#endif
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+		if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
+			     prev->context.user_cs_limit != next->context.user_cs_limit))
+			set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#ifdef CONFIG_SMP
+		else if (unlikely(tlbstate != TLBSTATE_OK))
+			set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#endif
+#endif
+
 	}
+	else {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		if (static_cpu_has(X86_FEATURE_PCID))
+			__clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+		else
+#endif
+
+		__clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
+		__shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
+		pax_close_kernel();
+		BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		if (static_cpu_has(X86_FEATURE_PCID)) {
+			if (static_cpu_has(X86_FEATURE_INVPCID)) {
+				u64 descriptor[2];
+				descriptor[0] = PCID_USER;
+				asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+				if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
+					descriptor[0] = PCID_KERNEL;
+					asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
+				}
+			} else {
+				write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+				if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
+					write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+				else
+					write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+			}
+		} else
+#endif
+
+			load_cr3(get_cpu_pgd(cpu, kernel));
+#endif
+
 #ifdef CONFIG_SMP
-	  else {
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_s
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 */
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
 			load_cr3(next->pgd);
+#endif
+
 			load_LDT_nolock(&next->context);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+			if (!(__supported_pte_mask & _PAGE_NX))
+				cpu_set(cpu, next->context.cpu_user_cs_mask);
+#endif
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
+#ifdef CONFIG_PAX_PAGEEXEC
+			if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
+#endif
+				set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+#endif
+
 		}
-	}
 #endif
+	}
 }
 
 #define activate_mm(prev, next)			\
diff -ruNp linux-3.13.11/arch/x86/include/asm/module.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/module.h
--- linux-3.13.11/arch/x86/include/asm/module.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/module.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,6 +5,7 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
+#define MODULE_PROC_FAMILY ""
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
@@ -57,8 +58,20 @@
 #error unknown processor family
 #endif
 
-#ifdef CONFIG_X86_32
-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
+#else
+#define MODULE_PAX_KERNEXEC ""
 #endif
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define MODULE_PAX_UDEREF "UDEREF "
+#else
+#define MODULE_PAX_UDEREF ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
+
 #endif /* _ASM_X86_MODULE_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/nmi.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/nmi.h
--- linux-3.13.11/arch/x86/include/asm/nmi.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/nmi.h	2014-07-09 12:00:15.000000000 +0200
@@ -40,11 +40,11 @@ struct nmiaction {
 	nmi_handler_t		handler;
 	unsigned long		flags;
 	const char		*name;
-};
+} __do_const;
 
 #define register_nmi_handler(t, fn, fg, n, init...)	\
 ({							\
-	static struct nmiaction init fn##_na = {	\
+	static const struct nmiaction init fn##_na = {	\
 		.handler = (fn),			\
 		.name = (n),				\
 		.flags = (fg),				\
@@ -52,7 +52,7 @@ struct nmiaction {
 	__register_nmi_handler((t), &fn##_na);		\
 })
 
-int __register_nmi_handler(unsigned int, struct nmiaction *);
+int __register_nmi_handler(unsigned int, const struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/page.h
--- linux-3.13.11/arch/x86/include/asm/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/page.h	2014-07-09 12:00:15.000000000 +0200
@@ -52,6 +52,7 @@ static inline void copy_user_page(void *
 	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define __early_va(x)		((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
 
 #define __boot_va(x)		__va(x)
 #define __boot_pa(x)		__pa(x)
diff -ruNp linux-3.13.11/arch/x86/include/asm/page_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/page_64.h
--- linux-3.13.11/arch/x86/include/asm/page_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/page_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,9 +7,9 @@
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
-extern unsigned long phys_base;
+extern const unsigned long phys_base;
 
-static inline unsigned long __phys_addr_nodebug(unsigned long x)
+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
 {
 	unsigned long y = x - __START_KERNEL_map;
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/paravirt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/paravirt.h
--- linux-3.13.11/arch/x86/include/asm/paravirt.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/paravirt.h	2014-07-09 12:00:15.000000000 +0200
@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
 	return (pmd_t) { ret };
 }
 
-static inline pmdval_t pmd_val(pmd_t pmd)
+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
 {
 	pmdval_t ret;
 
@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp,
 			    val);
 }
 
+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
+{
+	pgdval_t val = native_pgd_val(pgd);
+
+	if (sizeof(pgdval_t) > sizeof(long))
+		PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
+			    val, (u64)val >> 32);
+	else
+		PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
+			    val);
+}
+
 static inline void pgd_clear(pgd_t *pgdp)
 {
 	set_pgd(pgdp, __pgd(0));
@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
 
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long pax_open_kernel(void)
+{
+	return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
+}
+
+static inline unsigned long pax_close_kernel(void)
+{
+	return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
+}
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
@@ -906,7 +933,7 @@ extern void default_banner(void);
 
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
-#define PARA_INDIRECT(addr)	*%cs:addr
+#define PARA_INDIRECT(addr)	*%ss:addr
 #endif
 
 #define INTERRUPT_RETURN						\
@@ -981,6 +1008,21 @@ extern void default_banner(void);
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
 		  CLBR_NONE,						\
 		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+
+#define GET_CR0_INTO_RDI				\
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
+	mov %rax,%rdi
+
+#define SET_RDI_INTO_CR0				\
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
+
+#define GET_CR3_INTO_RDI				\
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3);	\
+	mov %rax,%rdi
+
+#define SET_RDI_INTO_CR3				\
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
+
 #endif	/* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
diff -ruNp linux-3.13.11/arch/x86/include/asm/paravirt_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/paravirt_types.h
--- linux-3.13.11/arch/x86/include/asm/paravirt_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/paravirt_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ struct pv_init_ops {
 	 */
 	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
 			  unsigned long addr, unsigned len);
-};
+} __no_const __no_randomize_layout;
 
 
 struct pv_lazy_ops {
@@ -92,13 +92,13 @@ struct pv_lazy_ops {
 	void (*enter)(void);
 	void (*leave)(void);
 	void (*flush)(void);
-};
+} __no_randomize_layout;
 
 struct pv_time_ops {
 	unsigned long long (*sched_clock)(void);
 	unsigned long long (*steal_clock)(int cpu);
 	unsigned long (*get_tsc_khz)(void);
-};
+} __no_const __no_randomize_layout;
 
 struct pv_cpu_ops {
 	/* hooks for various privileged instructions */
@@ -192,7 +192,7 @@ struct pv_cpu_ops {
 
 	void (*start_context_switch)(struct task_struct *prev);
 	void (*end_context_switch)(struct task_struct *next);
-};
+} __no_const __no_randomize_layout;
 
 struct pv_irq_ops {
 	/*
@@ -215,7 +215,7 @@ struct pv_irq_ops {
 #ifdef CONFIG_X86_64
 	void (*adjust_exception_frame)(void);
 #endif
-};
+} __no_randomize_layout;
 
 struct pv_apic_ops {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -223,7 +223,7 @@ struct pv_apic_ops {
 				 unsigned long start_eip,
 				 unsigned long start_esp);
 #endif
-};
+} __no_const __no_randomize_layout;
 
 struct pv_mmu_ops {
 	unsigned long (*read_cr2)(void);
@@ -313,6 +313,7 @@ struct pv_mmu_ops {
 	struct paravirt_callee_save make_pud;
 
 	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+	void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
 #endif	/* PAGETABLE_LEVELS == 4 */
 #endif	/* PAGETABLE_LEVELS >= 3 */
 
@@ -324,7 +325,13 @@ struct pv_mmu_ops {
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
 			   phys_addr_t phys, pgprot_t flags);
-};
+
+#ifdef CONFIG_PAX_KERNEXEC
+	unsigned long (*pax_open_kernel)(void);
+	unsigned long (*pax_close_kernel)(void);
+#endif
+
+} __no_randomize_layout;
 
 struct arch_spinlock;
 #ifdef CONFIG_SMP
@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
 struct pv_lock_ops {
 	struct paravirt_callee_save lock_spinning;
 	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-};
+} __no_randomize_layout;
 
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
- * what to patch. */
+ * what to patch.
+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
+ */
+
 struct paravirt_patch_template {
 	struct pv_init_ops pv_init_ops;
 	struct pv_time_ops pv_time_ops;
@@ -349,7 +359,7 @@ struct paravirt_patch_template {
 	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
 	struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
 
 extern struct pv_info pv_info;
 extern struct pv_init_ops pv_init_ops;
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgalloc.h
--- linux-3.13.11/arch/x86/include/asm/pgalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
 				       pmd_t *pmd, pte_t *pte)
 {
 	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+}
+
+static inline void pmd_populate_user(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
+{
+	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
 	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 }
 
@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct
 
 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+{
+	pud_populate(mm, pudp, pmd);
+}
 #else	/* !CONFIG_X86_PAE */
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
 	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
 }
+
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+	set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
+}
 #endif	/* CONFIG_X86_PAE */
 
 #if PAGETABLE_LEVELS > 3
@@ -123,6 +140,12 @@ static inline void pgd_populate(struct m
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
 }
 
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+	set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
+}
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable-2level.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable-2level.h
--- linux-3.13.11/arch/x86/include/asm/pgtable-2level.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable-2level.h	2014-07-09 12:00:15.000000000 +0200
@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+	pax_open_kernel();
 	*pmdp = pmd;
+	pax_close_kernel();
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable-3level.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable-3level.h
--- linux-3.13.11/arch/x86/include/asm/pgtable-3level.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable-3level.h	2014-07-09 12:00:15.000000000 +0200
@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+	pax_open_kernel();
 	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
+	pax_close_kernel();
 }
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
+	pax_open_kernel();
 	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
+	pax_close_kernel();
 }
 
 /*
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable.h
--- linux-3.13.11/arch/x86/include/asm/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm
 
 #ifndef __PAGETABLE_PUD_FOLDED
 #define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
+#define set_pgd_batched(pgdp, pgd)	native_set_pgd_batched(pgdp, pgd)
 #define pgd_clear(pgd)			native_pgd_clear(pgd)
 #endif
 
@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm
 
 #define arch_end_context_switch(prev)	do {} while(0)
 
+#define pax_open_kernel()	native_pax_open_kernel()
+#define pax_close_kernel()	native_pax_close_kernel()
 #endif	/* CONFIG_PARAVIRT */
 
+#define  __HAVE_ARCH_PAX_OPEN_KERNEL
+#define  __HAVE_ARCH_PAX_CLOSE_KERNEL
+
+#ifdef CONFIG_PAX_KERNEXEC
+static inline unsigned long native_pax_open_kernel(void)
+{
+	unsigned long cr0;
+
+	preempt_disable();
+	barrier();
+	cr0 = read_cr0() ^ X86_CR0_WP;
+	BUG_ON(cr0 & X86_CR0_WP);
+	write_cr0(cr0);
+	return cr0 ^ X86_CR0_WP;
+}
+
+static inline unsigned long native_pax_close_kernel(void)
+{
+	unsigned long cr0;
+
+	cr0 = read_cr0() ^ X86_CR0_WP;
+	BUG_ON(!(cr0 & X86_CR0_WP));
+	write_cr0(cr0);
+	barrier();
+	preempt_enable_no_resched();
+	return cr0 ^ X86_CR0_WP;
+}
+#else
+static inline unsigned long native_pax_open_kernel(void) { return 0; }
+static inline unsigned long native_pax_close_kernel(void) { return 0; }
+#endif
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+static inline int pte_user(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_USER;
+}
+
 static inline int pte_dirty(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_DIRTY;
@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_
 	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t
 	return pte_clear_flags(pte, _PAGE_RW);
 }
 
+static inline pte_t pte_mkread(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_USER);
+}
+
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return pte_clear_flags(pte, _PAGE_NX);
+#ifdef CONFIG_X86_PAE
+	if (__supported_pte_mask & _PAGE_NX)
+		return pte_clear_flags(pte, _PAGE_NX);
+	else
+#endif
+		return pte_set_flags(pte, _PAGE_USER);
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+#ifdef CONFIG_X86_PAE
+	if (__supported_pte_mask & _PAGE_NX)
+		return pte_set_flags(pte, _PAGE_NX);
+	else
+#endif
+		return pte_clear_flags(pte, _PAGE_USER);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long
 #endif
 
 #ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
+enum cpu_pgd_type {kernel = 0, user = 1};
+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
+{
+	return cpu_pgd[cpu][type];
+}
+#endif
+
 #include <linux/mm_types.h>
 #include <linux/mmdebug.h>
 #include <linux/log2.h>
@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vad
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+#define pud_page(pud)		pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vad
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd)		pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory. */
 static inline unsigned long pud_index(unsigned long address)
@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *p
 
 static inline int pgd_bad(pgd_t pgd)
 {
-	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+	return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
+#endif
+
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
 #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
 #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 
+#ifdef CONFIG_X86_32
+#define USER_PGD_PTRS		KERNEL_PGD_BOUNDARY
+#else
+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
+#define USER_PGD_PTRS		(_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#ifdef __ASSEMBLY__
+#define pax_user_shadow_base	pax_user_shadow_base(%rip)
+#else
+extern unsigned long pax_user_shadow_base;
+extern pgdval_t clone_pgd_mask;
+#endif
+#endif
+
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern int direct_gbpages;
@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(st
  * dst and src can be on the same page, but the range must not overlap,
  * and must not cross a page boundary.
  */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
 {
-       memcpy(dst, src, count * sizeof(pgd_t));
+	pax_open_kernel();
+	while (count--)
+		*dst++ = *src++;
+	pax_close_kernel();
 }
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
+#else
+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
+#endif
+
 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
 static inline int page_level_shift(enum pg_level level)
 {
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_32.h
--- linux-3.13.11/arch/x86/include/asm/pgtable_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,9 +25,6 @@
 struct mm_struct;
 struct vm_area_struct;
 
-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t initial_page_table[1024];
-
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
 # include <asm/pgtable-2level.h>
 #endif
 
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t initial_page_table[PTRS_PER_PGD];
+#ifdef CONFIG_X86_PAE
+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
+#endif
+
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)					\
 	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, u
 /* Clear a kernel PTE and flush it from the TLB */
 #define kpte_clear_flush(ptep, vaddr)		\
 do {						\
+	pax_open_kernel();			\
 	pte_clear(&init_mm, (vaddr), (ptep));	\
+	pax_close_kernel();			\
 	__flush_tlb_one((vaddr));		\
 } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 /*
  * kern_addr_valid() is (1) for FLATMEM and (0) for
  * SPARSEMEM and DISCONTIGMEM
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable_32_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_32_types.h
--- linux-3.13.11/arch/x86/include/asm/pgtable_32_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_32_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -8,7 +8,7 @@
  */
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level_types.h>
-# define PMD_SIZE	(1UL << PMD_SHIFT)
+# define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
 # define PMD_MASK	(~(PMD_SIZE - 1))
 #else
 # include <asm/pgtable-2level_types.h>
@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
 # define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#ifdef CONFIG_PAX_KERNEXEC
+#ifndef __ASSEMBLY__
+extern unsigned char MODULES_EXEC_VADDR[];
+extern unsigned char MODULES_EXEC_END[];
+#endif
+#include <asm/boot.h>
+#define ktla_ktva(addr)		(addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
+#define ktva_ktla(addr)		(addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
+#else
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
+#endif
+
 #define MODULES_VADDR	VMALLOC_START
 #define MODULES_END	VMALLOC_END
 #define MODULES_LEN	(MODULES_VADDR - MODULES_END)
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_64.h
--- linux-3.13.11/arch/x86/include/asm/pgtable_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,10 +16,14 @@
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
+extern pud_t level3_vmalloc_start_pgt[512];
+extern pud_t level3_vmalloc_end_pgt[512];
+extern pud_t level3_vmemmap_pgt[512];
+extern pud_t level2_vmemmap_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
-extern pmd_t level2_ident_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[512*2];
+extern pgd_t init_level4_pgt[512];
 
 #define swapper_pg_dir init_level4_pgt
 
@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
+	pax_open_kernel();
 	*pmdp = pmd;
+	pax_close_kernel();
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
+	pax_open_kernel();
 	*pudp = pud;
+	pax_close_kernel();
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
+	pax_open_kernel();
+	*pgdp = pgd;
+	pax_close_kernel();
+}
+
+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
+{
 	*pgdp = pgd;
 }
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable_64_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_64_types.h
--- linux-3.13.11/arch/x86/include/asm/pgtable_64_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_64_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
 #define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
 #define MODULES_END      _AC(0xffffffffff000000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+#define MODULES_EXEC_VADDR MODULES_VADDR
+#define MODULES_EXEC_END MODULES_END
+
+#define ktla_ktva(addr)		(addr)
+#define ktva_ktla(addr)		(addr)
 
 #define EARLY_DYNAMIC_PAGE_TABLES	64
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/pgtable_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_types.h
--- linux-3.13.11/arch/x86/include/asm/pgtable_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/pgtable_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,13 +16,12 @@
 #define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
 #define _PAGE_BIT_PAT		7	/* on 4KB pages */
 #define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
+#define _PAGE_BIT_SPECIAL	9	/* special mappings, no associated struct page */
 #define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
 #define _PAGE_BIT_HIDDEN	11	/* hidden by kmemcheck */
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
-#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
+#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SPECIAL
+#define _PAGE_BIT_SPLITTING	_PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
 #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
 
 /* If _PAGE_BIT_PRESENT is clear, we use these: */
@@ -40,7 +39,6 @@
 #define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
 #define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
 #define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
 #define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
@@ -87,8 +85,10 @@
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
 #define _PAGE_NX	(_AT(pteval_t, 0))
+#else
+#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
 #endif
 
 #define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
@@ -147,6 +147,9 @@
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
 					 _PAGE_ACCESSED)
 
+#define PAGE_READONLY_NOEXEC PAGE_READONLY
+#define PAGE_SHARED_NOEXEC PAGE_SHARED
+
 #define __PAGE_KERNEL_EXEC						\
 	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
 #define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
@@ -157,7 +160,7 @@
 #define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR_NOCACHE	(__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
@@ -219,8 +222,8 @@
  * bits are combined, this will alow user to access the high address mapped
  * VDSO in the presence of CONFIG_COMPAT_VDSO
  */
-#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
-#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PTE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
+#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
 #define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
 #endif
 
@@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t p
 {
 	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
 }
+#endif
 
+#if PAGETABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+
+#if PAGETABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#endif
+
+#ifndef __ASSEMBLY__
 #if PAGETABLE_LEVELS > 3
 typedef struct { pudval_t pud; } pud_t;
 
@@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pu
 	return pud.pud;
 }
 #else
-#include <asm-generic/pgtable-nopud.h>
-
 static inline pudval_t native_pud_val(pud_t pud)
 {
 	return native_pgd_val(pud.pgd);
@@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pm
 	return pmd.pmd;
 }
 #else
-#include <asm-generic/pgtable-nopmd.h>
-
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
 	return native_pgd_val(pmd.pud.pgd);
@@ -334,7 +343,6 @@ typedef struct page *pgtable_t;
 
 extern pteval_t __supported_pte_mask;
 extern void set_nx(void);
-extern int nx_enabled;
 
 #define pgprot_writecombine	pgprot_writecombine
 extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff -ruNp linux-3.13.11/arch/x86/include/asm/preempt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/preempt.h
--- linux-3.13.11/arch/x86/include/asm/preempt.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/preempt.h	2014-07-09 12:00:15.000000000 +0200
@@ -87,7 +87,7 @@ static __always_inline void __preempt_co
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+	GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
 }
 
 /*
diff -ruNp linux-3.13.11/arch/x86/include/asm/processor.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/processor.h
--- linux-3.13.11/arch/x86/include/asm/processor.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/processor.h	2014-07-09 12:00:15.000000000 +0200
@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned
 	    : "memory");
 }
 
+/* invpcid (%rdx),%rax */
+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
+
+#define INVPCID_SINGLE_ADDRESS	0UL
+#define INVPCID_SINGLE_CONTEXT	1UL
+#define INVPCID_ALL_GLOBAL	2UL
+#define INVPCID_ALL_MONGLOBAL	3UL
+
+#define PCID_KERNEL		0UL
+#define PCID_USER		1UL
+#define PCID_NOFLUSH		(1UL << 63)
+
 static inline void load_cr3(pgd_t *pgdir)
 {
-	write_cr3(__pa(pgdir));
+	write_cr3(__pa(pgdir) | PCID_KERNEL);
 }
 
 #ifdef CONFIG_X86_32
@@ -283,7 +295,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
+extern struct tss_struct init_tss[NR_CPUS];
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -453,6 +465,7 @@ struct thread_struct {
 	unsigned short		ds;
 	unsigned short		fsindex;
 	unsigned short		gsindex;
+	unsigned short		ss;
 #endif
 #ifdef CONFIG_X86_32
 	unsigned long		ip;
@@ -562,29 +575,8 @@ static inline void load_sp0(struct tss_s
 extern unsigned long mmu_cr4_features;
 extern u32 *trampoline_cr4_features;
 
-static inline void set_in_cr4(unsigned long mask)
-{
-	unsigned long cr4;
-
-	mmu_cr4_features |= mask;
-	if (trampoline_cr4_features)
-		*trampoline_cr4_features = mmu_cr4_features;
-	cr4 = read_cr4();
-	cr4 |= mask;
-	write_cr4(cr4);
-}
-
-static inline void clear_in_cr4(unsigned long mask)
-{
-	unsigned long cr4;
-
-	mmu_cr4_features &= ~mask;
-	if (trampoline_cr4_features)
-		*trampoline_cr4_features = mmu_cr4_features;
-	cr4 = read_cr4();
-	cr4 &= ~mask;
-	write_cr4(cr4);
-}
+extern void set_in_cr4(unsigned long mask);
+extern void clear_in_cr4(unsigned long mask);
 
 typedef struct {
 	unsigned long		seg;
@@ -833,11 +825,18 @@ static inline void spin_lock_prefetch(co
  */
 #define TASK_SIZE		PAGE_OFFSET
 #define TASK_SIZE_MAX		TASK_SIZE
+
+#ifdef CONFIG_PAX_SEGMEXEC
+#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)
+#define STACK_TOP		((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
+#else
 #define STACK_TOP		TASK_SIZE
-#define STACK_TOP_MAX		STACK_TOP
+#endif
+
+#define STACK_TOP_MAX		TASK_SIZE
 
 #define INIT_THREAD  {							  \
-	.sp0			= sizeof(init_stack) + (long)&init_stack, \
+	.sp0			= sizeof(init_stack) + (long)&init_stack - 8, \
 	.vm86_info		= NULL,					  \
 	.sysenter_cs		= __KERNEL_CS,				  \
 	.io_bitmap_ptr		= NULL,					  \
@@ -851,7 +850,7 @@ static inline void spin_lock_prefetch(co
  */
 #define INIT_TSS  {							  \
 	.x86_tss = {							  \
-		.sp0		= sizeof(init_stack) + (long)&init_stack, \
+		.sp0		= sizeof(init_stack) + (long)&init_stack - 8, \
 		.ss0		= __KERNEL_DS,				  \
 		.ss1		= __KERNEL_CS,				  \
 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
@@ -862,11 +861,7 @@ static inline void spin_lock_prefetch(co
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-#define KSTK_TOP(info)                                                 \
-({                                                                     \
-       unsigned long *__ptr = (unsigned long *)(info);                 \
-       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
-})
+#define KSTK_TOP(info)         ((container_of(info, struct task_struct, tinfo))->thread.sp0)
 
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
@@ -881,7 +876,7 @@ extern unsigned long thread_saved_pc(str
 #define task_pt_regs(task)                                             \
 ({                                                                     \
        struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
+       __regs__ = (struct pt_regs *)((task)->thread.sp0);              \
        __regs__ - 1;                                                   \
 })
 
@@ -891,13 +886,13 @@ extern unsigned long thread_saved_pc(str
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX	((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
-					0xc0000000 : 0xFFFFe000)
+					0xc0000000 : 0xFFFFf000)
 
 #define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
 					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
@@ -908,11 +903,11 @@ extern unsigned long thread_saved_pc(str
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  { \
-	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
 }
 
 #define INIT_TSS  { \
-	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
 }
 
 /*
@@ -940,6 +935,10 @@ extern void start_thread(struct pt_regs
  */
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
+#ifdef CONFIG_PAX_SEGMEXEC
+#define SEGMEXEC_TASK_UNMAPPED_BASE	(PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
+#endif
+
 #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
 
 /* Get/set a process' ability to use the timestamp counter instruction */
@@ -966,7 +965,7 @@ static inline uint32_t hypervisor_cpuid_
 	return 0;
 }
 
-extern unsigned long arch_align_stack(unsigned long sp);
+#define arch_align_stack(x) ((x) & ~0xfUL)
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
@@ -976,6 +975,6 @@ bool xen_set_default_idle(void);
 #define xen_set_default_idle 0
 #endif
 
-void stop_this_cpu(void *dummy);
+void stop_this_cpu(void *dummy) __noreturn;
 void df_debug(struct pt_regs *regs, long error_code);
 #endif /* _ASM_X86_PROCESSOR_H */
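
(Side note on the PCID/INVPCID defines added to processor.h above, since they are used throughout the tlbflush.h changes further down: when CR4.PCIDE is set, the low 12 bits of a CR3 write select a process-context identifier and bit 63, PCID_NOFLUSH, suppresses the implicit TLB flush; invpcid takes a 16-byte descriptor, PCID in the first quadword and linear address in the second, plus a type in a register. The instruction is hand-encoded as __ASM_INVPCID, presumably so older assemblers can still build it, and it only runs at CPL 0, so the small user-space sketch below is purely illustrative: build_cr3() and the sample addresses are made up, only the constant values mirror the patch.)

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL	0ULL
#define PCID_USER	1ULL
#define PCID_NOFLUSH	(1ULL << 63)	/* CR3 bit 63: keep this PCID's TLB entries */

/* Same numbering as the INVPCID_* defines in the hunk above. */
enum invpcid_type {
	INVPCID_SINGLE_ADDRESS	= 0,	/* one address within one PCID    */
	INVPCID_SINGLE_CONTEXT	= 1,	/* all entries tagged with a PCID */
	INVPCID_ALL_GLOBAL	= 2,	/* everything, including globals  */
	INVPCID_ALL_MONGLOBAL	= 3,	/* everything except globals      */
};

/* Illustrative only: combine a page-table address with a PCID the way the
 * patched load_cr3()/__native_flush_tlb() do before writing CR3. */
static uint64_t build_cr3(uint64_t pgd_phys, uint64_t pcid, int noflush)
{
	return pgd_phys | (pcid & 0xfff) | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
	uint64_t pgd = 0x1234000;	/* hypothetical PGD physical address */
	uint64_t descriptor[2] = { PCID_USER, 0x7f0000001000ULL };

	printf("kernel cr3: %#llx\n",
	       (unsigned long long)build_cr3(pgd, PCID_KERNEL, 0));
	printf("user cr3:   %#llx\n",
	       (unsigned long long)build_cr3(pgd, PCID_USER, 1));
	/* invpcid would take &descriptor in %rdx and the type in %rax */
	printf("invpcid: rdx -> { pcid=%llu, addr=%#llx }, rax=%d\n",
	       (unsigned long long)descriptor[0],
	       (unsigned long long)descriptor[1], (int)INVPCID_SINGLE_ADDRESS);
	return 0;
}
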
diff -ruNp linux-3.13.11/arch/x86/include/asm/ptrace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/ptrace.h
--- linux-3.13.11/arch/x86/include/asm/ptrace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/ptrace.h	2014-07-09 12:00:15.000000000 +0200
@@ -85,28 +85,29 @@ static inline unsigned long regs_return_
 }
 
 /*
- * user_mode_vm(regs) determines whether a register set came from user mode.
+ * user_mode(regs) determines whether a register set came from user mode.
  * This is true if V8086 mode was enabled OR if the register set was from
  * protected mode with RPL-3 CS value.  This tricky test checks that with
  * one comparison.  Many places in the kernel can bypass this full check
- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
+ * be used.
  */
-static inline int user_mode(struct pt_regs *regs)
+static inline int user_mode_novm(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
 	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
 #else
-	return !!(regs->cs & 3);
+	return !!(regs->cs & SEGMENT_RPL_MASK);
 #endif
 }
 
-static inline int user_mode_vm(struct pt_regs *regs)
+static inline int user_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
 	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
 		USER_RPL;
 #else
-	return user_mode(regs);
+	return user_mode_novm(regs);
 #endif
 }
 
@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_r
 #ifdef CONFIG_X86_64
 static inline bool user_64bit_mode(struct pt_regs *regs)
 {
+	unsigned long cs = regs->cs & 0xffff;
 #ifndef CONFIG_PARAVIRT
 	/*
 	 * On non-paravirt systems, this is the only long mode CPL 3
 	 * selector.  We do not allow long mode selectors in the LDT.
 	 */
-	return regs->cs == __USER_CS;
+	return cs == __USER_CS;
 #else
 	/* Headers are too twisted for this to go in paravirt.h. */
-	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
+	return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
 #endif
 }
 
@@ -181,9 +183,11 @@ static inline unsigned long regs_get_reg
 	 * Traps from the kernel do not save sp and ss.
 	 * Use the helper function to retrieve sp.
 	 */
-	if (offset == offsetof(struct pt_regs, sp) &&
-	    regs->cs == __KERNEL_CS)
-		return kernel_stack_pointer(regs);
+	if (offset == offsetof(struct pt_regs, sp)) {
+		unsigned long cs = regs->cs & 0xffff;
+	 	if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
+			return kernel_stack_pointer(regs);
+	}
 #endif
 	return *(unsigned long *)((unsigned long)regs + offset);
 }
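
(The ptrace.h hunk above only swaps names: what mainline calls user_mode() becomes user_mode_novm(), and the V8086-aware user_mode_vm() takes over the plain user_mode() name, so callers that have already ruled out V8086 use the _novm variant. The underlying test is still the selector's requested privilege level. A tiny stand-alone sketch of that check; selector_is_user() and the sample values are illustrative, 0x73/0x60 being the usual 32-bit __USER_CS/__KERNEL_CS values.)

#include <stdio.h>

#define SEGMENT_RPL_MASK	0x3	/* low two selector bits = requested privilege level */
#define USER_RPL		0x3

/* Illustrative stand-in for the RPL test in user_mode_novm() above. */
static int selector_is_user(unsigned short cs)
{
	return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

int main(void)
{
	printf("cs=0x73 -> user=%d, cs=0x60 -> user=%d\n",
	       selector_is_user(0x73), selector_is_user(0x60));
	return 0;
}
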
diff -ruNp linux-3.13.11/arch/x86/include/asm/realmode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/realmode.h
--- linux-3.13.11/arch/x86/include/asm/realmode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/realmode.h	2014-07-09 12:00:15.000000000 +0200
@@ -22,16 +22,14 @@ struct real_mode_header {
 #endif
 	/* APM/BIOS reboot */
 	u32	machine_real_restart_asm;
-#ifdef CONFIG_X86_64
 	u32	machine_real_restart_seg;
-#endif
 };
 
 /* This must match data at trampoline_32/64.S */
 struct trampoline_header {
 #ifdef CONFIG_X86_32
 	u32 start;
-	u16 gdt_pad;
+	u16 boot_cs;
 	u16 gdt_limit;
 	u32 gdt_base;
 #else
diff -ruNp linux-3.13.11/arch/x86/include/asm/reboot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/reboot.h
--- linux-3.13.11/arch/x86/include/asm/reboot.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/reboot.h	2014-07-09 12:00:15.000000000 +0200
@@ -6,13 +6,13 @@
 struct pt_regs;
 
 struct machine_ops {
-	void (*restart)(char *cmd);
-	void (*halt)(void);
-	void (*power_off)(void);
+	void (* __noreturn restart)(char *cmd);
+	void (* __noreturn halt)(void);
+	void (* __noreturn power_off)(void);
 	void (*shutdown)(void);
 	void (*crash_shutdown)(struct pt_regs *);
-	void (*emergency_restart)(void);
-};
+	void (* __noreturn emergency_restart)(void);
+} __no_const;
 
 extern struct machine_ops machine_ops;
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/rmwcc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/rmwcc.h
--- linux-3.13.11/arch/x86/include/asm/rmwcc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/rmwcc.h	2014-07-09 12:00:15.000000000 +0200
@@ -3,7 +3,34 @@
 
 #ifdef CC_HAVE_ASM_GOTO
 
-#define __GEN_RMWcc(fullop, var, cc, ...)				\
+#ifdef CONFIG_PAX_REFCOUNT
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	asm_volatile_goto (fullop					\
+			";jno 0f\n"					\
+			fullantiop					\
+			";int $4\n0:\n"					\
+			_ASM_EXTABLE(0b, 0b)				\
+			 ";j" cc " %l[cc_label]"			\
+			: : "m" (var), ## __VA_ARGS__ 			\
+			: "memory" : cc_label);				\
+	return 0;							\
+cc_label:								\
+	return 1;							\
+} while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	asm_volatile_goto (fullop ";j" cc " %l[cc_label]"		\
+			: : "m" (var), ## __VA_ARGS__ 			\
+			: "memory" : cc_label);				\
+	return 0;							\
+cc_label:								\
+	return 1;							\
+} while (0)
+#endif
+
+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)			\
 do {									\
 	asm_volatile_goto (fullop "; j" cc " %l[cc_label]"		\
 			: : "m" (var), ## __VA_ARGS__ 			\
@@ -13,15 +40,46 @@ cc_label:								\
 	return 1;							\
 } while (0)
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) 				\
-	__GEN_RMWcc(op " " arg0, var, cc)
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) 			\
+	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+
+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) 			\
+	__GEN_RMWcc_unchecked(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
+	__GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
 
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
-	__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)	\
+	__GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
 
 #else /* !CC_HAVE_ASM_GOTO */
 
-#define __GEN_RMWcc(fullop, var, cc, ...)				\
+#ifdef CONFIG_PAX_REFCOUNT
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	char c;								\
+	asm volatile (fullop 						\
+			";jno 0f\n"					\
+			fullantiop					\
+			";int $4\n0:\n"					\
+			_ASM_EXTABLE(0b, 0b)				\
+			"; set" cc " %1"				\
+			: "+m" (var), "=qm" (c)				\
+			: __VA_ARGS__ : "memory");			\
+	return c != 0;							\
+} while (0)
+#else
+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...)			\
+do {									\
+	char c;								\
+	asm volatile (fullop "; set" cc " %1"				\
+			: "+m" (var), "=qm" (c)				\
+			: __VA_ARGS__ : "memory");			\
+	return c != 0;							\
+} while (0)
+#endif
+
+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...)			\
 do {									\
 	char c;								\
 	asm volatile (fullop "; set" cc " %1"				\
@@ -30,11 +88,17 @@ do {									\
 	return c != 0;							\
 } while (0)
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc)				\
-	__GEN_RMWcc(op " " arg0, var, cc)
+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc)			\
+	__GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
+
+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc)			\
+	__GEN_RMWcc_unchecked(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc)		\
+	__GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
 
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)			\
-	__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc)	\
+	__GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
 
 #endif /* CC_HAVE_ASM_GOTO */
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/rwsem.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/rwsem.h
--- linux-3.13.11/arch/x86/include/asm/rwsem.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/rwsem.h	2014-07-09 12:00:15.000000000 +0200
@@ -64,6 +64,14 @@ static inline void __down_read(struct rw
 {
 	asm volatile("# beginning down_read\n\t"
 		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX _ASM_DEC "(%1)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* adds 0x00000001 */
 		     "  jns        1f\n"
 		     "  call call_rwsem_down_read_failed\n"
@@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
 		     "1:\n\t"
 		     "  mov          %1,%2\n\t"
 		     "  add          %3,%2\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     "sub %3,%2\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     "  jle	     2f\n\t"
 		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
 		     "  jnz	     1b\n\t"
@@ -104,6 +120,14 @@ static inline void __down_write_nested(s
 	long tmp;
 	asm volatile("# beginning down_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     "mov %1,(%2)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* adds 0xffff0001, returns the old value */
 		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
 		     /* was the active mask 0 before? */
@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_s
 	long tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     "mov %1,(%2)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* subtracts 1, returns the old value */
 		     "  jns        1f\n\t"
 		     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_
 	long tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     "mov %1,(%2)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /* subtracts 0xffff0001, returns the old value */
 		     "  jns        1f\n\t"
 		     "  call call_rwsem_wake\n" /* expects old value in %edx */
@@ -190,6 +230,14 @@ static inline void __downgrade_write(str
 {
 	asm volatile("# beginning __downgrade_write\n\t"
 		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     /*
 		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
 		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
@@ -208,7 +256,15 @@ static inline void __downgrade_write(str
  */
 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX _ASM_SUB "%1,%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     : "+m" (sem->count)
 		     : "er" (delta));
 }
@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	return delta + xadd(&sem->count, delta);
+	return delta + xadd_check_overflow(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
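
(The PAX_REFCOUNT blocks in the rmwcc.h and rwsem.h hunks above, and again in spinlock.h below, all follow the same shape: do the atomic operation, skip ahead with jno if the overflow flag is clear, otherwise undo the operation and execute int $4 to raise the overflow trap; the _ASM_EXTABLE entry lets the kernel's trap handler resume afterwards. A stand-alone sketch of the idea, assuming x86-64 and GCC inline asm; checked_inc() is illustrative, not a kernel interface, and the demo stays far from INT_MAX so it never traps.)

#include <stdio.h>

/* Increment *ctr, but roll back and trap with #OF if it overflowed. */
static void checked_inc(int *ctr)
{
	asm volatile("incl %0\n\t"
		     "jno 1f\n\t"	/* no signed overflow: done     */
		     "decl %0\n\t"	/* overflow: undo the increment */
		     "int $4\n"		/* and raise the overflow trap  */
		     "1:\n"
		     : "+m" (*ctr));
}

int main(void)
{
	int refs = 41;

	checked_inc(&refs);
	printf("refs = %d\n", refs);	/* prints 42 */
	return 0;
}
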
diff -ruNp linux-3.13.11/arch/x86/include/asm/segment.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/segment.h
--- linux-3.13.11/arch/x86/include/asm/segment.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/segment.h	2014-07-09 12:00:15.000000000 +0200
@@ -64,10 +64,15 @@
  *  26 - ESPFIX small SS
  *  27 - per-cpu			[ offset to per-cpu data area ]
  *  28 - stack_canary-20		[ for stack protector ]
- *  29 - unused
- *  30 - unused
+ *  29 - PCI BIOS CS
+ *  30 - PCI BIOS DS
  *  31 - TSS for double fault handler
  */
+#define GDT_ENTRY_KERNEXEC_EFI_CS	(1)
+#define GDT_ENTRY_KERNEXEC_EFI_DS	(2)
+#define __KERNEXEC_EFI_CS	(GDT_ENTRY_KERNEXEC_EFI_CS*8)
+#define __KERNEXEC_EFI_DS	(GDT_ENTRY_KERNEXEC_EFI_DS*8)
+
 #define GDT_ENTRY_TLS_MIN	6
 #define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
 
@@ -79,6 +84,8 @@
 
 #define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE+0)
 
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS	(4)
+
 #define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE+1)
 
 #define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE+4)
@@ -104,6 +111,12 @@
 #define __KERNEL_STACK_CANARY		0
 #endif
 
+#define GDT_ENTRY_PCIBIOS_CS		(GDT_ENTRY_KERNEL_BASE+17)
+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
+
+#define GDT_ENTRY_PCIBIOS_DS		(GDT_ENTRY_KERNEL_BASE+18)
+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
+
 #define GDT_ENTRY_DOUBLEFAULT_TSS	31
 
 /*
@@ -141,7 +154,7 @@
  */
 
 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
-#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
 
 
 #else
@@ -165,6 +178,8 @@
 #define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
 #define __USER32_DS	__USER_DS
 
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
+
 #define GDT_ENTRY_TSS 8	/* needs two entries */
 #define GDT_ENTRY_LDT 10 /* needs two entries */
 #define GDT_ENTRY_TLS_MIN 12
@@ -173,6 +188,8 @@
 #define GDT_ENTRY_PER_CPU 15	/* Abused to load per CPU data from limit */
 #define __PER_CPU_SEG	(GDT_ENTRY_PER_CPU * 8 + 3)
 
+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
+
 /* TLS indexes for 64bit - hardcoded in arch_prctl */
 #define FS_TLS 0
 #define GS_TLS 1
@@ -180,12 +197,14 @@
 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
 
-#define GDT_ENTRIES 16
+#define GDT_ENTRIES 17
 
 #endif
 
 #define __KERNEL_CS	(GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEXEC_KERNEL_CS	(GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
 #define __KERNEL_DS	(GDT_ENTRY_KERNEL_DS*8)
+#define __UDEREF_KERNEL_DS	(GDT_ENTRY_UDEREF_KERNEL_DS*8)
 #define __USER_DS	(GDT_ENTRY_DEFAULT_USER_DS*8+3)
 #define __USER_CS	(GDT_ENTRY_DEFAULT_USER_CS*8+3)
 #ifndef CONFIG_PARAVIRT
@@ -268,7 +287,7 @@ static inline unsigned long get_limit(un
 {
 	unsigned long __limit;
 	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
-	return __limit + 1;
+	return __limit;
 }
 
 #endif /* !__ASSEMBLY__ */
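
(On the new segment.h entries above: an x86 selector for a GDT descriptor is just the table index times 8, that is index << 3 with the table-indicator bit clear, plus the requested privilege level in the low two bits, which is why __PCIBIOS_CS, __KERNEXEC_KERNEL_CS and __UDEREF_KERNEL_DS are all defined as index * 8 while user selectors carry an extra +3. A tiny illustration; make_selector() is not a kernel helper.)

#include <stdio.h>

/* GDT index + RPL -> selector value (table-indicator bit left clear). */
static unsigned short make_selector(unsigned int index, unsigned int rpl)
{
	return (unsigned short)((index << 3) | (rpl & 3));
}

int main(void)
{
	/* 29 is the GDT_ENTRY_PCIBIOS_CS slot from the 32-bit comment above. */
	printf("__PCIBIOS_CS          = %#x\n", make_selector(29, 0));
	printf("__KERNEXEC_KERNEL_CS  = %#x\n", make_selector(4, 0));	/* 32-bit value */
	printf("a user selector, RPL3 = %#x\n", make_selector(14, 3));	/* e.g. 32-bit __USER_CS */
	return 0;
}
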
diff -ruNp linux-3.13.11/arch/x86/include/asm/smap.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/smap.h
--- linux-3.13.11/arch/x86/include/asm/smap.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/smap.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,11 +25,40 @@
 
 #include <asm/alternative-asm.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define ASM_PAX_OPEN_USERLAND					\
+	661: jmp 663f;						\
+	.pushsection .altinstr_replacement, "a" ;		\
+	662: pushq %rax; nop;					\
+	.popsection ;						\
+	.pushsection .altinstructions, "a" ;			\
+	altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
+	.popsection ;						\
+	call __pax_open_userland;				\
+	popq %rax;						\
+	663:
+
+#define ASM_PAX_CLOSE_USERLAND					\
+	661: jmp 663f;						\
+	.pushsection .altinstr_replacement, "a" ;		\
+	662: pushq %rax; nop;					\
+	.popsection;						\
+	.pushsection .altinstructions, "a" ;			\
+	altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
+	.popsection;						\
+	call __pax_close_userland;				\
+	popq %rax;						\
+	663:
+#else
+#define ASM_PAX_OPEN_USERLAND
+#define ASM_PAX_CLOSE_USERLAND
+#endif
+
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC							\
 	661: ASM_NOP3 ;							\
-	.pushsection .altinstr_replacement, "ax" ;			\
+	.pushsection .altinstr_replacement, "a" ;			\
 	662: __ASM_CLAC ;						\
 	.popsection ;							\
 	.pushsection .altinstructions, "a" ;				\
@@ -38,7 +67,7 @@
 
 #define ASM_STAC							\
 	661: ASM_NOP3 ;							\
-	.pushsection .altinstr_replacement, "ax" ;			\
+	.pushsection .altinstr_replacement, "a" ;			\
 	662: __ASM_STAC ;						\
 	.popsection ;							\
 	.pushsection .altinstructions, "a" ;				\
@@ -56,6 +85,37 @@
 
 #include <asm/alternative.h>
 
+#define __HAVE_ARCH_PAX_OPEN_USERLAND
+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
+
+extern void __pax_open_userland(void);
+static __always_inline unsigned long pax_open_userland(void)
+{
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
+		:
+		: [open] "i" (__pax_open_userland)
+		: "memory", "rax");
+#endif
+
+	return 0;
+}
+
+extern void __pax_close_userland(void);
+static __always_inline unsigned long pax_close_userland(void)
+{
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
+		:
+		: [close] "i" (__pax_close_userland)
+		: "memory", "rax");
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_X86_SMAP
 
 static __always_inline void clac(void)
diff -ruNp linux-3.13.11/arch/x86/include/asm/smp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/smp.h
--- linux-3.13.11/arch/x86/include/asm/smp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/smp.h	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_
 /* cpus sharing the last level cache: */
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -79,7 +79,7 @@ struct smp_ops {
 
 	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
-};
+} __no_const;
 
 /* Globals due to paravirt */
 extern void set_cpu_sibling_map(int cpu);
@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
-
-#define stack_smp_processor_id()					\
-({								\
-	struct thread_info *ti;						\
-	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-	ti->cpu;							\
-})
+#define raw_smp_processor_id()		(this_cpu_read(cpu_number))
+#define stack_smp_processor_id()	raw_smp_processor_id()
 #define safe_smp_processor_id()		smp_processor_id()
 
 #endif
diff -ruNp linux-3.13.11/arch/x86/include/asm/spinlock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/spinlock.h
--- linux-3.13.11/arch/x86/include/asm/spinlock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/spinlock.h	2014-07-09 12:00:15.000000000 +0200
@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(ar
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     "jns 1f\n"
 		     "call __read_lock_failed\n\t"
 		     "1:\n"
@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_r
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     "jz 1f\n"
 		     "call __write_lock_failed\n\t"
 		     "1:\n"
@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arc
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
+	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     :"+m" (rw->lock) : : "memory");
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
+	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+		     "jno 0f\n"
+		     LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
+		     "int $4\n0:\n"
+		     _ASM_EXTABLE(0b, 0b)
+#endif
+
 		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/stackprotector.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/stackprotector.h
--- linux-3.13.11/arch/x86/include/asm/stackprotector.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/stackprotector.h	2014-07-09 12:00:15.000000000 +0200
@@ -47,7 +47,7 @@
  * head_32 for boot CPU and setup_per_cpu_areas() for others.
  */
 #define GDT_STACK_CANARY_INIT						\
-	[GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
+	[GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
 
 /*
  * Initialize the stackprotector canary value.
@@ -112,7 +112,7 @@ static inline void setup_stack_canary_se
 
 static inline void load_stack_canary_segment(void)
 {
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
 	asm volatile ("mov %0, %%gs" : : "r" (0));
 #endif
 }
diff -ruNp linux-3.13.11/arch/x86/include/asm/stacktrace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/stacktrace.h
--- linux-3.13.11/arch/x86/include/asm/stacktrace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/stacktrace.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,28 +11,20 @@
 
 extern int kstack_depth_to_print;
 
-struct thread_info;
+struct task_struct;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
-				      unsigned long *stack,
-				      unsigned long bp,
-				      const struct stacktrace_ops *ops,
-				      void *data,
-				      unsigned long *end,
-				      int *graph);
-
-extern unsigned long
-print_context_stack(struct thread_info *tinfo,
-		    unsigned long *stack, unsigned long bp,
-		    const struct stacktrace_ops *ops, void *data,
-		    unsigned long *end, int *graph);
-
-extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
-		       unsigned long *stack, unsigned long bp,
-		       const struct stacktrace_ops *ops, void *data,
-		       unsigned long *end, int *graph);
+typedef unsigned long walk_stack_t(struct task_struct *task,
+				   void *stack_start,
+				   unsigned long *stack,
+				   unsigned long bp,
+				   const struct stacktrace_ops *ops,
+				   void *data,
+				   unsigned long *end,
+				   int *graph);
+
+extern walk_stack_t print_context_stack;
+extern walk_stack_t print_context_stack_bp;
 
 /* Generic stack tracer with callbacks */
 
@@ -40,7 +32,7 @@ struct stacktrace_ops {
 	void (*address)(void *data, unsigned long address, int reliable);
 	/* On negative return stop dumping */
 	int (*stack)(void *data, char *name);
-	walk_stack_t	walk_stack;
+	walk_stack_t	*walk_stack;
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
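
(A small C detail in the stacktrace.h hunk above: walk_stack_t changes from a pointer-to-function typedef to a plain function typedef, which lets the header declare the walkers as "extern walk_stack_t print_context_stack;" and makes the struct member an explicit pointer, "walk_stack_t *walk_stack". A tiny stand-alone illustration of the two typedef styles; the names here are made up, not the kernel's.)

#include <stdio.h>

/* Function typedef (the style the patch switches to): names a function
 * type, so "walk_fn_t *walk;" is the pointer. */
typedef void walk_fn_t(const char *what);

/* Pointer typedef (the old style): the typedef itself is already a pointer. */
typedef void (*walk_fp_t)(const char *what);

static void print_it(const char *what)
{
	printf("walking %s\n", what);
}

int main(void)
{
	walk_fn_t *walk = print_it;	/* explicit '*', as in struct stacktrace_ops */
	walk_fp_t walk2 = print_it;

	walk("stack");
	walk2("stack again");
	return 0;
}
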
diff -ruNp linux-3.13.11/arch/x86/include/asm/switch_to.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/switch_to.h
--- linux-3.13.11/arch/x86/include/asm/switch_to.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/switch_to.h	2014-07-09 12:00:15.000000000 +0200
@@ -108,7 +108,7 @@ do {									\
 	     "call __switch_to\n\t"					  \
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
 	     __switch_canary						  \
-	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
+	     "movq "__percpu_arg([thread_info])",%%r8\n\t"		  \
 	     "movq %%rax,%%rdi\n\t" 					  \
 	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
 	     "jnz   ret_from_fork\n\t"					  \
@@ -119,7 +119,7 @@ do {									\
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [_tif_fork] "i" (_TIF_FORK),			  	  \
-	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
+	       [thread_info] "m" (current_tinfo),			  \
 	       [current_task] "m" (current_task)			  \
 	       __switch_canary_iparam					  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
diff -ruNp linux-3.13.11/arch/x86/include/asm/thread_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/thread_info.h
--- linux-3.13.11/arch/x86/include/asm/thread_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/thread_info.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/types.h>
+#include <asm/percpu.h>
 
 /*
  * low level task data that entry.S needs immediate access to
@@ -23,7 +24,6 @@ struct exec_domain;
 #include <linux/atomic.h>
 
 struct thread_info {
-	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
 	__u32			flags;		/* low level flags */
 	__u32			status;		/* thread synchronous flags */
@@ -32,19 +32,13 @@ struct thread_info {
 	mm_segment_t		addr_limit;
 	struct restart_block    restart_block;
 	void __user		*sysenter_return;
-#ifdef CONFIG_X86_32
-	unsigned long           previous_esp;   /* ESP of the previous stack in
-						   case of nested (IRQ) stacks
-						*/
-	__u8			supervisor_stack[0];
-#endif
+	unsigned long		lowest_stack;
 	unsigned int		sig_on_uaccess_error:1;
 	unsigned int		uaccess_err:1;	/* uaccess failed */
 };
 
-#define INIT_THREAD_INFO(tsk)			\
+#define INIT_THREAD_INFO			\
 {						\
-	.task		= &tsk,			\
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
@@ -55,7 +49,7 @@ struct thread_info {
 	},					\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
+#define init_thread_info	(init_thread_union.stack)
 #define init_stack		(init_thread_union.stack)
 
 #else /* !__ASSEMBLY__ */
@@ -95,6 +89,7 @@ struct thread_info {
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
 #define TIF_X32			30	/* 32-bit native x86-64 binary */
+#define TIF_GRSEC_SETXID	31	/* update credentials on syscall entry/exit */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -118,17 +113,18 @@ struct thread_info {
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_ADDR32		(1 << TIF_ADDR32)
 #define _TIF_X32		(1 << TIF_X32)
+#define _TIF_GRSEC_SETXID	(1 << TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY	\
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
 	 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |	\
-	 _TIF_NOHZ)
+	 _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT	\
 	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |	\
-	 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
+	 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK							\
@@ -139,7 +135,7 @@ struct thread_info {
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK						\
 	((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |	\
-	_TIF_NOHZ)
+	_TIF_NOHZ | _TIF_GRSEC_SETXID)
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK						\
@@ -153,6 +149,23 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
+#ifdef __ASSEMBLY__
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	 \
+	mov PER_CPU_VAR(current_tinfo), reg
+
+/* use this one if reg already contains %esp */
+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
+#else
+/* how to get the thread information struct from C */
+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
+
+static __always_inline struct thread_info *current_thread_info(void)
+{
+	return this_cpu_read_stable(current_tinfo);
+}
+#endif
+
 #ifdef CONFIG_X86_32
 
 #define STACK_WARN	(THREAD_SIZE/8)
@@ -163,35 +176,13 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-
 /* how to get the current stack pointer from C */
 register unsigned long current_stack_pointer asm("esp") __used;
 
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *)
-		(current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)	 \
-	movl $-THREAD_SIZE, reg; \
-	andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
-	andl $-THREAD_SIZE, reg
-
 #endif
 
 #else /* X86_32 */
 
-#include <asm/percpu.h>
-#define KERNEL_STACK_OFFSET (5*8)
-
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
@@ -199,27 +190,8 @@ static inline struct thread_info *curren
 #ifndef __ASSEMBLY__
 DECLARE_PER_CPU(unsigned long, kernel_stack);
 
-static inline struct thread_info *current_thread_info(void)
-{
-	struct thread_info *ti;
-	ti = (void *)(this_cpu_read_stable(kernel_stack) +
-		      KERNEL_STACK_OFFSET - THREAD_SIZE);
-	return ti;
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
-	movq PER_CPU_VAR(kernel_stack),reg ; \
-	subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
-
-/*
- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
- * a certain register (to be used in assembler memory operands).
- */
-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
-
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("rsp") __used;
 #endif
 
 #endif /* !X86_32 */
@@ -278,5 +250,12 @@ static inline bool is_ia32_task(void)
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
+
+#define __HAVE_THREAD_FUNCTIONS
+#define task_thread_info(task)	(&(task)->tinfo)
+#define task_stack_page(task)	((task)->stack)
+#define setup_thread_stack(p, org) do {} while (0)
+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
+
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
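
(The thread_info.h rework drops the classic trick of deriving thread_info from the stack pointer and instead reads a per-cpu current_tinfo pointer, which is why GET_THREAD_INFO becomes a single per-cpu load on both 32 and 64 bit. For reference, the removed trick simply rounded %esp down to a THREAD_SIZE boundary; a minimal sketch of that arithmetic, assuming 8 KiB stacks and a made-up pointer value.)

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	8192UL	/* assumption: 8 KiB kernel stacks */

int main(void)
{
	uintptr_t sp = 0xc12345abUL;	/* hypothetical stack-pointer value */

	printf("sp=%#lx -> stack base %#lx\n",
	       (unsigned long)sp,
	       (unsigned long)(sp & ~(uintptr_t)(THREAD_SIZE - 1)));
	return 0;
}
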
diff -ruNp linux-3.13.11/arch/x86/include/asm/tlbflush.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/tlbflush.h
--- linux-3.13.11/arch/x86/include/asm/tlbflush.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/tlbflush.h	2014-07-09 12:00:15.000000000 +0200
@@ -17,18 +17,44 @@
 
 static inline void __native_flush_tlb(void)
 {
+	if (static_cpu_has(X86_FEATURE_INVPCID)) {
+		u64 descriptor[2];
+
+		descriptor[0] = PCID_KERNEL;
+		asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
+		return;
+	}
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		unsigned int cpu = raw_get_cpu();
+
+		native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
+		native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
+		raw_put_cpu_no_resched();
+		return;
+	}
+#endif
+
 	native_write_cr3(native_read_cr3());
 }
 
 static inline void __native_flush_tlb_global_irq_disabled(void)
 {
-	unsigned long cr4;
+	if (static_cpu_has(X86_FEATURE_INVPCID)) {
+		u64 descriptor[2];
 
-	cr4 = native_read_cr4();
-	/* clear PGE */
-	native_write_cr4(cr4 & ~X86_CR4_PGE);
-	/* write old PGE again and flush TLBs */
-	native_write_cr4(cr4);
+		descriptor[0] = PCID_KERNEL;
+		asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
+	} else {
+		unsigned long cr4;
+
+		cr4 = native_read_cr4();
+		/* clear PGE */
+		native_write_cr4(cr4 & ~X86_CR4_PGE);
+		/* write old PGE again and flush TLBs */
+		native_write_cr4(cr4);
+	}
 }
 
 static inline void __native_flush_tlb_global(void)
@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_gl
 
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
+	if (static_cpu_has(X86_FEATURE_INVPCID)) {
+		u64 descriptor[2];
+
+		descriptor[0] = PCID_KERNEL;
+		descriptor[1] = addr;
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
+			if (addr < TASK_SIZE_MAX)
+				descriptor[1] += pax_user_shadow_base;
+			asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
+		}
+
+		descriptor[0] = PCID_USER;
+		descriptor[1] = addr;
+#endif
+
+		asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
+		return;
+	}
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		unsigned int cpu = raw_get_cpu();
+
+		native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
+		asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+		native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+		raw_put_cpu_no_resched();
+
+		if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
+			addr += pax_user_shadow_base;
+	}
+#endif
+
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess.h
--- linux-3.13.11/arch/x86/include/asm/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/smap.h>
@@ -29,7 +30,12 @@
 
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+void __set_fs(mm_segment_t x);
+void set_fs(mm_segment_t x);
+#else
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
+#endif
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
@@ -77,8 +83,34 @@
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
  */
-#define access_ok(type, addr, size) \
-	(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
+extern int _cond_resched(void);
+#define access_ok_noprefault(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
+#define access_ok(type, addr, size)					\
+({									\
+	long __size = size;						\
+	unsigned long __addr = (unsigned long)addr;			\
+	unsigned long __addr_ao = __addr & PAGE_MASK;			\
+	unsigned long __end_ao = __addr + __size - 1;			\
+	bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
+	if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) {	\
+		while(__addr_ao <= __end_ao) {				\
+			char __c_ao;					\
+			__addr_ao += PAGE_SIZE;				\
+			if (__size > PAGE_SIZE)				\
+				_cond_resched();			\
+			if (__get_user(__c_ao, (char __user *)__addr))	\
+				break;					\
+			if (type != VERIFY_WRITE) {			\
+				__addr = __addr_ao;			\
+				continue;				\
+			}						\
+			if (__put_user(__c_ao, (char __user *)__addr))	\
+				break;					\
+			__addr = __addr_ao;				\
+		}							\
+	}								\
+	__ret_ao;							\
+})
 
 /*
  * The exception table consists of pairs of addresses relative to the
@@ -168,10 +200,12 @@ __typeof__(__builtin_choose_expr(sizeof(
 	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	pax_open_userland();						\
 	asm volatile("call __get_user_%P3"				\
 		     : "=a" (__ret_gu), "=r" (__val_gu)			\
 		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
 	(x) = (__typeof__(*(ptr))) __val_gu;				\
+	pax_close_userland();						\
 	__ret_gu;							\
 })
 
@@ -179,13 +213,21 @@ __typeof__(__builtin_choose_expr(sizeof(
 	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
 		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
-
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg "gs;"
+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
+#else
+#define __copyuser_seg
+#define __COPYUSER_SET_ES
+#define __COPYUSER_RESTORE_ES
+#endif
 
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret)			\
 	asm volatile(ASM_STAC "\n"					\
-		     "1:	movl %%eax,0(%2)\n"			\
-		     "2:	movl %%edx,4(%2)\n"			\
+		     "1:	"__copyuser_seg"movl %%eax,0(%2)\n"	\
+		     "2:	"__copyuser_seg"movl %%edx,4(%2)\n"	\
 		     "3: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	movl %3,%0\n"				\
@@ -198,8 +240,8 @@ __typeof__(__builtin_choose_expr(sizeof(
 
 #define __put_user_asm_ex_u64(x, addr)					\
 	asm volatile(ASM_STAC "\n"					\
-		     "1:	movl %%eax,0(%1)\n"			\
-		     "2:	movl %%edx,4(%1)\n"			\
+		     "1:	"__copyuser_seg"movl %%eax,0(%1)\n"	\
+		     "2:	"__copyuser_seg"movl %%edx,4(%1)\n"	\
 		     "3: " ASM_CLAC "\n"				\
 		     _ASM_EXTABLE_EX(1b, 2b)				\
 		     _ASM_EXTABLE_EX(2b, 3b)				\
@@ -249,7 +291,8 @@ extern void __put_user_8(void);
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
 	might_fault();						\
-	__pu_val = x;						\
+	__pu_val = (x);						\
+	pax_open_userland();					\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
 		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
@@ -267,6 +310,7 @@ extern void __put_user_8(void);
 		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
 		break;						\
 	}							\
+	pax_close_userland();					\
 	__ret_pu;						\
 })
 
@@ -347,8 +391,10 @@ do {									\
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+do {									\
+	pax_open_userland();						\
 	asm volatile(ASM_STAC "\n"					\
-		     "1:	mov"itype" %2,%"rtype"1\n"		\
+		     "1:	"__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
 		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
@@ -356,8 +402,10 @@ do {									\
 		     "	jmp 2b\n"					\
 		     ".previous\n"					\
 		     _ASM_EXTABLE(1b, 3b)				\
-		     : "=r" (err), ltype(x)				\
-		     : "m" (__m(addr)), "i" (errret), "0" (err))
+		     : "=r" (err), ltype (x)				\
+		     : "m" (__m(addr)), "i" (errret), "0" (err));	\
+	pax_close_userland();						\
+} while (0)
 
 #define __get_user_size_ex(x, ptr, size)				\
 do {									\
@@ -381,7 +429,7 @@ do {									\
 } while (0)
 
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
-	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+	asm volatile("1:	"__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
 		     "2:\n"						\
 		     _ASM_EXTABLE_EX(1b, 2b)				\
 		     : ltype(x) : "m" (__m(addr)))
@@ -398,13 +446,24 @@ do {									\
 	int __gu_err;							\
 	unsigned long __gu_val;						\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
-	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	(x) = (__typeof__(*(ptr)))__gu_val;				\
 	__gu_err;							\
 })
 
 /* FIXME: this hack is definitely wrong -AK */
 struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define ____m(x)					\
+({							\
+	unsigned long ____x = (unsigned long)(x);	\
+	if (____x < pax_user_shadow_base)		\
+		____x += pax_user_shadow_base;		\
+	(typeof(x))____x;				\
+})
+#else
+#define ____m(x) (x)
+#endif
+#define __m(x) (*(struct __large_struct __user *)____m(x))
 
 /*
  * Tell gcc we read from memory instead of writing: this is because
@@ -412,8 +471,10 @@ struct __large_struct { unsigned long bu
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+do {									\
+	pax_open_userland();						\
 	asm volatile(ASM_STAC "\n"					\
-		     "1:	mov"itype" %"rtype"1,%2\n"		\
+		     "1:	"__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
 		     "2: " ASM_CLAC "\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "3:	mov %3,%0\n"				\
@@ -421,10 +482,12 @@ struct __large_struct { unsigned long bu
 		     ".previous\n"					\
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : "=r"(err)					\
-		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
+	pax_close_userland();						\
+} while (0)
 
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
-	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
+	asm volatile("1:	"__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
 		     "2:\n"						\
 		     _ASM_EXTABLE_EX(1b, 2b)				\
 		     : : ltype(x), "m" (__m(addr)))
@@ -434,11 +497,13 @@ struct __large_struct { unsigned long bu
  */
 #define uaccess_try	do {						\
 	current_thread_info()->uaccess_err = 0;				\
+	pax_open_userland();						\
 	stac();								\
 	barrier();
 
 #define uaccess_catch(err)						\
 	clac();								\
+	pax_close_userland();						\
 	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
 } while (0)
 
@@ -463,8 +528,12 @@ struct __large_struct { unsigned long bu
  * On error, the variable @x is set to zero.
  */
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __get_user(x, ptr)	get_user((x), (ptr))
+#else
 #define __get_user(x, ptr)						\
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#endif
 
 /**
  * __put_user: - Write a simple value into user space, with less checking.
@@ -486,8 +555,12 @@ struct __large_struct { unsigned long bu
  * Returns zero on success, or -EFAULT on error.
  */
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __put_user(x, ptr)	put_user((x), (ptr))
+#else
 #define __put_user(x, ptr)						\
 	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#endif
 
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
@@ -505,7 +578,7 @@ struct __large_struct { unsigned long bu
 #define get_user_ex(x, ptr)	do {					\
 	unsigned long __gue_val;					\
 	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
-	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
+	(x) = (__typeof__(*(ptr)))__gue_val;				\
 } while (0)
 
 #define put_user_try		uaccess_try
@@ -536,17 +609,6 @@ extern struct movsl_mask {
 
 #define ARCH_HAS_NOCACHE_UACCESS 1
 
-#ifdef CONFIG_X86_32
-# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
-
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
-					   unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
-					 unsigned n);
-
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 # define copy_user_diag __compiletime_error
 #else
@@ -556,7 +618,7 @@ unsigned long __must_check _copy_to_user
 extern void copy_user_diag("copy_from_user() buffer size is too small")
 copy_from_user_overflow(void);
 extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+copy_to_user_overflow(void);
 
 #undef copy_user_diag
 
@@ -569,7 +631,7 @@ __copy_from_user_overflow(void) __asm__(
 
 extern void
 __compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
 
 #else
@@ -584,10 +646,16 @@ __copy_from_user_overflow(int size, unsi
 
 #endif
 
+#ifdef CONFIG_X86_32
+# include <asm/uaccess_32.h>
+#else
+# include <asm/uaccess_64.h>
+#endif
+
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	int sz = __compiletime_object_size(to);
+	size_t sz = __compiletime_object_size(to);
 
 	might_fault();
 
@@ -609,12 +677,15 @@ copy_from_user(void *to, const void __us
 	 * case, and do only runtime checking for non-constant sizes.
 	 */
 
-	if (likely(sz < 0 || sz >= n))
-		n = _copy_from_user(to, from, n);
-	else if(__builtin_constant_p(n))
-		copy_from_user_overflow();
-	else
-		__copy_from_user_overflow(sz, n);
+	if (likely(sz != (size_t)-1  && sz < n)) {
+		 if(__builtin_constant_p(n))
+			copy_from_user_overflow();
+		else
+			__copy_from_user_overflow(sz, n);
+	} else if (access_ok(VERIFY_READ, from, n))
+		n = __copy_from_user(to, from, n);
+	else if ((long)n > 0)
+		memset(to, 0, n);
 
 	return n;
 }
@@ -622,17 +693,18 @@ copy_from_user(void *to, const void __us
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	int sz = __compiletime_object_size(from);
+	size_t sz = __compiletime_object_size(from);
 
 	might_fault();
 
 	/* See the comment in copy_from_user() above. */
-	if (likely(sz < 0 || sz >= n))
-		n = _copy_to_user(to, from, n);
-	else if(__builtin_constant_p(n))
-		copy_to_user_overflow();
-	else
-		__copy_to_user_overflow(sz, n);
+	if (likely(sz != (size_t)-1  && sz < n)) {
+		 if(__builtin_constant_p(n))
+			copy_to_user_overflow();
+		else
+			__copy_to_user_overflow(sz, n);
+	} else if (access_ok(VERIFY_WRITE, to, n))
+		n = __copy_to_user(to, from, n);
 
 	return n;
 }
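
(About the access_ok() rewrite above: besides the usual range check, ranges that span more than one page are now walked page by page, reading one byte from each page with __get_user() and, for VERIFY_WRITE, writing it straight back with __put_user(), with a cond_resched() for large ranges. That way any fault is taken up front instead of in the middle of the copy. A rough user-space analogue, purely for illustration; prefault_range() is not a kernel helper and a 4 KiB page size is assumed for the buffer.)

#include <string.h>
#include <unistd.h>

/* Touch one byte per page of [addr, addr+len) so any fault happens before
 * the real copy, roughly what the reworked access_ok() above does. */
static void prefault_range(volatile char *addr, size_t len, int writable)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t off;

	for (off = 0; off < len; off += page) {
		char c = addr[off];		/* read faults the page in */
		if (writable)
			addr[off] = c;		/* write it back unchanged */
	}
}

int main(void)
{
	char buf[3 * 4096];

	memset(buf, 'x', sizeof(buf));
	prefault_range(buf, sizeof(buf), 1);
	return 0;
}
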
diff -ruNp linux-3.13.11/arch/x86/include/asm/uaccess_32.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess_32.h
--- linux-3.13.11/arch/x86/include/asm/uaccess_32.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess_32.h	2014-07-09 12:00:15.000000000 +0200
@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_u
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
+	check_object_size(from, n, true);
+
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -82,12 +87,16 @@ static __always_inline unsigned long __m
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
+
 	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
+	if ((long)n < 0)
+		return n;
+
 	/* Avoid zeroing the tail if the copy fails..
 	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
 	 * but as the zeroing behaviour is only significant when n is not
@@ -137,6 +146,12 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
+
+	if ((long)n < 0)
+		return n;
+
+	check_object_size(to, n, false);
+
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -159,6 +174,10 @@ static __always_inline unsigned long __c
 				const void __user *from, unsigned long n)
 {
 	might_fault();
+
+	if ((long)n < 0)
+		return n;
+
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -181,7 +200,10 @@ static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 				  unsigned long n)
 {
-       return __copy_from_user_ll_nocache_nozero(to, from, n);
+	if ((long)n < 0)
+		return n;
+
+	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
 
 #endif /* _ASM_X86_UACCESS_32_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/uaccess_64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess_64.h
--- linux-3.13.11/arch/x86/include/asm/uaccess_64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/uaccess_64.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,9 @@
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
+
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
 
 /*
  * Copy To/From Userspace
@@ -17,14 +20,14 @@
 
 /* Handles exceptions in both to and from, but doesn't do access_ok */
 __must_check unsigned long
-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
 __must_check unsigned long
-copy_user_generic_string(void *to, const void *from, unsigned len);
+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
 __must_check unsigned long
-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
 
 static __always_inline __must_check unsigned long
-copy_user_generic(void *to, const void *from, unsigned len)
+copy_user_generic(void *to, const void *from, unsigned long len)
 {
 	unsigned ret;
 
@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *
 }
 
 __must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
+copy_in_user(void __user *to, const void __user *from, unsigned long len);
 
 static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
 {
-	int ret = 0;
+	size_t sz = __compiletime_object_size(dst);
+	unsigned ret = 0;
+
+	if (size > INT_MAX)
+		return size;
+
+	check_object_size(dst, size, false);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+#endif
+
+	if (unlikely(sz != (size_t)-1 && sz < size)) {
+		 if(__builtin_constant_p(size))
+			copy_from_user_overflow();
+		else
+			__copy_from_user_overflow(sz, size);
+		return size;
+	}
 
 	if (!__builtin_constant_p(size))
-		return copy_user_generic(dst, (__force void *)src, size);
+		return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
 	switch (size) {
-	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+	case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
 			      ret, "b", "b", "=q", 1);
 		return ret;
-	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+	case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
 			      ret, "w", "w", "=r", 2);
 		return ret;
-	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+	case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
 			      ret, "l", "k", "=r", 4);
 		return ret;
-	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+	case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
 			      ret, "q", "", "=r", 8);
 		return ret;
 	case 10:
-		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+		__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
 			       ret, "q", "", "=r", 10);
 		if (unlikely(ret))
 			return ret;
 		__get_user_asm(*(u16 *)(8 + (char *)dst),
-			       (u16 __user *)(8 + (char __user *)src),
+			       (const u16 __user *)(8 + (const char __user *)src),
 			       ret, "w", "w", "=r", 2);
 		return ret;
 	case 16:
-		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+		__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
 			       ret, "q", "", "=r", 16);
 		if (unlikely(ret))
 			return ret;
 		__get_user_asm(*(u64 *)(8 + (char *)dst),
-			       (u64 __user *)(8 + (char __user *)src),
+			       (const u64 __user *)(8 + (const char __user *)src),
 			       ret, "q", "", "=r", 8);
 		return ret;
 	default:
-		return copy_user_generic(dst, (__force void *)src, size);
+		return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
 	}
 }
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
 	might_fault();
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
 static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
 {
-	int ret = 0;
+	size_t sz = __compiletime_object_size(src);
+	unsigned ret = 0;
+
+	if (size > INT_MAX)
+		return size;
+
+	check_object_size(src, size, true);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
+		return size;
+#endif
+
+	if (unlikely(sz != (size_t)-1 && sz < size)) {
+		 if(__builtin_constant_p(size))
+			copy_to_user_overflow();
+		else
+			__copy_to_user_overflow(sz, size);
+		return size;
+	}
 
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst, src, size);
+		return copy_user_generic((__force_kernel void *)____m(dst), src, size);
 	switch (size) {
-	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+	case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
 			      ret, "b", "b", "iq", 1);
 		return ret;
-	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+	case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
 			      ret, "w", "w", "ir", 2);
 		return ret;
-	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+	case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
 			      ret, "l", "k", "ir", 4);
 		return ret;
-	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+	case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
 			      ret, "q", "", "er", 8);
 		return ret;
 	case 10:
-		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+		__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
 			       ret, "q", "", "er", 10);
 		if (unlikely(ret))
 			return ret;
 		asm("":::"memory");
-		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+		__put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
 			       ret, "w", "w", "ir", 2);
 		return ret;
 	case 16:
-		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+		__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
 			       ret, "q", "", "er", 16);
 		if (unlikely(ret))
 			return ret;
 		asm("":::"memory");
-		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+		__put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
 			       ret, "q", "", "er", 8);
 		return ret;
 	default:
-		return copy_user_generic((__force void *)dst, src, size);
+		return copy_user_generic((__force_kernel void *)____m(dst), src, size);
 	}
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
 {
 	might_fault();
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
 static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
-	int ret = 0;
+	unsigned ret = 0;
 
 	might_fault();
+
+	if (size > INT_MAX)
+		return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+	if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
+		return size;
+#endif
+
 	if (!__builtin_constant_p(size))
-		return copy_user_generic((__force void *)dst,
-					 (__force void *)src, size);
+		return copy_user_generic((__force_kernel void *)____m(dst),
+					 (__force_kernel const void *)____m(src), size);
 	switch (size) {
 	case 1: {
 		u8 tmp;
-		__get_user_asm(tmp, (u8 __user *)src,
+		__get_user_asm(tmp, (const u8 __user *)src,
 			       ret, "b", "b", "=q", 1);
 		if (likely(!ret))
 			__put_user_asm(tmp, (u8 __user *)dst,
@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, con
 	}
 	case 2: {
 		u16 tmp;
-		__get_user_asm(tmp, (u16 __user *)src,
+		__get_user_asm(tmp, (const u16 __user *)src,
 			       ret, "w", "w", "=r", 2);
 		if (likely(!ret))
 			__put_user_asm(tmp, (u16 __user *)dst,
@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, con
 
 	case 4: {
 		u32 tmp;
-		__get_user_asm(tmp, (u32 __user *)src,
+		__get_user_asm(tmp, (const u32 __user *)src,
 			       ret, "l", "k", "=r", 4);
 		if (likely(!ret))
 			__put_user_asm(tmp, (u32 __user *)dst,
@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, con
 	}
 	case 8: {
 		u64 tmp;
-		__get_user_asm(tmp, (u64 __user *)src,
+		__get_user_asm(tmp, (const u64 __user *)src,
 			       ret, "q", "", "=r", 8);
 		if (likely(!ret))
 			__put_user_asm(tmp, (u64 __user *)dst,
@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, con
 		return ret;
 	}
 	default:
-		return copy_user_generic((__force void *)dst,
-					 (__force void *)src, size);
+		return copy_user_generic((__force_kernel void *)____m(dst),
+					 (__force_kernel const void *)____m(src), size);
 	}
 }
 
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
 {
-	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, src, size);
 }
 
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+static __must_check __always_inline unsigned long
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
 {
-	return __copy_to_user_nocheck((__force void *)dst, src, size);
+	return __copy_to_user_nocheck(dst, src, size);
 }
 
-extern long __copy_user_nocache(void *dst, const void __user *src,
-				unsigned size, int zerorest);
+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+				unsigned long size, int zerorest);
 
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+static inline unsigned long
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
 {
 	might_fault();
+
+	if (size > INT_MAX)
+		return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+#endif
+
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int
+static inline unsigned long
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
-				  unsigned size)
+				  unsigned long size)
 {
+	if (size > INT_MAX)
+		return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (!access_ok_noprefault(VERIFY_READ, src, size))
+		return size;
+#endif
+
 	return __copy_user_nocache(dst, src, size, 0);
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
 
 #endif /* _ASM_X86_UACCESS_64_H */
diff -ruNp linux-3.13.11/arch/x86/include/asm/word-at-a-time.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/word-at-a-time.h
--- linux-3.13.11/arch/x86/include/asm/word-at-a-time.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/word-at-a-time.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@
  * and shift, for example.
  */
 struct word_at_a_time {
-	const unsigned long one_bits, high_bits;
+	unsigned long one_bits, high_bits;
 };
 
 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
diff -ruNp linux-3.13.11/arch/x86/include/asm/x86_init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/x86_init.h
--- linux-3.13.11/arch/x86/include/asm/x86_init.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/x86_init.h	2014-07-09 12:00:15.000000000 +0200
@@ -129,7 +129,7 @@ struct x86_init_ops {
 	struct x86_init_timers		timers;
 	struct x86_init_iommu		iommu;
 	struct x86_init_pci		pci;
-};
+} __no_const;
 
 /**
  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
 	void (*setup_percpu_clockev)(void);
 	void (*early_percpu_clock_init)(void);
 	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
-};
+} __no_const;
 
 struct timespec;
 
@@ -168,7 +168,7 @@ struct x86_platform_ops {
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
-};
+} __no_const;
 
 struct pci_dev;
 struct msi_msg;
@@ -185,7 +185,7 @@ struct x86_msi_ops {
 	int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
 	u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
 	u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
-};
+} __no_const;
 
 struct IO_APIC_route_entry;
 struct io_apic_irq_attr;
@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
 				       unsigned int destination, int vector,
 				       struct io_apic_irq_attr *attr);
 	void		(*eoi_ioapic_pin)(int apic, int pin, int vector);
-};
+} __no_const;
 
 extern struct x86_init_ops x86_init;
 extern struct x86_cpuinit_ops x86_cpuinit;
diff -ruNp linux-3.13.11/arch/x86/include/asm/xen/page.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/xen/page.h
--- linux-3.13.11/arch/x86/include/asm/xen/page.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/xen/page.h	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct pa
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
 
diff -ruNp linux-3.13.11/arch/x86/include/asm/xsave.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/xsave.h
--- linux-3.13.11/arch/x86/include/asm/xsave.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/asm/xsave.h	2014-07-09 12:00:15.000000000 +0200
@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsav
 	if (unlikely(err))
 		return -EFAULT;
 
+	pax_open_userland();
 	__asm__ __volatile__(ASM_STAC "\n"
-			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "1:"
+			     __copyuser_seg
+			     ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
 			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsav
 			     : [err] "=r" (err)
 			     : "D" (buf), "a" (-1), "d" (-1), "0" (0)
 			     : "memory");
+	pax_close_userland();
 	return err;
 }
 
 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 {
 	int err;
-	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+	struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
 
+	pax_open_userland();
 	__asm__ __volatile__(ASM_STAC "\n"
-			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "1:"
+			     __copyuser_seg
+			     ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
 			     "2: " ASM_CLAC "\n"
 			     ".section .fixup,\"ax\"\n"
 			     "3:  movl $-1,%[err]\n"
@@ -102,6 +109,7 @@ static inline int xrestore_user(struct x
 			     : [err] "=r" (err)
 			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
 			     : "memory");	/* memory required? */
+	pax_close_userland();
 	return err;
 }
 
diff -ruNp linux-3.13.11/arch/x86/include/uapi/asm/e820.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/uapi/asm/e820.h
--- linux-3.13.11/arch/x86/include/uapi/asm/e820.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/uapi/asm/e820.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,7 +63,7 @@ struct e820map {
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-#define BIOS_BEGIN		0x000a0000
+#define BIOS_BEGIN		0x000c0000
 #define BIOS_END		0x00100000
 
 #define BIOS_ROM_BASE		0xffe00000
diff -ruNp linux-3.13.11/arch/x86/include/uapi/asm/ptrace-abi.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/uapi/asm/ptrace-abi.h
--- linux-3.13.11/arch/x86/include/uapi/asm/ptrace-abi.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/include/uapi/asm/ptrace-abi.h	2014-07-09 12:00:15.000000000 +0200
@@ -49,7 +49,6 @@
 #define EFLAGS 144
 #define RSP 152
 #define SS 160
-#define ARGOFFSET R11
 #endif /* __ASSEMBLY__ */
 
 /* top of stack page */
diff -ruNp linux-3.13.11/arch/x86/kernel/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/Makefile
--- linux-3.13.11/arch/x86/kernel/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@ obj-y			+= time.o ioport.o ldt.o dumpsta
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
-obj-$(CONFIG_X86_32)	+= i386_ksyms_32.o
+obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-y			+= syscall_$(BITS).o
 obj-$(CONFIG_X86_64)	+= vsyscall_64.o
diff -ruNp linux-3.13.11/arch/x86/kernel/acpi/boot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/boot.c
--- linux-3.13.11/arch/x86/kernel/acpi/boot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/boot.c	2014-07-09 12:00:15.000000000 +0200
@@ -1315,7 +1315,7 @@ static int __init dmi_ignore_irq0_timer_
  * If your system is blacklisted here, but you find that acpi=force
  * works for you, please contact linux-acpi@vger.kernel.org
  */
-static struct dmi_system_id __initdata acpi_dmi_table[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
 	/*
 	 * Boxes that need ACPI disabled
 	 */
@@ -1390,7 +1390,7 @@ static struct dmi_system_id __initdata a
 };
 
 /* second table for DMI checks that should run after early-quirks */
-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
 	/*
 	 * HP laptops which use a DSDT reporting as HP/SB400/10000,
 	 * which includes some code which overrides all temperature
diff -ruNp linux-3.13.11/arch/x86/kernel/acpi/sleep.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/sleep.c
--- linux-3.13.11/arch/x86/kernel/acpi/sleep.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/sleep.c	2014-07-09 12:00:15.000000000 +0200
@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
 #else /* CONFIG_64BIT */
 #ifdef CONFIG_SMP
 	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
+
+	pax_open_kernel();
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
+	pax_close_kernel();
+
 	initial_gs = per_cpu_offset(smp_processor_id());
 #endif
 	initial_code = (unsigned long)wakeup_long64;
diff -ruNp linux-3.13.11/arch/x86/kernel/acpi/wakeup_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/wakeup_32.S
--- linux-3.13.11/arch/x86/kernel/acpi/wakeup_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/acpi/wakeup_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -29,13 +29,11 @@ wakeup_pmode_return:
 	# and restore the stack ... but you need gdt for this to work
 	movl	saved_context_esp, %esp
 
-	movl	%cs:saved_magic, %eax
-	cmpl	$0x12345678, %eax
+	cmpl	$0x12345678, saved_magic
 	jne	bogus_magic
 
 	# jump to place where we left off
-	movl	saved_eip, %eax
-	jmp	*%eax
+	jmp	*(saved_eip)
 
 bogus_magic:
 	jmp	bogus_magic
diff -ruNp linux-3.13.11/arch/x86/kernel/alternative.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/alternative.c
--- linux-3.13.11/arch/x86/kernel/alternative.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/alternative.c	2014-07-09 12:00:15.000000000 +0200
@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives
 	 */
 	for (a = start; a < end; a++) {
 		instr = (u8 *)&a->instr_offset + a->instr_offset;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+		if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
+			instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 		BUG_ON(a->replacementlen > a->instrlen);
 		BUG_ON(a->instrlen > sizeof(insnbuf));
@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const
 	for (poff = start; poff < end; poff++) {
 		u8 *ptr = (u8 *)poff + *poff;
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+		if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
+			ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
 		if (!*poff || ptr < text || ptr >= text_end)
 			continue;
 		/* turn DS segment override prefix into lock prefix */
-		if (*ptr == 0x3e)
+		if (*ktla_ktva(ptr) == 0x3e)
 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
 	}
 	mutex_unlock(&text_mutex);
@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(cons
 	for (poff = start; poff < end; poff++) {
 		u8 *ptr = (u8 *)poff + *poff;
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+		if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
+			ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
 		if (!*poff || ptr < text || ptr >= text_end)
 			continue;
 		/* turn lock prefix into DS segment override prefix */
-		if (*ptr == 0xf0)
+		if (*ktla_ktva(ptr) == 0xf0)
 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
 	}
 	mutex_unlock(&text_mutex);
@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(str
 
 		BUG_ON(p->len > MAX_PATCH_LEN);
 		/* prep the buffer with the original instructions */
-		memcpy(insnbuf, p->instr, p->len);
+		memcpy(insnbuf, ktla_ktva(p->instr), p->len);
 		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
 					 (unsigned long)p->instr, p->len);
 
@@ -505,7 +524,7 @@ void __init alternative_instructions(voi
 	if (!uniproc_patched || num_possible_cpus() == 1)
 		free_init_pages("SMP alternatives",
 				(unsigned long)__smp_locks,
-				(unsigned long)__smp_locks_end);
+				PAGE_ALIGN((unsigned long)__smp_locks_end));
 #endif
 
 	apply_paravirt(__parainstructions, __parainstructions_end);
@@ -525,13 +544,17 @@ void __init alternative_instructions(voi
  * instructions. And on the local CPU you need to be protected again NMI or MCE
  * handlers seeing an inconsistent instruction while you patch.
  */
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
+void *__kprobes text_poke_early(void *addr, const void *opcode,
 					      size_t len)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	memcpy(addr, opcode, len);
+
+	pax_open_kernel();
+	memcpy(ktla_ktva(addr), opcode, len);
 	sync_core();
+	pax_close_kernel();
+
 	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(v
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-	unsigned long flags;
-	char *vaddr;
+	unsigned char *vaddr = ktla_ktva(addr);
 	struct page *pages[2];
-	int i;
+	size_t i;
 
 	if (!core_kernel_text((unsigned long)addr)) {
-		pages[0] = vmalloc_to_page(addr);
-		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
+		pages[0] = vmalloc_to_page(vaddr);
+		pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
 	} else {
-		pages[0] = virt_to_page(addr);
+		pages[0] = virt_to_page(vaddr);
 		WARN_ON(!PageReserved(pages[0]));
-		pages[1] = virt_to_page(addr + PAGE_SIZE);
+		pages[1] = virt_to_page(vaddr + PAGE_SIZE);
 	}
 	BUG_ON(!pages[0]);
-	local_irq_save(flags);
-	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
-	if (pages[1])
-		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
-	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
-	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	clear_fixmap(FIX_TEXT_POKE0);
-	if (pages[1])
-		clear_fixmap(FIX_TEXT_POKE1);
-	local_flush_tlb();
-	sync_core();
-	/* Could also do a CLFLUSH here to speed up CPU recovery; but
-	   that causes hangs on some VIA CPUs. */
+	text_poke_early(addr, opcode, len);
 	for (i = 0; i < len; i++)
-		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
-	local_irq_restore(flags);
+		BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
 	return addr;
 }
 
@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *re
 	if (likely(!bp_patching_in_progress))
 		return 0;
 
-	if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
+	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
 		return 0;
 
 	/* set up the specified breakpoint handler */
@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *re
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
-	unsigned char int3 = 0xcc;
+	const unsigned char int3 = 0xcc;
 
 	bp_int3_handler = handler;
 	bp_int3_addr = (u8 *)addr + sizeof(int3);
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/apic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic.c
--- linux-3.13.11/arch/x86/kernel/apic/apic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic.c	2014-07-09 12:00:15.000000000 +0200
@@ -191,7 +191,7 @@ int first_system_vector = 0xfe;
 /*
  * Debug level, exported for io_apic.c
  */
-unsigned int apic_verbosity;
+int apic_verbosity;
 
 int pic_mode;
 
@@ -1986,7 +1986,7 @@ static inline void __smp_error_interrupt
 	apic_write(APIC_ESR, 0);
 	v1 = apic_read(APIC_ESR);
 	ack_APIC_irq();
-	atomic_inc(&irq_err_count);
+	atomic_inc_unchecked(&irq_err_count);
 
 	apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
 		    smp_processor_id(), v0 , v1);
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/apic_flat_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic_flat_64.c
--- linux-3.13.11/arch/x86/kernel/apic/apic_flat_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic_flat_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -157,7 +157,7 @@ static int flat_probe(void)
 	return 1;
 }
 
-static struct apic apic_flat =  {
+static struct apic apic_flat __read_only =  {
 	.name				= "flat",
 	.probe				= flat_probe,
 	.acpi_madt_oem_check		= flat_acpi_madt_oem_check,
@@ -271,7 +271,7 @@ static int physflat_probe(void)
 	return 0;
 }
 
-static struct apic apic_physflat =  {
+static struct apic apic_physflat __read_only =  {
 
 	.name				= "physical flat",
 	.probe				= physflat_probe,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/apic_noop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic_noop.c
--- linux-3.13.11/arch/x86/kernel/apic/apic_noop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/apic_noop.c	2014-07-09 12:00:15.000000000 +0200
@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32
 	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
-struct apic apic_noop = {
+struct apic apic_noop __read_only = {
 	.name				= "noop",
 	.probe				= noop_probe,
 	.acpi_madt_oem_check		= NULL,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/bigsmp_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/bigsmp_32.c
--- linux-3.13.11/arch/x86/kernel/apic/bigsmp_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/bigsmp_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
 	return dmi_bigsmp;
 }
 
-static struct apic apic_bigsmp = {
+static struct apic apic_bigsmp __read_only = {
 
 	.name				= "bigsmp",
 	.probe				= probe_bigsmp,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/es7000_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/es7000_32.c
--- linux-3.13.11/arch/x86/kernel/apic/es7000_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/es7000_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(
 	return ret && es7000_apic_is_cluster();
 }
 
-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
-static struct apic __refdata apic_es7000_cluster = {
+static struct apic apic_es7000_cluster __read_only = {
 
 	.name				= "es7000",
 	.probe				= probe_es7000,
@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000
 	.x86_32_early_logical_apicid	= es7000_early_logical_apicid,
 };
 
-static struct apic __refdata apic_es7000 = {
+static struct apic apic_es7000 __read_only = {
 
 	.name				= "es7000",
 	.probe				= probe_es7000,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/io_apic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/io_apic.c
--- linux-3.13.11/arch/x86/kernel/apic/io_apic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/io_apic.c	2014-07-09 12:00:15.000000000 +0200
@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-void lock_vector_lock(void)
+void lock_vector_lock(void) __acquires(vector_lock)
 {
 	/* Used to the online set of cpus does not change
 	 * during assign_irq_vector.
@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
 	raw_spin_lock(&vector_lock);
 }
 
-void unlock_vector_lock(void)
+void unlock_vector_lock(void) __releases(vector_lock)
 {
 	raw_spin_unlock(&vector_lock);
 }
@@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_dat
 	ack_APIC_irq();
 }
 
-atomic_t irq_mis_count;
+atomic_unchecked_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
@@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_da
 	 * at the cpu.
 	 */
 	if (!(v & (1 << (i & 0x1f)))) {
-		atomic_inc(&irq_mis_count);
+		atomic_inc_unchecked(&irq_mis_count);
 
 		eoi_ioapic_irq(irq, cfg);
 	}
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/numaq_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/numaq_32.c
--- linux-3.13.11/arch/x86/kernel/apic/numaq_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/numaq_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(voi
 		(u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
 }
 
-/* Use __refdata to keep false positive warning calm.  */
-static struct apic __refdata apic_numaq = {
+static struct apic apic_numaq __read_only = {
 
 	.name				= "NUMAQ",
 	.probe				= probe_numaq,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/probe_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/probe_32.c
--- linux-3.13.11/arch/x86/kernel/apic/probe_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/probe_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -72,7 +72,7 @@ static int probe_default(void)
 	return 1;
 }
 
-static struct apic apic_default = {
+static struct apic apic_default __read_only = {
 
 	.name				= "default",
 	.probe				= probe_default,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/summit_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/summit_32.c
--- linux-3.13.11/arch/x86/kernel/apic/summit_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/summit_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -486,7 +486,7 @@ void setup_summit(void)
 }
 #endif
 
-static struct apic apic_summit = {
+static struct apic apic_summit __read_only = {
 
 	.name				= "summit",
 	.probe				= probe_summit,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/x2apic_cluster.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_cluster.c
--- linux-3.13.11/arch/x86/kernel/apic/x2apic_cluster.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_cluster.c	2014-07-09 12:00:15.000000000 +0200
@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata x2apic_cpu_notifier = {
+static struct notifier_block x2apic_cpu_notifier = {
 	.notifier_call = update_clusterinfo,
 };
 
@@ -235,7 +235,7 @@ static void cluster_vector_allocation_do
 		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
 }
 
-static struct apic apic_x2apic_cluster = {
+static struct apic apic_x2apic_cluster __read_only = {
 
 	.name				= "cluster x2apic",
 	.probe				= x2apic_cluster_probe,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/x2apic_phys.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_phys.c
--- linux-3.13.11/arch/x86/kernel/apic/x2apic_phys.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_phys.c	2014-07-09 12:00:15.000000000 +0200
@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
 	return apic == &apic_x2apic_phys;
 }
 
-static struct apic apic_x2apic_phys = {
+static struct apic apic_x2apic_phys __read_only = {
 
 	.name				= "physical x2apic",
 	.probe				= x2apic_phys_probe,
diff -ruNp linux-3.13.11/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_uv_x.c
--- linux-3.13.11/arch/x86/kernel/apic/x2apic_uv_x.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apic/x2apic_uv_x.c	2014-07-09 12:00:15.000000000 +0200
@@ -350,7 +350,7 @@ static int uv_probe(void)
 	return apic == &apic_x2apic_uv_x;
 }
 
-static struct apic __refdata apic_x2apic_uv_x = {
+static struct apic apic_x2apic_uv_x __read_only = {
 
 	.name				= "UV large system",
 	.probe				= uv_probe,
diff -ruNp linux-3.13.11/arch/x86/kernel/apm_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apm_32.c
--- linux-3.13.11/arch/x86/kernel/apm_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/apm_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
  * This is for buggy BIOS's that refer to (real mode) segment 0x40
  * even though they are called in protected mode.
  */
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
 			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 static const char driver_version[] = "1.16ac";	/* no spaces */
@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
 	BUG_ON(cpu != 0);
 	gdt = get_cpu_gdt_table(cpu);
 	save_desc_40 = gdt[0x40 / 8];
+
+	pax_open_kernel();
 	gdt[0x40 / 8] = bad_bios_desc;
+	pax_close_kernel();
 
 	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
 			  &call->esi);
 	APM_DO_RESTORE_SEGS;
 	apm_irq_restore(flags);
+
+	pax_open_kernel();
 	gdt[0x40 / 8] = save_desc_40;
+	pax_close_kernel();
+
 	put_cpu();
 
 	return call->eax & 0xff;
@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void
 	BUG_ON(cpu != 0);
 	gdt = get_cpu_gdt_table(cpu);
 	save_desc_40 = gdt[0x40 / 8];
+
+	pax_open_kernel();
 	gdt[0x40 / 8] = bad_bios_desc;
+	pax_close_kernel();
 
 	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void
 					 &call->eax);
 	APM_DO_RESTORE_SEGS;
 	apm_irq_restore(flags);
+
+	pax_open_kernel();
 	gdt[0x40 / 8] = save_desc_40;
+	pax_close_kernel();
+
 	put_cpu();
 	return error;
 }
@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
 	 * code to that CPU.
 	 */
 	gdt = get_cpu_gdt_table(0);
+
+	pax_open_kernel();
 	set_desc_base(&gdt[APM_CS >> 3],
 		 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
 	set_desc_base(&gdt[APM_CS_16 >> 3],
 		 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
 	set_desc_base(&gdt[APM_DS >> 3],
 		 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
+	pax_close_kernel();
 
 	proc_create("apm", 0, NULL, &apm_file_ops);
 
diff -ruNp linux-3.13.11/arch/x86/kernel/asm-offsets.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/asm-offsets.c
--- linux-3.13.11/arch/x86/kernel/asm-offsets.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/asm-offsets.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,6 +32,8 @@ void common(void) {
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
+	OFFSET(TI_lowest_stack, thread_info, lowest_stack);
+	DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
 
 	BLANK();
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
@@ -52,8 +54,26 @@ void common(void) {
 	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+
+#ifdef CONFIG_PAX_KERNEXEC
+	OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
+	OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
+#ifdef CONFIG_X86_64
+	OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
+#endif
 #endif
 
+#endif
+
+	BLANK();
+	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
+	DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
+
 #ifdef CONFIG_XEN
 	BLANK();
 	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
diff -ruNp linux-3.13.11/arch/x86/kernel/asm-offsets_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/asm-offsets_64.c
--- linux-3.13.11/arch/x86/kernel/asm-offsets_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/asm-offsets_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,6 +77,7 @@ int main(void)
 	BLANK();
 #undef ENTRY
 
+	DEFINE(TSS_size, sizeof(struct tss_struct));
 	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
 	BLANK();
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/Makefile
--- linux-3.13.11/arch/x86/kernel/cpu/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_perf_event.o = -pg
 endif
 
-# Make sure load_percpu_segment has no stackprotector
-nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_common.o		:= $(nostackp)
-
 obj-y			:= intel_cacheinfo.o scattered.o topology.o
 obj-y			+= proc.o capflags.o powerflags.o common.o
 obj-y			+= rdrand.o
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/amd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/amd.c
--- linux-3.13.11/arch/x86/kernel/cpu/amd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/amd.c	2014-07-09 12:00:15.000000000 +0200
@@ -753,7 +753,7 @@ static void init_amd(struct cpuinfo_x86
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
+	if (c->x86 == 6) {
 		/* Duron Rev A0 */
 		if (c->x86_model == 3 && c->x86_mask == 0)
 			size = 64;
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/common.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/common.c
--- linux-3.13.11/arch/x86/kernel/cpu/common.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/common.c	2014-07-09 12:00:15.000000000 +0200
@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu
 
 static const struct cpu_dev *this_cpu = &default_cpu;
 
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-#ifdef CONFIG_X86_64
-	/*
-	 * We need valid kernel segments for data and code in long mode too
-	 * IRET will check the segment types  kkeil 2000/10/28
-	 * Also sysret mandates a special GDT layout
-	 *
-	 * TLS descriptors are currently at a different place compared to i386.
-	 * Hopefully nobody expects them at a fixed place (Wine?)
-	 */
-	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
-	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
-	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
-	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
-	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
-	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
-#else
-	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
-	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
-	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
-	/*
-	 * Segments used for calling PnP BIOS have byte granularity.
-	 * They code segments and data segments have fixed 64k limits,
-	 * the transfer segment sizes are set at run time.
-	 */
-	/* 32-bit code */
-	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-	/* 16-bit code */
-	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
-	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
-	/* 16-bit data */
-	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
-	/*
-	 * The APM segments have byte granularity and their bases
-	 * are set at run time.  All have 64k limits.
-	 */
-	/* 32-bit code */
-	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-	/* 16-bit code */
-	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-	/* data */
-	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
-
-	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-	GDT_STACK_CANARY_INIT
-#endif
-} };
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-
 static int __init x86_xsave_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
@@ -293,6 +239,59 @@ static __always_inline void setup_smap(s
 	}
 }
 
+#ifdef CONFIG_X86_64
+static __init int setup_disable_pcid(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (clone_pgd_mask != ~(pgdval_t)0UL)
+		pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+#endif
+
+	return 1;
+}
+__setup("nopcid", setup_disable_pcid);
+
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+	if (!cpu_has(c, X86_FEATURE_PCID)) {
+		clear_cpu_cap(c, X86_FEATURE_INVPCID);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+		if (clone_pgd_mask != ~(pgdval_t)0UL) {
+			pax_open_kernel();
+			pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+			pax_close_kernel();
+			printk("PAX: slow and weak UDEREF enabled\n");
+		} else
+			printk("PAX: UDEREF disabled\n");
+#endif
+
+		return;
+	}
+
+	printk("PAX: PCID detected\n");
+	set_in_cr4(X86_CR4_PCIDE);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pax_open_kernel();
+	clone_pgd_mask = ~(pgdval_t)0UL;
+	pax_close_kernel();
+	if (pax_user_shadow_base)
+		printk("PAX: weak UDEREF enabled\n");
+	else {
+		set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
+		printk("PAX: strong UDEREF enabled\n");
+	}
+#endif
+
+	if (cpu_has(c, X86_FEATURE_INVPCID))
+		printk("PAX: INVPCID detected\n");
+}
+#endif
+
 /*
  * Some CPU features depend on higher CPUID levels, which may not always
  * be available due to CPUID level capping or broken virtualization
@@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
 
-	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
+	gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 	/* Reload the per-cpu base */
@@ -882,6 +881,10 @@ static void identify_cpu(struct cpuinfo_
 	setup_smep(c);
 	setup_smap(c);
 
+#ifdef CONFIG_X86_64
+	setup_pcid(c);
+#endif
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."
@@ -890,6 +893,10 @@ static void identify_cpu(struct cpuinfo_
 	/* Filter out anything that depends on CPUID levels we don't have */
 	filter_cpuid_features(c, true);
 
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+#endif
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		const char *p;
@@ -1077,10 +1084,12 @@ static __init int setup_disablecpuid(cha
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
+EXPORT_PER_CPU_SYMBOL(current_tinfo);
+
 #ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
-				    (unsigned long) debug_idt_table };
+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
@@ -1094,7 +1103,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
 EXPORT_PER_CPU_SYMBOL(current_task);
 
 DEFINE_PER_CPU(unsigned long, kernel_stack) =
-	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+	(unsigned long)&init_thread_union - 16 + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
@@ -1244,7 +1253,7 @@ void cpu_init(void)
 	load_ucode_ap();
 
 	cpu = stack_smp_processor_id();
-	t = &per_cpu(init_tss, cpu);
+	t = init_tss + cpu;
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1279,7 +1288,6 @@ void cpu_init(void)
 	wrmsrl(MSR_KERNEL_GS_BASE, 0);
 	barrier();
 
-	x86_configure_nx();
 	enable_x2apic();
 
 	/*
@@ -1331,7 +1339,7 @@ void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
+	struct tss_struct *t = init_tss + cpu;
 	struct thread_struct *thread = &curr->thread;
 
 	show_ucode_info_early();
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/intel_cacheinfo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/intel_cacheinfo.c
--- linux-3.13.11/arch/x86/kernel/cpu/intel_cacheinfo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/intel_cacheinfo.c	2014-07-09 12:00:15.000000000 +0200
@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[]
 };
 
 #ifdef CONFIG_AMD_NB
+static struct attribute *default_attrs_amd_nb[] = {
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&physical_line_partition.attr,
+	&ways_of_associativity.attr,
+	&number_of_sets.attr,
+	&size.attr,
+	&shared_cpu_map.attr,
+	&shared_cpu_list.attr,
+	NULL,
+	NULL,
+	NULL,
+	NULL
+};
+
 static struct attribute **amd_l3_attrs(void)
 {
 	static struct attribute **attrs;
@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(v
 
 	n = ARRAY_SIZE(default_attrs);
 
-	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-		n += 2;
-
-	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-		n += 1;
-
-	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
-	if (attrs == NULL)
-		return attrs = default_attrs;
-
-	for (n = 0; default_attrs[n]; n++)
-		attrs[n] = default_attrs[n];
+	attrs = default_attrs_amd_nb;
 
 	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
 		attrs[n++] = &cache_disable_0.attr;
@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
 	.default_attrs	= default_attrs,
 };
 
+#ifdef CONFIG_AMD_NB
+static struct kobj_type ktype_cache_amd_nb = {
+	.sysfs_ops	= &sysfs_ops,
+	.default_attrs	= default_attrs_amd_nb,
+};
+#endif
+
 static struct kobj_type ktype_percpu_entry = {
 	.sysfs_ops	= &sysfs_ops,
 };
@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *
 		return retval;
 	}
 
+#ifdef CONFIG_AMD_NB
+	amd_l3_attrs();
+#endif
+
 	for (i = 0; i < num_cache_leaves; i++) {
+		struct kobj_type *ktype;
+
 		this_object = INDEX_KOBJECT_PTR(cpu, i);
 		this_object->cpu = cpu;
 		this_object->index = i;
 
 		this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-		ktype_cache.default_attrs = default_attrs;
+		ktype = &ktype_cache;
 #ifdef CONFIG_AMD_NB
 		if (this_leaf->base.nb)
-			ktype_cache.default_attrs = amd_l3_attrs();
+			ktype = &ktype_cache_amd_nb;
 #endif
 		retval = kobject_init_and_add(&(this_object->kobj),
-					      &ktype_cache,
+					      ktype,
 					      per_cpu(ici_cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/mcheck/mce.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/mce.c
--- linux-3.13.11/arch/x86/kernel/cpu/mcheck/mce.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/mce.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,6 +45,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/local.h>
 
 #include "mce-internal.h"
 
@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
 			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 				m->cs, m->ip);
 
-		if (m->cs == __KERNEL_CS)
+		if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
 			print_symbol("{%s}", m->ip);
 		pr_cont("\n");
 	}
@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
 
-static atomic_t mce_paniced;
+static atomic_unchecked_t mce_paniced;
 
 static int fake_panic;
-static atomic_t mce_fake_paniced;
+static atomic_unchecked_t mce_fake_paniced;
 
 /* Panic in progress. Enable interrupts and wait for final IPI */
 static void wait_for_panic(void)
@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct
 		/*
 		 * Make sure only one CPU runs in machine check panic
 		 */
-		if (atomic_inc_return(&mce_paniced) > 1)
+		if (atomic_inc_return_unchecked(&mce_paniced) > 1)
 			wait_for_panic();
 		barrier();
 
@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct
 		console_verbose();
 	} else {
 		/* Don't log too much for fake panic */
-		if (atomic_inc_return(&mce_fake_paniced) > 1)
+		if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
 			return;
 	}
 	/* First print corrected ones that are still unlogged */
@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct
 	if (!fake_panic) {
 		if (panic_timeout == 0)
 			panic_timeout = mca_cfg.panic_timeout;
-		panic(msg);
+		panic("%s", msg);
 	} else
 		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
 }
@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
 	 * might have been modified by someone else.
 	 */
 	rmb();
-	if (atomic_read(&mce_paniced))
+	if (atomic_read_unchecked(&mce_paniced))
 		wait_for_panic();
 	if (!mca_cfg.monarch_timeout)
 		goto out;
@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(str
 }
 
 /* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
 						unexpected_machine_check;
 
 /*
@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86
 		return;
 	}
 
+	pax_open_kernel();
 	machine_check_vector = do_machine_check;
+	pax_close_kernel();
 
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;	/* #times opened */
+static local_t mce_chrdev_open_count;	/* #times opened */
 static int mce_chrdev_open_exclu;	/* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode
 	spin_lock(&mce_chrdev_state_lock);
 
 	if (mce_chrdev_open_exclu ||
-	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
+	    (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
 		spin_unlock(&mce_chrdev_state_lock);
 
 		return -EBUSY;
@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode
 
 	if (file->f_flags & O_EXCL)
 		mce_chrdev_open_exclu = 1;
-	mce_chrdev_open_count++;
+	local_inc(&mce_chrdev_open_count);
 
 	spin_unlock(&mce_chrdev_state_lock);
 
@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct ino
 {
 	spin_lock(&mce_chrdev_state_lock);
 
-	mce_chrdev_open_count--;
+	local_dec(&mce_chrdev_open_count);
 	mce_chrdev_open_exclu = 0;
 
 	spin_unlock(&mce_chrdev_state_lock);
@@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
 
 	for (i = 0; i < mca_cfg.banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
-		struct device_attribute *a = &b->attr;
+		device_attribute_no_const *a = &b->attr;
 
 		sysfs_attr_init(&a->attr);
 		a->attr.name	= b->attrname;
@@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
 static void mce_reset(void)
 {
 	cpu_missing = 0;
-	atomic_set(&mce_fake_paniced, 0);
+	atomic_set_unchecked(&mce_fake_paniced, 0);
 	atomic_set(&mce_executing, 0);
 	atomic_set(&mce_callin, 0);
 	atomic_set(&global_nwo, 0);
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/mcheck/p5.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/p5.c
--- linux-3.13.11/arch/x86/kernel/cpu/mcheck/p5.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/p5.c	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/pgtable.h>
 
 /* By default disabled */
 int mce_p5_enabled __read_mostly;
@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo
 	if (!cpu_has(c, X86_FEATURE_MCE))
 		return;
 
+	pax_open_kernel();
 	machine_check_vector = pentium_machine_check;
+	pax_close_kernel();
 	/* Make sure the vector pointer is visible before we enable MCEs: */
 	wmb();
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/mcheck/winchip.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/winchip.c
--- linux-3.13.11/arch/x86/kernel/cpu/mcheck/winchip.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mcheck/winchip.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/pgtable.h>
 
 /* Machine check handler for WinChip C6: */
 static void winchip_machine_check(struct pt_regs *regs, long error_code)
@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_
 {
 	u32 lo, hi;
 
+	pax_open_kernel();
 	machine_check_vector = winchip_machine_check;
+	pax_close_kernel();
 	/* Make sure the vector pointer is visible before we enable MCEs: */
 	wmb();
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/mtrr/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mtrr/main.c
--- linux-3.13.11/arch/x86/kernel/cpu/mtrr/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mtrr/main.c	2014-07-09 12:00:15.000000000 +0200
@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
 u64 size_or_mask, size_and_mask;
 static bool mtrr_aps_delayed_init;
 
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
 
 const struct mtrr_ops *mtrr_if;
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/mtrr/mtrr.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mtrr/mtrr.h
--- linux-3.13.11/arch/x86/kernel/cpu/mtrr/mtrr.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/mtrr/mtrr.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,7 @@ struct mtrr_ops {
 	int	(*validate_add_page)(unsigned long base, unsigned long size,
 				     unsigned int type);
 	int	(*have_wrcomb)(void);
-};
+} __do_const;
 
 extern int generic_get_free_region(unsigned long base, unsigned long size,
 				   int replace_reg);
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/perf_event.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event.c
--- linux-3.13.11/arch/x86/kernel/cpu/perf_event.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event.c	2014-07-09 12:00:15.000000000 +0200
@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
 	pr_info("no hardware sampling interrupt available.\n");
 }
 
-static struct attribute_group x86_pmu_format_group = {
+static attribute_group_no_const x86_pmu_format_group = {
 	.name = "format",
 	.attrs = NULL,
 };
@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] =
 	NULL,
 };
 
-static struct attribute_group x86_pmu_events_group = {
+static attribute_group_no_const x86_pmu_events_group = {
 	.name = "events",
 	.attrs = events_attr,
 };
@@ -1961,7 +1961,7 @@ static unsigned long get_segment_base(un
 		if (idx > GDT_ENTRIES)
 			return 0;
 
-		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+		desc = get_cpu_gdt_table(smp_processor_id());
 	}
 
 	return get_desc_base(desc + idx);
@@ -2051,7 +2051,7 @@ perf_callchain_user(struct perf_callchai
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
-		fp = frame.next_frame;
+		fp = (const void __force_user *)frame.next_frame;
 	}
 }
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/perf_event_amd_iommu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_amd_iommu.c
--- linux-3.13.11/arch/x86/kernel/cpu/perf_event_amd_iommu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_amd_iommu.c	2014-07-09 12:00:15.000000000 +0200
@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_e
 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
 {
 	struct attribute **attrs;
-	struct attribute_group *attr_group;
+	attribute_group_no_const *attr_group;
 	int i = 0, j;
 
 	while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel.c
--- linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel.c	2014-07-09 12:00:15.000000000 +0200
@@ -2314,10 +2314,10 @@ __init int intel_pmu_init(void)
 	 * v2 and above have a perf capabilities MSR
 	 */
 	if (version > 1) {
-		u64 capabilities;
+		u64 capabilities = x86_pmu.intel_cap.capabilities;
 
-		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
-		x86_pmu.intel_cap.capabilities = capabilities;
+		if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
+			x86_pmu.intel_cap.capabilities = capabilities;
 	}
 
 	intel_ds_init();
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel_uncore.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel_uncore.c
--- linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel_uncore.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel_uncore.c	2014-07-09 12:00:15.000000000 +0200
@@ -3318,7 +3318,7 @@ static void __init uncore_types_exit(str
 static int __init uncore_type_init(struct intel_uncore_type *type)
 {
 	struct intel_uncore_pmu *pmus;
-	struct attribute_group *attr_group;
+	attribute_group_no_const *attr_group;
 	struct attribute **attrs;
 	int i, j;
 
diff -ruNp linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel_uncore.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel_uncore.h
--- linux-3.13.11/arch/x86/kernel/cpu/perf_event_intel_uncore.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpu/perf_event_intel_uncore.h	2014-07-09 12:00:15.000000000 +0200
@@ -498,7 +498,7 @@ struct intel_uncore_box {
 struct uncore_event_desc {
 	struct kobj_attribute attr;
 	const char *config;
-};
+} __do_const;
 
 #define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
 {								\
diff -ruNp linux-3.13.11/arch/x86/kernel/cpuid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpuid.c
--- linux-3.13.11/arch/x86/kernel/cpuid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/cpuid.c	2014-07-09 12:00:15.000000000 +0200
@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(stru
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata cpuid_class_cpu_notifier =
+static struct notifier_block cpuid_class_cpu_notifier =
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
diff -ruNp linux-3.13.11/arch/x86/kernel/crash.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/crash.c
--- linux-3.13.11/arch/x86/kernel/crash.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/crash.c	2014-07-09 12:00:15.000000000 +0200
@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu,
 {
 #ifdef CONFIG_X86_32
 	struct pt_regs fixed_regs;
-#endif
 
-#ifdef CONFIG_X86_32
-	if (!user_mode_vm(regs)) {
+	if (!user_mode(regs)) {
 		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
diff -ruNp linux-3.13.11/arch/x86/kernel/crash_dump_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/crash_dump_64.c
--- linux-3.13.11/arch/x86/kernel/crash_dump_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/crash_dump_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long p
 		return -ENOMEM;
 
 	if (userbuf) {
-		if (copy_to_user(buf, vaddr + offset, csize)) {
+		if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
 			iounmap(vaddr);
 			return -EFAULT;
 		}
diff -ruNp linux-3.13.11/arch/x86/kernel/doublefault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/doublefault.c
--- linux-3.13.11/arch/x86/kernel/doublefault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/doublefault.c	2014-07-09 12:00:15.000000000 +0200
@@ -13,7 +13,7 @@
 
 #define DOUBLEFAULT_STACKSIZE (1024)
 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
 
 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
 
@@ -23,7 +23,7 @@ static void doublefault_fn(void)
 	unsigned long gdt, tss;
 
 	native_store_gdt(&gdt_desc);
-	gdt = gdt_desc.address;
+	gdt = (unsigned long)gdt_desc.address;
 
 	printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
 
@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cach
 		/* 0x2 bit is always set */
 		.flags		= X86_EFLAGS_SF | 0x2,
 		.sp		= STACK_START,
-		.es		= __USER_DS,
+		.es		= __KERNEL_DS,
 		.cs		= __KERNEL_CS,
 		.ss		= __KERNEL_DS,
-		.ds		= __USER_DS,
+		.ds		= __KERNEL_DS,
 		.fs		= __KERNEL_PERCPU,
 
 		.__cr3		= __pa_nodebug(swapper_pg_dir),
diff -ruNp linux-3.13.11/arch/x86/kernel/dumpstack.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack.c
--- linux-3.13.11/arch/x86/kernel/dumpstack.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack.c	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,9 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -40,16 +43,14 @@ void printk_address(unsigned long addres
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 {
-	struct task_struct *task;
 	unsigned long ret_addr;
 	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	task = tinfo->task;
 	index = task->curr_ret_stack;
 
 	if (!task->ret_stack || index < *graph)
@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long ad
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long ad
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
-			void *p, unsigned int size, void *end)
+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
 {
-	void *t = tinfo;
 	if (end) {
 		if (p < end && p >= (end-THREAD_SIZE))
 			return 1;
@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task, void *stack_start,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data,
 		unsigned long *end, int *graph)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
-	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+	while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
 		unsigned long addr;
 
 		addr = *stack;
@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *
 			} else {
 				ops->address(data, addr, 0);
 			}
-			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+			print_ftrace_graph_addr(addr, data, ops, task, graph);
 		}
 		stack++;
 	}
@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task, void *stack_start,
 		       unsigned long *stack, unsigned long bp,
 		       const struct stacktrace_ops *ops, void *data,
 		       unsigned long *end, int *graph)
@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_inf
 	struct stack_frame *frame = (struct stack_frame *)bp;
 	unsigned long *ret_addr = &frame->return_address;
 
-	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+	while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
 		if (!__kernel_text_address(addr))
@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_inf
 		ops->address(data, addr, 1);
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 
 	return (unsigned long)frame;
@@ -155,7 +154,7 @@ static int print_trace_stack(void *data,
 static void print_trace_address(void *data, unsigned long addr, int reliable)
 {
 	touch_nmi_watchdog();
-	printk(data);
+	printk("%s", (char *)data);
 	printk_stack_address(addr, reliable);
 }
 
@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
 }
 EXPORT_SYMBOL_GPL(oops_begin);
 
+extern void gr_handle_kernel_exploit(void);
+
 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
 	if (regs && kexec_should_crash(current))
@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long fl
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
-	do_exit(signr);
+
+	gr_handle_kernel_exploit();
+
+	do_group_exit(signr);
 }
 
 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, str
 	print_modules();
 	show_regs(regs);
 #ifdef CONFIG_X86_32
-	if (user_mode_vm(regs)) {
+	if (user_mode(regs)) {
 		sp = regs->sp;
 		ss = regs->ss & 0xffff;
 	} else {
@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs
 	unsigned long flags = oops_begin();
 	int sig = SIGSEGV;
 
-	if (!user_mode_vm(regs))
+	if (!user_mode(regs))
 		report_bug(regs->ip, regs);
 
 	if (__die(str, regs, err))
diff -ruNp linux-3.13.11/arch/x86/kernel/dumpstack_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack_32.c
--- linux-3.13.11/arch/x86/kernel/dumpstack_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
 		bp = stack_frame(task, regs);
 
 	for (;;) {
-		struct thread_info *context;
+		void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
 
-		context = (struct thread_info *)
-			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-		bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+		bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
 
-		stack = (unsigned long *)context->previous_esp;
-		if (!stack)
+		if (stack_start == task_stack_page(task))
 			break;
+		stack = *(unsigned long **)stack_start;
 		if (ops->stack(data, "IRQ") < 0)
 			break;
 		touch_nmi_watchdog();
@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
 	int i;
 
 	show_regs_print_info(KERN_EMERG);
-	__show_regs(regs, !user_mode_vm(regs));
+	__show_regs(regs, !user_mode(regs));
 
 	/*
 	 * When in-kernel, we also print out the stack and code at the
 	 * time of the fault..
 	 */
-	if (!user_mode_vm(regs)) {
+	if (!user_mode(regs)) {
 		unsigned int code_prologue = code_bytes * 43 / 64;
 		unsigned int code_len = code_bytes;
 		unsigned char c;
 		u8 *ip;
+		unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
 
 		pr_emerg("Stack:\n");
 		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
 
 		pr_emerg("Code:");
 
-		ip = (u8 *)regs->ip - code_prologue;
+		ip = (u8 *)regs->ip - code_prologue + cs_base;
 		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
 			/* try starting at IP */
-			ip = (u8 *)regs->ip;
+			ip = (u8 *)regs->ip + cs_base;
 			code_len = code_len - code_prologue + 1;
 		}
 		for (i = 0; i < code_len; i++, ip++) {
@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
 				pr_cont("  Bad EIP value.");
 				break;
 			}
-			if (ip == (u8 *)regs->ip)
+			if (ip == (u8 *)regs->ip + cs_base)
 				pr_cont(" <%02x>", c);
 			else
 				pr_cont(" %02x", c);
@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
 {
 	unsigned short ud2;
 
+	ip = ktla_ktva(ip);
 	if (ip < PAGE_OFFSET)
 		return 0;
 	if (probe_kernel_address((unsigned short *)ip, ud2))
@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
 
 	return ud2 == 0x0b0f;
 }
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_check_alloca(unsigned long size)
+{
+	unsigned long sp = (unsigned long)&sp, stack_left;
+
+	/* all kernel stacks are of the same size */
+	stack_left = sp & (THREAD_SIZE - 1);
+	BUG_ON(stack_left < 256 || size >= stack_left - 256);
+}
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
diff -ruNp linux-3.13.11/arch/x86/kernel/dumpstack_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack_64.c
--- linux-3.13.11/arch/x86/kernel/dumpstack_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/dumpstack_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task
 	unsigned long *irq_stack_end =
 		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned used = 0;
-	struct thread_info *tinfo;
 	int graph = 0;
 	unsigned long dummy;
+	void *stack_start;
 
 	if (!task)
 		task = current;
@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task
 	 * current stack address. If the stacks consist of nested
 	 * exceptions
 	 */
-	tinfo = task_thread_info(task);
 	for (;;) {
 		char *id;
 		unsigned long *estack_end;
+
 		estack_end = in_exception_stack(cpu, (unsigned long)stack,
 						&used, &id);
 
@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = ops->walk_stack(tinfo, stack, bp, ops,
+			bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
 					     data, estack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task
 			 * second-to-last pointer (index -2 to end) in the
 			 * exception stack:
 			 */
+			if ((u16)estack_end[-1] != __KERNEL_DS)
+				goto out;
 			stack = (unsigned long *) estack_end[-2];
 			continue;
 		}
@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task
 			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
 				if (ops->stack(data, "IRQ") < 0)
 					break;
-				bp = ops->walk_stack(tinfo, stack, bp,
+				bp = ops->walk_stack(task, irq_stack, stack, bp,
 					ops, data, irq_stack_end, &graph);
 				/*
 				 * We link to the next stack (which would be
@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task
 	/*
 	 * This handles the process stack:
 	 */
-	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
+	bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+out:
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
 
 	return ud2 == 0x0b0f;
 }
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_check_alloca(unsigned long size)
+{
+	unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
+	unsigned cpu, used;
+	char *id;
+
+	/* check the process stack first */
+	stack_start = (unsigned long)task_stack_page(current);
+	stack_end = stack_start + THREAD_SIZE;
+	if (likely(stack_start <= sp && sp < stack_end)) {
+		unsigned long stack_left = sp & (THREAD_SIZE - 1);
+		BUG_ON(stack_left < 256 || size >= stack_left - 256);
+		return;
+	}
+
+	cpu = get_cpu();
+
+	/* check the irq stacks */
+	stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
+	stack_start = stack_end - IRQ_STACK_SIZE;
+	if (stack_start <= sp && sp < stack_end) {
+		unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
+		put_cpu();
+		BUG_ON(stack_left < 256 || size >= stack_left - 256);
+		return;
+	}
+
+	/* check the exception stacks */
+	used = 0;
+	stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
+	stack_start = stack_end - EXCEPTION_STKSZ;
+	if (stack_end && stack_start <= sp && sp < stack_end) {
+		unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
+		put_cpu();
+		BUG_ON(stack_left < 256 || size >= stack_left - 256);
+		return;
+	}
+
+	put_cpu();
+
+	/* unknown stack */
+	BUG();
+}
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
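
The pax_check_alloca() helper added above classifies the current stack pointer against the three 64-bit kernel stacks (process, per-cpu IRQ, per-cpu exception) and refuses any runtime-sized allocation that would leave less than 256 bytes of headroom. The bound check itself reduces to the sketch below; the helper name is illustrative, while the power-of-two stack sizes and the 256-byte margin come from the hunk:

	static inline void check_stack_headroom(unsigned long sp,
						unsigned long stack_size,
						unsigned long alloc_size)
	{
		/* sp sits inside a power-of-two sized stack: room left below it */
		unsigned long stack_left = sp & (stack_size - 1);

		/* keep a 256-byte safety margin above the end of the stack */
		BUG_ON(stack_left < 256 || alloc_size >= stack_left - 256);
	}
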
diff -ruNp linux-3.13.11/arch/x86/kernel/e820.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/e820.c
--- linux-3.13.11/arch/x86/kernel/e820.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/e820.c	2014-07-09 12:00:15.000000000 +0200
@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram
 
 static void early_panic(char *msg)
 {
-	early_printk(msg);
-	panic(msg);
+	early_printk("%s", msg);
+	panic("%s", msg);
 }
 
 static int userdef __initdata;
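
The early_panic() change above is plain format-string hardening: a string that may contain '%' must be passed as data, never as the format argument itself, i.e.:

	panic(msg);		/* unsafe: '%' conversions in msg would be interpreted */
	panic("%s", msg);	/* safe: msg is only ever printed verbatim */
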
diff -ruNp linux-3.13.11/arch/x86/kernel/early_printk.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/early_printk.c
--- linux-3.13.11/arch/x86/kernel/early_printk.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/early_printk.c	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
 #include <linux/pci_regs.h>
 #include <linux/pci_ids.h>
 #include <linux/errno.h>
+#include <linux/sched.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
diff -ruNp linux-3.13.11/arch/x86/kernel/entry_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/entry_32.S
--- linux-3.13.11/arch/x86/kernel/entry_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/entry_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -177,13 +177,153 @@
 	/*CFI_REL_OFFSET gs, PT_GS*/
 .endm
 .macro SET_KERNEL_GS reg
+
+#ifdef CONFIG_CC_STACKPROTECTOR
 	movl $(__KERNEL_STACK_CANARY), \reg
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
+	movl $(__USER_DS), \reg
+#else
+	xorl \reg, \reg
+#endif
+
 	movl \reg, %gs
 .endm
 
 #endif	/* CONFIG_X86_32_LAZY_GS */
 
-.macro SAVE_ALL
+.macro pax_enter_kernel
+#ifdef CONFIG_PAX_KERNEXEC
+	call pax_enter_kernel
+#endif
+.endm
+
+.macro pax_exit_kernel
+#ifdef CONFIG_PAX_KERNEXEC
+	call pax_exit_kernel
+#endif
+.endm
+
+#ifdef CONFIG_PAX_KERNEXEC
+ENTRY(pax_enter_kernel)
+#ifdef CONFIG_PARAVIRT
+	pushl %eax
+	pushl %ecx
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
+	mov %eax, %esi
+#else
+	mov %cr0, %esi
+#endif
+	bts $16, %esi
+	jnc 1f
+	mov %cs, %esi
+	cmp $__KERNEL_CS, %esi
+	jz 3f
+	ljmp $__KERNEL_CS, $3f
+1:	ljmp $__KERNEXEC_KERNEL_CS, $2f
+2:
+#ifdef CONFIG_PARAVIRT
+	mov %esi, %eax
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
+#else
+	mov %esi, %cr0
+#endif
+3:
+#ifdef CONFIG_PARAVIRT
+	popl %ecx
+	popl %eax
+#endif
+	ret
+ENDPROC(pax_enter_kernel)
+
+ENTRY(pax_exit_kernel)
+#ifdef CONFIG_PARAVIRT
+	pushl %eax
+	pushl %ecx
+#endif
+	mov %cs, %esi
+	cmp $__KERNEXEC_KERNEL_CS, %esi
+	jnz 2f
+#ifdef CONFIG_PARAVIRT
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
+	mov %eax, %esi
+#else
+	mov %cr0, %esi
+#endif
+	btr $16, %esi
+	ljmp $__KERNEL_CS, $1f
+1:
+#ifdef CONFIG_PARAVIRT
+	mov %esi, %eax
+	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
+#else
+	mov %esi, %cr0
+#endif
+2:
+#ifdef CONFIG_PARAVIRT
+	popl %ecx
+	popl %eax
+#endif
+	ret
+ENDPROC(pax_exit_kernel)
+#endif
+
+	.macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+	call pax_erase_kstack
+#endif
+	.endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+/*
+ * ebp: thread_info
+ */
+ENTRY(pax_erase_kstack)
+	pushl %edi
+	pushl %ecx
+	pushl %eax
+
+	mov TI_lowest_stack(%ebp), %edi
+	mov $-0xBEEF, %eax
+	std
+
+1:	mov %edi, %ecx
+	and $THREAD_SIZE_asm - 1, %ecx
+	shr $2, %ecx
+	repne scasl
+	jecxz 2f
+
+	cmp $2*16, %ecx
+	jc 2f
+
+	mov $2*16, %ecx
+	repe scasl
+	jecxz 2f
+	jne 1b
+
+2:	cld
+	mov %esp, %ecx
+	sub %edi, %ecx
+
+	cmp $THREAD_SIZE_asm, %ecx
+	jb 3f
+	ud2
+3:
+
+	shr $2, %ecx
+	rep stosl
+
+	mov TI_task_thread_sp0(%ebp), %edi
+	sub $128, %edi
+	mov %edi, TI_lowest_stack(%ebp)
+
+	popl %eax
+	popl %ecx
+	popl %edi
+	ret
+ENDPROC(pax_erase_kstack)
+#endif
+
+.macro __SAVE_ALL _DS
 	cld
 	PUSH_GS
 	pushl_cfi %fs
@@ -206,7 +346,7 @@
 	CFI_REL_OFFSET ecx, 0
 	pushl_cfi %ebx
 	CFI_REL_OFFSET ebx, 0
-	movl $(__USER_DS), %edx
+	movl $\_DS, %edx
 	movl %edx, %ds
 	movl %edx, %es
 	movl $(__KERNEL_PERCPU), %edx
@@ -214,6 +354,15 @@
 	SET_KERNEL_GS %edx
 .endm
 
+.macro SAVE_ALL
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	__SAVE_ALL __KERNEL_DS
+	pax_enter_kernel
+#else
+	__SAVE_ALL __USER_DS
+#endif
+.endm
+
 .macro RESTORE_INT_REGS
 	popl_cfi %ebx
 	CFI_RESTORE ebx
@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
 	popfl_cfi
 	jmp syscall_exit
 	CFI_ENDPROC
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 
 ENTRY(ret_from_kernel_thread)
 	CFI_STARTPROC
@@ -344,7 +493,15 @@ ret_from_intr:
 	andl $SEGMENT_RPL_MASK, %eax
 #endif
 	cmpl $USER_RPL, %eax
+
+#ifdef CONFIG_PAX_KERNEXEC
+	jae resume_userspace
+
+	pax_exit_kernel
+	jmp resume_kernel
+#else
 	jb resume_kernel		# not returning to v8086 or userspace
+#endif
 
 ENTRY(resume_userspace)
 	LOCKDEP_SYS_EXIT
@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
 					# int/exception return?
 	jne work_pending
-	jmp restore_all
-END(ret_from_exception)
+	jmp restore_all_pax
+ENDPROC(ret_from_exception)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
@@ -369,7 +526,7 @@ need_resched:
 	jz restore_all
 	call preempt_schedule_irq
 	jmp need_resched
-END(resume_kernel)
+ENDPROC(resume_kernel)
 #endif
 	CFI_ENDPROC
 /*
@@ -403,30 +560,45 @@ sysenter_past_esp:
 	/*CFI_REL_OFFSET cs, 0*/
 	/*
 	 * Push current_thread_info()->sysenter_return to the stack.
-	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
-	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
-	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
+	pushl_cfi $0
 	CFI_REL_OFFSET eip, 0
 
 	pushl_cfi %eax
 	SAVE_ALL
+	GET_THREAD_INFO(%ebp)
+	movl TI_sysenter_return(%ebp),%ebp
+	movl %ebp,PT_EIP(%esp)
 	ENABLE_INTERRUPTS(CLBR_NONE)
 
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
  */
+	movl PT_OLDESP(%esp),%ebp
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	mov PT_OLDSS(%esp),%ds
+1:	movl %ds:(%ebp),%ebp
+	push %ss
+	pop %ds
+#else
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
 	ASM_STAC
 1:	movl (%ebp),%ebp
 	ASM_CLAC
+#endif
+
 	movl %ebp,PT_EBP(%esp)
 	_ASM_EXTABLE(1b,syscall_fault)
 
 	GET_THREAD_INFO(%ebp)
 
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz sysenter_audit
 sysenter_do_call:
@@ -441,12 +613,24 @@ sysenter_do_call:
 	testl $_TIF_ALLWORK_MASK, %ecx
 	jne sysexit_audit
 sysenter_exit:
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pushl_cfi %eax
+	movl %esp, %eax
+	call pax_randomize_kstack
+	popl_cfi %eax
+#endif
+
+	pax_erase_kstack
+
 /* if something modifies registers it must also disable sysexit */
 	movl PT_EIP(%esp), %edx
 	movl PT_OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 1:	mov  PT_FS(%esp), %fs
+2:	mov  PT_DS(%esp), %ds
+3:	mov  PT_ES(%esp), %es
 	PTGS_TO_GS
 	ENABLE_INTERRUPTS_SYSEXIT
 
@@ -463,6 +647,9 @@ sysenter_audit:
 	movl %eax,%edx			/* 2nd arg: syscall number */
 	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
 	call __audit_syscall_entry
+
+	pax_erase_kstack
+
 	pushl_cfi %ebx
 	movl PT_EAX(%esp),%eax		/* reload syscall number */
 	jmp sysenter_do_call
@@ -488,10 +675,16 @@ sysexit_audit:
 
 	CFI_ENDPROC
 .pushsection .fixup,"ax"
-2:	movl $0,PT_FS(%esp)
+4:	movl $0,PT_FS(%esp)
+	jmp 1b
+5:	movl $0,PT_DS(%esp)
+	jmp 1b
+6:	movl $0,PT_ES(%esp)
 	jmp 1b
 .popsection
-	_ASM_EXTABLE(1b,2b)
+	_ASM_EXTABLE(1b,4b)
+	_ASM_EXTABLE(2b,5b)
+	_ASM_EXTABLE(3b,6b)
 	PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
@@ -506,6 +699,11 @@ ENTRY(system_call)
 	pushl_cfi %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
 					# system call tracing in operation / emulation
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
 	jnz syscall_trace_entry
@@ -524,6 +722,15 @@ syscall_exit:
 	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
 	jne syscall_exit_work
 
+restore_all_pax:
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	movl %esp, %eax
+	call pax_randomize_kstack
+#endif
+
+	pax_erase_kstack
+
 restore_all:
 	TRACE_IRQS_IRET
 restore_all_notrace:
@@ -580,14 +787,34 @@ ldt_ss:
  * compensating for the offset by changing to the ESPFIX segment with
  * a base address that matches for the difference.
  */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
 	mov %esp, %edx			/* load kernel esp */
 	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
 	mov %dx, %ax			/* eax: new kernel esp */
 	sub %eax, %edx			/* offset (low word is 0) */
+#ifdef CONFIG_SMP
+	movl PER_CPU_VAR(cpu_number), %ebx
+	shll $PAGE_SHIFT_asm, %ebx
+	addl $cpu_gdt_table, %ebx
+#else
+	movl $cpu_gdt_table, %ebx
+#endif
 	shr $16, %edx
-	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
-	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+	mov %cr0, %esi
+	btr $16, %esi
+	mov %esi, %cr0
+#endif
+
+	mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
+	mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+	bts $16, %esi
+	mov %esi, %cr0
+#endif
+
 	pushl_cfi $__ESPFIX_SS
 	pushl_cfi %eax			/* new kernel esp */
 	/* Disable interrupts, but do not irqtrace this section: we
@@ -616,20 +843,18 @@ work_resched:
 	movl TI_flags(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
-	jz restore_all
+	jz restore_all_pax
 	testb $_TIF_NEED_RESCHED, %cl
 	jnz work_resched
 
 work_notifysig:				# deal with pending signals and
 					# notify-resume requests
+	movl %esp, %eax
 #ifdef CONFIG_VM86
 	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-	movl %esp, %eax
 	jne work_notifysig_v86		# returning to kernel-space or
 					# vm86-space
 1:
-#else
-	movl %esp, %eax
 #endif
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -650,7 +875,7 @@ work_notifysig_v86:
 	movl %eax, %esp
 	jmp 1b
 #endif
-END(work_pending)
+ENDPROC(work_pending)
 
 	# perform syscall exit tracing
 	ALIGN
@@ -658,11 +883,14 @@ syscall_trace_entry:
 	movl $-ENOSYS,PT_EAX(%esp)
 	movl %esp, %eax
 	call syscall_trace_enter
+
+	pax_erase_kstack
+
 	/* What it returned is what we'll actually use.  */
 	cmpl $(NR_syscalls), %eax
 	jnae syscall_call
 	jmp syscall_exit
-END(syscall_trace_entry)
+ENDPROC(syscall_trace_entry)
 
 	# perform syscall exit tracing
 	ALIGN
@@ -675,21 +903,25 @@ syscall_exit_work:
 	movl %esp, %eax
 	call syscall_trace_leave
 	jmp resume_userspace
-END(syscall_exit_work)
+ENDPROC(syscall_exit_work)
 	CFI_ENDPROC
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	push %ss
+	pop %ds
+#endif
 	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace
-END(syscall_fault)
+ENDPROC(syscall_fault)
 
 syscall_badsys:
 	movl $-ENOSYS,PT_EAX(%esp)
 	jmp resume_userspace
-END(syscall_badsys)
+ENDPROC(syscall_badsys)
 	CFI_ENDPROC
 /*
  * End of kprobes section
@@ -705,8 +937,15 @@ END(syscall_badsys)
  * normal stack and adjusts ESP with the matching offset.
  */
 	/* fixup the stack */
-	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
-	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+#ifdef CONFIG_SMP
+	movl PER_CPU_VAR(cpu_number), %ebx
+	shll $PAGE_SHIFT_asm, %ebx
+	addl $cpu_gdt_table, %ebx
+#else
+	movl $cpu_gdt_table, %ebx
+#endif
+	mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
+	mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
 	shl $16, %eax
 	addl %esp, %eax			/* the adjusted stack pointer */
 	pushl_cfi $__KERNEL_DS
@@ -759,7 +998,7 @@ vector=vector+1
   .endr
 2:	jmp common_interrupt
 .endr
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 
 .previous
 END(interrupt)
@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
 	pushl_cfi $do_coprocessor_error
 	jmp error_code
 	CFI_ENDPROC
-END(coprocessor_error)
+ENDPROC(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
 .section .altinstructions,"a"
 	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
 .previous
-.section .altinstr_replacement,"ax"
+.section .altinstr_replacement,"a"
 663:	pushl $do_simd_coprocessor_error
 664:
 .previous
@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
 #endif
 	jmp error_code
 	CFI_ENDPROC
-END(simd_coprocessor_error)
+ENDPROC(simd_coprocessor_error)
 
 ENTRY(device_not_available)
 	RING0_INT_FRAME
@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
 	pushl_cfi $do_device_not_available
 	jmp error_code
 	CFI_ENDPROC
-END(device_not_available)
+ENDPROC(device_not_available)
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_iret)
 	iret
 	_ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
+ENDPROC(native_iret)
 
 ENTRY(native_irq_enable_sysexit)
 	sti
 	sysexit
-END(native_irq_enable_sysexit)
+ENDPROC(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
@@ -872,7 +1111,7 @@ ENTRY(overflow)
 	pushl_cfi $do_overflow
 	jmp error_code
 	CFI_ENDPROC
-END(overflow)
+ENDPROC(overflow)
 
 ENTRY(bounds)
 	RING0_INT_FRAME
@@ -881,7 +1120,7 @@ ENTRY(bounds)
 	pushl_cfi $do_bounds
 	jmp error_code
 	CFI_ENDPROC
-END(bounds)
+ENDPROC(bounds)
 
 ENTRY(invalid_op)
 	RING0_INT_FRAME
@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
 	pushl_cfi $do_invalid_op
 	jmp error_code
 	CFI_ENDPROC
-END(invalid_op)
+ENDPROC(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME
@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
 	pushl_cfi $do_coprocessor_segment_overrun
 	jmp error_code
 	CFI_ENDPROC
-END(coprocessor_segment_overrun)
+ENDPROC(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
 	RING0_EC_FRAME
@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
 	pushl_cfi $do_invalid_TSS
 	jmp error_code
 	CFI_ENDPROC
-END(invalid_TSS)
+ENDPROC(invalid_TSS)
 
 ENTRY(segment_not_present)
 	RING0_EC_FRAME
@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
 	pushl_cfi $do_segment_not_present
 	jmp error_code
 	CFI_ENDPROC
-END(segment_not_present)
+ENDPROC(segment_not_present)
 
 ENTRY(stack_segment)
 	RING0_EC_FRAME
@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
 	pushl_cfi $do_stack_segment
 	jmp error_code
 	CFI_ENDPROC
-END(stack_segment)
+ENDPROC(stack_segment)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
 	pushl_cfi $do_alignment_check
 	jmp error_code
 	CFI_ENDPROC
-END(alignment_check)
+ENDPROC(alignment_check)
 
 ENTRY(divide_error)
 	RING0_INT_FRAME
@@ -940,7 +1179,7 @@ ENTRY(divide_error)
 	pushl_cfi $do_divide_error
 	jmp error_code
 	CFI_ENDPROC
-END(divide_error)
+ENDPROC(divide_error)
 
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
@@ -950,7 +1189,7 @@ ENTRY(machine_check)
 	pushl_cfi machine_check_vector
 	jmp error_code
 	CFI_ENDPROC
-END(machine_check)
+ENDPROC(machine_check)
 #endif
 
 ENTRY(spurious_interrupt_bug)
@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
 	pushl_cfi $do_spurious_interrupt_bug
 	jmp error_code
 	CFI_ENDPROC
-END(spurious_interrupt_bug)
+ENDPROC(spurious_interrupt_bug)
 /*
  * End of kprobes section
  */
@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector,
 
 ENTRY(mcount)
 	ret
-END(mcount)
+ENDPROC(mcount)
 
 ENTRY(ftrace_caller)
 	cmpl $0, function_trace_stop
@@ -1103,7 +1342,7 @@ ftrace_graph_call:
 .globl ftrace_stub
 ftrace_stub:
 	ret
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
 	pushf	/* push flags before compare (in cs location) */
@@ -1207,7 +1446,7 @@ trace:
 	popl %ecx
 	popl %eax
 	jmp ftrace_stub
-END(mcount)
+ENDPROC(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
 	popl %ecx
 	popl %eax
 	ret
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
@@ -1291,15 +1530,18 @@ error_code:
 	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
 	REG_TO_PTGS %ecx
 	SET_KERNEL_GS %ecx
-	movl $(__USER_DS), %ecx
+	movl $(__KERNEL_DS), %ecx
 	movl %ecx, %ds
 	movl %ecx, %es
+
+	pax_enter_kernel
+
 	TRACE_IRQS_OFF
 	movl %esp,%eax			# pt_regs pointer
 	call *%edi
 	jmp ret_from_exception
 	CFI_ENDPROC
-END(page_fault)
+ENDPROC(page_fault)
 
 /*
  * Debug traps and NMI can happen at the one SYSENTER instruction
@@ -1342,7 +1584,7 @@ debug_stack_correct:
 	call do_debug
 	jmp ret_from_exception
 	CFI_ENDPROC
-END(debug)
+ENDPROC(debug)
 
 /*
  * NMI is doubly nasty. It can happen _while_ we're handling
@@ -1380,6 +1622,9 @@ nmi_stack_correct:
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
+
+	pax_exit_kernel
+
 	jmp restore_all_notrace
 	CFI_ENDPROC
 
@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
 	FIXUP_ESPFIX_STACK		# %eax == %esp
 	xorl %edx,%edx			# zero error code
 	call do_nmi
+
+	pax_exit_kernel
+
 	RESTORE_REGS
 	lss 12+4(%esp), %esp		# back to espfix stack
 	CFI_ADJUST_CFA_OFFSET -24
 	jmp irq_return
 	CFI_ENDPROC
-END(nmi)
+ENDPROC(nmi)
 
 ENTRY(int3)
 	RING0_INT_FRAME
@@ -1434,14 +1682,14 @@ ENTRY(int3)
 	call do_int3
 	jmp ret_from_exception
 	CFI_ENDPROC
-END(int3)
+ENDPROC(int3)
 
 ENTRY(general_protection)
 	RING0_EC_FRAME
 	pushl_cfi $do_general_protection
 	jmp error_code
 	CFI_ENDPROC
-END(general_protection)
+ENDPROC(general_protection)
 
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
 	pushl_cfi $do_async_page_fault
 	jmp error_code
 	CFI_ENDPROC
-END(async_page_fault)
+ENDPROC(async_page_fault)
 #endif
 
 /*
diff -ruNp linux-3.13.11/arch/x86/kernel/entry_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/entry_64.S
--- linux-3.13.11/arch/x86/kernel/entry_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/entry_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -59,6 +59,8 @@
 #include <asm/context_tracking.h>
 #include <asm/smap.h>
 #include <linux/err.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -80,8 +82,9 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 ENTRY(function_hook)
+	pax_force_retaddr
 	retq
-END(function_hook)
+ENDPROC(function_hook)
 
 /* skip is set if stack has been adjusted */
 .macro ftrace_caller_setup skip=0
@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
 #endif
 
 GLOBAL(ftrace_stub)
+	pax_force_retaddr
 	retq
-END(ftrace_caller)
+ENDPROC(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
 	/* Save the current flags before compare (in SS location)*/
@@ -191,7 +195,7 @@ ftrace_restore_flags:
 	popfq
 	jmp  ftrace_stub
 
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
 
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -212,6 +216,7 @@ ENTRY(function_hook)
 #endif
 
 GLOBAL(ftrace_stub)
+	pax_force_retaddr
 	retq
 
 trace:
@@ -225,12 +230,13 @@ trace:
 #endif
 	subq $MCOUNT_INSN_SIZE, %rdi
 
+	pax_force_fptr ftrace_trace_function
 	call   *ftrace_trace_function
 
 	MCOUNT_RESTORE_FRAME
 
 	jmp ftrace_stub
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
 
 	MCOUNT_RESTORE_FRAME
 
+	pax_force_retaddr
 	retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
 
 GLOBAL(return_to_handler)
 	subq  $24, %rsp
@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
 	movq 8(%rsp), %rdx
 	movq (%rsp), %rax
 	addq $24, %rsp
+	pax_force_fptr %rdi
 	jmp *%rdi
+ENDPROC(return_to_handler)
 #endif
 
 
@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
 ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
+	.macro ljmpq sel, off
+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
+	.byte 0x48; ljmp *1234f(%rip)
+	.pushsection .rodata
+	.align 16
+	1234: .quad \off; .word \sel
+	.popsection
+#else
+	pushq $\sel
+	pushq $\off
+	lretq
+#endif
+	.endm
+
+	.macro pax_enter_kernel
+	pax_set_fptr_mask
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	call pax_enter_kernel
+#endif
+	.endm
+
+	.macro pax_exit_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	call pax_exit_kernel
+#endif
+
+	.endm
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ENTRY(pax_enter_kernel)
+	pushq %rdi
+
+#ifdef CONFIG_PARAVIRT
+	PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_RDI
+	bts $16,%rdi
+	jnc 3f
+	mov %cs,%edi
+	cmp $__KERNEL_CS,%edi
+	jnz 2f
+1:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	661: jmp 111f
+	.pushsection .altinstr_replacement, "a"
+	662: ASM_NOP2
+	.popsection
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+	.popsection
+	GET_CR3_INTO_RDI
+	cmp $0,%dil
+	jnz 112f
+	mov $__KERNEL_DS,%edi
+	mov %edi,%ss
+	jmp 111f
+112:	cmp $1,%dil
+	jz 113f
+	ud2
+113:	sub $4097,%rdi
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	mov $__UDEREF_KERNEL_DS,%edi
+	mov %edi,%ss
+111:
+#endif
+
+#ifdef CONFIG_PARAVIRT
+	PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+	popq %rdi
+	pax_force_retaddr
+	retq
+
+#ifdef CONFIG_PAX_KERNEXEC
+2:	ljmpq __KERNEL_CS,1b
+3:	ljmpq __KERNEXEC_KERNEL_CS,4f
+4:	SET_RDI_INTO_CR0
+	jmp 1b
+#endif
+ENDPROC(pax_enter_kernel)
+
+ENTRY(pax_exit_kernel)
+	pushq %rdi
+
+#ifdef CONFIG_PARAVIRT
+	PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+	mov %cs,%rdi
+	cmp $__KERNEXEC_KERNEL_CS,%edi
+	jz 2f
+	GET_CR0_INTO_RDI
+	bts $16,%rdi
+	jnc 4f
+1:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	661: jmp 111f
+	.pushsection .altinstr_replacement, "a"
+	662: ASM_NOP2
+	.popsection
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+	.popsection
+	mov %ss,%edi
+	cmp $__UDEREF_KERNEL_DS,%edi
+	jnz 111f
+	GET_CR3_INTO_RDI
+	cmp $0,%dil
+	jz 112f
+	ud2
+112:	add $4097,%rdi
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	mov $__KERNEL_DS,%edi
+	mov %edi,%ss
+111:
+#endif
+
+#ifdef CONFIG_PARAVIRT
+	PV_RESTORE_REGS(CLBR_RDI);
+#endif
+
+	popq %rdi
+	pax_force_retaddr
+	retq
+
+#ifdef CONFIG_PAX_KERNEXEC
+2:	GET_CR0_INTO_RDI
+	btr $16,%rdi
+	jnc 4f
+	ljmpq __KERNEL_CS,3f
+3:	SET_RDI_INTO_CR0
+	jmp 1b
+4:	ud2
+	jmp 4b
+#endif
+ENDPROC(pax_exit_kernel)
+#endif
+
+	.macro pax_enter_kernel_user
+	pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	call pax_enter_kernel_user
+#endif
+	.endm
+
+	.macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+	pushq %rax
+	pushq %r11
+	call pax_randomize_kstack
+	popq %r11
+	popq %rax
+#endif
+	.endm
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ENTRY(pax_enter_kernel_user)
+	pushq %rdi
+	pushq %rbx
+
+#ifdef CONFIG_PARAVIRT
+	PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+	661: jmp 111f
+	.pushsection .altinstr_replacement, "a"
+	662: ASM_NOP2
+	.popsection
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+	.popsection
+	GET_CR3_INTO_RDI
+	cmp $1,%dil
+	jnz 4f
+	sub $4097,%rdi
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	jmp 3f
+111:
+
+	GET_CR3_INTO_RDI
+	mov %rdi,%rbx
+	add $__START_KERNEL_map,%rbx
+	sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+	cmpl $0, pv_info+PARAVIRT_enabled
+	jz 1f
+	pushq %rdi
+	i = 0
+	.rept USER_PGD_PTRS
+	mov i*8(%rbx),%rsi
+	mov $0,%sil
+	lea i*8(%rbx),%rdi
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+	i = i + 1
+	.endr
+	popq %rdi
+	jmp 2f
+1:
+#endif
+
+	i = 0
+	.rept USER_PGD_PTRS
+	movb $0,i*8(%rbx)
+	i = i + 1
+	.endr
+
+2:	SET_RDI_INTO_CR3
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_RDI
+	bts $16,%rdi
+	SET_RDI_INTO_CR0
+#endif
+
+3:
+
+#ifdef CONFIG_PARAVIRT
+	PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+	popq %rbx
+	popq %rdi
+	pax_force_retaddr
+	retq
+4:	ud2
+ENDPROC(pax_enter_kernel_user)
+
+ENTRY(pax_exit_kernel_user)
+	pushq %rdi
+	pushq %rbx
+
+#ifdef CONFIG_PARAVIRT
+	PV_SAVE_REGS(CLBR_RDI)
+#endif
+
+	GET_CR3_INTO_RDI
+	661: jmp 1f
+	.pushsection .altinstr_replacement, "a"
+	662: ASM_NOP2
+	.popsection
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+	.popsection
+	cmp $0,%dil
+	jnz 3f
+	add $4097,%rdi
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	jmp 2f
+1:
+
+	mov %rdi,%rbx
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_RDI
+	btr $16,%rdi
+	jnc 3f
+	SET_RDI_INTO_CR0
+#endif
+
+	add $__START_KERNEL_map,%rbx
+	sub phys_base(%rip),%rbx
+
+#ifdef CONFIG_PARAVIRT
+	cmpl $0, pv_info+PARAVIRT_enabled
+	jz 1f
+	i = 0
+	.rept USER_PGD_PTRS
+	mov i*8(%rbx),%rsi
+	mov $0x67,%sil
+	lea i*8(%rbx),%rdi
+	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
+	i = i + 1
+	.endr
+	jmp 2f
+1:
+#endif
+
+	i = 0
+	.rept USER_PGD_PTRS
+	movb $0x67,i*8(%rbx)
+	i = i + 1
+	.endr
+2:
+
+#ifdef CONFIG_PARAVIRT
+	PV_RESTORE_REGS(CLBR_RDI)
+#endif
+
+	popq %rbx
+	popq %rdi
+	pax_force_retaddr
+	retq
+3:	ud2
+ENDPROC(pax_exit_kernel_user)
+#endif
+
+	.macro pax_enter_kernel_nmi
+	pax_set_fptr_mask
+
+#ifdef CONFIG_PAX_KERNEXEC
+	GET_CR0_INTO_RDI
+	bts $16,%rdi
+	jc 110f
+	SET_RDI_INTO_CR0
+	or $2,%ebx
+110:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	661: jmp 111f
+	.pushsection .altinstr_replacement, "a"
+	662: ASM_NOP2
+	.popsection
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
+	.popsection
+	GET_CR3_INTO_RDI
+	cmp $0,%dil
+	jz 111f
+	sub $4097,%rdi
+	or $4,%ebx
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	mov $__UDEREF_KERNEL_DS,%edi
+	mov %edi,%ss
+111:
+#endif
+	.endm
+
+	.macro pax_exit_kernel_nmi
+#ifdef CONFIG_PAX_KERNEXEC
+	btr $1,%ebx
+	jnc 110f
+	GET_CR0_INTO_RDI
+	btr $16,%rdi
+	SET_RDI_INTO_CR0
+110:
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	btr $2,%ebx
+	jnc 111f
+	GET_CR3_INTO_RDI
+	add $4097,%rdi
+	bts $63,%rdi
+	SET_RDI_INTO_CR3
+	mov $__KERNEL_DS,%edi
+	mov %edi,%ss
+111:
+#endif
+	.endm
+
+	.macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+	call pax_erase_kstack
+#endif
+	.endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+ENTRY(pax_erase_kstack)
+	pushq %rdi
+	pushq %rcx
+	pushq %rax
+	pushq %r11
+
+	GET_THREAD_INFO(%r11)
+	mov TI_lowest_stack(%r11), %rdi
+	mov $-0xBEEF, %rax
+	std
+
+1:	mov %edi, %ecx
+	and $THREAD_SIZE_asm - 1, %ecx
+	shr $3, %ecx
+	repne scasq
+	jecxz 2f
+
+	cmp $2*8, %ecx
+	jc 2f
+
+	mov $2*8, %ecx
+	repe scasq
+	jecxz 2f
+	jne 1b
+
+2:	cld
+	mov %esp, %ecx
+	sub %edi, %ecx
+
+	cmp $THREAD_SIZE_asm, %rcx
+	jb 3f
+	ud2
+3:
+
+	shr $3, %ecx
+	rep stosq
+
+	mov TI_task_thread_sp0(%r11), %rdi
+	sub $256, %rdi
+	mov %rdi, TI_lowest_stack(%r11)
+
+	popq %r11
+	popq %rax
+	popq %rcx
+	popq %rdi
+	pax_force_retaddr
+	ret
+ENDPROC(pax_erase_kstack)
+#endif
 
 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
 .endm
 
 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
-	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
+	bt   $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp)	/* interrupts off? */
 	jnc  1f
 	TRACE_IRQS_ON_DEBUG
 1:
@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
 	movq \tmp,R11+\offset(%rsp)
 	.endm
 
-	.macro FAKE_STACK_FRAME child_rip
-	/* push in order ss, rsp, eflags, cs, rip */
-	xorl %eax, %eax
-	pushq_cfi $__KERNEL_DS /* ss */
-	/*CFI_REL_OFFSET	ss,0*/
-	pushq_cfi %rax /* rsp */
-	CFI_REL_OFFSET	rsp,0
-	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
-	/*CFI_REL_OFFSET	rflags,0*/
-	pushq_cfi $__KERNEL_CS /* cs */
-	/*CFI_REL_OFFSET	cs,0*/
-	pushq_cfi \child_rip /* rip */
-	CFI_REL_OFFSET	rip,0
-	pushq_cfi %rax /* orig rax */
-	.endm
-
-	.macro UNFAKE_STACK_FRAME
-	addq $8*6, %rsp
-	CFI_ADJUST_CFA_OFFSET	-(6*8)
-	.endm
-
 /*
  * initial frame state for interrupts (and exceptions without error code)
  */
@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
 /* save partial stack frame */
 	.macro SAVE_ARGS_IRQ
 	cld
-	/* start from rbp in pt_regs and jump over */
-	movq_cfi rdi, (RDI-RBP)
-	movq_cfi rsi, (RSI-RBP)
-	movq_cfi rdx, (RDX-RBP)
-	movq_cfi rcx, (RCX-RBP)
-	movq_cfi rax, (RAX-RBP)
-	movq_cfi  r8,  (R8-RBP)
-	movq_cfi  r9,  (R9-RBP)
-	movq_cfi r10, (R10-RBP)
-	movq_cfi r11, (R11-RBP)
+	/* start from r15 in pt_regs and jump over */
+	movq_cfi rdi, RDI
+	movq_cfi rsi, RSI
+	movq_cfi rdx, RDX
+	movq_cfi rcx, RCX
+	movq_cfi rax, RAX
+	movq_cfi  r8,  R8
+	movq_cfi  r9,  R9
+	movq_cfi r10, R10
+	movq_cfi r11, R11
+	movq_cfi r12, R12
 
 	/* Save rbp so that we can unwind from get_irq_regs() */
-	movq_cfi rbp, 0
+	movq_cfi rbp, RBP
 
 	/* Save previous stack value */
 	movq %rsp, %rsi
 
-	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
-	testl $3, CS-RBP(%rsi)
+	movq %rsp,%rdi	/* arg1 for handler */
+	testb $3, CS(%rsi)
 	je 1f
 	SWAPGS
 	/*
@@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
 			0x06 /* DW_OP_deref */, \
 			0x08 /* DW_OP_const1u */, SS+8-RBP, \
 			0x22 /* DW_OP_plus */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	testb $3, CS(%rdi)
+	jnz 1f
+	pax_enter_kernel
+	jmp 2f
+1:	pax_enter_kernel_user
+2:
+#else
+	pax_enter_kernel
+#endif
+
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
 	.endm
@@ -514,9 +939,52 @@ ENTRY(save_paranoid)
 	js 1f	/* negative -> in kernel */
 	SWAPGS
 	xorl %ebx,%ebx
-1:	ret
+1:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	testb $3, CS+8(%rsp)
+	jnz 1f
+	pax_enter_kernel
+	jmp 2f
+1:	pax_enter_kernel_user
+2:
+#else
+	pax_enter_kernel
+#endif
+	pax_force_retaddr
+	ret
+	CFI_ENDPROC
+ENDPROC(save_paranoid)
+
+ENTRY(save_paranoid_nmi)
+	XCPT_FRAME 1 RDI+8
+	cld
+	movq_cfi rdi, RDI+8
+	movq_cfi rsi, RSI+8
+	movq_cfi rdx, RDX+8
+	movq_cfi rcx, RCX+8
+	movq_cfi rax, RAX+8
+	movq_cfi r8, R8+8
+	movq_cfi r9, R9+8
+	movq_cfi r10, R10+8
+	movq_cfi r11, R11+8
+	movq_cfi rbx, RBX+8
+	movq_cfi rbp, RBP+8
+	movq_cfi r12, R12+8
+	movq_cfi r13, R13+8
+	movq_cfi r14, R14+8
+	movq_cfi r15, R15+8
+	movl $1,%ebx
+	movl $MSR_GS_BASE,%ecx
+	rdmsr
+	testl %edx,%edx
+	js 1f	/* negative -> in kernel */
+	SWAPGS
+	xorl %ebx,%ebx
+1:	pax_enter_kernel_nmi
+	pax_force_retaddr
+	ret
 	CFI_ENDPROC
-END(save_paranoid)
+ENDPROC(save_paranoid_nmi)
 	.popsection
 
 /*
@@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
 
 	RESTORE_REST
 
-	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
+	testb $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	jz   1f
 
 	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
@@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
 1:
-	subq $REST_SKIP, %rsp	# leave space for volatiles
-	CFI_ADJUST_CFA_OFFSET	REST_SKIP
 	movq %rbp, %rdi
 	call *%rbx
 	movl $0, RAX(%rsp)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
 
 /*
  * System call entry. Up to 6 arguments in registers are supported.
@@ -593,7 +1059,7 @@ END(ret_from_fork)
 ENTRY(system_call)
 	CFI_STARTPROC	simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
+	CFI_DEF_CFA	rsp,0
 	CFI_REGISTER	rip,rcx
 	/*CFI_REGISTER	rflags,r11*/
 	SWAPGS_UNSAFE_STACK
@@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
 
 	movq	%rsp,PER_CPU_VAR(old_rsp)
 	movq	PER_CPU_VAR(kernel_stack),%rsp
+	SAVE_ARGS 8*6,0
+	pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+	pax_erase_kstack
+#endif
+
 	/*
 	 * No need to follow this irqs off/on section - it's straight
 	 * and short:
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_ARGS 8,0
 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
-	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	GET_THREAD_INFO(%rcx)
+	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
 	jnz tracesys
 system_call_fastpath:
 #if __SYSCALL_MASK == ~0
@@ -639,10 +1112,13 @@ sysret_check:
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz  sysret_careful
 	CFI_REMEMBER_STATE
+	pax_exit_kernel_user
+	pax_erase_kstack
 	/*
 	 * sysretq will re-enable interrupts:
 	 */
@@ -701,6 +1177,9 @@ auditsys:
 	movq %rax,%rsi			/* 2nd arg: syscall number */
 	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
 	call __audit_syscall_entry
+
+	pax_erase_kstack
+
 	LOAD_ARGS 0		/* reload call-clobbered registers */
 	jmp system_call_fastpath
 
@@ -722,7 +1201,7 @@ sysret_audit:
 	/* Do syscall tracing */
 tracesys:
 #ifdef CONFIG_AUDITSYSCALL
-	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jz auditsys
 #endif
 	SAVE_REST
@@ -730,12 +1209,15 @@ tracesys:
 	FIXUP_TOP_OF_STACK %rdi
 	movq %rsp,%rdi
 	call syscall_trace_enter
+
+	pax_erase_kstack
+
 	/*
 	 * Reload arg registers from stack in case ptrace changed them.
 	 * We don't reload %rax because syscall_trace_enter() returned
 	 * the value it wants us to use in the table lookup.
 	 */
-	LOAD_ARGS ARGOFFSET, 1
+	LOAD_ARGS 1
 	RESTORE_REST
 #if __SYSCALL_MASK == ~0
 	cmpq $__NR_syscall_max,%rax
@@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
 	andl %edi,%edx
 	jnz   int_careful
 	andl    $~TS_COMPAT,TI_status(%rcx)
-	jmp   retint_swapgs
+	pax_exit_kernel_user
+	pax_erase_kstack
+	jmp   retint_swapgs_pax
 
 	/* Either reschedule or signal or syscall exit tracking needed. */
 	/* First do a reschedule test. */
@@ -811,7 +1295,7 @@ int_restore_rest:
 	TRACE_IRQS_OFF
 	jmp int_with_check
 	CFI_ENDPROC
-END(system_call)
+ENDPROC(system_call)
 
 	.macro FORK_LIKE func
 ENTRY(stub_\func)
@@ -824,9 +1308,10 @@ ENTRY(stub_\func)
 	DEFAULT_FRAME 0 8		/* offset 8: return address */
 	call sys_\func
 	RESTORE_TOP_OF_STACK %r11, 8
-	ret $REST_SKIP		/* pop extended registers */
+	pax_force_retaddr
+	ret
 	CFI_ENDPROC
-END(stub_\func)
+ENDPROC(stub_\func)
 	.endm
 
 	.macro FIXED_FRAME label,func
@@ -836,9 +1321,10 @@ ENTRY(\label)
 	FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
 	call \func
 	RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
-END(\label)
+ENDPROC(\label)
 	.endm
 
 	FORK_LIKE  clone
@@ -846,19 +1332,6 @@ END(\label)
 	FORK_LIKE  vfork
 	FIXED_FRAME stub_iopl, sys_iopl
 
-ENTRY(ptregscall_common)
-	DEFAULT_FRAME 1 8	/* offset 8: return address */
-	RESTORE_TOP_OF_STACK %r11, 8
-	movq_cfi_restore R15+8, r15
-	movq_cfi_restore R14+8, r14
-	movq_cfi_restore R13+8, r13
-	movq_cfi_restore R12+8, r12
-	movq_cfi_restore RBP+8, rbp
-	movq_cfi_restore RBX+8, rbx
-	ret $REST_SKIP		/* pop extended registers */
-	CFI_ENDPROC
-END(ptregscall_common)
-
 ENTRY(stub_execve)
 	CFI_STARTPROC
 	addq $8, %rsp
@@ -870,7 +1343,7 @@ ENTRY(stub_execve)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
-END(stub_execve)
+ENDPROC(stub_execve)
 
 /*
  * sigreturn is special because it needs to restore all registers on return.
@@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
-END(stub_rt_sigreturn)
+ENDPROC(stub_rt_sigreturn)
 
 #ifdef CONFIG_X86_X32_ABI
 ENTRY(stub_x32_rt_sigreturn)
@@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
-END(stub_x32_rt_sigreturn)
+ENDPROC(stub_x32_rt_sigreturn)
 
 ENTRY(stub_x32_execve)
 	CFI_STARTPROC
@@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
-END(stub_x32_execve)
+ENDPROC(stub_x32_execve)
 
 #endif
 
@@ -952,7 +1425,7 @@ vector=vector+1
 2:	jmp common_interrupt
 .endr
 	CFI_ENDPROC
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
 
 .previous
 END(interrupt)
@@ -969,8 +1442,8 @@ END(interrupt)
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
 	/* reserve pt_regs for scratch regs and rbp */
-	subq $ORIG_RAX-RBP, %rsp
-	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+	subq $ORIG_RAX, %rsp
+	CFI_ADJUST_CFA_OFFSET ORIG_RAX
 	SAVE_ARGS_IRQ
 	call \func
 	.endm
@@ -997,14 +1470,14 @@ ret_from_intr:
 
 	/* Restore saved previous stack */
 	popq %rsi
-	CFI_DEF_CFA rsi,SS+8-RBP	/* reg/off reset after def_cfa_expr */
-	leaq ARGOFFSET-RBP(%rsi), %rsp
+	CFI_DEF_CFA rsi,SS+8	/* reg/off reset after def_cfa_expr */
+	movq %rsi, %rsp
 	CFI_DEF_CFA_REGISTER	rsp
-	CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET
+	CFI_ADJUST_CFA_OFFSET	-ARGOFFSET
 
 exit_intr:
 	GET_THREAD_INFO(%rcx)
-	testl $3,CS-ARGOFFSET(%rsp)
+	testb $3,CS-ARGOFFSET(%rsp)
 	je retint_kernel
 
 	/* Interrupt came from user space */
@@ -1026,12 +1499,16 @@ retint_swapgs:		/* return to user-space
 	 * The iretq could re-enable interrupts:
 	 */
 	DISABLE_INTERRUPTS(CLBR_ANY)
+	pax_exit_kernel_user
+retint_swapgs_pax:
 	TRACE_IRQS_IRETQ
 	SWAPGS
 	jmp restore_args
 
 retint_restore_args:	/* return to kernel space */
 	DISABLE_INTERRUPTS(CLBR_ANY)
+	pax_exit_kernel
+	pax_force_retaddr (RIP-ARGOFFSET)
 	/*
 	 * The iretq could re-enable interrupts:
 	 */
@@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
 #endif
 
 	CFI_ENDPROC
-END(common_interrupt)
+ENDPROC(common_interrupt)
 /*
  * End of kprobes section
  */
@@ -1130,7 +1607,7 @@ ENTRY(\sym)
 	interrupt \do_sym
 	jmp ret_from_intr
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 #ifdef CONFIG_TRACING
@@ -1218,7 +1695,7 @@ ENTRY(\sym)
 	call \do_sym
 	jmp error_exit		/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 .macro paranoidzeroentry sym do_sym
@@ -1236,10 +1713,10 @@ ENTRY(\sym)
 	call \do_sym
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
 	INTR_FRAME
@@ -1252,12 +1729,18 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF_DEBUG
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
+#ifdef CONFIG_SMP
+	imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
+	lea init_tss(%r13), %r13
+#else
+	lea init_tss(%rip), %r13
+#endif
 	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	call \do_sym
 	addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 .macro errorentry sym do_sym
@@ -1275,7 +1758,7 @@ ENTRY(\sym)
 	call \do_sym
 	jmp error_exit			/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 #ifdef CONFIG_TRACING
@@ -1306,7 +1789,7 @@ ENTRY(\sym)
 	call \do_sym
 	jmp paranoid_exit		/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-END(\sym)
+ENDPROC(\sym)
 .endm
 
 zeroentry divide_error do_divide_error
@@ -1336,9 +1819,10 @@ gs_change:
 2:	mfence		/* workaround */
 	SWAPGS
 	popfq_cfi
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 
 	_ASM_EXTABLE(gs_change,bad_gs)
 	.section .fixup,"ax"
@@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET   -8
 	decl PER_CPU_VAR(irq_count)
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
@@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback)   # do
 	decl PER_CPU_VAR(irq_count)
 	jmp  error_exit
 	CFI_ENDPROC
-END(xen_do_hypervisor_callback)
+ENDPROC(xen_do_hypervisor_callback)
 
 /*
  * Hypervisor uses this for application faults while it executes.
@@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
 	SAVE_ALL
 	jmp error_exit
 	CFI_ENDPROC
-END(xen_failsafe_callback)
+ENDPROC(xen_failsafe_callback)
 
 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 	xen_hvm_callback_vector xen_evtchn_do_upcall
@@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
 	DEFAULT_FRAME
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF_DEBUG
-	testl %ebx,%ebx				/* swapgs needed? */
+	testl $1,%ebx				/* swapgs needed? */
 	jnz paranoid_restore
-	testl $3,CS(%rsp)
+	testb $3,CS(%rsp)
 	jnz   paranoid_userspace
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pax_exit_kernel
+	TRACE_IRQS_IRETQ 0
+	SWAPGS_UNSAFE_STACK
+	RESTORE_ALL 8
+	pax_force_retaddr_bts
+	jmp irq_return
+#endif
 paranoid_swapgs:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pax_exit_kernel_user
+#else
+	pax_exit_kernel
+#endif
 	TRACE_IRQS_IRETQ 0
 	SWAPGS_UNSAFE_STACK
 	RESTORE_ALL 8
 	jmp irq_return
 paranoid_restore:
+	pax_exit_kernel
 	TRACE_IRQS_IRETQ_DEBUG 0
 	RESTORE_ALL 8
+	pax_force_retaddr_bts
 	jmp irq_return
 paranoid_userspace:
 	GET_THREAD_INFO(%rcx)
@@ -1557,7 +2057,7 @@ paranoid_schedule:
 	TRACE_IRQS_OFF
 	jmp paranoid_userspace
 	CFI_ENDPROC
-END(paranoid_exit)
+ENDPROC(paranoid_exit)
 
 /*
  * Exception entry point. This expects an error code/orig_rax on the stack.
@@ -1584,12 +2084,23 @@ ENTRY(error_entry)
 	movq_cfi r14, R14+8
 	movq_cfi r15, R15+8
 	xorl %ebx,%ebx
-	testl $3,CS+8(%rsp)
+	testb $3,CS+8(%rsp)
 	je error_kernelspace
 error_swapgs:
 	SWAPGS
 error_sti:
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	testb $3, CS+8(%rsp)
+	jnz 1f
+	pax_enter_kernel
+	jmp 2f
+1:	pax_enter_kernel_user
+2:
+#else
+	pax_enter_kernel
+#endif
 	TRACE_IRQS_OFF
+	pax_force_retaddr
 	ret
 
 /*
@@ -1616,7 +2127,7 @@ bstep_iret:
 	movq %rcx,RIP+8(%rsp)
 	jmp error_swapgs
 	CFI_ENDPROC
-END(error_entry)
+ENDPROC(error_entry)
 
 
 /* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
@@ -1627,7 +2138,7 @@ ENTRY(error_exit)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
-	testl %eax,%eax
+	testl $1,%eax
 	jne retint_kernel
 	LOCKDEP_SYS_EXIT_IRQ
 	movl TI_flags(%rcx),%edx
@@ -1636,7 +2147,7 @@ ENTRY(error_exit)
 	jnz retint_careful
 	jmp retint_swapgs
 	CFI_ENDPROC
-END(error_exit)
+ENDPROC(error_exit)
 
 /*
  * Test if a given stack is an NMI stack or not.
@@ -1694,9 +2205,11 @@ ENTRY(nmi)
 	 * If %cs was not the kernel segment, then the NMI triggered in user
 	 * space, which means it is definitely not nested.
 	 */
+	cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
+	je 1f
 	cmpl $__KERNEL_CS, 16(%rsp)
 	jne first_nmi
-
+1:
 	/*
 	 * Check the special variable on the stack to see if NMIs are
 	 * executing.
@@ -1730,8 +2243,7 @@ nested_nmi:
 
 1:
 	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
-	leaq -1*8(%rsp), %rdx
-	movq %rdx, %rsp
+	subq $8, %rsp
 	CFI_ADJUST_CFA_OFFSET 1*8
 	leaq -10*8(%rsp), %rdx
 	pushq_cfi $__KERNEL_DS
@@ -1749,6 +2261,7 @@ nested_nmi_out:
 	CFI_RESTORE rdx
 
 	/* No need to check faults here */
+#	pax_force_retaddr_bts
 	INTERRUPT_RETURN
 
 	CFI_RESTORE_STATE
@@ -1845,13 +2358,13 @@ end_repeat_nmi:
 	subq $ORIG_RAX-R15, %rsp
 	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
 	/*
-	 * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
+	 * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
 	 * as we should not be calling schedule in NMI context.
 	 * Even with normal interrupts enabled. An NMI should not be
 	 * setting NEED_RESCHED or anything that normal interrupts and
 	 * exceptions might do.
 	 */
-	call save_paranoid
+	call save_paranoid_nmi
 	DEFAULT_FRAME 0
 
 	/*
@@ -1861,9 +2374,9 @@ end_repeat_nmi:
 	 * NMI itself takes a page fault, the page fault that was preempted
 	 * will read the information from the NMI page fault and not the
 	 * origin fault. Save it off and restore it if it changes.
-	 * Use the r12 callee-saved register.
+	 * Use the r13 callee-saved register.
 	 */
-	movq %cr2, %r12
+	movq %cr2, %r13
 
 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 	movq %rsp,%rdi
@@ -1872,31 +2385,36 @@ end_repeat_nmi:
 
 	/* Did the NMI take a page fault? Restore cr2 if it did */
 	movq %cr2, %rcx
-	cmpq %rcx, %r12
+	cmpq %rcx, %r13
 	je 1f
-	movq %r12, %cr2
+	movq %r13, %cr2
 1:
 	
-	testl %ebx,%ebx				/* swapgs needed? */
+	testl $1,%ebx				/* swapgs needed? */
 	jnz nmi_restore
 nmi_swapgs:
 	SWAPGS_UNSAFE_STACK
 nmi_restore:
+	pax_exit_kernel_nmi
 	/* Pop the extra iret frame at once */
 	RESTORE_ALL 6*8
+	testb $3, 8(%rsp)
+	jnz 1f
+	pax_force_retaddr_bts
+1:
 
 	/* Clear the NMI executing stack variable */
 	movq $0, 5*8(%rsp)
 	jmp irq_return
 	CFI_ENDPROC
-END(nmi)
+ENDPROC(nmi)
 
 ENTRY(ignore_sysret)
 	CFI_STARTPROC
 	mov $-ENOSYS,%eax
 	sysret
 	CFI_ENDPROC
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
 
 /*
  * End of kprobes section
diff -ruNp linux-3.13.11/arch/x86/kernel/ftrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ftrace.c
--- linux-3.13.11/arch/x86/kernel/ftrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ftrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long
 {
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 
+	ip = ktla_ktva(ip);
+
 	/*
 	 * Note: Due to modules and __init, code can
 	 *  disappear and change, we need to protect against faulting
@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned l
 	unsigned char old[MCOUNT_INSN_SIZE];
 	int ret;
 
-	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+	memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
 
 	ftrace_update_func = ip;
 	/* Make sure the breakpoints see the ftrace_update_func update */
@@ -306,7 +308,7 @@ static int ftrace_write(unsigned long ip
 	 * kernel identity mapping to modify code.
 	 */
 	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
-		ip = (unsigned long)__va(__pa_symbol(ip));
+		ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
 
 	return probe_kernel_write((void *)ip, val, size);
 }
@@ -316,7 +318,7 @@ static int add_break(unsigned long ip, c
 	unsigned char replaced[MCOUNT_INSN_SIZE];
 	unsigned char brk = BREAKPOINT_INSTRUCTION;
 
-	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+	if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
 		return -EFAULT;
 
 	/* Make sure it is what we expect it to be */
@@ -664,7 +666,7 @@ ftrace_modify_code(unsigned long ip, uns
 	return ret;
 
  fail_update:
-	probe_kernel_write((void *)ip, &old_code[0], 1);
+	probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
 	goto out;
 }
 
diff -ruNp linux-3.13.11/arch/x86/kernel/head64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head64.c
--- linux-3.13.11/arch/x86/kernel/head64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head64.c	2014-07-09 12:00:15.000000000 +0200
@@ -67,12 +67,12 @@ again:
 	pgd = *pgd_p;
 
 	/*
-	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
-	 * critical -- __PAGE_OFFSET would point us back into the dynamic
+	 * The use of __early_va rather than __va here is critical:
+	 * __va would point us back into the dynamic
 	 * range and we might end up looping forever...
 	 */
 	if (pgd)
-		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+		pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
 	else {
 		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
 			reset_early_page_tables();
@@ -82,13 +82,13 @@ again:
 		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
 		for (i = 0; i < PTRS_PER_PUD; i++)
 			pud_p[i] = 0;
-		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+		*pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
 	}
 	pud_p += pud_index(address);
 	pud = *pud_p;
 
 	if (pud)
-		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+		pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
 	else {
 		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
 			reset_early_page_tables();
@@ -98,7 +98,7 @@ again:
 		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
 		for (i = 0; i < PTRS_PER_PMD; i++)
 			pmd_p[i] = 0;
-		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+		*pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
 	}
 	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
 	pmd_p[pmd_index(address)] = pmd;
@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kern
 	if (console_loglevel == 10)
 		early_printk("Kernel alive\n");
 
-	clear_page(init_level4_pgt);
 	/* set init_level4_pgt kernel high mapping*/
 	init_level4_pgt[511] = early_level4_pgt[511];
 
diff -ruNp linux-3.13.11/arch/x86/kernel/head_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head_32.S
--- linux-3.13.11/arch/x86/kernel/head_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -26,6 +26,12 @@
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
 
+#ifdef CONFIG_PAX_KERNEXEC
+#define ta(X) (X)
+#else
+#define ta(X) ((X) - __PAGE_OFFSET)
+#endif
+
 /*
  * References to members of the new_cpu_data structure.
  */
@@ -55,11 +61,7 @@
  * and small than max_low_pfn, otherwise will waste some page table entries
  */
 
-#if PTRS_PER_PMD > 1
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
-#else
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
-#endif
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
 
 /* Number of possible pages in the lowmem region */
 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 
 /*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
+
+/*
  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
  * %esi points to the real-mode code as a 32-bit pointer.
  * CS and DS must be 4 GB flat segments, but we don't depend on
@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
+
+#ifdef CONFIG_PAX_KERNEXEC
+	jmp startup_32
+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
+.fill PAGE_SIZE-5,1,0xcc
+#endif
+
 ENTRY(startup_32)
 	movl pa(stack_start),%ecx
 	
@@ -106,6 +121,59 @@ ENTRY(startup_32)
 2:
 	leal -__PAGE_OFFSET(%ecx),%esp
 
+#ifdef CONFIG_SMP
+	movl $pa(cpu_gdt_table),%edi
+	movl $__per_cpu_load,%eax
+	movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
+	rorl $16,%eax
+	movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
+	movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
+	movl $__per_cpu_end - 1,%eax
+	subl $__per_cpu_start,%eax
+	movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	movl $NR_CPUS,%ecx
+	movl $pa(cpu_gdt_table),%edi
+1:
+	movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
+	movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
+	movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
+	addl $PAGE_SIZE_asm,%edi
+	loop 1b
+#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+	movl $pa(boot_gdt),%edi
+	movl $__LOAD_PHYSICAL_ADDR,%eax
+	movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
+	rorl $16,%eax
+	movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
+	movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
+	rorl $16,%eax
+
+	ljmp $(__BOOT_CS),$1f
+1:
+
+	movl $NR_CPUS,%ecx
+	movl $pa(cpu_gdt_table),%edi
+	addl $__PAGE_OFFSET,%eax
+1:
+	movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
+	movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
+	movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
+	movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
+	rorl $16,%eax
+	movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
+	movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
+	movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
+	movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
+	rorl $16,%eax
+	addl $PAGE_SIZE_asm,%edi
+	loop 1b
+#endif
+
 /*
  * Clear BSS first so that there are no surprises...
  */
@@ -201,8 +269,11 @@ ENTRY(startup_32)
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#ifdef CONFIG_COMPAT_VDSO
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#else
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#endif
 #else	/* Not PAE */
 
 page_pde_offset = (__PAGE_OFFSET >> 20);
@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	movl %eax, pa(max_pfn_mapped)
 
 	/* Do early initialization of the fixmap area */
-	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(initial_page_table+0xffc)
+#ifdef CONFIG_COMPAT_VDSO
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
+#else
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
+#endif
 #endif
 
 #ifdef CONFIG_PARAVIRT
@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	cmpl $num_subarch_entries, %eax
 	jae bad_subarch
 
-	movl pa(subarch_entries)(,%eax,4), %eax
-	subl $__PAGE_OFFSET, %eax
-	jmp *%eax
+	jmp *pa(subarch_entries)(,%eax,4)
 
 bad_subarch:
 WEAK(lguest_entry)
@@ -261,10 +333,10 @@ WEAK(xen_entry)
 	__INITDATA
 
 subarch_entries:
-	.long default_entry		/* normal x86/PC */
-	.long lguest_entry		/* lguest hypervisor */
-	.long xen_entry			/* Xen hypervisor */
-	.long default_entry		/* Moorestown MID */
+	.long ta(default_entry)		/* normal x86/PC */
+	.long ta(lguest_entry)		/* lguest hypervisor */
+	.long ta(xen_entry)		/* Xen hypervisor */
+	.long ta(default_entry)		/* Moorestown MID */
 num_subarch_entries = (. - subarch_entries) / 4
 .previous
 #else
@@ -354,6 +426,7 @@ default_entry:
 	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
+#ifdef CONFIG_X86_PAE
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
 	jz enable_paging
 
@@ -382,6 +455,9 @@ default_entry:
 	/* Make changes effective */
 	wrmsr
 
+	btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
+#endif
+
 enable_paging:
 
 /*
@@ -449,14 +525,20 @@ is486:
 1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
 	movl %eax,%ss			# after changing gdt.
 
-	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
+#	movl $(__KERNEL_DS),%eax	# DS/ES contains default KERNEL segment
 	movl %eax,%ds
 	movl %eax,%es
 
 	movl $(__KERNEL_PERCPU), %eax
 	movl %eax,%fs			# set this cpu's percpu
 
+#ifdef CONFIG_CC_STACKPROTECTOR
 	movl $(__KERNEL_STACK_CANARY),%eax
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
+	movl $(__USER_DS),%eax
+#else
+	xorl %eax,%eax
+#endif
 	movl %eax,%gs
 
 	xorl %eax,%eax			# Clear LDT
@@ -512,8 +594,11 @@ setup_once:
 	 * relocation.  Manually set base address in stack canary
 	 * segment descriptor.
 	 */
-	movl $gdt_page,%eax
+	movl $cpu_gdt_table,%eax
 	movl $stack_canary,%ecx
+#ifdef CONFIG_SMP
+	addl $__per_cpu_load,%ecx
+#endif
 	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
 	shrl $16, %ecx
 	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
 	cmpl $2,(%esp)		# X86_TRAP_NMI
 	je is_nmi		# Ignore NMI
 
-	cmpl $2,%ss:early_recursion_flag
+	cmpl $1,%ss:early_recursion_flag
 	je hlt_loop
 	incl %ss:early_recursion_flag
 
@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
 	pushl (20+6*4)(%esp)	/* trapno */
 	pushl $fault_msg
 	call printk
-#endif
 	call dump_stack
+#endif
 hlt_loop:
 	hlt
 	jmp hlt_loop
@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
 /* This is the default interrupt "handler" :-) */
 	ALIGN
 ignore_int:
-	cld
 #ifdef CONFIG_PRINTK
+	cmpl $2,%ss:early_recursion_flag
+	je hlt_loop
+	incl %ss:early_recursion_flag
+	cld
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -617,9 +705,6 @@ ignore_int:
 	movl $(__KERNEL_DS),%eax
 	movl %eax,%ds
 	movl %eax,%es
-	cmpl $2,early_recursion_flag
-	je hlt_loop
-	incl early_recursion_flag
 	pushl 16(%esp)
 	pushl 24(%esp)
 	pushl 32(%esp)
@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
 /*
  * BSS section
  */
-__PAGE_ALIGNED_BSS
-	.align PAGE_SIZE
 #ifdef CONFIG_X86_PAE
+.section .initial_pg_pmd,"a",@progbits
 initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
 #else
+.section .initial_page_table,"a",@progbits
 ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
+.section .initial_pg_fixmap,"a",@progbits
 initial_pg_fixmap:
 	.fill 1024,4,0
+.section .empty_zero_page,"a",@progbits
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
+.section .swapper_pg_dir,"a",@progbits
 ENTRY(swapper_pg_dir)
+#ifdef CONFIG_X86_PAE
+	.fill 4,8,0
+#else
 	.fill 1024,4,0
+#endif
 
 /*
  * This starts the data section.
  */
 #ifdef CONFIG_X86_PAE
-__PAGE_ALIGNED_DATA
-	/* Page-aligned for the benefit of paravirt? */
-	.align PAGE_SIZE
+.section .initial_page_table,"a",@progbits
 ENTRY(initial_page_table)
 	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
 # if KPMDS == 3
@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
 #  error "Kernel PMDs should be 1, 2 or 3"
 # endif
 	.align PAGE_SIZE		/* needs to be page-sized too */
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+ENTRY(cpu_pgd)
+	.rept 2*NR_CPUS
+	.fill	4,8,0
+	.endr
+#endif
+
 #endif
 
 .data
 .balign 4
 ENTRY(stack_start)
-	.long init_thread_union+THREAD_SIZE
+	.long init_thread_union+THREAD_SIZE-8
 
 __INITRODATA
 int_msg:
@@ -727,7 +825,7 @@ fault_msg:
  * segment size, and 32-bit linear address value:
  */
 
-	.data
+.section .rodata,"a",@progbits
 .globl boot_gdt_descr
 .globl idt_descr
 
@@ -736,7 +834,7 @@ fault_msg:
 	.word 0				# 32 bit align gdt_desc.address
 boot_gdt_descr:
 	.word __BOOT_DS+7
-	.long boot_gdt - __PAGE_OFFSET
+	.long pa(boot_gdt)
 
 	.word 0				# 32-bit align idt_desc.address
 idt_descr:
@@ -747,7 +845,7 @@ idt_descr:
 	.word 0				# 32 bit align gdt_desc.address
 ENTRY(early_gdt_descr)
 	.word GDT_ENTRIES*8-1
-	.long gdt_page			/* Overwritten for secondary CPUs */
+	.long cpu_gdt_table		/* Overwritten for secondary CPUs */
 
 /*
  * The boot_gdt must mirror the equivalent in setup.S and is
@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
 	.align L1_CACHE_BYTES
 ENTRY(boot_gdt)
 	.fill GDT_ENTRY_BOOT_CS,8,0
-	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
-	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
+	.quad 0x00cf9b000000ffff	/* kernel 4GB code at 0x00000000 */
+	.quad 0x00cf93000000ffff	/* kernel 4GB data at 0x00000000 */
+
+	.align PAGE_SIZE_asm
+ENTRY(cpu_gdt_table)
+	.rept NR_CPUS
+	.quad 0x0000000000000000	/* NULL descriptor */
+	.quad 0x0000000000000000	/* 0x0b reserved */
+	.quad 0x0000000000000000	/* 0x13 reserved */
+	.quad 0x0000000000000000	/* 0x1b reserved */
+
+#ifdef CONFIG_PAX_KERNEXEC
+	.quad 0x00cf9b000000ffff	/* 0x20 alternate kernel 4GB code at 0x00000000 */
+#else
+	.quad 0x0000000000000000	/* 0x20 unused */
+#endif
+
+	.quad 0x0000000000000000	/* 0x28 unused */
+	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
+	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
+	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
+	.quad 0x0000000000000000	/* 0x4b reserved */
+	.quad 0x0000000000000000	/* 0x53 reserved */
+	.quad 0x0000000000000000	/* 0x5b reserved */
+
+	.quad 0x00cf9b000000ffff	/* 0x60 kernel 4GB code at 0x00000000 */
+	.quad 0x00cf93000000ffff	/* 0x68 kernel 4GB data at 0x00000000 */
+	.quad 0x00cffb000000ffff	/* 0x73 user 4GB code at 0x00000000 */
+	.quad 0x00cff3000000ffff	/* 0x7b user 4GB data at 0x00000000 */
+
+	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
+	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
+
+	/*
+	 * Segments used for calling PnP BIOS have byte granularity.
+	 * The code segments and data segments have fixed 64k limits,
+	 * the transfer segment sizes are set at run time.
+	 */
+	.quad 0x00409b000000ffff	/* 0x90 32-bit code */
+	.quad 0x00009b000000ffff	/* 0x98 16-bit code */
+	.quad 0x000093000000ffff	/* 0xa0 16-bit data */
+	.quad 0x0000930000000000	/* 0xa8 16-bit data */
+	.quad 0x0000930000000000	/* 0xb0 16-bit data */
+
+	/*
+	 * The APM segments have byte granularity and their bases
+	 * are set at run time.  All have 64k limits.
+	 */
+	.quad 0x00409b000000ffff	/* 0xb8 APM CS    code */
+	.quad 0x00009b000000ffff	/* 0xc0 APM CS 16 code (16 bit) */
+	.quad 0x004093000000ffff	/* 0xc8 APM DS    data */
+
+	.quad 0x00c0930000000000	/* 0xd0 - ESPFIX SS */
+	.quad 0x0040930000000000	/* 0xd8 - PERCPU */
+	.quad 0x0040910000000017	/* 0xe0 - STACK_CANARY */
+	.quad 0x0000000000000000	/* 0xe8 - PCIBIOS_CS */
+	.quad 0x0000000000000000	/* 0xf0 - PCIBIOS_DS */
+	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
+
+	/* Be sure this is zeroed to avoid false validations in Xen */
+	.fill PAGE_SIZE_asm - GDT_SIZE,1,0
+	.endr
diff -ruNp linux-3.13.11/arch/x86/kernel/head_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head_64.S
--- linux-3.13.11/arch/x86/kernel/head_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/head_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,8 @@
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
+L4_VMALLOC_START = pgd_index(VMALLOC_START)
+L3_VMALLOC_START = pud_index(VMALLOC_START)
+L4_VMALLOC_END = pgd_index(VMALLOC_END)
+L3_VMALLOC_END = pud_index(VMALLOC_END)
+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
 
 	.text
 	__HEAD
@@ -89,11 +97,24 @@ startup_64:
 	 * Fixup the physical addresses in the page table
 	 */
 	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
+	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
 
-	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
-	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)
+	addq	%rbp, level3_ident_pgt + (0*8)(%rip)
+#ifndef CONFIG_XEN
+	addq	%rbp, level3_ident_pgt + (1*8)(%rip)
+#endif
+
+	addq	%rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+	addq	%rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
+	addq	%rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
 
 	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
+	addq	%rbp, level2_fixmap_pgt + (507*8)(%rip)
 
 	/*
 	 * Set up the identity mapping for the switchover.  These
@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
 1:
 
-	/* Enable PAE mode and PGE */
-	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
+	/* Enable PAE mode and PSE/PGE */
+	movl	$(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
 	movq	%rcx, %cr4
 
 	/* Setup early boot stage 4 level pagetables. */
@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
 	movl	$MSR_EFER, %ecx
 	rdmsr
 	btsl	$_EFER_SCE, %eax	/* Enable System Call */
-	btl	$20,%edi		/* No Execute supported? */
+	btl	$(X86_FEATURE_NX & 31),%edi	/* No Execute supported? */
 	jnc     1f
 	btsl	$_EFER_NX, %eax
 	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
+#ifndef CONFIG_EFI
+	btsq	$_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
+#endif
+	btsq	$_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
+	btsq	$_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
+	btsq	$_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
+	btsq	$_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
+	btsq	$_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
+	btsq	$_PAGE_BIT_NX, __supported_pte_mask(%rip)
 1:	wrmsr				/* Make changes effective */
 
 	/* Setup cr0 */
@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
 	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
 	 *		address given in m16:64.
 	 */
+	pax_set_fptr_mask
 	movq	initial_code(%rip),%rax
 	pushq	$0		# fake return address to stop unwinder
 	pushq	$__KERNEL_CS	# set correct cs
@@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
 	.quad	INIT_PER_CPU_VAR(irq_stack_union)
 
 	GLOBAL(stack_start)
-	.quad  init_thread_union+THREAD_SIZE-8
+	.quad  init_thread_union+THREAD_SIZE-16
 	.word  0
 	__FINITDATA
 
@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
 	call dump_stack
 #ifdef CONFIG_KALLSYMS	
 	leaq early_idt_ripmsg(%rip),%rdi
-	movq 40(%rsp),%rsi	# %rip again
+	movq 88(%rsp),%rsi	# %rip again
 	call __print_symbol
 #endif
 #endif /* EARLY_PRINTK */
@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
 early_recursion_flag:
 	.long 0
 
+	.section .rodata,"a",@progbits
 #ifdef CONFIG_EARLY_PRINTK
 early_idt_msg:
 	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
 NEXT_PAGE(early_dynamic_pgts)
 	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
 
-	.data
+	.section .rodata,"a",@progbits
 
-#ifndef CONFIG_XEN
 NEXT_PAGE(init_level4_pgt)
-	.fill	512,8,0
-#else
-NEXT_PAGE(init_level4_pgt)
-	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.org	init_level4_pgt + L4_VMALLOC_START*8, 0
+	.quad	level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.org	init_level4_pgt + L4_VMALLOC_END*8, 0
+	.quad	level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.org	init_level4_pgt + L4_VMEMMAP_START*8, 0
+	.quad	level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.org    init_level4_pgt + L4_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+NEXT_PAGE(cpu_pgd)
+	.rept 2*NR_CPUS
+	.fill	512,8,0
+	.endr
+#endif
+
 NEXT_PAGE(level3_ident_pgt)
 	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+#ifdef CONFIG_XEN
 	.fill	511, 8, 0
+#else
+	.quad	level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
+	.fill	510,8,0
+#endif
+
+NEXT_PAGE(level3_vmalloc_start_pgt)
+	.fill	512,8,0
+
+NEXT_PAGE(level3_vmalloc_end_pgt)
+	.fill	512,8,0
+
+NEXT_PAGE(level3_vmemmap_pgt)
+	.fill	L3_VMEMMAP_START,8,0
+	.quad	level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
 NEXT_PAGE(level2_ident_pgt)
-	/* Since I easily can, map the first 1G.
+	/* Since I easily can, map the first 2G.
 	 * Don't set NX because code runs from these pages.
 	 */
-	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
-#endif
+	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
 
 NEXT_PAGE(level3_kernel_pgt)
 	.fill	L3_START_KERNEL,8,0
@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
 	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
 	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
 
+NEXT_PAGE(level2_vmemmap_pgt)
+	.fill	512,8,0
+
 NEXT_PAGE(level2_kernel_pgt)
 	/*
 	 * 512 MB kernel mapping. We spend a full page on this pagetable
@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
 NEXT_PAGE(level2_fixmap_pgt)
 	.fill	506,8,0
 	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-	.fill	5,8,0
+	.quad	level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
+	/* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
+	.fill	4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
 	.fill	512,8,0
 
+NEXT_PAGE(level1_vsyscall_pgt)
+	.fill	512,8,0
+
 #undef PMDS
 
-	.data
+	.align PAGE_SIZE
+ENTRY(cpu_gdt_table)
+	.rept NR_CPUS
+	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
+	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
+	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
+	.quad	0x00cffb000000ffff	/* __USER32_CS */
+	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS  */
+	.quad	0x00affb000000ffff	/* __USER_CS */
+
+#ifdef CONFIG_PAX_KERNEXEC
+	.quad	0x00af9b000000ffff	/* __KERNEXEC_KERNEL_CS */
+#else
+	.quad	0x0			/* unused */
+#endif
+
+	.quad	0,0			/* TSS */
+	.quad	0,0			/* LDT */
+	.quad	0,0,0			/* three TLS descriptors */
+	.quad	0x0000f40000000000	/* node/CPU stored in limit */
+	/* asm/segment.h:GDT_ENTRIES must match this */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	.quad	0x00cf93000000ffff	/* __UDEREF_KERNEL_DS */
+#else
+	.quad	0x0			/* unused */
+#endif
+
+	/* zero the remaining page */
+	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+	.endr
+
 	.align 16
 	.globl early_gdt_descr
 early_gdt_descr:
 	.word	GDT_ENTRIES*8-1
 early_gdt_descr_base:
-	.quad	INIT_PER_CPU_VAR(gdt_page)
+	.quad	cpu_gdt_table
 
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad   0x0000000000000000
 
 #include "../../x86/xen/xen-head.S"
-	
-	__PAGE_ALIGNED_BSS
+
+	.section .rodata,"a",@progbits
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
diff -ruNp linux-3.13.11/arch/x86/kernel/i386_ksyms_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i386_ksyms_32.c
--- linux-3.13.11/arch/x86/kernel/i386_ksyms_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i386_ksyms_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
 EXPORT_SYMBOL(cmpxchg8b_emu);
 #endif
 
+EXPORT_SYMBOL_GPL(cpu_gdt_table);
+
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
 EXPORT_SYMBOL(___preempt_schedule_context);
 #endif
 #endif
+
+#ifdef CONFIG_PAX_KERNEXEC
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+EXPORT_SYMBOL(cpu_pgd);
+#endif
diff -ruNp linux-3.13.11/arch/x86/kernel/i387.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i387.c
--- linux-3.13.11/arch/x86/kernel/i387.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i387.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fp
 static inline bool interrupted_user_mode(void)
 {
 	struct pt_regs *regs = get_irq_regs();
-	return regs && user_mode_vm(regs);
+	return regs && user_mode(regs);
 }
 
 /*
diff -ruNp linux-3.13.11/arch/x86/kernel/i8259.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i8259.c
--- linux-3.13.11/arch/x86/kernel/i8259.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/i8259.c	2014-07-09 12:00:15.000000000 +0200
@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned i
 static void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	io_apic_irqs &= ~(1<<irq);
+	io_apic_irqs &= ~(1UL<<irq);
 	irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
 				      i8259A_chip.name);
 	enable_irq(irq);
@@ -209,7 +209,7 @@ spurious_8259A_irq:
 			       "spurious 8259A interrupt: IRQ%d.\n", irq);
 			spurious_irq_mask |= irqmask;
 		}
-		atomic_inc(&irq_err_count);
+		atomic_inc_unchecked(&irq_err_count);
 		/*
 		 * Theoretically we do not have to handle this IRQ,
 		 * but in Linux this does not cause problems and is
@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
 	/* (slave's support for AEOI in flat mode is to be investigated) */
 	outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
 
+	pax_open_kernel();
 	if (auto_eoi)
 		/*
 		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_chip.irq_mask_ack = disable_8259A_irq;
+		*(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
 	else
-		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+		*(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+	pax_close_kernel();
 
 	udelay(100);		/* wait for 8259A to initialize */
 
diff -ruNp linux-3.13.11/arch/x86/kernel/io_delay.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/io_delay.c
--- linux-3.13.11/arch/x86/kernel/io_delay.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/io_delay.c	2014-07-09 12:00:15.000000000 +0200
@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port
  * Quirk table for systems that misbehave (lock up, etc.) if port
  * 0x80 is used:
  */
-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
 	{
 		.callback	= dmi_io_delay_0xed_port,
 		.ident		= "Compaq Presario V6000",
diff -ruNp linux-3.13.11/arch/x86/kernel/ioport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ioport.c
--- linux-3.13.11/arch/x86/kernel/ioport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ioport.c	2014-07-09 12:00:15.000000000 +0200
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long
 		return -EINVAL;
 	if (turn_on && !capable(CAP_SYS_RAWIO))
 		return -EPERM;
+#ifdef CONFIG_GRKERNSEC_IO
+	if (turn_on && grsec_disable_privio) {
+		gr_handle_ioperm();
+		return -ENODEV;
+	}
+#endif
 
 	/*
 	 * If it's the first ioperm() call in this thread's lifetime, set the
@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
 	 * because the ->io_bitmap_max value must match the bitmap
 	 * contents:
 	 */
-	tss = &per_cpu(init_tss, get_cpu());
+	tss = init_tss + get_cpu();
 
 	if (turn_on)
 		bitmap_clear(t->io_bitmap_ptr, from, num);
@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, leve
 	if (level > old) {
 		if (!capable(CAP_SYS_RAWIO))
 			return -EPERM;
+#ifdef CONFIG_GRKERNSEC_IO
+		if (grsec_disable_privio) {
+			gr_handle_iopl();
+			return -ENODEV;
+		}
+#endif
 	}
 	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
 	t->iopl = level << 12;
diff -ruNp linux-3.13.11/arch/x86/kernel/irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq.c
--- linux-3.13.11/arch/x86/kernel/irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,7 +21,7 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
 
-atomic_t irq_err_count;
+atomic_unchecked_t irq_err_count;
 
 /* Function pointer for generic interrupt vector handling */
 void (*x86_platform_ipi_callback)(void) = NULL;
@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file
 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
 	seq_printf(p, "  Machine check polls\n");
 #endif
-	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
-	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
+	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
 #endif
 	return 0;
 }
@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 
 u64 arch_irq_stat(void)
 {
-	u64 sum = atomic_read(&irq_err_count);
+	u64 sum = atomic_read_unchecked(&irq_err_count);
 	return sum;
 }
 
diff -ruNp linux-3.13.11/arch/x86/kernel/irq_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq_32.c
--- linux-3.13.11/arch/x86/kernel/irq_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
 	__asm__ __volatile__("andl %%esp,%0" :
 			     "=r" (sp) : "0" (THREAD_SIZE - 1));
 
-	return sp < (sizeof(struct thread_info) + STACK_WARN);
+	return sp < STACK_WARN;
 }
 
 static void print_stack_overflow(void)
@@ -59,8 +59,8 @@ static inline void print_stack_overflow(
  * per-CPU IRQ handling contexts (thread information and stack)
  */
 union irq_ctx {
-	struct thread_info      tinfo;
-	u32                     stack[THREAD_SIZE/sizeof(u32)];
+	unsigned long		previous_esp;
+	u32			stack[THREAD_SIZE/sizeof(u32)];
 } __attribute__((aligned(THREAD_SIZE)));
 
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
@@ -80,10 +80,9 @@ static void call_on_stack(void *func, vo
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-	union irq_ctx *curctx, *irqctx;
+	union irq_ctx *irqctx;
 	u32 *isp, arg1, arg2;
 
-	curctx = (union irq_ctx *) current_thread_info();
 	irqctx = __this_cpu_read(hardirq_ctx);
 
 	/*
@@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struc
 	 * handler) we can't do that and just have to keep using the
 	 * current stack (which is the irq stack already after all)
 	 */
-	if (unlikely(curctx == irqctx))
+	if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
 		return 0;
 
 	/* build the stack frame on the IRQ stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-	irqctx->tinfo.task = curctx->tinfo.task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+	irqctx->previous_esp = current_stack_pointer;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(MAKE_MM_SEG(0));
+#endif
 
 	if (unlikely(overflow))
 		call_on_stack(print_stack_overflow, isp);
@@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struc
 		     :  "0" (irq),   "1" (desc),  "2" (isp),
 			"D" (desc->handle_irq)
 		     : "memory", "cc", "ecx");
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(current_thread_info()->addr_limit);
+#endif
+
 	return 1;
 }
 
@@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struc
  */
 void irq_ctx_init(int cpu)
 {
-	union irq_ctx *irqctx;
-
 	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
-
-	per_cpu(hardirq_ctx, cpu) = irqctx;
-
-	irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
-					       THREADINFO_GFP,
-					       THREAD_SIZE_ORDER));
-	memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-	irqctx->tinfo.cpu		= cpu;
-	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);
-
-	per_cpu(softirq_ctx, cpu) = irqctx;
-
-	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+	per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
+	per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
 }
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	curctx = current_thread_info();
 	irqctx = __this_cpu_read(softirq_ctx);
-	irqctx->tinfo.task = curctx->task;
-	irqctx->tinfo.previous_esp = current_stack_pointer;
+	irqctx->previous_esp = current_stack_pointer;
 
 	/* build the stack frame on the softirq stack */
-	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(MAKE_MM_SEG(0));
+#endif
 
 	call_on_stack(__do_softirq, isp);
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(current_thread_info()->addr_limit);
+#endif
+
 }
 
 bool handle_irq(unsigned irq, struct pt_regs *regs)
@@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_
 	if (unlikely(!desc))
 		return false;
 
-	if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
+	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
 		desc->handle_irq(irq, desc);
diff -ruNp linux-3.13.11/arch/x86/kernel/irq_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq_64.c
--- linux-3.13.11/arch/x86/kernel/irq_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/irq_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,7 +44,7 @@ static inline void stack_overflow_check(
 	u64 estack_top, estack_bottom;
 	u64 curbase = (u64)task_stack_page(current);
 
-	if (user_mode_vm(regs))
+	if (user_mode(regs))
 		return;
 
 	if (regs->sp >= curbase + sizeof(struct thread_info) +
diff -ruNp linux-3.13.11/arch/x86/kernel/jump_label.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/jump_label.c
--- linux-3.13.11/arch/x86/kernel/jump_label.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/jump_label.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ static void __jump_label_transform(struc
 			 * Jump label is enabled for the first time.
 			 * So we expect a default_nop...
 			 */
-			if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+			if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
 				     != 0))
 				bug_at((void *)entry->code, __LINE__);
 		} else {
@@ -59,7 +59,7 @@ static void __jump_label_transform(struc
 			 * ...otherwise expect an ideal_nop. Otherwise
 			 * something went horribly wrong.
 			 */
-			if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+			if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
 				     != 0))
 				bug_at((void *)entry->code, __LINE__);
 		}
@@ -75,13 +75,13 @@ static void __jump_label_transform(struc
 		 * are converting the default nop to the ideal nop.
 		 */
 		if (init) {
-			if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
+			if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
 				bug_at((void *)entry->code, __LINE__);
 		} else {
 			code.jump = 0xe9;
 			code.offset = entry->target -
 				(entry->code + JUMP_LABEL_NOP_SIZE);
-			if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
+			if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
 				bug_at((void *)entry->code, __LINE__);
 		}
 		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
diff -ruNp linux-3.13.11/arch/x86/kernel/kgdb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kgdb.c
--- linux-3.13.11/arch/x86/kernel/kgdb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kgdb.c	2014-07-09 12:00:15.000000000 +0200
@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem,
 #ifdef CONFIG_X86_32
 	switch (regno) {
 	case GDB_SS:
-		if (!user_mode_vm(regs))
+		if (!user_mode(regs))
 			*(unsigned long *)mem = __KERNEL_DS;
 		break;
 	case GDB_SP:
-		if (!user_mode_vm(regs))
+		if (!user_mode(regs))
 			*(unsigned long *)mem = kernel_stack_pointer(regs);
 		break;
 	case GDB_GS:
@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
 		bp->attr.bp_addr = breakinfo[breakno].addr;
 		bp->attr.bp_len = breakinfo[breakno].len;
 		bp->attr.bp_type = breakinfo[breakno].type;
-		info->address = breakinfo[breakno].addr;
+		if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
+			info->address = ktla_ktva(breakinfo[breakno].addr);
+		else
+			info->address = breakinfo[breakno].addr;
 		info->len = breakinfo[breakno].len;
 		info->type = breakinfo[breakno].type;
 		val = arch_install_hw_breakpoint(bp);
@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vec
 	case 'k':
 		/* clear the trace bit */
 		linux_regs->flags &= ~X86_EFLAGS_TF;
-		atomic_set(&kgdb_cpu_doing_single_step, -1);
+		atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
 
 		/* set the trace bit if we're stepping */
 		if (remcomInBuffer[0] == 's') {
 			linux_regs->flags |= X86_EFLAGS_TF;
-			atomic_set(&kgdb_cpu_doing_single_step,
+			atomic_set_unchecked(&kgdb_cpu_doing_single_step,
 				   raw_smp_processor_id());
 		}
 
@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args
 
 	switch (cmd) {
 	case DIE_DEBUG:
-		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+		if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
 			if (user_mode(regs))
 				return single_step_cont(regs, args);
 			break;
@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb
 #endif /* CONFIG_DEBUG_RODATA */
 
 	bpt->type = BP_BREAKPOINT;
-	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+	err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
 				BREAK_INSTR_SIZE);
 	if (err)
 		return err;
-	err = probe_kernel_write((char *)bpt->bpt_addr,
+	err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
 				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
 #ifdef CONFIG_DEBUG_RODATA
 	if (!err)
@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb
 		return -EBUSY;
 	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
 		  BREAK_INSTR_SIZE);
-	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
 	if (err)
 		return err;
 	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct k
 	if (mutex_is_locked(&text_mutex))
 		goto knl_write;
 	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
-	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
 	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
 		goto knl_write;
 	return err;
 knl_write:
 #endif /* CONFIG_DEBUG_RODATA */
-	return probe_kernel_write((char *)bpt->bpt_addr,
+	return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
 				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
diff -ruNp linux-3.13.11/arch/x86/kernel/kprobes/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kprobes/core.c
--- linux-3.13.11/arch/x86/kernel/kprobes/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kprobes/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relat
 		s32 raddr;
 	} __packed *insn;
 
-	insn = (struct __arch_relative_insn *)from;
+	insn = (struct __arch_relative_insn *)ktla_ktva(from);
+
+	pax_open_kernel();
 	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
 	insn->op = op;
+	pax_close_kernel();
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
 
-	if (search_exception_tables((unsigned long)opcodes))
+	if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
 		return 0;	/* Page fault may occur on this address. */
 
 retry:
@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *b
 	 *  for the first byte, we can recover the original instruction
 	 *  from it and kp->opcode.
 	 */
-	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	buf[0] = kp->opcode;
-	return (unsigned long)buf;
+	return ktva_ktla((unsigned long)buf);
 }
 
 /*
@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *des
 	/* Another subsystem puts a breakpoint, failed to recover */
 	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
+	pax_open_kernel();
 	memcpy(dest, insn.kaddr, insn.length);
+	pax_close_kernel();
 
 #ifdef CONFIG_X86_64
 	if (insn_rip_relative(&insn)) {
@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *des
 			return 0;
 		}
 		disp = (u8 *) dest + insn_offset_displacement(&insn);
+		pax_open_kernel();
 		*(s32 *) disp = (s32) newdisp;
+		pax_close_kernel();
 	}
 #endif
 	return insn.length;
@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struc
 		 * nor set current_kprobe, because it doesn't use single
 		 * stepping.
 		 */
-		regs->ip = (unsigned long)p->ainsn.insn;
+		regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
 		preempt_enable_no_resched();
 		return;
 	}
@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struc
 	regs->flags &= ~X86_EFLAGS_IF;
 	/* single step inline if the instruction is an int3 */
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
-		regs->ip = (unsigned long)p->addr;
+		regs->ip = ktla_ktva((unsigned long)p->addr);
 	else
-		regs->ip = (unsigned long)p->ainsn.insn;
+		regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
 }
 
 /*
@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(stru
 				setup_singlestep(p, regs, kcb, 0);
 			return 1;
 		}
-	} else if (*addr != BREAKPOINT_INSTRUCTION) {
+	} else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
 		/*
 		 * The breakpoint instruction was removed right
 		 * after we hit it.  Another cpu has removed
@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_t
 			"	movq %rax, 152(%rsp)\n"
 			RESTORE_REGS_STRING
 			"	popfq\n"
+#ifdef KERNEXEC_PLUGIN
+			"	btsq $63,(%rsp)\n"
+#endif
 #else
 			"	pushf\n"
 			SAVE_REGS_STRING
@@ -779,7 +789,7 @@ static void __kprobes
 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
 	unsigned long *tos = stack_addr(regs);
-	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+	unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
 	unsigned long orig_ip = (unsigned long)p->addr;
 	kprobe_opcode_t *insn = p->ainsn.insn;
 
@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier
 	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
 
-	if (args->regs && user_mode_vm(args->regs))
+	if (args->regs && user_mode(args->regs))
 		return ret;
 
 	switch (val) {
diff -ruNp linux-3.13.11/arch/x86/kernel/kprobes/opt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kprobes/opt.c
--- linux-3.13.11/arch/x86/kernel/kprobes/opt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/kprobes/opt.c	2014-07-09 12:00:15.000000000 +0200
@@ -79,6 +79,7 @@ found:
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
+	pax_open_kernel();
 #ifdef CONFIG_X86_64
 	*addr++ = 0x48;
 	*addr++ = 0xbf;
@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg
 	*addr++ = 0xb8;
 #endif
 	*(unsigned long *)addr = val;
+	pax_close_kernel();
 }
 
 asm (
@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kpr
 	 * Verify if the address gap is in 2GB range, because this uses
 	 * a relative jump.
 	 */
-	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+	rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
 	if (abs(rel) > 0x7fffffff)
 		return -ERANGE;
 
@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kpr
 	op->optinsn.size = ret;
 
 	/* Copy arch-dep-instance from template */
-	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+	pax_open_kernel();
+	memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
+	pax_close_kernel();
 
 	/* Set probe information */
 	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
 
 	/* Set probe function call */
-	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+	synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
 
 	/* Set returning jmp instruction at the tail of out-of-line buffer */
-	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+	synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
 			   (u8 *)op->kp.addr + op->optinsn.size);
 
 	flush_icache_range((unsigned long) buf,
@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(str
 		WARN_ON(kprobe_disabled(&op->kp));
 
 		/* Backup instructions which will be replaced by jump address */
-		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+		memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
 		       RELATIVE_ADDR_SIZE);
 
 		insn_buf[0] = RELATIVEJUMP_OPCODE;
@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p,
 		/* This kprobe is really able to run optimized path. */
 		op = container_of(p, struct optimized_kprobe, kp);
 		/* Detour through copied instructions */
-		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+		regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
 		if (!reenter)
 			reset_current_kprobe();
 		preempt_enable_no_resched();
diff -ruNp linux-3.13.11/arch/x86/kernel/ldt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ldt.c
--- linux-3.13.11/arch/x86/kernel/ldt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ldt.c	2014-07-09 12:00:15.000000000 +0200
@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
 	if (reload) {
 #ifdef CONFIG_SMP
 		preempt_disable();
-		load_LDT(pc);
+		load_LDT_nolock(pc);
 		if (!cpumask_equal(mm_cpumask(current->mm),
 				   cpumask_of(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else
-		load_LDT(pc);
+		load_LDT_nolock(pc);
 #endif
 	}
 	if (oldsize) {
@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
 		return err;
 
 	for (i = 0; i < old->size; i++)
-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+		write_ldt_entry(new->ldt, i, old->ldt + i);
 	return 0;
 }
 
@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
 		retval = copy_ldt(&mm->context, &old_mm->context);
 		mutex_unlock(&old_mm->context.lock);
 	}
+
+	if (tsk == current) {
+		mm->context.vdso = 0;
+
+#ifdef CONFIG_X86_32
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+		mm->context.user_cs_base = 0UL;
+		mm->context.user_cs_limit = ~0UL;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+		cpus_clear(mm->context.cpu_user_cs_mask);
+#endif
+
+#endif
+#endif
+
+	}
+
 	return retval;
 }
 
@@ -229,6 +247,24 @@ static int write_ldt(void __user *ptr, u
 		}
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+#endif
+
+	/*
+	 * On x86-64 we do not support 16-bit segments due to
+	 * IRET leaking the high bits of the kernel stack address.
+	 */
+#ifdef CONFIG_X86_64
+	if (!ldt_info.seg_32bit) {
+		error = -EINVAL;
+		goto out_unlock;
+	}
+#endif
+
 	fill_ldt(&ldt, &ldt_info);
 	if (oldmode)
 		ldt.avl = 0;
diff -ruNp linux-3.13.11/arch/x86/kernel/machine_kexec_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/machine_kexec_32.c
--- linux-3.13.11/arch/x86/kernel/machine_kexec_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/machine_kexec_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,7 +26,7 @@
 #include <asm/cacheflush.h>
 #include <asm/debugreg.h>
 
-static void set_idt(void *newidt, __u16 limit)
+static void set_idt(struct desc_struct *newidt, __u16 limit)
 {
 	struct desc_ptr curidt;
 
@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
 }
 
 
-static void set_gdt(void *newgdt, __u16 limit)
+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
 {
 	struct desc_ptr curgdt;
 
@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
 	}
 
 	control_page = page_address(image->control_code_page);
-	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
+	memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
 
 	relocate_kernel_ptr = control_page;
 	page_list[PA_CONTROL_PAGE] = __pa(control_page);
diff -ruNp linux-3.13.11/arch/x86/kernel/microcode_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/microcode_core.c
--- linux-3.13.11/arch/x86/kernel/microcode_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/microcode_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *n
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata mc_cpu_notifier = {
+static struct notifier_block mc_cpu_notifier = {
 	.notifier_call	= mc_cpu_callback,
 };
 
diff -ruNp linux-3.13.11/arch/x86/kernel/microcode_intel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/microcode_intel.c
--- linux-3.13.11/arch/x86/kernel/microcode_intel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/microcode_intel.c	2014-07-09 12:00:15.000000000 +0200
@@ -293,13 +293,13 @@ static enum ucode_state request_microcod
 
 static int get_ucode_user(void *to, const void *from, size_t n)
 {
-	return copy_from_user(to, from, n);
+	return copy_from_user(to, (const void __force_user *)from, n);
 }
 
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
-	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+	return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
 }
 
 static void microcode_fini_cpu(int cpu)
diff -ruNp linux-3.13.11/arch/x86/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/module.c
--- linux-3.13.11/arch/x86/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,15 +43,60 @@ do {							\
 } while (0)
 #endif
 
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
 {
-	if (PAGE_ALIGN(size) > MODULES_LEN)
+	if (!size || PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
 				NUMA_NO_NODE, __builtin_return_address(0));
 }
 
+void *module_alloc(unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+	return __module_alloc(size, PAGE_KERNEL);
+#else
+	return __module_alloc(size, PAGE_KERNEL_EXEC);
+#endif
+
+}
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+void *module_alloc_exec(unsigned long size)
+{
+	struct vm_struct *area;
+
+	if (size == 0)
+		return NULL;
+
+	area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
+	return area ? area->addr : NULL;
+}
+EXPORT_SYMBOL(module_alloc_exec);
+
+void module_free_exec(struct module *mod, void *module_region)
+{
+	vunmap(module_region);
+}
+EXPORT_SYMBOL(module_free_exec);
+#else
+void module_free_exec(struct module *mod, void *module_region)
+{
+	module_free(mod, module_region);
+}
+EXPORT_SYMBOL(module_free_exec);
+
+void *module_alloc_exec(unsigned long size)
+{
+	return __module_alloc(size, PAGE_KERNEL_RX);
+}
+EXPORT_SYMBOL(module_alloc_exec);
+#endif
+#endif
+
 #ifdef CONFIG_X86_32
 int apply_relocate(Elf32_Shdr *sechdrs,
 		   const char *strtab,
@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 	unsigned int i;
 	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
 	Elf32_Sym *sym;
-	uint32_t *location;
+	uint32_t *plocation, location;
 
 	DEBUGP("Applying relocate section %u to %u\n",
 	       relsec, sechdrs[relsec].sh_info);
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* This is where to make the change */
-		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-			+ rel[i].r_offset;
+		plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+		location = (uint32_t)plocation;
+		if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
+			plocation = ktla_ktva((void *)plocation);
 		/* This is the symbol it is referring to.  Note that all
 		   undefined symbols have been resolved.  */
 		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_386_32:
 			/* We add the value into the location given */
-			*location += sym->st_value;
+			pax_open_kernel();
+			*plocation += sym->st_value;
+			pax_close_kernel();
 			break;
 		case R_386_PC32:
 			/* Add the value, subtract its position */
-			*location += sym->st_value - (uint32_t)location;
+			pax_open_kernel();
+			*plocation += sym->st_value - location;
+			pax_close_kernel();
 			break;
 		default:
 			pr_err("%s: Unknown relocation: %u\n",
@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
 		case R_X86_64_NONE:
 			break;
 		case R_X86_64_64:
+			pax_open_kernel();
 			*(u64 *)loc = val;
+			pax_close_kernel();
 			break;
 		case R_X86_64_32:
+			pax_open_kernel();
 			*(u32 *)loc = val;
+			pax_close_kernel();
 			if (val != *(u32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_32S:
+			pax_open_kernel();
 			*(s32 *)loc = val;
+			pax_close_kernel();
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
 			val -= (u64)loc;
+			pax_open_kernel();
 			*(u32 *)loc = val;
+			pax_close_kernel();
+
 #if 0
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
diff -ruNp linux-3.13.11/arch/x86/kernel/msr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/msr.c
--- linux-3.13.11/arch/x86/kernel/msr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/msr.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,7 @@
 #include <linux/notifier.h>
 #include <linux/uaccess.h>
 #include <linux/gfp.h>
+#include <linux/grsecurity.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *fi
 	int err = 0;
 	ssize_t bytes = 0;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	gr_handle_msr_write();
+	return -EPERM;
+#endif
+
 	if (count % 8)
 		return -EINVAL;	/* Invalid chunk size */
 
@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file,
 			err = -EBADF;
 			break;
 		}
+#ifdef CONFIG_GRKERNSEC_KMEM
+		gr_handle_msr_write();
+		return -EPERM;
+#endif
 		if (copy_from_user(&regs, uregs, sizeof regs)) {
 			err = -EFAULT;
 			break;
@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata msr_class_cpu_notifier = {
+static struct notifier_block msr_class_cpu_notifier = {
 	.notifier_call = msr_class_cpu_callback,
 };
 
diff -ruNp linux-3.13.11/arch/x86/kernel/nmi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/nmi.c
--- linux-3.13.11/arch/x86/kernel/nmi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/nmi.c	2014-07-09 12:00:15.000000000 +0200
@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned
 	return handled;
 }
 
-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
 {
 	struct nmi_desc *desc = nmi_to_desc(type);
 	unsigned long flags;
@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int
 	 * event confuses some handlers (kdump uses this flag)
 	 */
 	if (action->flags & NMI_FLAG_FIRST)
-		list_add_rcu(&action->list, &desc->head);
+		pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
 	else
-		list_add_tail_rcu(&action->list, &desc->head);
+		pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
 	
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int
 		if (!strcmp(n->name, name)) {
 			WARN(in_nmi(),
 				"Trying to free NMI (%s) from NMI context!\n", n->name);
-			list_del_rcu(&n->list);
+			pax_list_del_rcu((struct list_head *)&n->list);
 			break;
 		}
 	}
@@ -512,6 +512,17 @@ static inline void nmi_nesting_postproce
 dotraplinkage notrace __kprobes void
 do_nmi(struct pt_regs *regs, long error_code)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	if (!user_mode(regs)) {
+		unsigned long cs = regs->cs & 0xFFFF;
+		unsigned long ip = ktva_ktla(regs->ip);
+
+		if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
+			regs->ip = ip;
+	}
+#endif
+
 	nmi_nesting_preprocess(regs);
 
 	nmi_enter();
diff -ruNp linux-3.13.11/arch/x86/kernel/nmi_selftest.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/nmi_selftest.c
--- linux-3.13.11/arch/x86/kernel/nmi_selftest.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/nmi_selftest.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(vo
 {
 	/* trap all the unknown NMIs we may generate */
 	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
-			__initdata);
+			__initconst);
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct c
 	unsigned long timeout;
 
 	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
-				 NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
+				 NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
 		nmi_fail = FAILURE;
 		return;
 	}
diff -ruNp linux-3.13.11/arch/x86/kernel/paravirt-spinlocks.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/paravirt-spinlocks.c
--- linux-3.13.11/arch/x86/kernel/paravirt-spinlocks.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/paravirt-spinlocks.c	2014-07-09 12:00:15.000000000 +0200
@@ -8,7 +8,7 @@
 
 #include <asm/paravirt.h>
 
-struct pv_lock_ops pv_lock_ops = {
+struct pv_lock_ops pv_lock_ops __read_only = {
 #ifdef CONFIG_SMP
 	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
 	.unlock_kick = paravirt_nop,
diff -ruNp linux-3.13.11/arch/x86/kernel/paravirt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/paravirt.c
--- linux-3.13.11/arch/x86/kernel/paravirt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/paravirt.c	2014-07-09 12:00:15.000000000 +0200
@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
 {
 	return x;
 }
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
+#endif
 
 void __init default_banner(void)
 {
@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type,
 	if (opfunc == NULL)
 		/* If there's no function, patch it with a ud2a (BUG) */
 		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-	else if (opfunc == _paravirt_nop)
+	else if (opfunc == (void *)_paravirt_nop)
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
 
 	/* identity functions just return their single argument */
-	else if (opfunc == _paravirt_ident_32)
+	else if (opfunc == (void *)_paravirt_ident_32)
 		ret = paravirt_patch_ident_32(insnbuf, len);
-	else if (opfunc == _paravirt_ident_64)
+	else if (opfunc == (void *)_paravirt_ident_64)
+		ret = paravirt_patch_ident_64(insnbuf, len);
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+	else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);
+#endif
 
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insn
 	if (insn_len > len || start == NULL)
 		insn_len = len;
 	else
-		memcpy(insnbuf, start, insn_len);
+		memcpy(insnbuf, ktla_ktva(start), insn_len);
 
 	return insn_len;
 }
@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_laz
 	return this_cpu_read(paravirt_lazy_mode);
 }
 
-struct pv_info pv_info = {
+struct pv_info pv_info __read_only = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
 	.kernel_rpl = 0,
@@ -310,16 +317,16 @@ struct pv_info pv_info = {
 #endif
 };
 
-struct pv_init_ops pv_init_ops = {
+struct pv_init_ops pv_init_ops __read_only = {
 	.patch = native_patch,
 };
 
-struct pv_time_ops pv_time_ops = {
+struct pv_time_ops pv_time_ops __read_only = {
 	.sched_clock = native_sched_clock,
 	.steal_clock = native_steal_clock,
 };
 
-__visible struct pv_irq_ops pv_irq_ops = {
+__visible struct pv_irq_ops pv_irq_ops __read_only = {
 	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
 	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
 	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops =
 #endif
 };
 
-__visible struct pv_cpu_ops pv_cpu_ops = {
+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
 	.cpuid = native_cpuid,
 	.get_debugreg = native_get_debugreg,
 	.set_debugreg = native_set_debugreg,
@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops =
 	.end_context_switch = paravirt_nop,
 };
 
-struct pv_apic_ops pv_apic_ops = {
+struct pv_apic_ops pv_apic_ops __read_only= {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.startup_ipi_hook = paravirt_nop,
 #endif
 };
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_PAE
+/* 64-bit pagetable entries */
+#define PTE_IDENT	PV_CALLEE_SAVE(_paravirt_ident_64)
+#else
 /* 32-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#endif
 #else
 /* 64-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
 #endif
 
-struct pv_mmu_ops pv_mmu_ops = {
+struct pv_mmu_ops pv_mmu_ops __read_only = {
 
 	.read_cr2 = native_read_cr2,
 	.write_cr2 = native_write_cr2,
@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.make_pud = PTE_IDENT,
 
 	.set_pgd = native_set_pgd,
+	.set_pgd_batched = native_set_pgd_batched,
 #endif
 #endif /* PAGETABLE_LEVELS >= 3 */
 
@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
 	},
 
 	.set_fixmap = native_set_fixmap,
+
+#ifdef CONFIG_PAX_KERNEXEC
+	.pax_open_kernel = native_pax_open_kernel,
+	.pax_close_kernel = native_pax_close_kernel,
+#endif
+
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
diff -ruNp linux-3.13.11/arch/x86/kernel/pci-calgary_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-calgary_64.c
--- linux-3.13.11/arch/x86/kernel/pci-calgary_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-calgary_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_ta
 			tce_space = be64_to_cpu(readq(target));
 			tce_space = tce_space & TAR_SW_BITS;
 
-			tce_space = tce_space & (~specified_table_size);
+			tce_space = tce_space & (~(unsigned long)specified_table_size);
 			info->tce_space = (u64 *)__va(tce_space);
 		}
 	}
diff -ruNp linux-3.13.11/arch/x86/kernel/pci-iommu_table.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-iommu_table.c
--- linux-3.13.11/arch/x86/kernel/pci-iommu_table.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-iommu_table.c	2014-07-09 12:00:15.000000000 +0200
@@ -2,7 +2,7 @@
 #include <asm/iommu_table.h>
 #include <linux/string.h>
 #include <linux/kallsyms.h>
-
+#include <linux/sched.h>
 
 #define DEBUG 1
 
diff -ruNp linux-3.13.11/arch/x86/kernel/pci-swiotlb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-swiotlb.c
--- linux-3.13.11/arch/x86/kernel/pci-swiotlb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pci-swiotlb.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(st
 				      void *vaddr, dma_addr_t dma_addr,
 				      struct dma_attrs *attrs)
 {
-	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
 }
 
 static struct dma_map_ops swiotlb_dma_ops = {
diff -ruNp linux-3.13.11/arch/x86/kernel/preempt.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/preempt.S
--- linux-3.13.11/arch/x86/kernel/preempt.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/preempt.S	2014-07-09 12:00:15.000000000 +0200
@@ -3,12 +3,14 @@
 #include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/calling.h>
+#include <asm/alternative-asm.h>
 
 ENTRY(___preempt_schedule)
 	CFI_STARTPROC
 	SAVE_ALL
 	call preempt_schedule
 	RESTORE_ALL
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 
@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
 	SAVE_ALL
 	call preempt_schedule_context
 	RESTORE_ALL
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 
diff -ruNp linux-3.13.11/arch/x86/kernel/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process.c
--- linux-3.13.11/arch/x86/kernel/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,8 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
+EXPORT_SYMBOL(init_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
         task_xstate_cachep =
         	kmem_cache_create("task_xstate", xstate_size,
 				  __alignof__(union thread_xstate),
-				  SLAB_PANIC | SLAB_NOTRACK, NULL);
+				  SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
 }
 
 /*
@@ -105,7 +106,7 @@ void exit_thread(void)
 	unsigned long *bp = t->io_bitmap_ptr;
 
 	if (bp) {
-		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+		struct tss_struct *tss = init_tss + get_cpu();
 
 		t->io_bitmap_ptr = NULL;
 		clear_thread_flag(TIF_IO_BITMAP);
@@ -125,6 +126,9 @@ void flush_thread(void)
 {
 	struct task_struct *tsk = current;
 
+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+	loadsegment(gs, 0);
+#endif
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 	drop_init_fpu(tsk);
@@ -271,7 +275,7 @@ static void __exit_idle(void)
 void exit_idle(void)
 {
 	/* idle loop has pid 0 */
-	if (current->pid)
+	if (task_pid_nr(current))
 		return;
 	__exit_idle();
 }
@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
 	return ret;
 }
 #endif
-void stop_this_cpu(void *dummy)
+__noreturn void stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
 	/*
@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
 }
 early_param("idle", idle_setup);
 
-unsigned long arch_align_stack(unsigned long sp)
+#ifdef CONFIG_PAX_RANDKSTACK
+void pax_randomize_kstack(struct pt_regs *regs)
 {
-	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-		sp -= get_random_int() % 8192;
-	return sp & ~0xf;
-}
+	struct thread_struct *thread = &current->thread;
+	unsigned long time;
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
-}
+	if (!randomize_va_space)
+		return;
+
+	if (v8086_mode(regs))
+		return;
+
+	rdtscl(time);
 
+	/* P4 seems to return a 0 LSB, ignore it */
+#ifdef CONFIG_MPENTIUM4
+	time &= 0x3EUL;
+	time <<= 2;
+#elif defined(CONFIG_X86_64)
+	time &= 0xFUL;
+	time <<= 4;
+#else
+	time &= 0x1FUL;
+	time <<= 3;
+#endif
+
+	thread->sp0 ^= time;
+	load_sp0(init_tss + smp_processor_id(), thread);
+
+#ifdef CONFIG_X86_64
+	this_cpu_write(kernel_stack, thread->sp0);
+#endif
+}
+#endif
diff -ruNp linux-3.13.11/arch/x86/kernel/process_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process_32.c
--- linux-3.13.11/arch/x86/kernel/process_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(v
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
 	return ((unsigned long *)tsk->thread.sp)[3];
+//XXX	return tsk->thread.eip;
 }
 
 void __show_regs(struct pt_regs *regs, int all)
@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, i
 	unsigned long sp;
 	unsigned short ss, gs;
 
-	if (user_mode_vm(regs)) {
+	if (user_mode(regs)) {
 		sp = regs->sp;
 		ss = regs->ss & 0xffff;
-		gs = get_user_gs(regs);
 	} else {
 		sp = kernel_stack_pointer(regs);
 		savesegment(ss, ss);
-		savesegment(gs, gs);
 	}
+	gs = get_user_gs(regs);
 
 	printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
 			(u16)regs->cs, regs->ip, regs->flags,
-			smp_processor_id());
+			raw_smp_processor_id());
 	print_symbol("EIP is at %s\n", regs->ip);
 
 	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
@@ -133,20 +133,21 @@ void release_thread(struct task_struct *
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 	unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs = task_pt_regs(p);
+	struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
 	struct task_struct *tsk;
 	int err;
 
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.sp0 = (unsigned long) (childregs+1);
+	p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
 		memset(childregs, 0, sizeof(struct pt_regs));
 		p->thread.ip = (unsigned long) ret_from_kernel_thread;
-		task_user_gs(p) = __KERNEL_STACK_CANARY;
-		childregs->ds = __USER_DS;
-		childregs->es = __USER_DS;
+		savesegment(gs, childregs->gs);
+		childregs->ds = __KERNEL_DS;
+		childregs->es = __KERNEL_DS;
 		childregs->fs = __KERNEL_PERCPU;
 		childregs->bx = sp;	/* function */
 		childregs->bp = arg;
@@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p,
 	struct thread_struct *prev = &prev_p->thread,
 				 *next = &next_p->thread;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	struct tss_struct *tss = init_tss + cpu;
 	fpu_switch_t fpu;
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
@@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p,
 	 */
 	lazy_save_gs(prev->gs);
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(task_thread_info(next_p)->addr_limit);
+#endif
+
 	/*
 	 * Load the per-thread Thread-Local Storage descriptor.
 	 */
@@ -315,6 +320,9 @@ __switch_to(struct task_struct *prev_p,
 	 */
 	arch_end_context_switch(next_p);
 
+	this_cpu_write(current_task, next_p);
+	this_cpu_write(current_tinfo, &next_p->tinfo);
+
 	/*
 	 * Restore %gs if needed (which is common)
 	 */
@@ -323,8 +331,6 @@ __switch_to(struct task_struct *prev_p,
 
 	switch_fpu_finish(next_p, fpu);
 
-	this_cpu_write(current_task, next_p);
-
 	return prev_p;
 }
 
@@ -354,4 +360,3 @@ unsigned long get_wchan(struct task_stru
 	} while (count++ < 16);
 	return 0;
 }
-
diff -ruNp linux-3.13.11/arch/x86/kernel/process_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process_64.c
--- linux-3.13.11/arch/x86/kernel/process_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/process_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flag
 	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
-	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
 	childregs = task_pt_regs(p);
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.usersp = me->thread.usersp;
+	p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
 	set_tsk_thread_flag(p, TIF_FORK);
 	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flag
 	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
+	savesegment(ss, p->thread.ss);
+	BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	if (unlikely(p->flags & PF_KTHREAD)) {
@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p,
 	struct thread_struct *prev = &prev_p->thread;
 	struct thread_struct *next = &next_p->thread;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	struct tss_struct *tss = init_tss + cpu;
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
 
@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p,
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+	savesegment(ss, prev->ss);
+	if (unlikely(next->ss != prev->ss))
+		loadsegment(ss, next->ss);
 
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p,
 	prev->usersp = this_cpu_read(old_rsp);
 	this_cpu_write(old_rsp, next->usersp);
 	this_cpu_write(current_task, next_p);
+	this_cpu_write(current_tinfo, &next_p->tinfo);
 
 	/*
 	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p,
 	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
 	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
 
-	this_cpu_write(kernel_stack,
-		  (unsigned long)task_stack_page(next_p) +
-		  THREAD_SIZE - KERNEL_STACK_OFFSET);
+	this_cpu_write(kernel_stack, next->sp0);
 
 	/*
 	 * Now maybe reload the debug registers and handle I/O bitmaps
@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_stru
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
 		return 0;
 	fp = *(u64 *)(p->thread.sp);
 	do {
-		if (fp < (unsigned long)stack ||
-		    fp >= (unsigned long)stack+THREAD_SIZE)
+		if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
 			return 0;
 		ip = *(u64 *)(fp+8);
 		if (!in_sched_functions(ip))
diff -ruNp linux-3.13.11/arch/x86/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ptrace.c
--- linux-3.13.11/arch/x86/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struc
 {
 	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
 	unsigned long sp = (unsigned long)&regs->sp;
-	struct thread_info *tinfo;
 
-	if (context == (sp & ~(THREAD_SIZE - 1)))
+	if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
 		return sp;
 
-	tinfo = (struct thread_info *)context;
-	if (tinfo->previous_esp)
-		return tinfo->previous_esp;
+	sp = *(unsigned long *)context;
+	if (sp)
+		return sp;
 
 	return (unsigned long)regs;
 }
@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf
 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
 {
 	int i;
-	int dr7 = 0;
+	unsigned long dr7 = 0;
 	struct arch_hw_breakpoint *info;
 
 	for (i = 0; i < HBP_NUM; i++) {
@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
 		 unsigned long addr, unsigned long data)
 {
 	int ret;
-	unsigned long __user *datap = (unsigned long __user *)data;
+	unsigned long __user *datap = (__force unsigned long __user *)data;
 
 	switch (request) {
 	/* read the word at location addr in the USER area. */
@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
 		if ((int) addr < 0)
 			return -EIO;
 		ret = do_get_thread_area(child, addr,
-					(struct user_desc __user *)data);
+					(__force struct user_desc __user *) data);
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
 		if ((int) addr < 0)
 			return -EIO;
 		ret = do_set_thread_area(child, addr,
-					(struct user_desc __user *)data, 0);
+					(__force struct user_desc __user *) data, 0);
 		break;
 #endif
 
@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_stru
 
 #ifdef CONFIG_X86_64
 
-static struct user_regset x86_64_regsets[] __read_mostly = {
+static user_regset_no_const x86_64_regsets[] __read_only = {
 	[REGSET_GENERAL] = {
 		.core_note_type = NT_PRSTATUS,
 		.n = sizeof(struct user_regs_struct) / sizeof(long),
@@ -1333,7 +1332,7 @@ static const struct user_regset_view use
 #endif	/* CONFIG_X86_64 */
 
 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-static struct user_regset x86_32_regsets[] __read_mostly = {
+static user_regset_no_const x86_32_regsets[] __read_only = {
 	[REGSET_GENERAL] = {
 		.core_note_type = NT_PRSTATUS,
 		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
@@ -1386,7 +1385,7 @@ static const struct user_regset_view use
  */
 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
 
-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
 {
 #ifdef CONFIG_X86_64
 	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct tas
 	memset(info, 0, sizeof(*info));
 	info->si_signo = SIGTRAP;
 	info->si_code = si_code;
-	info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+	info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
 }
 
 void user_single_step_siginfo(struct task_struct *tsk,
@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *ts
 # define IS_IA32	0
 #endif
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs
 
 	user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	/*
 	 * If we stepped into a sysenter/syscall insn, it trapped in
 	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs
 	 */
 	user_exit();
 
+#ifdef CONFIG_GRKERNSEC_SETXID
+	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+		gr_delayed_cred_worker();
+#endif
+
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff -ruNp linux-3.13.11/arch/x86/kernel/pvclock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pvclock.c
--- linux-3.13.11/arch/x86/kernel/pvclock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/pvclock.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
 	reset_hung_task_detector();
 }
 
-static atomic64_t last_value = ATOMIC64_INIT(0);
+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
 
 void pvclock_resume(void)
 {
-	atomic64_set(&last_value, 0);
+	atomic64_set_unchecked(&last_value, 0);
 }
 
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct
 	 * updating at the same time, and one of them could be slightly behind,
 	 * making the assumption that last_value always go forward fail to hold.
 	 */
-	last = atomic64_read(&last_value);
+	last = atomic64_read_unchecked(&last_value);
 	do {
 		if (ret < last)
 			return last;
-		last = atomic64_cmpxchg(&last_value, last, ret);
+		last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
 	} while (unlikely(last != ret));
 
 	return ret;
diff -ruNp linux-3.13.11/arch/x86/kernel/reboot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/reboot.c
--- linux-3.13.11/arch/x86/kernel/reboot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/reboot.c	2014-07-09 12:00:15.000000000 +0200
@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const
 
 void __noreturn machine_real_restart(unsigned int type)
 {
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
+	struct desc_struct *gdt;
+#endif
+
 	local_irq_disable();
 
 	/*
@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(uns
 
 	/* Jump to the identity-mapped low memory code */
 #ifdef CONFIG_X86_32
-	asm volatile("jmpl *%0" : :
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	gdt = get_cpu_gdt_table(smp_processor_id());
+	pax_open_kernel();
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	gdt[GDT_ENTRY_KERNEL_DS].type = 3;
+	gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
+	loadsegment(ds, __KERNEL_DS);
+	loadsegment(es, __KERNEL_DS);
+	loadsegment(ss, __KERNEL_DS);
+#endif
+#ifdef CONFIG_PAX_KERNEXEC
+	gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
+	gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
+	gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
+	gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
+	gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
+	gdt[GDT_ENTRY_KERNEL_CS].g = 1;
+#endif
+	pax_close_kernel();
+#endif
+
+	asm volatile("ljmpl *%0" : :
 		     "rm" (real_mode_header->machine_real_restart_asm),
 		     "a" (type));
 #else
@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_f
  * try to force a triple fault and then cycle between hitting the keyboard
  * controller and doing that
  */
-static void native_machine_emergency_restart(void)
+static void __noreturn native_machine_emergency_restart(void)
 {
 	int i;
 	int attempt = 0;
@@ -593,13 +620,13 @@ void native_machine_shutdown(void)
 #endif
 }
 
-static void __machine_emergency_restart(int emergency)
+static void __noreturn __machine_emergency_restart(int emergency)
 {
 	reboot_emergency = emergency;
 	machine_ops.emergency_restart();
 }
 
-static void native_machine_restart(char *__unused)
+static void __noreturn native_machine_restart(char *__unused)
 {
 	pr_notice("machine restart\n");
 
@@ -608,7 +635,7 @@ static void native_machine_restart(char
 	__machine_emergency_restart(0);
 }
 
-static void native_machine_halt(void)
+static void __noreturn native_machine_halt(void)
 {
 	/* Stop other cpus and apics */
 	machine_shutdown();
@@ -618,7 +645,7 @@ static void native_machine_halt(void)
 	stop_this_cpu(NULL);
 }
 
-static void native_machine_power_off(void)
+static void __noreturn native_machine_power_off(void)
 {
 	if (pm_power_off) {
 		if (!reboot_force)
@@ -627,9 +654,10 @@ static void native_machine_power_off(voi
 	}
 	/* A fallback in case there is no PM info available */
 	tboot_shutdown(TB_SHUTDOWN_HALT);
+	unreachable();
 }
 
-struct machine_ops machine_ops = {
+struct machine_ops machine_ops __read_only = {
 	.power_off = native_machine_power_off,
 	.shutdown = native_machine_shutdown,
 	.emergency_restart = native_machine_emergency_restart,
diff -ruNp linux-3.13.11/arch/x86/kernel/reboot_fixups_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/reboot_fixups_32.c
--- linux-3.13.11/arch/x86/kernel/reboot_fixups_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/reboot_fixups_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -57,7 +57,7 @@ struct device_fixup {
 	unsigned int vendor;
 	unsigned int device;
 	void (*reboot_fixup)(struct pci_dev *);
-};
+} __do_const;
 
 /*
  * PCI ids solely used for fixups_table go here
diff -ruNp linux-3.13.11/arch/x86/kernel/relocate_kernel_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/relocate_kernel_64.S
--- linux-3.13.11/arch/x86/kernel/relocate_kernel_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/relocate_kernel_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -96,8 +96,7 @@ relocate_kernel:
 
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8
-	pushq	%r8
-	ret
+	jmp	*%r8
 
 identity_mapped:
 	/* set return address to 0 if not preserving context */
diff -ruNp linux-3.13.11/arch/x86/kernel/setup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/setup.c
--- linux-3.13.11/arch/x86/kernel/setup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/setup.c	2014-07-09 12:00:15.000000000 +0200
@@ -110,6 +110,7 @@
 #include <asm/mce.h>
 #include <asm/alternative.h>
 #include <asm/prom.h>
+#include <asm/boot.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
 #endif
 
 
-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-__visible unsigned long mmu_cr4_features;
+#ifdef CONFIG_X86_64
+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
+#elif defined(CONFIG_X86_PAE)
+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
 #else
-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features __read_only;
 #endif
 
+void set_in_cr4(unsigned long mask)
+{
+	unsigned long cr4 = read_cr4();
+
+	if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
+		return;
+
+	pax_open_kernel();
+	mmu_cr4_features |= mask;
+	pax_close_kernel();
+
+	if (trampoline_cr4_features)
+		*trampoline_cr4_features = mmu_cr4_features;
+	cr4 |= mask;
+	write_cr4(cr4);
+}
+EXPORT_SYMBOL(set_in_cr4);
+
+void clear_in_cr4(unsigned long mask)
+{
+	unsigned long cr4 = read_cr4();
+
+	if (!(cr4 & mask) && cr4 == mmu_cr4_features)
+		return;
+
+	pax_open_kernel();
+	mmu_cr4_features &= ~mask;
+	pax_close_kernel();
+
+	if (trampoline_cr4_features)
+		*trampoline_cr4_features = mmu_cr4_features;
+	cr4 &= ~mask;
+	write_cr4(cr4);
+}
+EXPORT_SYMBOL(clear_in_cr4);
+
 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
 int bootloader_type, bootloader_version;
 
@@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
 	 * area (640->1Mb) as ram even though it is not.
 	 * take them out.
 	 */
-	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+	e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
@@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
 /* called before trim_bios_range() to spare extra sanitize */
 static void __init e820_add_kernel_range(void)
 {
-	u64 start = __pa_symbol(_text);
+	u64 start = __pa_symbol(ktla_ktva(_text));
 	u64 size = __pa_symbol(_end) - start;
 
 	/*
@@ -838,8 +877,12 @@ static void __init trim_low_memory_range
 
 void __init setup_arch(char **cmdline_p)
 {
+#ifdef CONFIG_X86_32
+	memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
+#else
 	memblock_reserve(__pa_symbol(_text),
 			 (unsigned long)__bss_stop - (unsigned long)_text);
+#endif
 
 	early_reserve_initrd();
 
@@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
 
 	if (!boot_params.hdr.root_flags)
 		root_mountflags &= ~MS_RDONLY;
-	init_mm.start_code = (unsigned long) _text;
-	init_mm.end_code = (unsigned long) _etext;
+	init_mm.start_code = ktla_ktva((unsigned long) _text);
+	init_mm.end_code = ktla_ktva((unsigned long) _etext);
 	init_mm.end_data = (unsigned long) _edata;
 	init_mm.brk = _brk_end;
 
-	code_resource.start = __pa_symbol(_text);
-	code_resource.end = __pa_symbol(_etext)-1;
-	data_resource.start = __pa_symbol(_etext);
+	code_resource.start = __pa_symbol(ktla_ktva(_text));
+	code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
+	data_resource.start = __pa_symbol(_sdata);
 	data_resource.end = __pa_symbol(_edata)-1;
 	bss_resource.start = __pa_symbol(__bss_start);
 	bss_resource.end = __pa_symbol(__bss_stop)-1;
diff -ruNp linux-3.13.11/arch/x86/kernel/setup_percpu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/setup_percpu.c
--- linux-3.13.11/arch/x86/kernel/setup_percpu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/setup_percpu.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,19 +21,17 @@
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
 
-#ifdef CONFIG_X86_64
 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
-#else
-#define BOOT_PERCPU_OFFSET 0
-#endif
 
 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
 	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
 };
 EXPORT_SYMBOL(__per_cpu_offset);
@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
 {
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 	pg_data_t *last = NULL;
-	unsigned int cpu;
+	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		int node = early_cpu_to_node(cpu);
@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
 {
 #ifdef CONFIG_X86_32
 	struct desc_struct gdt;
+	unsigned long base = per_cpu_offset(cpu);
 
-	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
-			0x2 | DESCTYPE_S, 0x8);
-	gdt.s = 1;
+	pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
+			0x83 | DESCTYPE_S, 0xC);
 	write_gdt_entry(get_cpu_gdt_table(cpu),
 			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
 #endif
@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
 	/* alrighty, percpu areas up and running */
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
+#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_X86_32
+		unsigned long canary = per_cpu(stack_canary.canary, cpu);
+#endif
+#endif
 		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
 		per_cpu(cpu_number, cpu) = cpu;
@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
 		 */
 		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 #endif
+#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_X86_32
+		if (!cpu)
+			per_cpu(stack_canary.canary, cpu) = canary;
+#endif
+#endif
 		/*
 		 * Up to this point, the boot CPU has been using .init.data
 		 * area.  Reload any changed state for the boot CPU.
diff -ruNp linux-3.13.11/arch/x86/kernel/signal.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/signal.c
--- linux-3.13.11/arch/x86/kernel/signal.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/signal.c	2014-07-09 12:00:15.000000000 +0200
@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsi
 	 * Align the stack pointer according to the i386 ABI,
 	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
 	 */
-	sp = ((sp + 4) & -16ul) - 4;
+	sp = ((sp - 12) & -16ul) - 4;
 #else /* !CONFIG_X86_32 */
 	sp = round_down(sp, 16) - 8;
 #endif
@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *k
 	}
 
 	if (current->mm->context.vdso)
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+		restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
 	else
-		restorer = &frame->retcode;
+		restorer = (void __user *)&frame->retcode;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
 
@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *k
 	 * reasons and because gdb uses it as a signature to notice
 	 * signal handler stack frames.
 	 */
-	err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
+	err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
 
 	if (err)
 		return -EFAULT;
@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, str
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 		/* Set up to return from userspace.  */
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+		if (current->mm->context.vdso)
+			restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+		else
+		restorer = (void __user *)&frame->retcode;
 		if (ksig->ka.sa.sa_flags & SA_RESTORER)
 			restorer = ksig->ka.sa.sa_restorer;
 		put_user_ex(restorer, &frame->pretcode);
@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, str
 		 * reasons and because gdb uses it as a signature to notice
 		 * signal handler stack frames.
 		 */
-		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+		put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
 	} put_user_catch(err);
 	
 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, str
 {
 	int usig = signr_convert(ksig->sig);
 	sigset_t *set = sigmask_to_save();
-	compat_sigset_t *cset = (compat_sigset_t *) set;
+	sigset_t sigcopy;
+	compat_sigset_t *cset;
+
+	sigcopy = *set;
+
+	cset = (compat_sigset_t *) &sigcopy;
 
 	/* Set up the stack frame */
 	if (is_ia32_frame()) {
@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, str
 	} else if (is_x32_frame()) {
 		return x32_setup_rt_frame(ksig, cset, regs);
 	} else {
-		return __setup_rt_frame(ksig->sig, ksig, set, regs);
+		return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
 	}
 }
 
diff -ruNp linux-3.13.11/arch/x86/kernel/smp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/smp.c
--- linux-3.13.11/arch/x86/kernel/smp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/smp.c	2014-07-09 12:00:15.000000000 +0200
@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *
 
 __setup("nonmi_ipi", nonmi_ipi_setup);
 
-struct smp_ops smp_ops = {
+struct smp_ops smp_ops __read_only = {
 	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
diff -ruNp linux-3.13.11/arch/x86/kernel/smpboot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/smpboot.c
--- linux-3.13.11/arch/x86/kernel/smpboot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/smpboot.c	2014-07-09 12:00:15.000000000 +0200
@@ -229,14 +229,18 @@ static void notrace start_secondary(void
 
 	enable_start_cpu0 = 0;
 
-#ifdef CONFIG_X86_32
+	/* otherwise gcc will move up smp_processor_id before the cpu_init */
+	barrier();
+
 	/* switch away from the initial page table */
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+	__flush_tlb_all();
+#elif defined(CONFIG_X86_32)
 	load_cr3(swapper_pg_dir);
 	__flush_tlb_all();
 #endif
 
-	/* otherwise gcc will move up smp_processor_id before the cpu_init */
-	barrier();
 	/*
 	 * Check TSC synchronization with the BP:
 	 */
@@ -749,8 +753,9 @@ static int do_boot_cpu(int apicid, int c
 	alternatives_enable_smp();
 
 	idle->thread.sp = (unsigned long) (((struct pt_regs *)
-			  (THREAD_SIZE +  task_stack_page(idle))) - 1);
+			  (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
 	per_cpu(current_task, cpu) = idle;
+	per_cpu(current_tinfo, cpu) = &idle->tinfo;
 
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
@@ -758,11 +763,13 @@ static int do_boot_cpu(int apicid, int c
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
-	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(idle) -
-		KERNEL_STACK_OFFSET + THREAD_SIZE;
+	per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
 #endif
+
+	pax_open_kernel();
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+	pax_close_kernel();
+
 	initial_code = (unsigned long)start_secondary;
 	stack_start  = idle->thread.sp;
 
@@ -911,6 +918,15 @@ int native_cpu_up(unsigned int cpu, stru
 	/* the FPU context is blank, nobody can own it */
 	__cpu_disable_lazy_restore(cpu);
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+	clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+#endif
+
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
diff -ruNp linux-3.13.11/arch/x86/kernel/step.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/step.c
--- linux-3.13.11/arch/x86/kernel/step.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/step.c	2014-07-09 12:00:15.000000000 +0200
@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
 		struct desc_struct *desc;
 		unsigned long base;
 
-		seg &= ~7UL;
+		seg >>= 3;
 
 		mutex_lock(&child->mm->context.lock);
-		if (unlikely((seg >> 3) >= child->mm->context.size))
+		if (unlikely(seg >= child->mm->context.size))
 			addr = -1L; /* bogus selector, access would fault */
 		else {
 			desc = child->mm->context.ldt + seg;
@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
 			addr += base;
 		}
 		mutex_unlock(&child->mm->context.lock);
-	}
+	} else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
+		addr = ktla_ktva(addr);
 
 	return addr;
 }
@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
 	unsigned char opcode[15];
 	unsigned long addr = convert_ip_to_linear(child, regs);
 
+	if (addr == -EINVAL)
+		return 0;
+
 	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
 	for (i = 0; i < copied; i++) {
 		switch (opcode[i]) {
diff -ruNp linux-3.13.11/arch/x86/kernel/sys_i386_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/sys_i386_32.c
--- linux-3.13.11/arch/x86/kernel/sys_i386_32.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/sys_i386_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,189 @@
+/*
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/i386
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+#include <linux/elf.h>
+
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+{
+	unsigned long pax_task_size = TASK_SIZE;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	if (flags & MAP_FIXED)
+		if (len > pax_task_size || addr > pax_task_size - len)
+			return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
+ */
+static unsigned long get_align_mask(void)
+{
+	if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
+		return 0;
+
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+
+	return va_align.mask;
+}
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long pax_task_size = TASK_SIZE;
+	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	pax_task_size -= PAGE_SIZE;
+
+	if (len > pax_task_size)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		if (pax_task_size - len >= addr) {
+			vma = find_vma(mm, addr);
+			if (check_heap_stack_gap(vma, addr, len, offset))
+				return addr;
+		}
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
+		info.low_limit = 0x00110000UL;
+		info.high_limit = mm->start_code;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap & 0x03FFF000UL;
+#endif
+
+		if (info.low_limit < info.high_limit) {
+			addr = vm_unmapped_area(&info);
+			if (!IS_ERR_VALUE(addr))
+				return addr;
+		}
+	} else
+#endif
+
+	info.low_limit = mm->mmap_base;
+	info.high_limit = pax_task_size;
+
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0, pax_task_size = TASK_SIZE;
+	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	pax_task_size -= PAGE_SIZE;
+
+	/* requested length too big for entire address space */
+	if (len > pax_task_size)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
+		goto bottomup;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		if (pax_task_size - len >= addr) {
+			vma = find_vma(mm, addr);
+			if (check_heap_stack_gap(vma, addr, len, offset))
+				return addr;
+		}
+	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
+
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+}
diff -ruNp linux-3.13.11/arch/x86/kernel/sys_x86_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/sys_x86_64.c
--- linux-3.13.11/arch/x86/kernel/sys_x86_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/sys_x86_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -81,8 +81,8 @@ out:
 	return error;
 }
 
-static void find_start_end(unsigned long flags, unsigned long *begin,
-			   unsigned long *end)
+static void find_start_end(struct mm_struct *mm, unsigned long flags,
+			   unsigned long *begin, unsigned long *end)
 {
 	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
 		unsigned long new_begin;
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long
 				*begin = new_begin;
 		}
 	} else {
-		*begin = current->mm->mmap_legacy_base;
+		*begin = mm->mmap_legacy_base;
 		*end = TASK_SIZE;
 	}
 }
@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	if (flags & MAP_FIXED)
 		return addr;
 
-	find_start_end(flags, &begin, &end);
+	find_start_end(mm, flags, &begin, &end);
 
 	if (len > end)
 		return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp
 	info.high_limit = end;
 	info.align_mask = filp ? get_align_mask() : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct fi
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct fi
 	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
 		goto bottomup;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-				(!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
 	info.high_limit = mm->mmap_base;
 	info.align_mask = filp ? get_align_mask() : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
diff -ruNp linux-3.13.11/arch/x86/kernel/tboot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tboot.c
--- linux-3.13.11/arch/x86/kernel/tboot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tboot.c	2014-07-09 12:00:15.000000000 +0200
@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
 
 void tboot_shutdown(u32 shutdown_type)
 {
-	void (*shutdown)(void);
+	void (* __noreturn shutdown)(void);
 
 	if (!tboot_enabled())
 		return;
@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
 
 	switch_to_tboot_pt();
 
-	shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
+	shutdown = (void *)(unsigned long)tboot->shutdown_entry;
 	shutdown();
 
 	/* should not reach here */
@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep
 	return -ENODEV;
 }
 
-static atomic_t ap_wfs_count;
+static atomic_unchecked_t ap_wfs_count;
 
 static int tboot_wait_for_aps(int num_aps)
 {
@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct not
 {
 	switch (action) {
 	case CPU_DYING:
-		atomic_inc(&ap_wfs_count);
+		atomic_inc_unchecked(&ap_wfs_count);
 		if (num_online_cpus() == 1)
-			if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+			if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
 				return NOTIFY_BAD;
 		break;
 	}
@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
 
 	tboot_create_trampoline();
 
-	atomic_set(&ap_wfs_count, 0);
+	atomic_set_unchecked(&ap_wfs_count, 0);
 	register_hotcpu_notifier(&tboot_cpu_notifier);
 
 #ifdef CONFIG_DEBUG_FS
diff -ruNp linux-3.13.11/arch/x86/kernel/time.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/time.c
--- linux-3.13.11/arch/x86/kernel/time.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/time.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs
 {
 	unsigned long pc = instruction_pointer(regs);
 
-	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
+	if (!user_mode(regs) && in_lock_functions(pc)) {
 #ifdef CONFIG_FRAME_POINTER
-		return *(unsigned long *)(regs->bp + sizeof(long));
+		return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
 #else
 		unsigned long *sp =
 			(unsigned long *)kernel_stack_pointer(regs);
@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
 		 * or above a saved flags. Eflags has bits 22-31 zero,
 		 * kernel addresses don't.
 		 */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		return ktla_ktva(sp[0]);
+#else
 		if (sp[0] >> 22)
 			return sp[0];
 		if (sp[1] >> 22)
 			return sp[1];
 #endif
+
+#endif
 	}
 	return pc;
 }
diff -ruNp linux-3.13.11/arch/x86/kernel/tls.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tls.c
--- linux-3.13.11/arch/x86/kernel/tls.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tls.c	2014-07-09 12:00:15.000000000 +0200
@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struc
 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
 		return -EINVAL;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
+		return -EINVAL;
+#endif
+
 	set_tls_desc(p, idx, &info, 1);
 
 	return 0;
@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *t
 
 	if (kbuf)
 		info = kbuf;
-	else if (__copy_from_user(infobuf, ubuf, count))
+	else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
 		return -EFAULT;
 	else
 		info = infobuf;
diff -ruNp linux-3.13.11/arch/x86/kernel/tracepoint.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tracepoint.c
--- linux-3.13.11/arch/x86/kernel/tracepoint.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/tracepoint.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,11 +9,11 @@
 #include <linux/atomic.h>
 
 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
 				(unsigned long) trace_idt_table };
 
 /* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
 
 static int trace_irq_vector_refcount;
 static DEFINE_MUTEX(irq_vector_mutex);
diff -ruNp linux-3.13.11/arch/x86/kernel/traps.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/traps.c
--- linux-3.13.11/arch/x86/kernel/traps.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/traps.c	2014-07-09 12:00:15.000000000 +0200
@@ -66,7 +66,7 @@
 #include <asm/proto.h>
 
 /* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
 #endif
 
 /* Must be page-aligned because the real IDT is used in a fixmap. */
-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
 
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
@@ -107,11 +107,11 @@ static inline void preempt_conditional_c
 }
 
 static int __kprobes
-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
 		  struct pt_regs *regs,	long error_code)
 {
 #ifdef CONFIG_X86_32
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
 		/*
 		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 		 * On nmi (interrupt 2), do_trap should not be called.
@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *ts
 		return -1;
 	}
 #endif
-	if (!user_mode(regs)) {
+	if (!user_mode_novm(regs)) {
 		if (!fixup_exception(regs)) {
 			tsk->thread.error_code = error_code;
 			tsk->thread.trap_nr = trapnr;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+			if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
+				str = "PAX: suspicious stack segment fault";
+#endif
+
 			die(str, regs, error_code);
 		}
+
+#ifdef CONFIG_PAX_REFCOUNT
+		if (trapnr == 4)
+			pax_report_refcount_overflow(regs);
+#endif
+
 		return 0;
 	}
 
@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *ts
 }
 
 static void __kprobes
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
 {
 	struct task_struct *tsk = current;
@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
 	    printk_ratelimit()) {
 		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
-			tsk->comm, tsk->pid, str,
+			tsk->comm, task_pid_nr(tsk), str,
 			regs->ip, regs->sp, error_code);
 		print_vma_addr(" in ", regs->ip);
 		pr_cont("\n");
@@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *re
 	conditional_sti(regs);
 
 #ifdef CONFIG_X86_32
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
 		local_irq_enable();
 		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
 		goto exit;
@@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *re
 #endif
 
 	tsk = current;
-	if (!user_mode(regs)) {
+	if (!user_mode_novm(regs)) {
 		if (fixup_exception(regs))
 			goto exit;
 
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_nr = X86_TRAP_GP;
 		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
-			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
+			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
+			die("PAX: suspicious general protection fault", regs, error_code);
+		else
+#endif
+
 			die("general protection fault", regs, error_code);
+		}
 		goto exit;
 	}
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+	if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
+		struct mm_struct *mm = tsk->mm;
+		unsigned long limit;
+
+		down_write(&mm->mmap_sem);
+		limit = mm->context.user_cs_limit;
+		if (limit < TASK_SIZE) {
+			track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
+			up_write(&mm->mmap_sem);
+			return;
+		}
+		up_write(&mm->mmap_sem);
+	}
+#endif
+
 	tsk->thread.error_code = error_code;
 	tsk->thread.trap_nr = X86_TRAP_GP;
 
@@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(st
 	/* It's safe to allow irq's after DR6 has been saved */
 	preempt_conditional_sti(regs);
 
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
 					X86_TRAP_DB);
 		preempt_conditional_cli(regs);
@@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(st
 	 * We already checked v86 mode above, so we can check for kernel mode
 	 * by just checking the CPL of CS.
 	 */
-	if ((dr6 & DR_STEP) && !user_mode(regs)) {
+	if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
 		tsk->thread.debugreg6 &= ~DR_STEP;
 		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
 		regs->flags &= ~X86_EFLAGS_TF;
@@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, in
 		return;
 	conditional_sti(regs);
 
-	if (!user_mode_vm(regs))
+	if (!user_mode(regs))
 	{
 		if (!fixup_exception(regs)) {
 			task->thread.error_code = error_code;
diff -ruNp linux-3.13.11/arch/x86/kernel/uprobes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/uprobes.c
--- linux-3.13.11/arch/x86/kernel/uprobes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/uprobes.c	2014-07-09 12:00:15.000000000 +0200
@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct
 	int ret = NOTIFY_DONE;
 
 	/* We are only interested in userspace traps */
-	if (regs && !user_mode_vm(regs))
+	if (regs && !user_mode(regs))
 		return NOTIFY_DONE;
 
 	switch (val) {
@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsign
 
 	if (ncopied != rasize) {
 		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
-			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
+			"%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
 
 		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
 	}
diff -ruNp linux-3.13.11/arch/x86/kernel/verify_cpu.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/verify_cpu.S
--- linux-3.13.11/arch/x86/kernel/verify_cpu.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/verify_cpu.S	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,7 @@
  *	arch/x86/boot/compressed/head_64.S: Boot cpu verification
  *	arch/x86/kernel/trampoline_64.S: secondary processor verification
  *	arch/x86/kernel/head_32.S: processor startup
+ *	arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
  *
  *	verify_cpu, returns the status of longmode and SSE in register %eax.
  *		0: Success    1: Failure
diff -ruNp linux-3.13.11/arch/x86/kernel/vm86_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vm86_32.c
--- linux-3.13.11/arch/x86/kernel/vm86_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vm86_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,6 +44,7 @@
 #include <linux/ptrace.h>
 #include <linux/audit.h>
 #include <linux/stddef.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct ke
 		do_exit(SIGSEGV);
 	}
 
-	tss = &per_cpu(init_tss, get_cpu());
+	tss = init_tss + get_cpu();
 	current->thread.sp0 = current->thread.saved_sp0;
 	current->thread.sysenter_cs = __KERNEL_CS;
 	load_sp0(tss, &current->thread);
@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_str
 
 	if (tsk->thread.saved_sp0)
 		return -EPERM;
+
+#ifdef CONFIG_GRKERNSEC_VM86
+	if (!capable(CAP_SYS_RAWIO)) {
+		gr_handle_vm86();
+		return -EPERM;
+	}
+#endif
+
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, vm86plus) -
 				       sizeof(info.regs));
@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd
 	int tmp;
 	struct vm86plus_struct __user *v86;
 
+#ifdef CONFIG_GRKERNSEC_VM86
+	if (!capable(CAP_SYS_RAWIO)) {
+		gr_handle_vm86();
+		return -EPERM;
+	}
+#endif
+
 	tsk = current;
 	switch (cmd) {
 	case VM86_REQUEST_IRQ:
@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm
 	tsk->thread.saved_fs = info->regs32->fs;
 	tsk->thread.saved_gs = get_user_gs(info->regs32);
 
-	tss = &per_cpu(init_tss, get_cpu());
+	tss = init_tss + get_cpu();
 	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_re
 		goto cannot_handle;
 	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
 		goto cannot_handle;
-	intr_ptr = (unsigned long __user *) (i << 2);
+	intr_ptr = (__force unsigned long __user *) (i << 2);
 	if (get_user(segoffs, intr_ptr))
 		goto cannot_handle;
 	if ((segoffs >> 16) == BIOSSEG)
diff -ruNp linux-3.13.11/arch/x86/kernel/vmlinux.lds.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vmlinux.lds.S
--- linux-3.13.11/arch/x86/kernel/vmlinux.lds.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vmlinux.lds.S	2014-07-09 12:00:15.000000000 +0200
@@ -26,6 +26,13 @@
 #include <asm/page_types.h>
 #include <asm/cache.h>
 #include <asm/boot.h>
+#include <asm/segment.h>
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define __KERNEL_TEXT_OFFSET	(LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
+#else
+#define __KERNEL_TEXT_OFFSET	0
+#endif
 
 #undef i386     /* in case the preprocessor is a 32bit one */
 
@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
 
 PHDRS {
 	text PT_LOAD FLAGS(5);          /* R_E */
+#ifdef CONFIG_X86_32
+	module PT_LOAD FLAGS(5);        /* R_E */
+#endif
+#ifdef CONFIG_XEN
+	rodata PT_LOAD FLAGS(5);        /* R_E */
+#else
+	rodata PT_LOAD FLAGS(4);        /* R__ */
+#endif
 	data PT_LOAD FLAGS(6);          /* RW_ */
-#ifdef CONFIG_X86_64
+	init.begin PT_LOAD FLAGS(6);    /* RW_ */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(6);        /* RW_ */
 #endif
+	text.init PT_LOAD FLAGS(5);     /* R_E */
+	text.exit PT_LOAD FLAGS(5);     /* R_E */
 	init PT_LOAD FLAGS(7);          /* RWE */
-#endif
 	note PT_NOTE FLAGS(0);          /* ___ */
 }
 
 SECTIONS
 {
 #ifdef CONFIG_X86_32
-        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
-        phys_startup_32 = startup_32 - LOAD_OFFSET;
+	. = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
 #else
-        . = __START_KERNEL;
-        phys_startup_64 = startup_64 - LOAD_OFFSET;
+	. = __START_KERNEL;
 #endif
 
 	/* Text and read-only data */
-	.text :  AT(ADDR(.text) - LOAD_OFFSET) {
-		_text = .;
+	.text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
 		/* bootstrapping code */
+#ifdef CONFIG_X86_32
+		phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+#else
+		phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+#endif
+		__LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
+		_text = .;
 		HEAD_TEXT
 		. = ALIGN(8);
 		_stext = .;
@@ -104,13 +124,47 @@ SECTIONS
 		IRQENTRY_TEXT
 		*(.fixup)
 		*(.gnu.warning)
-		/* End of text section */
-		_etext = .;
 	} :text = 0x9090
 
-	NOTES :text :note
+	. += __KERNEL_TEXT_OFFSET;
+
+#ifdef CONFIG_X86_32
+	. = ALIGN(PAGE_SIZE);
+	.module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+		MODULES_EXEC_VADDR = .;
+		BYTE(0)
+		. += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
+		. = ALIGN(HPAGE_SIZE) - 1;
+		MODULES_EXEC_END = .;
+#endif
+
+	} :module
+#endif
+
+	.text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
+		/* End of text section */
+		BYTE(0)
+		_etext = . - __KERNEL_TEXT_OFFSET;
+	}
+
+#ifdef CONFIG_X86_32
+	. = ALIGN(PAGE_SIZE);
+	.rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
+		. = ALIGN(PAGE_SIZE);
+		*(.empty_zero_page)
+		*(.initial_pg_fixmap)
+		*(.initial_pg_pmd)
+		*(.initial_page_table)
+		*(.swapper_pg_dir)
+	} :rodata
+#endif
+
+	. = ALIGN(PAGE_SIZE);
+	NOTES :rodata :note
 
-	EXCEPTION_TABLE(16) :text = 0x9090
+	EXCEPTION_TABLE(16) :rodata
 
 #if defined(CONFIG_DEBUG_RODATA)
 	/* .text should occupy whole number of pages */
@@ -122,16 +176,20 @@ SECTIONS
 
 	/* Data */
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+		. = ALIGN(HPAGE_SIZE);
+#else
+		. = ALIGN(PAGE_SIZE);
+#endif
+
 		/* Start of data section */
 		_sdata = .;
 
 		/* init_task */
 		INIT_TASK_DATA(THREAD_SIZE)
 
-#ifdef CONFIG_X86_32
-		/* 32 bit has nosave before _edata */
 		NOSAVE_DATA
-#endif
 
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 
@@ -172,12 +230,19 @@ SECTIONS
 #endif /* CONFIG_X86_64 */
 
 	/* Init code and data - will be freed after init */
-	. = ALIGN(PAGE_SIZE);
 	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
+		BYTE(0)
+
+#ifdef CONFIG_PAX_KERNEXEC
+		. = ALIGN(HPAGE_SIZE);
+#else
+		. = ALIGN(PAGE_SIZE);
+#endif
+
 		__init_begin = .; /* paired with __init_end */
-	}
+	} :init.begin
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	/*
 	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
 	 * output PHDR, so the next output section - .init.text - should
@@ -186,12 +251,27 @@ SECTIONS
 	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
-	INIT_TEXT_SECTION(PAGE_SIZE)
-#ifdef CONFIG_X86_64
-	:init
-#endif
+	. = ALIGN(PAGE_SIZE);
+	init_begin = .;
+	.init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
+		VMLINUX_SYMBOL(_sinittext) = .;
+		INIT_TEXT
+		VMLINUX_SYMBOL(_einittext) = .;
+		. = ALIGN(PAGE_SIZE);
+	} :text.init
 
-	INIT_DATA_SECTION(16)
+	/*
+	 * .exit.text is discard at runtime, not link time, to deal with
+	 *  references from .altinstructions and .eh_frame
+	 */
+	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+		EXIT_TEXT
+		. = ALIGN(16);
+	} :text.exit
+	. = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
+
+	. = ALIGN(PAGE_SIZE);
+	INIT_DATA_SECTION(16) :init
 
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
 		__x86_cpu_dev_start = .;
@@ -262,19 +342,12 @@ SECTIONS
 	}
 
 	. = ALIGN(8);
-	/*
-	 * .exit.text is discard at runtime, not link time, to deal with
-	 *  references from .altinstructions and .eh_frame
-	 */
-	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
-		EXIT_TEXT
-	}
 
 	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
 		EXIT_DATA
 	}
 
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
+#ifndef CONFIG_SMP
 	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 
@@ -293,16 +366,10 @@ SECTIONS
 	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 		__smp_locks = .;
 		*(.smp_locks)
-		. = ALIGN(PAGE_SIZE);
 		__smp_locks_end = .;
+		. = ALIGN(PAGE_SIZE);
 	}
 
-#ifdef CONFIG_X86_64
-	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		NOSAVE_DATA
-	}
-#endif
-
 	/* BSS */
 	. = ALIGN(PAGE_SIZE);
 	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
@@ -318,6 +385,7 @@ SECTIONS
 		__brk_base = .;
 		. += 64 * 1024;		/* 64k alignment slop space */
 		*(.brk_reservation)	/* areas brk users have reserved */
+		. = ALIGN(HPAGE_SIZE);
 		__brk_limit = .;
 	}
 
@@ -344,13 +412,12 @@ SECTIONS
  * for the boot processor.
  */
 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
-INIT_PER_CPU(gdt_page);
 INIT_PER_CPU(irq_stack_union);
 
 /*
  * Build-time check on the image size:
  */
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
diff -ruNp linux-3.13.11/arch/x86/kernel/vsyscall_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vsyscall_64.c
--- linux-3.13.11/arch/x86/kernel/vsyscall_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/vsyscall_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,15 +56,13 @@
 DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
 
 static int __init vsyscall_setup(char *str)
 {
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -323,8 +321,7 @@ do_ret:
 	return true;
 
 sigsegv:
-	force_sig(SIGSEGV, current);
-	return true;
+	do_group_exit(SIGKILL);
 }
 
 /*
@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
 	extern char __vvar_page;
 	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
-	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
-		     vsyscall_mode == NATIVE
-		     ? PAGE_KERNEL_VSYSCALL
-		     : PAGE_KERNEL_VVAR);
+	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
 		     (unsigned long)VSYSCALL_START);
 
diff -ruNp linux-3.13.11/arch/x86/kernel/x8664_ksyms_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/x8664_ksyms_64.c
--- linux-3.13.11/arch/x86/kernel/x8664_ksyms_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/x8664_ksyms_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
 EXPORT_SYMBOL(copy_user_generic_unrolled);
 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
 EXPORT_SYMBOL(__copy_user_nocache);
-EXPORT_SYMBOL(_copy_from_user);
-EXPORT_SYMBOL(_copy_to_user);
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
 EXPORT_SYMBOL(___preempt_schedule_context);
 #endif
 #endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+EXPORT_SYMBOL(cpu_pgd);
+#endif
diff -ruNp linux-3.13.11/arch/x86/kernel/x86_init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/x86_init.c
--- linux-3.13.11/arch/x86/kernel/x86_init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/x86_init.c	2014-07-09 12:00:15.000000000 +0200
@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
 static void default_nmi_init(void) { };
 static int default_i8042_detect(void) { return 1; };
 
-struct x86_platform_ops x86_platform = {
+struct x86_platform_ops x86_platform __read_only = {
 	.calibrate_tsc			= native_calibrate_tsc,
 	.get_wallclock			= mach_get_cmos_time,
 	.set_wallclock			= mach_set_rtc_mmss,
@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
 EXPORT_SYMBOL_GPL(x86_platform);
 
 #if defined(CONFIG_PCI_MSI)
-struct x86_msi_ops x86_msi = {
+struct x86_msi_ops x86_msi __read_only = {
 	.setup_msi_irqs		= native_setup_msi_irqs,
 	.compose_msi_msg	= native_compose_msi_msg,
 	.teardown_msi_irq	= native_teardown_msi_irq,
@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *
 }
 #endif
 
-struct x86_io_apic_ops x86_io_apic_ops = {
+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
 	.init			= native_io_apic_init_mappings,
 	.read			= native_io_apic_read,
 	.write			= native_io_apic_write,
diff -ruNp linux-3.13.11/arch/x86/kernel/xsave.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/xsave.c
--- linux-3.13.11/arch/x86/kernel/xsave.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kernel/xsave.c	2014-07-09 12:00:15.000000000 +0200
@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(voi
 
 	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
 	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
-	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
+	err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
 
 	if (!use_xsave())
 		return err;
 
-	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
 
 	/*
 	 * Read the xstate_bv which we copied (directly from the cpu or
 	 * from the state in task struct) to the user buffers.
 	 */
-	err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+	err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
 
 	/*
 	 * For legacy compatible, we always set FP/SSE bits in the bit
@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(voi
 	 */
 	xstate_bv |= XSTATE_FPSSE;
 
-	err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+	err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
 
 	return err;
 }
@@ -199,6 +199,7 @@ static inline int save_user_xstate(struc
 {
 	int err;
 
+	buf = (struct xsave_struct __user *)____m(buf);
 	if (use_xsave())
 		err = xsave_user(buf);
 	else if (use_fxsr())
@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_str
  */
 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 {
+	buf = (void __user *)____m(buf);
 	if (use_xsave()) {
 		if ((unsigned long)buf % 64 || fx_only) {
 			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
diff -ruNp linux-3.13.11/arch/x86/kvm/cpuid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/cpuid.c
--- linux-3.13.11/arch/x86/kvm/cpuid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/cpuid.c	2014-07-09 12:00:15.000000000 +0200
@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm
 			      struct kvm_cpuid2 *cpuid,
 			      struct kvm_cpuid_entry2 __user *entries)
 {
-	int r;
+	int r, i;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -EFAULT;
-	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+	if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
 		goto out;
+	for (i = 0; i < cpuid->nent; ++i) {
+		struct kvm_cpuid_entry2 cpuid_entry;
+		if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
+			goto out;
+		vcpu->arch.cpuid_entries[i] = cpuid_entry;
+	}
 	vcpu->arch.cpuid_nent = cpuid->nent;
 	kvm_apic_set_version(vcpu);
 	kvm_x86_ops->cpuid_update(vcpu);
@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm
 			      struct kvm_cpuid2 *cpuid,
 			      struct kvm_cpuid_entry2 __user *entries)
 {
-	int r;
+	int r, i;
 
 	r = -E2BIG;
 	if (cpuid->nent < vcpu->arch.cpuid_nent)
 		goto out;
 	r = -EFAULT;
-	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
-			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+	if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
 		goto out;
+	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+		struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
+		if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
+			goto out;
+	}
 	return 0;
 
 out:
diff -ruNp linux-3.13.11/arch/x86/kvm/lapic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/lapic.c
--- linux-3.13.11/arch/x86/kvm/lapic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/lapic.c	2014-07-09 12:00:15.000000000 +0200
@@ -55,7 +55,7 @@
 #define APIC_BUS_CYCLE_NS 1
 
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
 
 #define APIC_LVT_NUM			6
 /* 14 is the version for Xeon and Pentium 8.4.8*/
diff -ruNp linux-3.13.11/arch/x86/kvm/paging_tmpl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/paging_tmpl.h
--- linux-3.13.11/arch/x86/kvm/paging_tmpl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/paging_tmpl.h	2014-07-09 12:00:15.000000000 +0200
@@ -331,7 +331,7 @@ retry_walk:
 		if (unlikely(kvm_is_error_hva(host_addr)))
 			goto error;
 
-		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
+		ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
 		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
 			goto error;
 		walker->ptep_user[walker->level - 1] = ptep_user;
diff -ruNp linux-3.13.11/arch/x86/kvm/svm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/svm.c
--- linux-3.13.11/arch/x86/kvm/svm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/svm.c	2014-07-09 12:00:15.000000000 +0200
@@ -3495,7 +3495,11 @@ static void reload_tss(struct kvm_vcpu *
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+	pax_open_kernel();
 	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
+	pax_close_kernel();
+
 	load_TR_desc();
 }
 
@@ -3898,6 +3902,10 @@ static void svm_vcpu_run(struct kvm_vcpu
 #endif
 #endif
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	__set_fs(current_thread_info()->addr_limit);
+#endif
+
 	reload_tss(vcpu);
 
 	local_irq_disable();
diff -ruNp linux-3.13.11/arch/x86/kvm/vmx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/vmx.c
--- linux-3.13.11/arch/x86/kvm/vmx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/vmx.c	2014-07-09 12:00:15.000000000 +0200
@@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long f
 #endif
 }
 
-static void vmcs_clear_bits(unsigned long field, u32 mask)
+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
 {
 	vmcs_writel(field, vmcs_readl(field) & ~mask);
 }
 
-static void vmcs_set_bits(unsigned long field, u32 mask)
+static void vmcs_set_bits(unsigned long field, unsigned long mask)
 {
 	vmcs_writel(field, vmcs_readl(field) | mask);
 }
@@ -1522,7 +1522,11 @@ static void reload_tss(void)
 	struct desc_struct *descs;
 
 	descs = (void *)gdt->address;
+
+	pax_open_kernel();
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+	pax_close_kernel();
+
 	load_TR_desc();
 }
 
@@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcp
 		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
 		vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+#endif
+
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
 		vmx->loaded_vmcs->cpu = cpu;
@@ -2033,7 +2041,7 @@ static void setup_msrs(struct vcpu_vmx *
  * reads and returns guest's timestamp counter "register"
  * guest_tsc = host_tsc + tsc_offset    -- 21.3
  */
-static u64 guest_read_tsc(void)
+static u64 __intentional_overflow(-1) guest_read_tsc(void)
 {
 	u64 host_tsc, tsc_offset;
 
@@ -2987,8 +2995,11 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
-	if (!cpu_has_vmx_tpr_shadow())
-		kvm_x86_ops->update_cr8_intercept = NULL;
+	if (!cpu_has_vmx_tpr_shadow()) {
+		pax_open_kernel();
+		*(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
+		pax_close_kernel();
+	}
 
 	if (enable_ept && !cpu_has_vmx_ept_2m_page())
 		kvm_disable_largepages();
@@ -2999,13 +3010,15 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_apicv())
 		enable_apicv = 0;
 
+	pax_open_kernel();
 	if (enable_apicv)
-		kvm_x86_ops->update_cr8_intercept = NULL;
+		*(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
 	else {
-		kvm_x86_ops->hwapic_irr_update = NULL;
-		kvm_x86_ops->deliver_posted_interrupt = NULL;
-		kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+		*(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
+		*(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
+		*(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
 	}
+	pax_close_kernel();
 
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
@@ -4134,7 +4147,10 @@ static void vmx_set_constant_host_state(
 
 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
 	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
+#endif
 
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
@@ -4156,7 +4172,7 @@ static void vmx_set_constant_host_state(
 	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 	vmx->host_idt_base = dt.address;
 
-	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
+	vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
 
 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
@@ -7219,6 +7235,12 @@ static void __noclone vmx_vcpu_run(struc
 		"jmp 2f \n\t"
 		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
 		"2: "
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		"ljmp %[cs],$3f\n\t"
+		"3: "
+#endif
+
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
@@ -7271,6 +7293,11 @@ static void __noclone vmx_vcpu_run(struc
 #endif
 		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 		[wordsize]"i"(sizeof(ulong))
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		,[cs]"i"(__KERNEL_CS)
+#endif
+
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
 		, "rax", "rbx", "rdi", "rsi"
@@ -7284,7 +7311,7 @@ static void __noclone vmx_vcpu_run(struc
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
 
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
 	/*
 	 * The sysexit path does not restore ds/es, so we must set them to
 	 * a reasonable value ourselves.
@@ -7293,8 +7320,18 @@ static void __noclone vmx_vcpu_run(struc
 	 * may be executed in interrupt context, which saves and restore segments
 	 * around it, nullifying its effect.
 	 */
-	loadsegment(ds, __USER_DS);
-	loadsegment(es, __USER_DS);
+	loadsegment(ds, __KERNEL_DS);
+	loadsegment(es, __KERNEL_DS);
+	loadsegment(ss, __KERNEL_DS);
+
+#ifdef CONFIG_PAX_KERNEXEC
+	loadsegment(fs, __KERNEL_PERCPU);
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	__set_fs(current_thread_info()->addr_limit);
+#endif
+
 #endif
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff -ruNp linux-3.13.11/arch/x86/kvm/x86.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/x86.c
--- linux-3.13.11/arch/x86/kvm/x86.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/kvm/x86.c	2014-07-09 12:00:15.000000000 +0200
@@ -1791,8 +1791,8 @@ static int xen_hvm_config(struct kvm_vcp
 {
 	struct kvm *kvm = vcpu->kvm;
 	int lm = is_long_mode(vcpu);
-	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
-		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
+	u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
+		: (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
 		: kvm->arch.xen_hvm_config.blob_size_32;
 	u32 page_num = data & ~PAGE_MASK;
@@ -2676,6 +2676,8 @@ long kvm_arch_dev_ioctl(struct file *fil
 		if (n < msr_list.nmsrs)
 			goto out;
 		r = -EFAULT;
+		if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
+			goto out;
 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
 				 num_msrs_to_save * sizeof(u32)))
 			goto out;
@@ -5485,7 +5487,7 @@ static struct notifier_block pvclock_gto
 };
 #endif
 
-int kvm_arch_init(void *opaque)
+int kvm_arch_init(const void *opaque)
 {
 	int r;
 	struct kvm_x86_ops *ops = opaque;
diff -ruNp linux-3.13.11/arch/x86/lguest/boot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lguest/boot.c
--- linux-3.13.11/arch/x86/lguest/boot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lguest/boot.c	2014-07-09 12:00:15.000000000 +0200
@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vt
  * Rebooting also tells the Host we're finished, but the RESTART flag tells the
  * Launcher to reboot us.
  */
-static void lguest_restart(char *reason)
+static __noreturn void lguest_restart(char *reason)
 {
 	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
+	BUG();
 }
 
 /*G:050
diff -ruNp linux-3.13.11/arch/x86/lib/atomic64_386_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/atomic64_386_32.S
--- linux-3.13.11/arch/x86/lib/atomic64_386_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/atomic64_386_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -48,6 +48,10 @@ BEGIN(read)
 	movl  (v), %eax
 	movl 4(v), %edx
 RET_ENDP
+BEGIN(read_unchecked)
+	movl  (v), %eax
+	movl 4(v), %edx
+RET_ENDP
 #undef v
 
 #define v %esi
@@ -55,6 +59,10 @@ BEGIN(set)
 	movl %ebx,  (v)
 	movl %ecx, 4(v)
 RET_ENDP
+BEGIN(set_unchecked)
+	movl %ebx,  (v)
+	movl %ecx, 4(v)
+RET_ENDP
 #undef v
 
 #define v  %esi
@@ -70,6 +78,20 @@ RET_ENDP
 BEGIN(add)
 	addl %eax,  (v)
 	adcl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 0f
+	subl %eax,  (v)
+	sbbl %edx, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(add_unchecked)
+	addl %eax,  (v)
+	adcl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -77,6 +99,24 @@ RET_ENDP
 BEGIN(add_return)
 	addl  (v), %eax
 	adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
+	movl %eax,  (v)
+	movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(add_return_unchecked)
+	addl  (v), %eax
+	adcl 4(v), %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -86,6 +126,20 @@ RET_ENDP
 BEGIN(sub)
 	subl %eax,  (v)
 	sbbl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 0f
+	addl %eax,  (v)
+	adcl %edx, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(sub_unchecked)
+	subl %eax,  (v)
+	sbbl %edx, 4(v)
 RET_ENDP
 #undef v
 
@@ -96,6 +150,27 @@ BEGIN(sub_return)
 	sbbl $0, %edx
 	addl  (v), %eax
 	adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
+	movl %eax,  (v)
+	movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(sub_return_unchecked)
+	negl %edx
+	negl %eax
+	sbbl $0, %edx
+	addl  (v), %eax
+	adcl 4(v), %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -105,6 +180,20 @@ RET_ENDP
 BEGIN(inc)
 	addl $1,  (v)
 	adcl $0, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 0f
+	subl $1,  (v)
+	sbbl $0, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(inc_unchecked)
+	addl $1,  (v)
+	adcl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -114,6 +203,26 @@ BEGIN(inc_return)
 	movl 4(v), %edx
 	addl $1, %eax
 	adcl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
+	movl %eax,  (v)
+	movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(inc_return_unchecked)
+	movl  (v), %eax
+	movl 4(v), %edx
+	addl $1, %eax
+	adcl $0, %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -123,6 +232,20 @@ RET_ENDP
 BEGIN(dec)
 	subl $1,  (v)
 	sbbl $0, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 0f
+	addl $1,  (v)
+	adcl $0, 4(v)
+	int $4
+0:
+	_ASM_EXTABLE(0b, 0b)
+#endif
+
+RET_ENDP
+BEGIN(dec_unchecked)
+	subl $1,  (v)
+	sbbl $0, 4(v)
 RET_ENDP
 #undef v
 
@@ -132,6 +255,26 @@ BEGIN(dec_return)
 	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
+	movl %eax,  (v)
+	movl %edx, 4(v)
+
+#ifdef CONFIG_PAX_REFCOUNT
+2:
+#endif
+
+RET_ENDP
+BEGIN(dec_return_unchecked)
+	movl  (v), %eax
+	movl 4(v), %edx
+	subl $1, %eax
+	sbbl $0, %edx
 	movl %eax,  (v)
 	movl %edx, 4(v)
 RET_ENDP
@@ -143,6 +286,13 @@ BEGIN(add_unless)
 	adcl %edx, %edi
 	addl  (v), %eax
 	adcl 4(v), %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
 	cmpl %eax, %ecx
 	je 3f
 1:
@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
 1:
 	addl $1, %eax
 	adcl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
 	movl %eax,  (v)
 	movl %edx, 4(v)
 	movl $1, %eax
@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
 	movl 4(v), %edx
 	subl $1, %eax
 	sbbl $0, %edx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 1f)
+#endif
+
 	js 1f
 	movl %eax,  (v)
 	movl %edx, 4(v)
diff -ruNp linux-3.13.11/arch/x86/lib/atomic64_cx8_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/atomic64_cx8_32.S
--- linux-3.13.11/arch/x86/lib/atomic64_cx8_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/atomic64_cx8_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
 	CFI_STARTPROC
 
 	read64 %ecx
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)
 
+ENTRY(atomic64_read_unchecked_cx8)
+	CFI_STARTPROC
+
+	read64 %ecx
+	pax_force_retaddr
+	ret
+	CFI_ENDPROC
+ENDPROC(atomic64_read_unchecked_cx8)
+
 ENTRY(atomic64_set_cx8)
 	CFI_STARTPROC
 
@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
 	cmpxchg8b (%esi)
 	jne 1b
 
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)
 
+ENTRY(atomic64_set_unchecked_cx8)
+	CFI_STARTPROC
+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
+	cmpxchg8b (%esi)
+	jne 1b
+
+	pax_force_retaddr
+	ret
+	CFI_ENDPROC
+ENDPROC(atomic64_set_unchecked_cx8)
+
 ENTRY(atomic64_xchg_cx8)
 	CFI_STARTPROC
 
@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
 	cmpxchg8b (%esi)
 	jne 1b
 
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)
 
-.macro addsub_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro addsub_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
 	CFI_STARTPROC
 	SAVE ebp
 	SAVE ebx
@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
 	movl %edx, %ecx
 	\ins\()l %esi, %ebx
 	\insc\()l %edi, %ecx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+2:
+	_ASM_EXTABLE(2b, 3f)
+#endif
+.endif
+
 	LOCK_PREFIX
 	cmpxchg8b (%ebp)
 	jne 1b
-
-10:
 	movl %ebx, %eax
 	movl %ecx, %edx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+3:
+#endif
+.endif
+
 	RESTORE edi
 	RESTORE esi
 	RESTORE ebx
 	RESTORE ebp
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
 .endm
 
 addsub_return add add adc
 addsub_return sub sub sbb
+addsub_return add add adc _unchecked
+addsub_return sub sub sbb _unchecked
 
-.macro incdec_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro incdec_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
 	CFI_STARTPROC
 	SAVE ebx
 
@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
 	movl %edx, %ecx
 	\ins\()l $1, %ebx
 	\insc\()l $0, %ecx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+2:
+	_ASM_EXTABLE(2b, 3f)
+#endif
+.endif
+
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
 
-10:
 	movl %ebx, %eax
 	movl %ecx, %edx
+
+.ifb \unchecked
+#ifdef CONFIG_PAX_REFCOUNT
+3:
+#endif
+.endif
+
 	RESTORE ebx
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
 .endm
 
 incdec_return inc add adc
 incdec_return dec sub sbb
+incdec_return inc add adc _unchecked
+incdec_return dec sub sbb _unchecked
 
 ENTRY(atomic64_dec_if_positive_cx8)
 	CFI_STARTPROC
@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
 	movl %edx, %ecx
 	subl $1, %ebx
 	sbb $0, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 2f)
+#endif
+
 	js 2f
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
 	movl %ebx, %eax
 	movl %ecx, %edx
 	RESTORE ebx
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
 	movl %edx, %ecx
 	addl %ebp, %ebx
 	adcl %edi, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 3f)
+#endif
+
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
 	CFI_ADJUST_CFA_OFFSET -8
 	RESTORE ebx
 	RESTORE ebp
+	pax_force_retaddr
 	ret
 4:
 	cmpl %edx, 4(%esp)
@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
 	xorl %ecx, %ecx
 	addl $1, %ebx
 	adcl %edx, %ecx
+
+#ifdef CONFIG_PAX_REFCOUNT
+	into
+1234:
+	_ASM_EXTABLE(1234b, 3f)
+#endif
+
 	LOCK_PREFIX
 	cmpxchg8b (%esi)
 	jne 1b
@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
 	movl $1, %eax
 3:
 	RESTORE ebx
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
diff -ruNp linux-3.13.11/arch/x86/lib/checksum_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/checksum_32.S
--- linux-3.13.11/arch/x86/lib/checksum_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/checksum_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -29,7 +29,8 @@
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
-				
+#include <asm/segment.h>
+
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (
 
 #define ARGBASE 16		
 #define FP		12
-		
-ENTRY(csum_partial_copy_generic)
+
+ENTRY(csum_partial_copy_generic_to_user)
 	CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl_cfi %gs
+	popl_cfi %es
+	jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl_cfi %gs
+	popl_cfi %ds
+#endif
+
+ENTRY(csum_partial_copy_generic)
 	subl  $4,%esp	
 	CFI_ADJUST_CFA_OFFSET 4
 	pushl_cfi %edi
@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
 	jmp 4f
 SRC(1:	movw (%esi), %bx	)
 	addl $2, %esi
-DST(	movw %bx, (%edi)	)
+DST(	movw %bx, %es:(%edi)	)
 	addl $2, %edi
 	addw %bx, %ax	
 	adcl $0, %eax
@@ -329,30 +345,30 @@ DST(	movw %bx, (%edi)	)
 SRC(1:	movl (%esi), %ebx	)
 SRC(	movl 4(%esi), %edx	)
 	adcl %ebx, %eax
-DST(	movl %ebx, (%edi)	)
+DST(	movl %ebx, %es:(%edi)	)
 	adcl %edx, %eax
-DST(	movl %edx, 4(%edi)	)
+DST(	movl %edx, %es:4(%edi)	)
 
 SRC(	movl 8(%esi), %ebx	)
 SRC(	movl 12(%esi), %edx	)
 	adcl %ebx, %eax
-DST(	movl %ebx, 8(%edi)	)
+DST(	movl %ebx, %es:8(%edi)	)
 	adcl %edx, %eax
-DST(	movl %edx, 12(%edi)	)
+DST(	movl %edx, %es:12(%edi)	)
 
 SRC(	movl 16(%esi), %ebx 	)
 SRC(	movl 20(%esi), %edx	)
 	adcl %ebx, %eax
-DST(	movl %ebx, 16(%edi)	)
+DST(	movl %ebx, %es:16(%edi)	)
 	adcl %edx, %eax
-DST(	movl %edx, 20(%edi)	)
+DST(	movl %edx, %es:20(%edi)	)
 
 SRC(	movl 24(%esi), %ebx	)
 SRC(	movl 28(%esi), %edx	)
 	adcl %ebx, %eax
-DST(	movl %ebx, 24(%edi)	)
+DST(	movl %ebx, %es:24(%edi)	)
 	adcl %edx, %eax
-DST(	movl %edx, 28(%edi)	)
+DST(	movl %edx, %es:28(%edi)	)
 
 	lea 32(%esi), %esi
 	lea 32(%edi), %edi
@@ -366,7 +382,7 @@ DST(	movl %edx, 28(%edi)	)
 	shrl $2, %edx			# This clears CF
 SRC(3:	movl (%esi), %ebx	)
 	adcl %ebx, %eax
-DST(	movl %ebx, (%edi)	)
+DST(	movl %ebx, %es:(%edi)	)
 	lea 4(%esi), %esi
 	lea 4(%edi), %edi
 	dec %edx
@@ -378,12 +394,12 @@ DST(	movl %ebx, (%edi)	)
 	jb 5f
 SRC(	movw (%esi), %cx	)
 	leal 2(%esi), %esi
-DST(	movw %cx, (%edi)	)
+DST(	movw %cx, %es:(%edi)	)
 	leal 2(%edi), %edi
 	je 6f
 	shll $16,%ecx
 SRC(5:	movb (%esi), %cl	)
-DST(	movb %cl, (%edi)	)
+DST(	movb %cl, %es:(%edi)	)
 6:	addl %ecx, %eax
 	adcl $0, %eax
 7:
@@ -394,7 +410,7 @@ DST(	movb %cl, (%edi)	)
 
 6001:
 	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
-	movl $-EFAULT, (%ebx)
+	movl $-EFAULT, %ss:(%ebx)
 
 	# zero the complete destination - computing the rest
 	# is too much work 
@@ -407,11 +423,15 @@ DST(	movb %cl, (%edi)	)
 
 6002:
 	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
-	movl $-EFAULT,(%ebx)
+	movl $-EFAULT,%ss:(%ebx)
 	jmp 5000b
 
 .previous
 
+	pushl_cfi %ss
+	popl_cfi %ds
+	pushl_cfi %ss
+	popl_cfi %es
 	popl_cfi %ebx
 	CFI_RESTORE ebx
 	popl_cfi %esi
@@ -421,26 +441,43 @@ DST(	movb %cl, (%edi)	)
 	popl_cfi %ecx			# equivalent to addl $4,%esp
 	ret	
 	CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
 
 #else
 
 /* Version for PentiumII/PPro */
 
 #define ROUND1(x) \
+	nop; nop; nop;				\
 	SRC(movl x(%esi), %ebx	)	;	\
 	addl %ebx, %eax			;	\
-	DST(movl %ebx, x(%edi)	)	; 
+	DST(movl %ebx, %es:x(%edi))	;
 
 #define ROUND(x) \
+	nop; nop; nop;				\
 	SRC(movl x(%esi), %ebx	)	;	\
 	adcl %ebx, %eax			;	\
-	DST(movl %ebx, x(%edi)	)	;
+	DST(movl %ebx, %es:x(%edi))	;
 
 #define ARGBASE 12
-		
-ENTRY(csum_partial_copy_generic)
+
+ENTRY(csum_partial_copy_generic_to_user)
 	CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl_cfi %gs
+	popl_cfi %es
+	jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl_cfi %gs
+	popl_cfi %ds
+#endif
+
+ENTRY(csum_partial_copy_generic)
 	pushl_cfi %ebx
 	CFI_REL_OFFSET ebx, 0
 	pushl_cfi %edi
@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
 	subl %ebx, %edi  
 	lea  -1(%esi),%edx
 	andl $-32,%edx
-	lea 3f(%ebx,%ebx), %ebx
+	lea 3f(%ebx,%ebx,2), %ebx
 	testl %esi, %esi 
 	jmp *%ebx
 1:	addl $64,%esi
@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
 	jb 5f
 SRC(	movw (%esi), %dx         )
 	leal 2(%esi), %esi
-DST(	movw %dx, (%edi)         )
+DST(	movw %dx, %es:(%edi)     )
 	leal 2(%edi), %edi
 	je 6f
 	shll $16,%edx
 5:
 SRC(	movb (%esi), %dl         )
-DST(	movb %dl, (%edi)         )
+DST(	movb %dl, %es:(%edi)     )
 6:	addl %edx, %eax
 	adcl $0, %eax
 7:
 .section .fixup, "ax"
 6001:	movl	ARGBASE+20(%esp), %ebx	# src_err_ptr	
-	movl $-EFAULT, (%ebx)
+	movl $-EFAULT, %ss:(%ebx)
 	# zero the complete destination (computing the rest is too much work)
 	movl ARGBASE+8(%esp),%edi	# dst
 	movl ARGBASE+12(%esp),%ecx	# len
@@ -502,10 +539,17 @@ DST(	movb %dl, (%edi)         )
 	rep; stosb
 	jmp 7b
 6002:	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
-	movl $-EFAULT, (%ebx)
+	movl $-EFAULT, %ss:(%ebx)
 	jmp  7b			
 .previous				
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	pushl_cfi %ss
+	popl_cfi %ds
+	pushl_cfi %ss
+	popl_cfi %es
+#endif
+
 	popl_cfi %esi
 	CFI_RESTORE esi
 	popl_cfi %edi
@@ -514,7 +558,7 @@ DST(	movb %dl, (%edi)         )
 	CFI_RESTORE ebx
 	ret
 	CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
 				
 #undef ROUND
 #undef ROUND1		
diff -ruNp linux-3.13.11/arch/x86/lib/clear_page_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/clear_page_64.S
--- linux-3.13.11/arch/x86/lib/clear_page_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/clear_page_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
 	movl $4096/8,%ecx
 	xorl %eax,%eax
 	rep stosq
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(clear_page_c)
@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(clear_page_c_e)
@@ -43,6 +45,7 @@ ENTRY(clear_page)
 	leaq	64(%rdi),%rdi
 	jnz	.Lloop
 	nop
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 .Lclear_page_end:
@@ -58,7 +61,7 @@ ENDPROC(clear_page)
 
 #include <asm/cpufeature.h>
 
-	.section .altinstr_replacement,"ax"
+	.section .altinstr_replacement,"a"
 1:	.byte 0xeb					/* jmp <disp8> */
 	.byte (clear_page_c - clear_page) - (2f - 1b)	/* offset */
 2:	.byte 0xeb					/* jmp <disp8> */
diff -ruNp linux-3.13.11/arch/x86/lib/cmpxchg16b_emu.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/cmpxchg16b_emu.S
--- linux-3.13.11/arch/x86/lib/cmpxchg16b_emu.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/cmpxchg16b_emu.S	2014-07-09 12:00:15.000000000 +0200
@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
 
 	popf
 	mov $1, %al
+	pax_force_retaddr
 	ret
 
  not_same:
 	popf
 	xor %al,%al
+	pax_force_retaddr
 	ret
 
 CFI_ENDPROC
diff -ruNp linux-3.13.11/arch/x86/lib/copy_page_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_page_64.S
--- linux-3.13.11/arch/x86/lib/copy_page_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_page_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,7 @@ copy_page_rep:
 	CFI_STARTPROC
 	movl	$4096/8, %ecx
 	rep	movsq
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(copy_page_rep)
@@ -24,8 +25,8 @@ ENTRY(copy_page)
 	CFI_ADJUST_CFA_OFFSET 2*8
 	movq	%rbx,	(%rsp)
 	CFI_REL_OFFSET rbx, 0
-	movq	%r12,	1*8(%rsp)
-	CFI_REL_OFFSET r12, 1*8
+	movq	%r13,	1*8(%rsp)
+	CFI_REL_OFFSET r13, 1*8
 
 	movl	$(4096/64)-5,	%ecx
 	.p2align 4
@@ -38,7 +39,7 @@ ENTRY(copy_page)
 	movq	0x8*4(%rsi), %r9
 	movq	0x8*5(%rsi), %r10
 	movq	0x8*6(%rsi), %r11
-	movq	0x8*7(%rsi), %r12
+	movq	0x8*7(%rsi), %r13
 
 	prefetcht0 5*64(%rsi)
 
@@ -49,7 +50,7 @@ ENTRY(copy_page)
 	movq	%r9,  0x8*4(%rdi)
 	movq	%r10, 0x8*5(%rdi)
 	movq	%r11, 0x8*6(%rdi)
-	movq	%r12, 0x8*7(%rdi)
+	movq	%r13, 0x8*7(%rdi)
 
 	leaq	64 (%rsi), %rsi
 	leaq	64 (%rdi), %rdi
@@ -68,7 +69,7 @@ ENTRY(copy_page)
 	movq	0x8*4(%rsi), %r9
 	movq	0x8*5(%rsi), %r10
 	movq	0x8*6(%rsi), %r11
-	movq	0x8*7(%rsi), %r12
+	movq	0x8*7(%rsi), %r13
 
 	movq	%rax, 0x8*0(%rdi)
 	movq	%rbx, 0x8*1(%rdi)
@@ -77,7 +78,7 @@ ENTRY(copy_page)
 	movq	%r9,  0x8*4(%rdi)
 	movq	%r10, 0x8*5(%rdi)
 	movq	%r11, 0x8*6(%rdi)
-	movq	%r12, 0x8*7(%rdi)
+	movq	%r13, 0x8*7(%rdi)
 
 	leaq	64(%rdi), %rdi
 	leaq	64(%rsi), %rsi
@@ -85,10 +86,11 @@ ENTRY(copy_page)
 
 	movq	(%rsp), %rbx
 	CFI_RESTORE rbx
-	movq	1*8(%rsp), %r12
-	CFI_RESTORE r12
+	movq	1*8(%rsp), %r13
+	CFI_RESTORE r13
 	addq	$2*8, %rsp
 	CFI_ADJUST_CFA_OFFSET -2*8
+	pax_force_retaddr
 	ret
 .Lcopy_page_end:
 	CFI_ENDPROC
@@ -99,7 +101,7 @@ ENDPROC(copy_page)
 
 #include <asm/cpufeature.h>
 
-	.section .altinstr_replacement,"ax"
+	.section .altinstr_replacement,"a"
 1:	.byte 0xeb					/* jmp <disp8> */
 	.byte (copy_page_rep - copy_page) - (2f - 1b)	/* offset */
 2:
diff -ruNp linux-3.13.11/arch/x86/lib/copy_user_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_user_64.S
--- linux-3.13.11/arch/x86/lib/copy_user_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_user_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -18,31 +18,7 @@
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-
-/*
- * By placing feature2 after feature1 in altinstructions section, we logically
- * implement:
- * If CPU has feature2, jmp to alt2 is used
- * else if CPU has feature1, jmp to alt1 is used
- * else jmp to orig is used.
- */
-	.macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
-0:
-	.byte 0xe9	/* 32bit jump */
-	.long \orig-1f	/* by default jump to orig */
-1:
-	.section .altinstr_replacement,"ax"
-2:	.byte 0xe9			/* near jump with 32bit immediate */
-	.long \alt1-1b /* offset */   /* or alternatively to alt1 */
-3:	.byte 0xe9			/* near jump with 32bit immediate */
-	.long \alt2-1b /* offset */   /* or alternatively to alt2 */
-	.previous
-
-	.section .altinstructions,"a"
-	altinstruction_entry 0b,2b,\feature1,5,5
-	altinstruction_entry 0b,3b,\feature2,5,5
-	.previous
-	.endm
+#include <asm/pgtable.h>
 
 	.macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -70,52 +46,6 @@
 #endif
 	.endm
 
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%rax)
-	movq %rdi,%rcx
-	addq %rdx,%rcx
-	jc bad_to_user
-	cmpq TI_addr_limit(%rax),%rcx
-	ja bad_to_user
-	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
-		copy_user_generic_unrolled,copy_user_generic_string,	\
-		copy_user_enhanced_fast_string
-	CFI_ENDPROC
-ENDPROC(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
-	CFI_STARTPROC
-	GET_THREAD_INFO(%rax)
-	movq %rsi,%rcx
-	addq %rdx,%rcx
-	jc bad_from_user
-	cmpq TI_addr_limit(%rax),%rcx
-	ja bad_from_user
-	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
-		copy_user_generic_unrolled,copy_user_generic_string,	\
-		copy_user_enhanced_fast_string
-	CFI_ENDPROC
-ENDPROC(_copy_from_user)
-
-	.section .fixup,"ax"
-	/* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
-	CFI_STARTPROC
-	movl %edx,%ecx
-	xorl %eax,%eax
-	rep
-	stosb
-bad_to_user:
-	movl %edx,%eax
-	ret
-	CFI_ENDPROC
-ENDPROC(bad_from_user)
-	.previous
-
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
  */
 ENTRY(copy_user_generic_unrolled)
 	CFI_STARTPROC
+	ASM_PAX_OPEN_USERLAND
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
 	jnz 21b
 23:	xor %eax,%eax
 	ASM_CLAC
+	ASM_PAX_CLOSE_USERLAND
+	pax_force_retaddr
 	ret
 
 	.section .fixup,"ax"
@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
  */
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
+	ASM_PAX_OPEN_USERLAND
 	ASM_STAC
 	andl %edx,%edx
 	jz 4f
@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
 	movsb
 4:	xorl %eax,%eax
 	ASM_CLAC
+	ASM_PAX_CLOSE_USERLAND
+	pax_force_retaddr
 	ret
 
 	.section .fixup,"ax"
@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
+	ASM_PAX_OPEN_USERLAND
 	ASM_STAC
 	andl %edx,%edx
 	jz 2f
@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
 	movsb
 2:	xorl %eax,%eax
 	ASM_CLAC
+	ASM_PAX_CLOSE_USERLAND
+	pax_force_retaddr
 	ret
 
 	.section .fixup,"ax"
diff -ruNp linux-3.13.11/arch/x86/lib/copy_user_nocache_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_user_nocache_64.S
--- linux-3.13.11/arch/x86/lib/copy_user_nocache_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/copy_user_nocache_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 #define FIX_ALIGNMENT 1
 
@@ -16,6 +17,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/pgtable.h>
 
 	.macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -49,6 +51,16 @@
  */
 ENTRY(__copy_user_nocache)
 	CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	mov pax_user_shadow_base,%rcx
+	cmp %rcx,%rsi
+	jae 1f
+	add %rcx,%rsi
+1:
+#endif
+
+	ASM_PAX_OPEN_USERLAND
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
 	jnz 21b
 23:	xorl %eax,%eax
 	ASM_CLAC
+	ASM_PAX_CLOSE_USERLAND
 	sfence
+	pax_force_retaddr
 	ret
 
 	.section .fixup,"ax"
diff -ruNp linux-3.13.11/arch/x86/lib/csum-copy_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/csum-copy_64.S
--- linux-3.13.11/arch/x86/lib/csum-copy_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/csum-copy_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,7 @@
 #include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/alternative-asm.h>
 
 /*
  * Checksum copy with exception handling.
@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
 	CFI_ADJUST_CFA_OFFSET 7*8
 	movq  %rbx, 2*8(%rsp)
 	CFI_REL_OFFSET rbx, 2*8
-	movq  %r12, 3*8(%rsp)
-	CFI_REL_OFFSET r12, 3*8
+	movq  %r15, 3*8(%rsp)
+	CFI_REL_OFFSET r15, 3*8
 	movq  %r14, 4*8(%rsp)
 	CFI_REL_OFFSET r14, 4*8
 	movq  %r13, 5*8(%rsp)
@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
 	movl  %edx, %ecx
 
 	xorl  %r9d, %r9d
-	movq  %rcx, %r12
+	movq  %rcx, %r15
 
-	shrq  $6, %r12
+	shrq  $6, %r15
 	jz	.Lhandle_tail       /* < 64 */
 
 	clc
 
 	/* main loop. clear in 64 byte blocks */
 	/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
-	/* r11:	temp3, rdx: temp4, r12 loopcnt */
+	/* r11:	temp3, rdx: temp4, r15 loopcnt */
 	/* r10:	temp5, rbp: temp6, r14 temp7, r13 temp8 */
 	.p2align 4
 .Lloop:
@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
 	adcq  %r14, %rax
 	adcq  %r13, %rax
 
-	decl %r12d
+	decl %r15d
 
 	dest
 	movq %rbx, (%rsi)
@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
 .Lende:
 	movq 2*8(%rsp), %rbx
 	CFI_RESTORE rbx
-	movq 3*8(%rsp), %r12
-	CFI_RESTORE r12
+	movq 3*8(%rsp), %r15
+	CFI_RESTORE r15
 	movq 4*8(%rsp), %r14
 	CFI_RESTORE r14
 	movq 5*8(%rsp), %r13
@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
 	CFI_RESTORE rbp
 	addq $7*8, %rsp
 	CFI_ADJUST_CFA_OFFSET -7*8
+	pax_force_retaddr
 	ret
 	CFI_RESTORE_STATE
 
diff -ruNp linux-3.13.11/arch/x86/lib/csum-wrappers_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/csum-wrappers_64.c
--- linux-3.13.11/arch/x86/lib/csum-wrappers_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/csum-wrappers_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void _
 			len -= 2;
 		}
 	}
+	pax_open_userland();
 	stac();
-	isum = csum_partial_copy_generic((__force const void *)src,
+	isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
 				dst, len, isum, errp, NULL);
 	clac();
+	pax_close_userland();
 	if (unlikely(*errp))
 		goto out_err;
 
@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *sr
 	}
 
 	*errp = 0;
+	pax_open_userland();
 	stac();
-	ret = csum_partial_copy_generic(src, (void __force *)dst,
+	ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
 					len, isum, NULL, errp);
 	clac();
+	pax_close_userland();
 	return ret;
 }
 EXPORT_SYMBOL(csum_partial_copy_to_user);
diff -ruNp linux-3.13.11/arch/x86/lib/getuser.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/getuser.S
--- linux-3.13.11/arch/x86/lib/getuser.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/getuser.S	2014-07-09 12:00:15.000000000 +0200
@@ -33,17 +33,40 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg gs;
+#else
+#define __copyuser_seg
+#endif
 
 	.text
 ENTRY(__get_user_1)
 	CFI_STARTPROC
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
 	ASM_STAC
-1:	movzbl (%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_DX
+	cmp %_ASM_DX,%_ASM_AX
+	jae 1234f
+	add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+1:	__copyuser_seg movzbl (%_ASM_AX),%edx
 	xor %eax,%eax
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_1)
@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
 ENTRY(__get_user_2)
 	CFI_STARTPROC
 	add $1,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
 	ASM_STAC
-2:	movzwl -1(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_DX
+	cmp %_ASM_DX,%_ASM_AX
+	jae 1234f
+	add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+2:	__copyuser_seg movzwl -1(%_ASM_AX),%edx
 	xor %eax,%eax
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_2)
@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
 ENTRY(__get_user_4)
 	CFI_STARTPROC
 	add $3,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
 	jc bad_get_user
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
 	ASM_STAC
-3:	movl -3(%_ASM_AX),%edx
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_DX
+	cmp %_ASM_DX,%_ASM_AX
+	jae 1234f
+	add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
+#endif
+
+3:	__copyuser_seg movl -3(%_ASM_AX),%edx
 	xor %eax,%eax
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_4)
@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	mov pax_user_shadow_base,%_ASM_DX
+	cmp %_ASM_DX,%_ASM_AX
+	jae 1234f
+	add %_ASM_DX,%_ASM_AX
+1234:
+#endif
+
 	ASM_STAC
 4:	movq -7(%_ASM_AX),%rdx
 	xor %eax,%eax
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 #else
 	add $7,%_ASM_AX
@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user_8
 	ASM_STAC
-4:	movl -7(%_ASM_AX),%edx
-5:	movl -3(%_ASM_AX),%ecx
+4:	__copyuser_seg movl -7(%_ASM_AX),%edx
+5:	__copyuser_seg movl -3(%_ASM_AX),%ecx
 	xor %eax,%eax
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 #endif
 	CFI_ENDPROC
@@ -113,6 +175,7 @@ bad_get_user:
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 END(bad_get_user)
@@ -124,6 +187,7 @@ bad_get_user_8:
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
 	ASM_CLAC
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 END(bad_get_user_8)
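
For illustration only (not part of the patch): the CONFIG_PAX_MEMORY_UDEREF blocks added to getuser.S above compare the incoming user pointer against pax_user_shadow_base and, when it lies below that base, rebase it into the shadow userland mapping before the access. A minimal userspace C sketch of the same address test follows; the base constant and helper name are made up here, since the real base is a kernel variable chosen at boot.

#include <stdio.h>
#include <stdint.h>

/* placeholder; in the patch this is the kernel variable pax_user_shadow_base */
#define FAKE_SHADOW_BASE 0x0000700000000000ULL

/* mirrors the asm pattern:  cmp %base,%addr ; jae 1234f ; add %base,%addr ; 1234: */
static uint64_t redirect_to_shadow(uint64_t addr, uint64_t base)
{
	if (addr >= base)	/* already at or above the shadow base: leave it alone */
		return addr;
	return addr + base;	/* plain userland address: rebase into the shadow area */
}

int main(void)
{
	uint64_t user_ptr = 0x0000000000401000ULL;

	printf("user %#llx -> shadow %#llx\n",
	       (unsigned long long)user_ptr,
	       (unsigned long long)redirect_to_shadow(user_ptr, FAKE_SHADOW_BASE));
	return 0;
}
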
diff -ruNp linux-3.13.11/arch/x86/lib/insn.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/insn.c
--- linux-3.13.11/arch/x86/lib/insn.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/insn.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,8 +20,10 @@
 
 #ifdef __KERNEL__
 #include <linux/string.h>
+#include <asm/pgtable_types.h>
 #else
 #include <string.h>
+#define ktla_ktva(addr) addr
 #endif
 #include <asm/inat.h>
 #include <asm/insn.h>
@@ -53,8 +55,8 @@
 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
 {
 	memset(insn, 0, sizeof(*insn));
-	insn->kaddr = kaddr;
-	insn->next_byte = kaddr;
+	insn->kaddr = ktla_ktva(kaddr);
+	insn->next_byte = ktla_ktva(kaddr);
 	insn->x86_64 = x86_64 ? 1 : 0;
 	insn->opnd_bytes = 4;
 	if (x86_64)
diff -ruNp linux-3.13.11/arch/x86/lib/iomap_copy_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/iomap_copy_64.S
--- linux-3.13.11/arch/x86/lib/iomap_copy_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/iomap_copy_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 /*
  * override generic version in lib/iomap_copy.c
@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
 	CFI_STARTPROC
 	movl %edx,%ecx
 	rep movsd
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
diff -ruNp linux-3.13.11/arch/x86/lib/memcpy_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memcpy_64.S
--- linux-3.13.11/arch/x86/lib/memcpy_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memcpy_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@
  * This gets patched over the unrolled variant (below) via the
  * alternative instructions framework:
  */
-	.section .altinstr_replacement, "ax", @progbits
+	.section .altinstr_replacement, "a", @progbits
 .Lmemcpy_c:
 	movq %rdi, %rax
 	movq %rdx, %rcx
@@ -33,6 +33,7 @@
 	rep movsq
 	movl %edx, %ecx
 	rep movsb
+	pax_force_retaddr
 	ret
 .Lmemcpy_e:
 	.previous
@@ -44,11 +45,12 @@
  * This gets patched over the unrolled variant (below) via the
  * alternative instructions framework:
  */
-	.section .altinstr_replacement, "ax", @progbits
+	.section .altinstr_replacement, "a", @progbits
 .Lmemcpy_c_e:
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
+	pax_force_retaddr
 	ret
 .Lmemcpy_e_e:
 	.previous
@@ -136,6 +138,7 @@ ENTRY(memcpy)
 	movq %r9,	1*8(%rdi)
 	movq %r10,	-2*8(%rdi, %rdx)
 	movq %r11,	-1*8(%rdi, %rdx)
+	pax_force_retaddr
 	retq
 	.p2align 4
 .Lless_16bytes:
@@ -148,6 +151,7 @@ ENTRY(memcpy)
 	movq -1*8(%rsi, %rdx),	%r9
 	movq %r8,	0*8(%rdi)
 	movq %r9,	-1*8(%rdi, %rdx)
+	pax_force_retaddr
 	retq
 	.p2align 4
 .Lless_8bytes:
@@ -161,6 +165,7 @@ ENTRY(memcpy)
 	movl -4(%rsi, %rdx), %r8d
 	movl %ecx, (%rdi)
 	movl %r8d, -4(%rdi, %rdx)
+	pax_force_retaddr
 	retq
 	.p2align 4
 .Lless_3bytes:
@@ -179,6 +184,7 @@ ENTRY(memcpy)
 	movb %cl, (%rdi)
 
 .Lend:
+	pax_force_retaddr
 	retq
 	CFI_ENDPROC
 ENDPROC(memcpy)
diff -ruNp linux-3.13.11/arch/x86/lib/memmove_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memmove_64.S
--- linux-3.13.11/arch/x86/lib/memmove_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memmove_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -202,14 +202,16 @@ ENTRY(memmove)
 	movb (%rsi), %r11b
 	movb %r11b, (%rdi)
 13:
+	pax_force_retaddr
 	retq
 	CFI_ENDPROC
 
-	.section .altinstr_replacement,"ax"
+	.section .altinstr_replacement,"a"
 .Lmemmove_begin_forward_efs:
 	/* Forward moving data. */
 	movq %rdx, %rcx
 	rep movsb
+	pax_force_retaddr
 	retq
 .Lmemmove_end_forward_efs:
 	.previous
diff -ruNp linux-3.13.11/arch/x86/lib/memset_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memset_64.S
--- linux-3.13.11/arch/x86/lib/memset_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/memset_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -16,7 +16,7 @@
  * 
  * rax   original destination
  */	
-	.section .altinstr_replacement, "ax", @progbits
+	.section .altinstr_replacement, "a", @progbits
 .Lmemset_c:
 	movq %rdi,%r9
 	movq %rdx,%rcx
@@ -30,6 +30,7 @@
 	movl %edx,%ecx
 	rep stosb
 	movq %r9,%rax
+	pax_force_retaddr
 	ret
 .Lmemset_e:
 	.previous
@@ -45,13 +46,14 @@
  *
  * rax   original destination
  */
-	.section .altinstr_replacement, "ax", @progbits
+	.section .altinstr_replacement, "a", @progbits
 .Lmemset_c_e:
 	movq %rdi,%r9
 	movb %sil,%al
 	movq %rdx,%rcx
 	rep stosb
 	movq %r9,%rax
+	pax_force_retaddr
 	ret
 .Lmemset_e_e:
 	.previous
@@ -118,6 +120,7 @@ ENTRY(__memset)
 
 .Lende:
 	movq	%r10,%rax
+	pax_force_retaddr
 	ret
 
 	CFI_RESTORE_STATE
diff -ruNp linux-3.13.11/arch/x86/lib/mmx_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/mmx_32.c
--- linux-3.13.11/arch/x86/lib/mmx_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/mmx_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
 {
 	void *p;
 	int i;
+	unsigned long cr0;
 
 	if (unlikely(in_interrupt()))
 		return __memcpy(to, from, len);
@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
-		"1: prefetch (%0)\n"		/* This set is 28 bytes */
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
+		"1: prefetch (%1)\n"		/* This set is 28 bytes */
+		"   prefetch 64(%1)\n"
+		"   prefetch 128(%1)\n"
+		"   prefetch 192(%1)\n"
+		"   prefetch 256(%1)\n"
 		"2:  \n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+		"3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
 			_ASM_EXTABLE(1b, 3b)
-			: : "r" (from));
+			: "=&r" (cr0) : "r" (from) : "ax");
 
 	for ( ; i > 5; i--) {
 		__asm__ __volatile__ (
-		"1:  prefetch 320(%0)\n"
-		"2:  movq (%0), %%mm0\n"
-		"  movq 8(%0), %%mm1\n"
-		"  movq 16(%0), %%mm2\n"
-		"  movq 24(%0), %%mm3\n"
-		"  movq %%mm0, (%1)\n"
-		"  movq %%mm1, 8(%1)\n"
-		"  movq %%mm2, 16(%1)\n"
-		"  movq %%mm3, 24(%1)\n"
-		"  movq 32(%0), %%mm0\n"
-		"  movq 40(%0), %%mm1\n"
-		"  movq 48(%0), %%mm2\n"
-		"  movq 56(%0), %%mm3\n"
-		"  movq %%mm0, 32(%1)\n"
-		"  movq %%mm1, 40(%1)\n"
-		"  movq %%mm2, 48(%1)\n"
-		"  movq %%mm3, 56(%1)\n"
+		"1:  prefetch 320(%1)\n"
+		"2:  movq (%1), %%mm0\n"
+		"  movq 8(%1), %%mm1\n"
+		"  movq 16(%1), %%mm2\n"
+		"  movq 24(%1), %%mm3\n"
+		"  movq %%mm0, (%2)\n"
+		"  movq %%mm1, 8(%2)\n"
+		"  movq %%mm2, 16(%2)\n"
+		"  movq %%mm3, 24(%2)\n"
+		"  movq 32(%1), %%mm0\n"
+		"  movq 40(%1), %%mm1\n"
+		"  movq 48(%1), %%mm2\n"
+		"  movq 56(%1), %%mm3\n"
+		"  movq %%mm0, 32(%2)\n"
+		"  movq %%mm1, 40(%2)\n"
+		"  movq %%mm2, 48(%2)\n"
+		"  movq %%mm3, 56(%2)\n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+		"3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
 			_ASM_EXTABLE(1b, 3b)
-			: : "r" (from), "r" (to) : "memory");
+			: "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
 		from += 64;
 		to += 64;
@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
+	unsigned long cr0;
 
 	kernel_fpu_begin();
 
@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
 	 * but that is for later. -AV
 	 */
 	__asm__ __volatile__(
-		"1: prefetch (%0)\n"
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
+		"1: prefetch (%1)\n"
+		"   prefetch 64(%1)\n"
+		"   prefetch 128(%1)\n"
+		"   prefetch 192(%1)\n"
+		"   prefetch 256(%1)\n"
 		"2:  \n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+		"3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b, 3b) : : "r" (from));
+			_ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
 
 	for (i = 0; i < (4096-320)/64; i++) {
 		__asm__ __volatile__ (
-		"1: prefetch 320(%0)\n"
-		"2: movq (%0), %%mm0\n"
-		"   movntq %%mm0, (%1)\n"
-		"   movq 8(%0), %%mm1\n"
-		"   movntq %%mm1, 8(%1)\n"
-		"   movq 16(%0), %%mm2\n"
-		"   movntq %%mm2, 16(%1)\n"
-		"   movq 24(%0), %%mm3\n"
-		"   movntq %%mm3, 24(%1)\n"
-		"   movq 32(%0), %%mm4\n"
-		"   movntq %%mm4, 32(%1)\n"
-		"   movq 40(%0), %%mm5\n"
-		"   movntq %%mm5, 40(%1)\n"
-		"   movq 48(%0), %%mm6\n"
-		"   movntq %%mm6, 48(%1)\n"
-		"   movq 56(%0), %%mm7\n"
-		"   movntq %%mm7, 56(%1)\n"
+		"1: prefetch 320(%1)\n"
+		"2: movq (%1), %%mm0\n"
+		"   movntq %%mm0, (%2)\n"
+		"   movq 8(%1), %%mm1\n"
+		"   movntq %%mm1, 8(%2)\n"
+		"   movq 16(%1), %%mm2\n"
+		"   movntq %%mm2, 16(%2)\n"
+		"   movq 24(%1), %%mm3\n"
+		"   movntq %%mm3, 24(%2)\n"
+		"   movq 32(%1), %%mm4\n"
+		"   movntq %%mm4, 32(%2)\n"
+		"   movq 40(%1), %%mm5\n"
+		"   movntq %%mm5, 40(%2)\n"
+		"   movq 48(%1), %%mm6\n"
+		"   movntq %%mm6, 48(%2)\n"
+		"   movq 56(%1), %%mm7\n"
+		"   movntq %%mm7, 56(%2)\n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+		"3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
-		_ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
+		_ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
 		from += 64;
 		to += 64;
@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
+	unsigned long cr0;
 
 	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
-		"1: prefetch (%0)\n"
-		"   prefetch 64(%0)\n"
-		"   prefetch 128(%0)\n"
-		"   prefetch 192(%0)\n"
-		"   prefetch 256(%0)\n"
+		"1: prefetch (%1)\n"
+		"   prefetch 64(%1)\n"
+		"   prefetch 128(%1)\n"
+		"   prefetch 192(%1)\n"
+		"   prefetch 256(%1)\n"
 		"2:  \n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+		"3:  \n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
-			_ASM_EXTABLE(1b, 3b) : : "r" (from));
+			_ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
 
 	for (i = 0; i < 4096/64; i++) {
 		__asm__ __volatile__ (
-		"1: prefetch 320(%0)\n"
-		"2: movq (%0), %%mm0\n"
-		"   movq 8(%0), %%mm1\n"
-		"   movq 16(%0), %%mm2\n"
-		"   movq 24(%0), %%mm3\n"
-		"   movq %%mm0, (%1)\n"
-		"   movq %%mm1, 8(%1)\n"
-		"   movq %%mm2, 16(%1)\n"
-		"   movq %%mm3, 24(%1)\n"
-		"   movq 32(%0), %%mm0\n"
-		"   movq 40(%0), %%mm1\n"
-		"   movq 48(%0), %%mm2\n"
-		"   movq 56(%0), %%mm3\n"
-		"   movq %%mm0, 32(%1)\n"
-		"   movq %%mm1, 40(%1)\n"
-		"   movq %%mm2, 48(%1)\n"
-		"   movq %%mm3, 56(%1)\n"
+		"1: prefetch 320(%1)\n"
+		"2: movq (%1), %%mm0\n"
+		"   movq 8(%1), %%mm1\n"
+		"   movq 16(%1), %%mm2\n"
+		"   movq 24(%1), %%mm3\n"
+		"   movq %%mm0, (%2)\n"
+		"   movq %%mm1, 8(%2)\n"
+		"   movq %%mm2, 16(%2)\n"
+		"   movq %%mm3, 24(%2)\n"
+		"   movq 32(%1), %%mm0\n"
+		"   movq 40(%1), %%mm1\n"
+		"   movq 48(%1), %%mm2\n"
+		"   movq 56(%1), %%mm3\n"
+		"   movq %%mm0, 32(%2)\n"
+		"   movq %%mm1, 40(%2)\n"
+		"   movq %%mm2, 48(%2)\n"
+		"   movq %%mm3, 56(%2)\n"
 		".section .fixup, \"ax\"\n"
-		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+		"3:\n"
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %%cr0, %0\n"
+		"   movl %0, %%eax\n"
+		"   andl $0xFFFEFFFF, %%eax\n"
+		"   movl %%eax, %%cr0\n"
+#endif
+
+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		"   movl %0, %%cr0\n"
+#endif
+
 		"   jmp 2b\n"
 		".previous\n"
 			_ASM_EXTABLE(1b, 3b)
-			: : "r" (from), "r" (to) : "memory");
+			: "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
 
 		from += 64;
 		to += 64;
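
For illustration only (not part of the patch): the CONFIG_PAX_KERNEXEC fragments added to mmx_32.c above save %cr0, clear its write-protect bit with the 0xFFFEFFFF mask so the fixup path may patch the prefetch instruction in place, and then restore the saved value. A small sketch of just the bit arithmetic (the real code runs in ring 0 and moves to/from %cr0):

#include <stdio.h>

#define X86_CR0_WP (1UL << 16)		/* write-protect bit; its inverse mask is 0xFFFEFFFF */

int main(void)
{
	unsigned long cr0 = 0x80050033UL;		/* representative CR0 value, for show */
	unsigned long relaxed = cr0 & 0xFFFEFFFFUL;	/* same effect as cr0 & ~X86_CR0_WP */

	printf("cr0        = %#010lx\n", cr0);
	printf("WP cleared = %#010lx (WP bit now %lu)\n",
	       relaxed, (relaxed & X86_CR0_WP) >> 16);
	return 0;
}
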
diff -ruNp linux-3.13.11/arch/x86/lib/msr-reg.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/msr-reg.S
--- linux-3.13.11/arch/x86/lib/msr-reg.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/msr-reg.S	2014-07-09 12:00:15.000000000 +0200
@@ -3,6 +3,7 @@
 #include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
+#include <asm/alternative-asm.h>
 
 #ifdef CONFIG_X86_64
 /*
@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
 	movl    %edi, 28(%r10)
 	popq_cfi %rbp
 	popq_cfi %rbx
+	pax_force_retaddr
 	ret
 3:
 	CFI_RESTORE_STATE
diff -ruNp linux-3.13.11/arch/x86/lib/putuser.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/putuser.S
--- linux-3.13.11/arch/x86/lib/putuser.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/putuser.S	2014-07-09 12:00:15.000000000 +0200
@@ -16,7 +16,9 @@
 #include <asm/errno.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
 
 /*
  * __put_user_X
@@ -30,57 +32,125 @@
  * as they get called from within inline assembly.
  */
 
-#define ENTER	CFI_STARTPROC ; \
-		GET_THREAD_INFO(%_ASM_BX)
-#define EXIT	ASM_CLAC ;	\
-		ret ;		\
+#define ENTER	CFI_STARTPROC
+#define EXIT	ASM_CLAC ;		\
+		pax_force_retaddr ;	\
+		ret ;			\
 		CFI_ENDPROC
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define _DEST %_ASM_CX,%_ASM_BX
+#else
+#define _DEST %_ASM_CX
+#endif
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define __copyuser_seg gs;
+#else
+#define __copyuser_seg
+#endif
+
 .text
 ENTRY(__put_user_1)
 	ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+	GET_THREAD_INFO(%_ASM_BX)
 	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
 	jae bad_put_user
 	ASM_STAC
-1:	movb %al,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
+	jb 1234f
+	xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+1:	__copyuser_seg movb %al,(_DEST)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
 	ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+	GET_THREAD_INFO(%_ASM_BX)
 	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $1,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
 	ASM_STAC
-2:	movw %ax,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
+	jb 1234f
+	xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+2:	__copyuser_seg movw %ax,(_DEST)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
 	ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+	GET_THREAD_INFO(%_ASM_BX)
 	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $3,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
 	ASM_STAC
-3:	movl %eax,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
+	jb 1234f
+	xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+3:	__copyuser_seg movl %eax,(_DEST)
 	xor %eax,%eax
 	EXIT
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
 	ENTER
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
+	GET_THREAD_INFO(%_ASM_BX)
 	mov TI_addr_limit(%_ASM_BX),%_ASM_BX
 	sub $7,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
 	ASM_STAC
-4:	mov %_ASM_AX,(%_ASM_CX)
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	mov pax_user_shadow_base,%_ASM_BX
+	cmp %_ASM_BX,%_ASM_CX
+	jb 1234f
+	xor %ebx,%ebx
+1234:
+#endif
+
+#endif
+
+4:	__copyuser_seg mov %_ASM_AX,(_DEST)
 #ifdef CONFIG_X86_32
-5:	movl %edx,4(%_ASM_CX)
+5:	__copyuser_seg movl %edx,4(_DEST)
 #endif
 	xor %eax,%eax
 	EXIT
diff -ruNp linux-3.13.11/arch/x86/lib/rwlock.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/rwlock.S
--- linux-3.13.11/arch/x86/lib/rwlock.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/rwlock.S	2014-07-09 12:00:15.000000000 +0200
@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
 	FRAME
 0:	LOCK_PREFIX
 	WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 1234f
+	LOCK_PREFIX
+	WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
+	int $4
+1234:
+	_ASM_EXTABLE(1234b, 1234b)
+#endif
+
 1:	rep; nop
 	cmpl	$WRITE_LOCK_CMP, (%__lock_ptr)
 	jne	1b
 	LOCK_PREFIX
 	WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 1234f
+	LOCK_PREFIX
+	WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
+	int $4
+1234:
+	_ASM_EXTABLE(1234b, 1234b)
+#endif
+
 	jnz	0b
 	ENDFRAME
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 END(__write_lock_failed)
@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
 	FRAME
 0:	LOCK_PREFIX
 	READ_LOCK_SIZE(inc) (%__lock_ptr)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 1234f
+	LOCK_PREFIX
+	READ_LOCK_SIZE(dec) (%__lock_ptr)
+	int $4
+1234:
+	_ASM_EXTABLE(1234b, 1234b)
+#endif
+
 1:	rep; nop
 	READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
 	js	1b
 	LOCK_PREFIX
 	READ_LOCK_SIZE(dec) (%__lock_ptr)
+
+#ifdef CONFIG_PAX_REFCOUNT
+	jno 1234f
+	LOCK_PREFIX
+	READ_LOCK_SIZE(inc) (%__lock_ptr)
+	int $4
+1234:
+	_ASM_EXTABLE(1234b, 1234b)
+#endif
+
 	js	0b
 	ENDFRAME
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 END(__read_lock_failed)
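
For illustration only (not part of the patch): the CONFIG_PAX_REFCOUNT hunks above follow a detect-and-undo pattern: perform the locked add, and if the signed result overflowed (the jno branch is not taken), reverse the operation and raise the overflow exception with int $4. A hedged C sketch of the same idea, using a compiler builtin and a return code in place of the trap:

#include <stdio.h>
#include <limits.h>

/* returns 0 on success, -1 if the add would overflow (counter left unchanged) */
static int checked_add(int *counter, int delta)
{
	int result;

	if (__builtin_add_overflow(*counter, delta, &result))
		return -1;	/* the patch undoes the add and executes int $4 here */
	*counter = result;
	return 0;
}

int main(void)
{
	int refs = INT_MAX - 1;

	printf("add 1: %s, refs=%d\n", checked_add(&refs, 1) ? "overflow" : "ok", refs);
	printf("add 1: %s, refs=%d\n", checked_add(&refs, 1) ? "overflow" : "ok", refs);
	return 0;
}
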
diff -ruNp linux-3.13.11/arch/x86/lib/rwsem.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/rwsem.S
--- linux-3.13.11/arch/x86/lib/rwsem.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/rwsem.S	2014-07-09 12:00:15.000000000 +0200
@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
 	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
 	CFI_RESTORE __ASM_REG(dx)
 	restore_common_regs
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
 	movq %rax,%rdi
 	call rwsem_down_write_failed
 	restore_common_regs
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
 	movq %rax,%rdi
 	call rwsem_wake
 	restore_common_regs
-1:	ret
+1:	pax_force_retaddr
+	ret
 	CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
 	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
 	CFI_RESTORE __ASM_REG(dx)
 	restore_common_regs
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
diff -ruNp linux-3.13.11/arch/x86/lib/thunk_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/thunk_64.S
--- linux-3.13.11/arch/x86/lib/thunk_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/thunk_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/calling.h>
+#include <asm/alternative-asm.h>
 
 	/* rdi:	arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -15,11 +16,11 @@
 \name:
 	CFI_STARTPROC
 
-	/* this one pushes 9 elems, the next one would be %rIP */
-	SAVE_ARGS
+	/* this one pushes 15+1 elems, the next one would be %rIP */
+	SAVE_ARGS 8
 
 	.if \put_ret_addr_in_rdi
-	movq_cfi_restore 9*8, rdi
+	movq_cfi_restore RIP, rdi
 	.endif
 
 	call \func
@@ -38,8 +39,9 @@
 
 	/* SAVE_ARGS below is used only for the .cfi directives it contains. */
 	CFI_STARTPROC
-	SAVE_ARGS
+	SAVE_ARGS 8
 restore:
-	RESTORE_ARGS
+	RESTORE_ARGS 1,8
+	pax_force_retaddr
 	ret
 	CFI_ENDPROC
diff -ruNp linux-3.13.11/arch/x86/lib/usercopy_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/usercopy_32.c
--- linux-3.13.11/arch/x86/lib/usercopy_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/usercopy_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,11 +42,13 @@ do {									\
 	int __d0;							\
 	might_fault();							\
 	__asm__ __volatile__(						\
+		__COPYUSER_SET_ES					\
 		ASM_STAC "\n"						\
 		"0:	rep; stosl\n"					\
 		"	movl %2,%0\n"					\
 		"1:	rep; stosb\n"					\
 		"2: " ASM_CLAC "\n"					\
+		__COPYUSER_RESTORE_ES					\
 		".section .fixup,\"ax\"\n"				\
 		"3:	lea 0(%2,%0,4),%0\n"				\
 		"	jmp 2b\n"					\
@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
 
 #ifdef CONFIG_X86_INTEL_USERCOPY
 static unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
 {
 	int d0, d1;
 	__asm__ __volatile__(
@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const
 		       "       .align 2,0x90\n"
 		       "3:     movl 0(%4), %%eax\n"
 		       "4:     movl 4(%4), %%edx\n"
-		       "5:     movl %%eax, 0(%3)\n"
-		       "6:     movl %%edx, 4(%3)\n"
+		       "5:     "__copyuser_seg" movl %%eax, 0(%3)\n"
+		       "6:     "__copyuser_seg" movl %%edx, 4(%3)\n"
 		       "7:     movl 8(%4), %%eax\n"
 		       "8:     movl 12(%4),%%edx\n"
-		       "9:     movl %%eax, 8(%3)\n"
-		       "10:    movl %%edx, 12(%3)\n"
+		       "9:     "__copyuser_seg" movl %%eax, 8(%3)\n"
+		       "10:    "__copyuser_seg" movl %%edx, 12(%3)\n"
 		       "11:    movl 16(%4), %%eax\n"
 		       "12:    movl 20(%4), %%edx\n"
-		       "13:    movl %%eax, 16(%3)\n"
-		       "14:    movl %%edx, 20(%3)\n"
+		       "13:    "__copyuser_seg" movl %%eax, 16(%3)\n"
+		       "14:    "__copyuser_seg" movl %%edx, 20(%3)\n"
 		       "15:    movl 24(%4), %%eax\n"
 		       "16:    movl 28(%4), %%edx\n"
-		       "17:    movl %%eax, 24(%3)\n"
-		       "18:    movl %%edx, 28(%3)\n"
+		       "17:    "__copyuser_seg" movl %%eax, 24(%3)\n"
+		       "18:    "__copyuser_seg" movl %%edx, 28(%3)\n"
 		       "19:    movl 32(%4), %%eax\n"
 		       "20:    movl 36(%4), %%edx\n"
-		       "21:    movl %%eax, 32(%3)\n"
-		       "22:    movl %%edx, 36(%3)\n"
+		       "21:    "__copyuser_seg" movl %%eax, 32(%3)\n"
+		       "22:    "__copyuser_seg" movl %%edx, 36(%3)\n"
 		       "23:    movl 40(%4), %%eax\n"
 		       "24:    movl 44(%4), %%edx\n"
-		       "25:    movl %%eax, 40(%3)\n"
-		       "26:    movl %%edx, 44(%3)\n"
+		       "25:    "__copyuser_seg" movl %%eax, 40(%3)\n"
+		       "26:    "__copyuser_seg" movl %%edx, 44(%3)\n"
 		       "27:    movl 48(%4), %%eax\n"
 		       "28:    movl 52(%4), %%edx\n"
-		       "29:    movl %%eax, 48(%3)\n"
-		       "30:    movl %%edx, 52(%3)\n"
+		       "29:    "__copyuser_seg" movl %%eax, 48(%3)\n"
+		       "30:    "__copyuser_seg" movl %%edx, 52(%3)\n"
 		       "31:    movl 56(%4), %%eax\n"
 		       "32:    movl 60(%4), %%edx\n"
-		       "33:    movl %%eax, 56(%3)\n"
-		       "34:    movl %%edx, 60(%3)\n"
+		       "33:    "__copyuser_seg" movl %%eax, 56(%3)\n"
+		       "34:    "__copyuser_seg" movl %%edx, 60(%3)\n"
 		       "       addl $-64, %0\n"
 		       "       addl $64, %4\n"
 		       "       addl $64, %3\n"
@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const
 		       "       shrl  $2, %0\n"
 		       "       andl  $3, %%eax\n"
 		       "       cld\n"
+		       __COPYUSER_SET_ES
 		       "99:    rep; movsl\n"
 		       "36:    movl %%eax, %0\n"
 		       "37:    rep; movsb\n"
 		       "100:\n"
+		       __COPYUSER_RESTORE_ES
+		       ".section .fixup,\"ax\"\n"
+		       "101:   lea 0(%%eax,%0,4),%0\n"
+		       "       jmp 100b\n"
+		       ".previous\n"
+		       _ASM_EXTABLE(1b,100b)
+		       _ASM_EXTABLE(2b,100b)
+		       _ASM_EXTABLE(3b,100b)
+		       _ASM_EXTABLE(4b,100b)
+		       _ASM_EXTABLE(5b,100b)
+		       _ASM_EXTABLE(6b,100b)
+		       _ASM_EXTABLE(7b,100b)
+		       _ASM_EXTABLE(8b,100b)
+		       _ASM_EXTABLE(9b,100b)
+		       _ASM_EXTABLE(10b,100b)
+		       _ASM_EXTABLE(11b,100b)
+		       _ASM_EXTABLE(12b,100b)
+		       _ASM_EXTABLE(13b,100b)
+		       _ASM_EXTABLE(14b,100b)
+		       _ASM_EXTABLE(15b,100b)
+		       _ASM_EXTABLE(16b,100b)
+		       _ASM_EXTABLE(17b,100b)
+		       _ASM_EXTABLE(18b,100b)
+		       _ASM_EXTABLE(19b,100b)
+		       _ASM_EXTABLE(20b,100b)
+		       _ASM_EXTABLE(21b,100b)
+		       _ASM_EXTABLE(22b,100b)
+		       _ASM_EXTABLE(23b,100b)
+		       _ASM_EXTABLE(24b,100b)
+		       _ASM_EXTABLE(25b,100b)
+		       _ASM_EXTABLE(26b,100b)
+		       _ASM_EXTABLE(27b,100b)
+		       _ASM_EXTABLE(28b,100b)
+		       _ASM_EXTABLE(29b,100b)
+		       _ASM_EXTABLE(30b,100b)
+		       _ASM_EXTABLE(31b,100b)
+		       _ASM_EXTABLE(32b,100b)
+		       _ASM_EXTABLE(33b,100b)
+		       _ASM_EXTABLE(34b,100b)
+		       _ASM_EXTABLE(35b,100b)
+		       _ASM_EXTABLE(36b,100b)
+		       _ASM_EXTABLE(37b,100b)
+		       _ASM_EXTABLE(99b,101b)
+		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
+		       :  "1"(to), "2"(from), "0"(size)
+		       : "eax", "edx", "memory");
+	return size;
+}
+
+static unsigned long
+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
+{
+	int d0, d1;
+	__asm__ __volatile__(
+		       "       .align 2,0x90\n"
+		       "1:     "__copyuser_seg" movl 32(%4), %%eax\n"
+		       "       cmpl $67, %0\n"
+		       "       jbe 3f\n"
+		       "2:     "__copyuser_seg" movl 64(%4), %%eax\n"
+		       "       .align 2,0x90\n"
+		       "3:     "__copyuser_seg" movl 0(%4), %%eax\n"
+		       "4:     "__copyuser_seg" movl 4(%4), %%edx\n"
+		       "5:     movl %%eax, 0(%3)\n"
+		       "6:     movl %%edx, 4(%3)\n"
+		       "7:     "__copyuser_seg" movl 8(%4), %%eax\n"
+		       "8:     "__copyuser_seg" movl 12(%4),%%edx\n"
+		       "9:     movl %%eax, 8(%3)\n"
+		       "10:    movl %%edx, 12(%3)\n"
+		       "11:    "__copyuser_seg" movl 16(%4), %%eax\n"
+		       "12:    "__copyuser_seg" movl 20(%4), %%edx\n"
+		       "13:    movl %%eax, 16(%3)\n"
+		       "14:    movl %%edx, 20(%3)\n"
+		       "15:    "__copyuser_seg" movl 24(%4), %%eax\n"
+		       "16:    "__copyuser_seg" movl 28(%4), %%edx\n"
+		       "17:    movl %%eax, 24(%3)\n"
+		       "18:    movl %%edx, 28(%3)\n"
+		       "19:    "__copyuser_seg" movl 32(%4), %%eax\n"
+		       "20:    "__copyuser_seg" movl 36(%4), %%edx\n"
+		       "21:    movl %%eax, 32(%3)\n"
+		       "22:    movl %%edx, 36(%3)\n"
+		       "23:    "__copyuser_seg" movl 40(%4), %%eax\n"
+		       "24:    "__copyuser_seg" movl 44(%4), %%edx\n"
+		       "25:    movl %%eax, 40(%3)\n"
+		       "26:    movl %%edx, 44(%3)\n"
+		       "27:    "__copyuser_seg" movl 48(%4), %%eax\n"
+		       "28:    "__copyuser_seg" movl 52(%4), %%edx\n"
+		       "29:    movl %%eax, 48(%3)\n"
+		       "30:    movl %%edx, 52(%3)\n"
+		       "31:    "__copyuser_seg" movl 56(%4), %%eax\n"
+		       "32:    "__copyuser_seg" movl 60(%4), %%edx\n"
+		       "33:    movl %%eax, 56(%3)\n"
+		       "34:    movl %%edx, 60(%3)\n"
+		       "       addl $-64, %0\n"
+		       "       addl $64, %4\n"
+		       "       addl $64, %3\n"
+		       "       cmpl $63, %0\n"
+		       "       ja  1b\n"
+		       "35:    movl  %0, %%eax\n"
+		       "       shrl  $2, %0\n"
+		       "       andl  $3, %%eax\n"
+		       "       cld\n"
+		       "99:    rep; "__copyuser_seg" movsl\n"
+		       "36:    movl %%eax, %0\n"
+		       "37:    rep; "__copyuser_seg" movsb\n"
+		       "100:\n"
 		       ".section .fixup,\"ax\"\n"
 		       "101:   lea 0(%%eax,%0,4),%0\n"
 		       "       jmp 100b\n"
@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, cons
 	int d0, d1;
 	__asm__ __volatile__(
 		       "        .align 2,0x90\n"
-		       "0:      movl 32(%4), %%eax\n"
+		       "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
 		       "        cmpl $67, %0\n"
 		       "        jbe 2f\n"
-		       "1:      movl 64(%4), %%eax\n"
+		       "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
 		       "        .align 2,0x90\n"
-		       "2:      movl 0(%4), %%eax\n"
-		       "21:     movl 4(%4), %%edx\n"
+		       "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+		       "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
 		       "        movl %%eax, 0(%3)\n"
 		       "        movl %%edx, 4(%3)\n"
-		       "3:      movl 8(%4), %%eax\n"
-		       "31:     movl 12(%4),%%edx\n"
+		       "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+		       "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
 		       "        movl %%eax, 8(%3)\n"
 		       "        movl %%edx, 12(%3)\n"
-		       "4:      movl 16(%4), %%eax\n"
-		       "41:     movl 20(%4), %%edx\n"
+		       "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+		       "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
 		       "        movl %%eax, 16(%3)\n"
 		       "        movl %%edx, 20(%3)\n"
-		       "10:     movl 24(%4), %%eax\n"
-		       "51:     movl 28(%4), %%edx\n"
+		       "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+		       "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
 		       "        movl %%eax, 24(%3)\n"
 		       "        movl %%edx, 28(%3)\n"
-		       "11:     movl 32(%4), %%eax\n"
-		       "61:     movl 36(%4), %%edx\n"
+		       "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+		       "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
 		       "        movl %%eax, 32(%3)\n"
 		       "        movl %%edx, 36(%3)\n"
-		       "12:     movl 40(%4), %%eax\n"
-		       "71:     movl 44(%4), %%edx\n"
+		       "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+		       "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
 		       "        movl %%eax, 40(%3)\n"
 		       "        movl %%edx, 44(%3)\n"
-		       "13:     movl 48(%4), %%eax\n"
-		       "81:     movl 52(%4), %%edx\n"
+		       "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+		       "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
 		       "        movl %%eax, 48(%3)\n"
 		       "        movl %%edx, 52(%3)\n"
-		       "14:     movl 56(%4), %%eax\n"
-		       "91:     movl 60(%4), %%edx\n"
+		       "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+		       "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
 		       "        movl %%eax, 56(%3)\n"
 		       "        movl %%edx, 60(%3)\n"
 		       "        addl $-64, %0\n"
@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, cons
 		       "        shrl  $2, %0\n"
 		       "        andl $3, %%eax\n"
 		       "        cld\n"
-		       "6:      rep; movsl\n"
+		       "6:      rep; "__copyuser_seg" movsl\n"
 		       "        movl %%eax,%0\n"
-		       "7:      rep; movsb\n"
+		       "7:      rep; "__copyuser_seg" movsb\n"
 		       "8:\n"
 		       ".section .fixup,\"ax\"\n"
 		       "9:      lea 0(%%eax,%0,4),%0\n"
@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing
 
 	__asm__ __volatile__(
 	       "        .align 2,0x90\n"
-	       "0:      movl 32(%4), %%eax\n"
+	       "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
 	       "        cmpl $67, %0\n"
 	       "        jbe 2f\n"
-	       "1:      movl 64(%4), %%eax\n"
+	       "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
 	       "        .align 2,0x90\n"
-	       "2:      movl 0(%4), %%eax\n"
-	       "21:     movl 4(%4), %%edx\n"
+	       "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+	       "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
 	       "        movnti %%eax, 0(%3)\n"
 	       "        movnti %%edx, 4(%3)\n"
-	       "3:      movl 8(%4), %%eax\n"
-	       "31:     movl 12(%4),%%edx\n"
+	       "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+	       "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
 	       "        movnti %%eax, 8(%3)\n"
 	       "        movnti %%edx, 12(%3)\n"
-	       "4:      movl 16(%4), %%eax\n"
-	       "41:     movl 20(%4), %%edx\n"
+	       "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+	       "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
 	       "        movnti %%eax, 16(%3)\n"
 	       "        movnti %%edx, 20(%3)\n"
-	       "10:     movl 24(%4), %%eax\n"
-	       "51:     movl 28(%4), %%edx\n"
+	       "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+	       "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
 	       "        movnti %%eax, 24(%3)\n"
 	       "        movnti %%edx, 28(%3)\n"
-	       "11:     movl 32(%4), %%eax\n"
-	       "61:     movl 36(%4), %%edx\n"
+	       "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+	       "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
 	       "        movnti %%eax, 32(%3)\n"
 	       "        movnti %%edx, 36(%3)\n"
-	       "12:     movl 40(%4), %%eax\n"
-	       "71:     movl 44(%4), %%edx\n"
+	       "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+	       "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
 	       "        movnti %%eax, 40(%3)\n"
 	       "        movnti %%edx, 44(%3)\n"
-	       "13:     movl 48(%4), %%eax\n"
-	       "81:     movl 52(%4), %%edx\n"
+	       "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+	       "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
 	       "        movnti %%eax, 48(%3)\n"
 	       "        movnti %%edx, 52(%3)\n"
-	       "14:     movl 56(%4), %%eax\n"
-	       "91:     movl 60(%4), %%edx\n"
+	       "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+	       "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
 	       "        movnti %%eax, 56(%3)\n"
 	       "        movnti %%edx, 60(%3)\n"
 	       "        addl $-64, %0\n"
@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing
 	       "        shrl  $2, %0\n"
 	       "        andl $3, %%eax\n"
 	       "        cld\n"
-	       "6:      rep; movsl\n"
+	       "6:      rep; "__copyuser_seg" movsl\n"
 	       "        movl %%eax,%0\n"
-	       "7:      rep; movsb\n"
+	       "7:      rep; "__copyuser_seg" movsb\n"
 	       "8:\n"
 	       ".section .fixup,\"ax\"\n"
 	       "9:      lea 0(%%eax,%0,4),%0\n"
@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_n
 
 	__asm__ __volatile__(
 	       "        .align 2,0x90\n"
-	       "0:      movl 32(%4), %%eax\n"
+	       "0:      "__copyuser_seg" movl 32(%4), %%eax\n"
 	       "        cmpl $67, %0\n"
 	       "        jbe 2f\n"
-	       "1:      movl 64(%4), %%eax\n"
+	       "1:      "__copyuser_seg" movl 64(%4), %%eax\n"
 	       "        .align 2,0x90\n"
-	       "2:      movl 0(%4), %%eax\n"
-	       "21:     movl 4(%4), %%edx\n"
+	       "2:      "__copyuser_seg" movl 0(%4), %%eax\n"
+	       "21:     "__copyuser_seg" movl 4(%4), %%edx\n"
 	       "        movnti %%eax, 0(%3)\n"
 	       "        movnti %%edx, 4(%3)\n"
-	       "3:      movl 8(%4), %%eax\n"
-	       "31:     movl 12(%4),%%edx\n"
+	       "3:      "__copyuser_seg" movl 8(%4), %%eax\n"
+	       "31:     "__copyuser_seg" movl 12(%4),%%edx\n"
 	       "        movnti %%eax, 8(%3)\n"
 	       "        movnti %%edx, 12(%3)\n"
-	       "4:      movl 16(%4), %%eax\n"
-	       "41:     movl 20(%4), %%edx\n"
+	       "4:      "__copyuser_seg" movl 16(%4), %%eax\n"
+	       "41:     "__copyuser_seg" movl 20(%4), %%edx\n"
 	       "        movnti %%eax, 16(%3)\n"
 	       "        movnti %%edx, 20(%3)\n"
-	       "10:     movl 24(%4), %%eax\n"
-	       "51:     movl 28(%4), %%edx\n"
+	       "10:     "__copyuser_seg" movl 24(%4), %%eax\n"
+	       "51:     "__copyuser_seg" movl 28(%4), %%edx\n"
 	       "        movnti %%eax, 24(%3)\n"
 	       "        movnti %%edx, 28(%3)\n"
-	       "11:     movl 32(%4), %%eax\n"
-	       "61:     movl 36(%4), %%edx\n"
+	       "11:     "__copyuser_seg" movl 32(%4), %%eax\n"
+	       "61:     "__copyuser_seg" movl 36(%4), %%edx\n"
 	       "        movnti %%eax, 32(%3)\n"
 	       "        movnti %%edx, 36(%3)\n"
-	       "12:     movl 40(%4), %%eax\n"
-	       "71:     movl 44(%4), %%edx\n"
+	       "12:     "__copyuser_seg" movl 40(%4), %%eax\n"
+	       "71:     "__copyuser_seg" movl 44(%4), %%edx\n"
 	       "        movnti %%eax, 40(%3)\n"
 	       "        movnti %%edx, 44(%3)\n"
-	       "13:     movl 48(%4), %%eax\n"
-	       "81:     movl 52(%4), %%edx\n"
+	       "13:     "__copyuser_seg" movl 48(%4), %%eax\n"
+	       "81:     "__copyuser_seg" movl 52(%4), %%edx\n"
 	       "        movnti %%eax, 48(%3)\n"
 	       "        movnti %%edx, 52(%3)\n"
-	       "14:     movl 56(%4), %%eax\n"
-	       "91:     movl 60(%4), %%edx\n"
+	       "14:     "__copyuser_seg" movl 56(%4), %%eax\n"
+	       "91:     "__copyuser_seg" movl 60(%4), %%edx\n"
 	       "        movnti %%eax, 56(%3)\n"
 	       "        movnti %%edx, 60(%3)\n"
 	       "        addl $-64, %0\n"
@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_n
 	       "        shrl  $2, %0\n"
 	       "        andl $3, %%eax\n"
 	       "        cld\n"
-	       "6:      rep; movsl\n"
+	       "6:      rep; "__copyuser_seg" movsl\n"
 	       "        movl %%eax,%0\n"
-	       "7:      rep; movsb\n"
+	       "7:      rep; "__copyuser_seg" movsb\n"
 	       "8:\n"
 	       ".section .fixup,\"ax\"\n"
 	       "9:      lea 0(%%eax,%0,4),%0\n"
@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_n
  */
 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
 					unsigned long size);
-unsigned long __copy_user_intel(void __user *to, const void *from,
+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
+					unsigned long size);
+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
 					unsigned long size);
 unsigned long __copy_user_zeroing_intel_nocache(void *to,
 				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
-#define __copy_user(to, from, size)					\
+#define __copy_user(to, from, size, prefix, set, restore)		\
 do {									\
 	int __d0, __d1, __d2;						\
 	__asm__ __volatile__(						\
+		set							\
 		"	cmp  $7,%0\n"					\
 		"	jbe  1f\n"					\
 		"	movl %1,%0\n"					\
 		"	negl %0\n"					\
 		"	andl $7,%0\n"					\
 		"	subl %0,%3\n"					\
-		"4:	rep; movsb\n"					\
+		"4:	rep; "prefix"movsb\n"				\
 		"	movl %3,%0\n"					\
 		"	shrl $2,%0\n"					\
 		"	andl $3,%3\n"					\
 		"	.align 2,0x90\n"				\
-		"0:	rep; movsl\n"					\
+		"0:	rep; "prefix"movsl\n"				\
 		"	movl %3,%0\n"					\
-		"1:	rep; movsb\n"					\
+		"1:	rep; "prefix"movsb\n"				\
 		"2:\n"							\
+		restore							\
 		".section .fixup,\"ax\"\n"				\
 		"5:	addl %3,%0\n"					\
 		"	jmp 2b\n"					\
@@ -538,14 +650,14 @@ do {									\
 		"	negl %0\n"					\
 		"	andl $7,%0\n"					\
 		"	subl %0,%3\n"					\
-		"4:	rep; movsb\n"					\
+		"4:	rep; "__copyuser_seg"movsb\n"			\
 		"	movl %3,%0\n"					\
 		"	shrl $2,%0\n"					\
 		"	andl $3,%3\n"					\
 		"	.align 2,0x90\n"				\
-		"0:	rep; movsl\n"					\
+		"0:	rep; "__copyuser_seg"movsl\n"			\
 		"	movl %3,%0\n"					\
-		"1:	rep; movsb\n"					\
+		"1:	rep; "__copyuser_seg"movsb\n"			\
 		"2:\n"							\
 		".section .fixup,\"ax\"\n"				\
 		"5:	addl %3,%0\n"					\
@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __u
 {
 	stac();
 	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
+		__copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
 	else
-		n = __copy_user_intel(to, from, n);
+		n = __generic_copy_to_user_intel(to, from, n);
 	clac();
 	return n;
 }
@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero
 {
 	stac();
 	if (movsl_is_ok(to, from, n))
-		__copy_user(to, from, n);
+		__copy_user(to, from, n, __copyuser_seg, "", "");
 	else
-		n = __copy_user_intel((void __user *)to,
-				      (const void *)from, n);
+		n = __generic_copy_from_user_intel(to, from, n);
 	clac();
 	return n;
 }
@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocach
 	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
 	else
-		__copy_user(to, from, n);
+		__copy_user(to, from, n, __copyuser_seg, "", "");
 #else
-	__copy_user(to, from, n);
+	__copy_user(to, from, n, __copyuser_seg, "", "");
 #endif
 	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
 
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+void __set_fs(mm_segment_t x)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
-	return n;
+	switch (x.seg) {
+	case 0:
+		loadsegment(gs, 0);
+		break;
+	case TASK_SIZE_MAX:
+		loadsegment(gs, __USER_DS);
+		break;
+	case -1UL:
+		loadsegment(gs, __KERNEL_DS);
+		break;
+	default:
+		BUG();
+	}
 }
-EXPORT_SYMBOL(_copy_to_user);
+EXPORT_SYMBOL(__set_fs);
 
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+void set_fs(mm_segment_t x)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else
-		memset(to, 0, n);
-	return n;
+	current_thread_info()->addr_limit = x;
+	__set_fs(x);
 }
-EXPORT_SYMBOL(_copy_from_user);
+EXPORT_SYMBOL(set_fs);
+#endif
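
For illustration only (not part of the patch): with UDEREF on i386, the new set_fs()/__set_fs() pair above reloads %gs so that the __copyuser_seg overrides address either nothing, the userland segment, or the kernel segment, depending on the current address limit. A sketch of just that mapping, with placeholder selector values and no real segment loads:

#include <stdio.h>

/* placeholder selectors; the patch uses __USER_DS/__KERNEL_DS and loadsegment(gs, ...) */
enum { SEG_NONE = 0x00, SEG_USER_DS = 0x7b, SEG_KERNEL_DS = 0x68 };

static unsigned int gs_for_limit(unsigned long addr_limit)
{
	switch (addr_limit) {
	case 0:			return SEG_NONE;	/* no userland access allowed */
	case 0xc0000000UL:	return SEG_USER_DS;	/* stand-in for TASK_SIZE_MAX */
	case ~0UL:		return SEG_KERNEL_DS;	/* KERNEL_DS */
	default:		return SEG_NONE;	/* the patch BUG()s on anything else */
	}
}

int main(void)
{
	printf("USER_DS   -> gs %#x\n", gs_for_limit(0xc0000000UL));
	printf("KERNEL_DS -> gs %#x\n", gs_for_limit(~0UL));
	return 0;
}
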
diff -ruNp linux-3.13.11/arch/x86/lib/usercopy_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/usercopy_64.c
--- linux-3.13.11/arch/x86/lib/usercopy_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/lib/usercopy_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *
 	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
+	pax_open_userland();
 	stac();
 	asm volatile(
 		"	testq  %[size8],%[size8]\n"
@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *
 		_ASM_EXTABLE(0b,3b)
 		_ASM_EXTABLE(1b,2b)
 		: [size8] "=&c"(size), [dst] "=&D" (__d0)
-		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
 		  [zero] "r" (0UL), [eight] "r" (8UL));
 	clac();
+	pax_close_userland();
 	return size;
 }
 EXPORT_SYMBOL(__clear_user);
@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to
 }
 EXPORT_SYMBOL(clear_user);
 
-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
 {
-	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 
-		return copy_user_generic((__force void *)to, (__force void *)from, len);
-	} 
-	return len;		
+	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
+		return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
+	return len;
 }
 EXPORT_SYMBOL(copy_in_user);
 
@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
  * it is not necessary to optimize tail handling.
  */
 __visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
 {
 	char c;
 	unsigned zero_len;
 
+	clac();
+	pax_close_userland();
 	for (; len; --len, to++) {
 		if (__get_user_nocheck(c, from++, sizeof(char)))
 			break;
@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *fr
 	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
 		if (__put_user_nocheck(c, to++, sizeof(char)))
 			break;
-	clac();
 	return len;
 }
diff -ruNp linux-3.13.11/arch/x86/mm/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/Makefile
--- linux-3.13.11/arch/x86/mm/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
 obj-$(CONFIG_MEMTEST)		+= memtest.o
+
+quote:="
+obj-$(CONFIG_X86_64)		+= uderef_64.o
+CFLAGS_uderef_64.o		:= $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
diff -ruNp linux-3.13.11/arch/x86/mm/extable.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/extable.c
--- linux-3.13.11/arch/x86/mm/extable.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/extable.c	2014-07-09 12:00:15.000000000 +0200
@@ -6,12 +6,24 @@
 static inline unsigned long
 ex_insn_addr(const struct exception_table_entry *x)
 {
-	return (unsigned long)&x->insn + x->insn;
+	unsigned long reloc = 0;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
+	return (unsigned long)&x->insn + x->insn + reloc;
 }
 static inline unsigned long
 ex_fixup_addr(const struct exception_table_entry *x)
 {
-	return (unsigned long)&x->fixup + x->fixup;
+	unsigned long reloc = 0;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
+	return (unsigned long)&x->fixup + x->fixup + reloc;
 }
 
 int fixup_exception(struct pt_regs *regs)
@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs
 	unsigned long new_ip;
 
 #ifdef CONFIG_PNPBIOS
-	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
+	if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
 		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
 		extern u32 pnp_bios_is_utter_crap;
 		pnp_bios_is_utter_crap = 1;
@@ -145,6 +157,13 @@ void sort_extable(struct exception_table
 		i += 4;
 		p->fixup -= i;
 		i += 4;
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
+		p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+		p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
+
 	}
 }
 
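
For illustration only (not part of the patch): the extable.c change above adds a KERNEXEC relocation delta on top of the kernel's usual relative exception-table lookup, in which each entry stores a signed 32-bit offset from the entry field itself to the target address. The baseline conversion can be sketched in plain C:

#include <stdio.h>
#include <stdint.h>

/* same layout idea as the kernel's relative exception table entries */
struct rel_extable_entry {
	int32_t insn;	/* offset from &entry.insn to the faulting instruction */
	int32_t fixup;	/* offset from &entry.fixup to the fixup code */
};

static unsigned long ex_insn_addr(const struct rel_extable_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

int main(void)
{
	static char code[64];
	static struct rel_extable_entry e;

	/* record code[16] as the "faulting instruction", keeping only the 32-bit delta */
	e.insn = (int32_t)((unsigned long)&code[16] - (unsigned long)&e.insn);
	printf("target %p, recovered %#lx\n", (void *)&code[16], ex_insn_addr(&e));
	return 0;
}
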
diff -ruNp linux-3.13.11/arch/x86/mm/fault.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/fault.c
--- linux-3.13.11/arch/x86/mm/fault.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/fault.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,11 +14,18 @@
 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
 #include <linux/prefetch.h>		/* prefetchw			*/
 #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
+#include <linux/unistd.h>
+#include <linux/compiler.h>
 
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
 #include <asm/fixmap.h>			/* VSYSCALL_START		*/
+#include <asm/tlbflush.h>
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#include <asm/stacktrace.h>
+#endif
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_faul
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-	if (kprobes_built_in() && !user_mode_vm(regs)) {
+	if (kprobes_built_in() && !user_mode(regs)) {
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *re
 		return !instr_lo || (instr_lo>>1) == 1;
 	case 0x00:
 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
-		if (probe_kernel_address(instr, opcode))
+		if (user_mode(regs)) {
+			if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
+				return 0;
+		} else if (probe_kernel_address(instr, opcode))
 			return 0;
 
 		*prefetch = (instr_lo == 0xF) &&
@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsign
 	while (instr < max_instr) {
 		unsigned char opcode;
 
-		if (probe_kernel_address(instr, opcode))
+		if (user_mode(regs)) {
+			if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
+				break;
+		} else if (probe_kernel_address(instr, opcode))
 			break;
 
 		instr++;
@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int s
 	force_sig_info(si_signo, &info, tsk);
 }
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+static int pax_handle_fetch_fault(struct pt_regs *regs);
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return NULL;
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return NULL;
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return NULL;
+	return pmd;
+}
+#endif
+
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
 
@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
 	for (address = VMALLOC_START & PMD_MASK;
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		unsigned long cpu;
+#else
 		struct page *page;
+#endif
 
 		spin_lock(&pgd_lock);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+			pgd_t *pgd = get_cpu_pgd(cpu, user);
+			pmd_t *ret;
+
+			ret = vmalloc_sync_one(pgd, address);
+			if (!ret)
+				break;
+			pgd = get_cpu_pgd(cpu, kernel);
+#else
 		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 			pmd_t *ret;
 
@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			pgd = page_address(page);
+#endif
+
+			ret = vmalloc_sync_one(pgd, address);
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
 			spin_unlock(pgt_lock);
+#endif
 
 			if (!ret)
 				break;
@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fa
 	 * an interrupt in the middle of a task switch..
 	 */
 	pgd_paddr = read_cr3();
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
+	vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
+#endif
+
 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 	if (!pmd_k)
 		return -1;
@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fa
 	 * happen within a race in page table update. In the later
 	 * case just flush:
 	 */
-	pgd = pgd_offset(current->active_mm, address);
+
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
+	pgd = pgd_offset_cpu(smp_processor_id(), user, address);
+	if (pgd_none(*pgd)) {
+		set_pgd(pgd, *pgd_ref);
+		arch_flush_lazy_mmu_mode();
+	} else {
+		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	}
+	pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
+#else
+	pgd = pgd_offset(current->active_mm, address);
+#endif
+
 	if (pgd_none(*pgd)) {
 		set_pgd(pgd, *pgd_ref);
 		arch_flush_lazy_mmu_mode();
@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *r
 static int is_errata100(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_64
-	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
+	if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
 		return 1;
 #endif
 	return 0;
@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *r
 }
 
 static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid:
%d)\n";
 
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
@@ -582,14 +666,26 @@ show_fault_oops(struct pt_regs *regs, un
 	if (!oops_may_print())
 		return;
 
-	if (error_code & PF_INSTR) {
+	if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
 		unsigned int level;
 
 		pte_t *pte = lookup_address(address, &level);
 
 		if (pte && pte_present(*pte) && !pte_exec(*pte))
-			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+			printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
+	}
+
+#ifdef CONFIG_PAX_KERNEXEC
+	if (init_mm.start_code <= address && address < init_mm.end_code) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
+					&current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
 	}
+#endif
 
 	printk(KERN_ALERT "BUG: unable to handle kernel ");
 	if (address < PAGE_SIZE)
@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *r
 				return;
 		}
 #endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+		if (pax_is_fetch_fault(regs, error_code, address)) {
+
+#ifdef CONFIG_PAX_EMUTRAMP
+			switch (pax_handle_fetch_fault(regs)) {
+			case 2:
+				return;
+			}
+#endif
+
+			pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		/* Kernel addresses are always protection faults: */
 		if (address >= TASK_SIZE)
 			error_code |= PF_PROT;
@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned
 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 		printk(KERN_ERR
 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
-			tsk->comm, tsk->pid, address);
+			tsk->comm, task_pid_nr(tsk), address);
 		code = BUS_MCEERR_AR;
 	}
 #endif
@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned
 	return 1;
 }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
+{
+	pte_t *pte;
+	pmd_t *pmd;
+	spinlock_t *ptl;
+	unsigned char pte_mask;
+
+	if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
+	    !(mm->pax_flags & MF_PAX_PAGEEXEC))
+		return 0;
+
+	/* PaX: it's our fault, let's handle it if we can */
+
+	/* PaX: take a look at read faults before acquiring any locks */
+	if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
+		/* instruction fetch attempt from a protected page in user mode */
+		up_read(&mm->mmap_sem);
+
+#ifdef CONFIG_PAX_EMUTRAMP
+		switch (pax_handle_fetch_fault(regs)) {
+		case 2:
+			return 1;
+		}
+#endif
+
+		pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
+		do_group_exit(SIGKILL);
+	}
+
+	pmd = pax_get_pmd(mm, address);
+	if (unlikely(!pmd))
+		return 0;
+
+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
+		pte_unmap_unlock(pte, ptl);
+		return 0;
+	}
+
+	if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
+		/* write attempt to a protected page in user mode */
+		pte_unmap_unlock(pte, ptl);
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
+#else
+	if (likely(address > get_limit(regs->cs)))
+#endif
+	{
+		set_pte(pte, pte_mkread(*pte));
+		__flush_tlb_one(address);
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
+		return 1;
+	}
+
+	pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
+
+	/*
+	 * PaX: fill DTLB with user rights and retry
+	 */
+	__asm__ __volatile__ (
+		"orb %2,(%1)\n"
+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
+/*
+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
+ * page fault when examined during a TLB load attempt. this is true not only
+ * for PTEs holding a non-present entry but also present entries that will
+ * raise a page fault (such as those set up by PaX, or the copy-on-write
+ * mechanism). in effect it means that we do *not* need to flush the TLBs
+ * for our target pages since their PTEs are simply not in the TLBs at all.
+
+ * the best thing in omitting it is that we gain around 15-20% speed in the
+ * fast path of the page fault handler and can get rid of tracing since we
+ * can no longer flush unintended entries.
+ */
+		"invlpg (%0)\n"
+#endif
+		__copyuser_seg"testb $0,(%0)\n"
+		"xorb %3,(%1)\n"
+		:
+		: "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
+		: "memory", "cc");
+	pte_unmap_unlock(pte, ptl);
+	up_read(&mm->mmap_sem);
+	return 1;
+}
+#endif
+
 /*
  * Handle a spurious fault caused by a stale TLB entry.
  *
@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
 static inline int
 access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
+	if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags
& VM_EXEC))
+		return 1;
+
 	if (error_code & PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
@@ -1010,7 +1218,7 @@ static inline bool smap_violation(int er
 	if (error_code & PF_USER)
 		return false;
 
-	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
+	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
 		return false;
 
 	return true;
@@ -1037,6 +1245,22 @@ __do_page_fault(struct pt_regs *regs, un
 	/* Get the faulting address: */
 	address = read_cr2();
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+	if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
+		if (!search_exception_tables(regs->ip)) {
+			printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
+			bad_area_nosemaphore(regs, error_code, address);
+			return;
+		}
+		if (address < pax_user_shadow_base) {
+			printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
+			printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
+			show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
+		} else
+			address -= pax_user_shadow_base;
+	}
+#endif
+
 	/*
 	 * Detect and handle instructions that would cause a page fault for
 	 * both a tracked kernel page and a userspace page.
@@ -1114,7 +1338,7 @@ __do_page_fault(struct pt_regs *regs, un
 	 * User-mode registers count as a user access even for any
 	 * potential system fault or CPU buglet:
 	 */
-	if (user_mode_vm(regs)) {
+	if (user_mode(regs)) {
 		local_irq_enable();
 		error_code |= PF_USER;
 		flags |= FAULT_FLAG_USER;
@@ -1161,6 +1385,11 @@ retry:
 		might_sleep();
 	}
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+	if (pax_handle_pageexec_fault(regs, mm, address, error_code))
+		return;
+#endif
+
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
 		bad_area(regs, error_code, address);
@@ -1172,18 +1401,24 @@ retry:
 		bad_area(regs, error_code, address);
 		return;
 	}
-	if (error_code & PF_USER) {
-		/*
-		 * Accessing the stack below %sp is always a bug.
-		 * The large cushion allows instructions like enter
-		 * and pusha to work. ("enter $65535, $31" pushes
-		 * 32 pointers and then decrements %sp by 65535.)
-		 */
-		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
-			bad_area(regs, error_code, address);
-			return;
-		}
+	/*
+	 * Accessing the stack below %sp is always a bug.
+	 * The large cushion allows instructions like enter
+	 * and pusha to work. ("enter $65535, $31" pushes
+	 * 32 pointers and then decrements %sp by 65535.)
+	 */
+	if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
+		bad_area(regs, error_code, address);
+		return;
+	}
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
+		bad_area(regs, error_code, address);
+		return;
 	}
+#endif
+
 	if (unlikely(expand_stack(vma, address))) {
 		bad_area(regs, error_code, address);
 		return;
@@ -1277,3 +1512,292 @@ trace_do_page_fault(struct pt_regs *regs
 	__do_page_fault(regs, error_code);
 	exception_exit(prev_state);
 }
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long ip = regs->ip;
+
+	if (v8086_mode(regs))
+		ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+		if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
+			return true;
+		if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
+			return true;
+		return false;
+	}
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+		if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
+			return true;
+		return false;
+	}
+#endif
+
+	return false;
+}
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
+{
+	int err;
+
+	do { /* PaX: libffi trampoline emulation */
+		unsigned char mov, jmp;
+		unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+		if ((regs->ip + 9) >> 32)
+			break;
+#endif
+
+		err = get_user(mov, (unsigned char __user *)regs->ip);
+		err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+		err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
+		err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+
+		if (err)
+			break;
+
+		if (mov == 0xB8 && jmp == 0xE9) {
+			regs->ax = addr1;
+			regs->ip = (unsigned int)(regs->ip + addr2 + 10);
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #1 */
+		unsigned char mov1, mov2;
+		unsigned short jmp;
+		unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+		if ((regs->ip + 11) >> 32)
+			break;
+#endif
+
+		err = get_user(mov1, (unsigned char __user *)regs->ip);
+		err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+		err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
+		err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+		err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
+
+		if (err)
+			break;
+
+		if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
+			regs->cx = addr1;
+			regs->ax = addr2;
+			regs->ip = addr2;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #2 */
+		unsigned char mov, jmp;
+		unsigned int addr1, addr2;
+
+#ifdef CONFIG_X86_64
+		if ((regs->ip + 9) >> 32)
+			break;
+#endif
+
+		err = get_user(mov, (unsigned char __user *)regs->ip);
+		err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
+		err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
+		err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
+
+		if (err)
+			break;
+
+		if (mov == 0xB9 && jmp == 0xE9) {
+			regs->cx = addr1;
+			regs->ip = (unsigned int)(regs->ip + addr2 + 10);
+			return 2;
+		}
+	} while (0);
+
+	return 1; /* PaX in action */
+}
+
+#ifdef CONFIG_X86_64
+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
+{
+	int err;
+
+	do { /* PaX: libffi trampoline emulation */
+		unsigned short mov1, mov2, jmp1;
+		unsigned char stcclc, jmp2;
+		unsigned long addr1, addr2;
+
+		err = get_user(mov1, (unsigned short __user *)regs->ip);
+		err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
+		err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
+		err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
+		err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
+		err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
+		err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
+
+		if (err)
+			break;
+
+		if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+			regs->r11 = addr1;
+			regs->r10 = addr2;
+			if (stcclc == 0xF8)
+				regs->flags &= ~X86_EFLAGS_CF;
+			else
+				regs->flags |= X86_EFLAGS_CF;
+			regs->ip = addr1;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #1 */
+		unsigned short mov1, mov2, jmp1;
+		unsigned char jmp2;
+		unsigned int addr1;
+		unsigned long addr2;
+
+		err = get_user(mov1, (unsigned short __user *)regs->ip);
+		err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
+		err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
+		err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
+		err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
+		err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
+
+		if (err)
+			break;
+
+		if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+			regs->r11 = addr1;
+			regs->r10 = addr2;
+			regs->ip = addr1;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: gcc trampoline emulation #2 */
+		unsigned short mov1, mov2, jmp1;
+		unsigned char jmp2;
+		unsigned long addr1, addr2;
+
+		err = get_user(mov1, (unsigned short __user *)regs->ip);
+		err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
+		err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
+		err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
+		err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
+		err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
+
+		if (err)
+			break;
+
+		if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
+			regs->r11 = addr1;
+			regs->r10 = addr2;
+			regs->ip = addr1;
+			return 2;
+		}
+	} while (0);
+
+	return 1; /* PaX in action */
+}
+#endif
+
+/*
+ * PaX: decide what to do with offenders (regs->ip = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when gcc trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+	if (v8086_mode(regs))
+		return 1;
+
+	if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
+		return 1;
+
+#ifdef CONFIG_X86_32
+	return pax_handle_fetch_fault_32(regs);
+#else
+	if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
+		return pax_handle_fetch_fault_32(regs);
+	else
+		return pax_handle_fetch_fault_64(regs);
+#endif
+}
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (unsigned char __force_user *)pc+i))
+			printk(KERN_CONT "?? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+
+	printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
+	for (i = -1; i < 80 / (long)sizeof(long); i++) {
+		unsigned long c;
+		if (get_user(c, (unsigned long __force_user *)sp+i)) {
+#ifdef CONFIG_X86_32
+			printk(KERN_CONT "???????? ");
+#else
+			if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
+				printk(KERN_CONT "???????? ???????? ");
+			else
+				printk(KERN_CONT "???????????????? ");
+#endif
+		} else {
+#ifdef CONFIG_X86_64
+			if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
+				printk(KERN_CONT "%08x ", (unsigned int)c);
+				printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
+			} else
+#endif
+				printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
+		}
+	}
+	printk("\n");
+}
+#endif
+
+/**
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
+{
+	long ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+	pax_open_kernel();
+	ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
+	pax_close_kernel();
+	pagefault_enable();
+	set_fs(old_fs);
+
+	return ret ? -EFAULT : 0;
+}
diff -ruNp linux-3.13.11/arch/x86/mm/gup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/gup.c
--- linux-3.13.11/arch/x86/mm/gup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/gup.c	2014-07-09 12:00:15.000000000 +0200
@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+	if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
 					(void __user *)start, len)))
 		return 0;
 
@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long st
 		goto slow_irqon;
 #endif
 
+	if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
+					(void __user *)start, len)))
+		return 0;
+
 	/*
 	 * XXX: batch / limit 'nr', to avoid large irq off latency
 	 * needs some instrumenting to determine the common sizes used by
diff -ruNp linux-3.13.11/arch/x86/mm/highmem_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/highmem_32.c
--- linux-3.13.11/arch/x86/mm/highmem_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/highmem_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
+
+	pax_open_kernel();
 	set_pte(kmap_pte-idx, mk_pte(page, prot));
+	pax_close_kernel();
+
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
diff -ruNp linux-3.13.11/arch/x86/mm/hugetlbpage.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/hugetlbpage.c
--- linux-3.13.11/arch/x86/mm/hugetlbpage.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/hugetlbpage.c	2014-07-09 12:00:15.000000000 +0200
@@ -92,23 +92,30 @@ int pmd_huge_support(void)
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long addr, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+		unsigned long pgoff, unsigned long flags, unsigned long offset)
 {
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info;
-
+	
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+		info.low_limit += current->mm->delta_mmap;
+#endif
+
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long addr0, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+		unsigned long pgoff, unsigned long flags, unsigned long offset)
 {
 	struct hstate *h = hstate_file(file);
 	struct vm_unmapped_area_info info;
@@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmappe
 	info.high_limit = current->mm->mmap_base;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmappe
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += current->mm->delta_mmap;
+#endif
+
 		info.high_limit = TASK_SIZE;
 		addr = vm_unmapped_area(&info);
 	}
@@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *f
 	struct hstate *h = hstate_file(file);
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long pax_task_size = TASK_SIZE;
+	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (len > TASK_SIZE)
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	pax_task_size -= PAGE_SIZE;
+
+	if (len > pax_task_size)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
@@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *f
 		return addr;
 	}
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
 		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-				pgoff, flags);
+				pgoff, flags, offset);
 	else
 		return hugetlb_get_unmapped_area_topdown(file, addr, len,
-				pgoff, flags);
+				pgoff, flags, offset);
 }
 
 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
diff -ruNp linux-3.13.11/arch/x86/mm/init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init.c
--- linux-3.13.11/arch/x86/mm/init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init.c	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,7 @@
 #include <linux/swap.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>	/* for max_low_pfn */
+#include <linux/tboot.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -17,6 +18,8 @@
 #include <asm/proto.h>
 #include <asm/dma.h>		/* for MAX_DMA_PFN */
 #include <asm/microcode.h>
+#include <asm/desc.h>
+#include <asm/bios_ebda.h>
 
 #include "mm_internal.h"
 
@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
 	early_ioremap_page_table_range_init();
 #endif
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+	clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+	load_cr3(get_cpu_pgd(0, kernel));
+#else
 	load_cr3(swapper_pg_dir);
+#endif
+
 	__flush_tlb_all();
 
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
  * mmio resources as well as potential bios/acpi data regions.
  */
+
+#ifdef CONFIG_GRKERNSEC_KMEM
+static unsigned int ebda_start __read_only;
+static unsigned int ebda_end __read_only;
+#endif
+
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
+#ifdef CONFIG_GRKERNSEC_KMEM
+	/* allow BDA */
+	if (!pagenr)
+		return 1;
+	/* allow EBDA */
+	if (pagenr >= ebda_start && pagenr < ebda_end)
 		return 1;
+	/* if tboot is in use, allow access to its hardcoded serial log range */
+	if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
+		return 1;
+#else
+	if (!pagenr)
+		return 1;
+#ifdef CONFIG_VM86
+	if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
+		return 1;
+#endif
+#endif
+
+	if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
+		return 1;
+#ifdef CONFIG_GRKERNSEC_KMEM
+	/* throw out everything else below 1MB */
+	if (pagenr <= 256)
+		return 0;
+#endif
 	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
 		return 0;
 	if (!page_is_ram(pagenr))
@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigne
 #endif
 }
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+static inline void gr_init_ebda(void)
+{
+	unsigned int ebda_addr;
+	unsigned int ebda_size = 0;
+
+	ebda_addr = get_bios_ebda();
+	if (ebda_addr) {
+		ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
+		ebda_size <<= 10;
+	}
+	if (ebda_addr && ebda_size) {
+		ebda_start = ebda_addr >> PAGE_SHIFT;
+		ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
+	} else {
+		ebda_start = 0x9f000 >> PAGE_SHIFT;
+		ebda_end = 0xa0000 >> PAGE_SHIFT;
+	}
+}
+#else
+static inline void gr_init_ebda(void) { }
+#endif
+
 void free_initmem(void)
 {
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+	/* PaX: limit KERNEL_CS to actual size */
+	unsigned long addr, limit;
+	struct desc_struct d;
+	int cpu;
+#else
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr, end;
+#endif
+#endif
+
+	gr_init_ebda();
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_32
+	limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
+	limit = (limit - 1UL) >> PAGE_SHIFT;
+
+	memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
+		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
+		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
+	}
+
+	/* PaX: make KERNEL_CS read-only */
+	addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
+	if (!paravirt_enabled())
+		set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
+/*
+		for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+		}
+*/
+#ifdef CONFIG_X86_PAE
+	set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
+/*
+	for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
+		pgd = pgd_offset_k(addr);
+		pud = pud_offset(pgd, addr);
+		pmd = pmd_offset(pud, addr);
+		set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+	}
+*/
+#endif
+
+#ifdef CONFIG_MODULES
+	set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
+#endif
+
+#else
+	/* PaX: make kernel code/rodata read-only, rest non-executable */
+	for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
+		pgd = pgd_offset_k(addr);
+		pud = pud_offset(pgd, addr);
+		pmd = pmd_offset(pud, addr);
+		if (!pmd_present(*pmd))
+			continue;
+		if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+		else
+			set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+	}
+
+	addr = (unsigned long)__va(__pa(__START_KERNEL_map));
+	end = addr + KERNEL_IMAGE_SIZE;
+	for (; addr < end; addr += PMD_SIZE) {
+		pgd = pgd_offset_k(addr);
+		pud = pud_offset(pgd, addr);
+		pmd = pmd_offset(pud, addr);
+		if (!pmd_present(*pmd))
+			continue;
+		if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+	}
+#endif
+
+	flush_tlb_all();
+#endif
+
 	free_init_pages("unused kernel",
 			(unsigned long)(&__init_begin),
 			(unsigned long)(&__init_end));
diff -ruNp linux-3.13.11/arch/x86/mm/init_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init_32.c
--- linux-3.13.11/arch/x86/mm/init_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void)
 bool __read_mostly __vmalloc_start_set = false;
 
 /*
- * Creates a middle page table and puts a pointer to it in the
- * given global directory entry. This only returns the gd entry
- * in non-PAE compilation mode, since the middle layer is folded.
- */
-static pmd_t * __init one_md_table_init(pgd_t *pgd)
-{
-	pud_t *pud;
-	pmd_t *pmd_table;
-
-#ifdef CONFIG_X86_PAE
-	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-		pmd_table = (pmd_t *)alloc_low_page();
-		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
-		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-		pud = pud_offset(pgd, 0);
-		BUG_ON(pmd_table != pmd_offset(pud, 0));
-
-		return pmd_table;
-	}
-#endif
-	pud = pud_offset(pgd, 0);
-	pmd_table = pmd_offset(pud, 0);
-
-	return pmd_table;
-}
-
-/*
  * Create a page table and place a pointer to it in a middle page
  * directory entry:
  */
@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_ini
 		pte_t *page_table = (pte_t *)alloc_low_page();
 
 		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+		set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
+#else
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+#endif
 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
 	}
 
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
+{
+	pud_t *pud;
+	pmd_t *pmd_table;
+
+	pud = pud_offset(pgd, 0);
+	pmd_table = pmd_offset(pud, 0);
+
+	return pmd_table;
+}
+
 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 {
 	int pgd_idx = pgd_index(vaddr);
@@ -208,6 +196,7 @@ page_table_range_init(unsigned long star
 	int pgd_idx, pmd_idx;
 	unsigned long vaddr;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 	unsigned long count = page_table_range_init_count(start, end);
@@ -222,8 +211,13 @@ page_table_range_init(unsigned long star
 	pgd = pgd_base + pgd_idx;
 
 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-		pmd = one_md_table_init(pgd);
-		pmd = pmd + pmd_index(vaddr);
+		pud = pud_offset(pgd, vaddr);
+		pmd = pmd_offset(pud, vaddr);
+
+#ifdef CONFIG_X86_PAE
+		paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
+#endif
+
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 							pmd++, pmd_idx++) {
 			pte = page_table_kmap_check(one_page_table_init(pmd),
@@ -235,11 +229,20 @@ page_table_range_init(unsigned long star
 	}
 }
 
-static inline int is_kernel_text(unsigned long addr)
+static inline int is_kernel_text(unsigned long start, unsigned long end)
 {
-	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
-		return 1;
-	return 0;
+	if ((start >= ktla_ktva((unsigned long)_etext) ||
+	     end <= ktla_ktva((unsigned long)_stext)) &&
+	    (start >= ktla_ktva((unsigned long)_einittext) ||
+	     end <= ktla_ktva((unsigned long)_sinittext)) &&
+
+#ifdef CONFIG_ACPI_SLEEP
+	    (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
+#endif
+
+	    (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
+		return 0;
+	return 1;
 }
 
 /*
@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned lo
 	unsigned long last_map_addr = end;
 	unsigned long start_pfn, end_pfn;
 	pgd_t *pgd_base = swapper_pg_dir;
-	int pgd_idx, pmd_idx, pte_ofs;
+	unsigned int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned pages_2m, pages_4k;
@@ -291,8 +295,13 @@ repeat:
 	pfn = start_pfn;
 	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
-	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-		pmd = one_md_table_init(pgd);
+	for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
+		pud = pud_offset(pgd, 0);
+		pmd = pmd_offset(pud, 0);
+
+#ifdef CONFIG_X86_PAE
+		paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
+#endif
 
 		if (pfn >= end_pfn)
 			continue;
@@ -304,14 +313,13 @@ repeat:
 #endif
 		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
 		     pmd++, pmd_idx++) {
-			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
+			unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
 
 			/*
 			 * Map with big pages if possible, otherwise
 			 * create normal page tables:
 			 */
 			if (use_pse) {
-				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 				/*
 				 * first pass will use the same initial
@@ -322,11 +330,7 @@ repeat:
 						 _PAGE_PSE);
 
 				pfn &= PMD_MASK >> PAGE_SHIFT;
-				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
-					PAGE_OFFSET + PAGE_SIZE-1;
-
-				if (is_kernel_text(addr) ||
-				    is_kernel_text(addr2))
+				if (is_kernel_text(address, address + PMD_SIZE))
 					prot = PAGE_KERNEL_LARGE_EXEC;
 
 				pages_2m++;
@@ -343,7 +347,7 @@ repeat:
 			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
 			pte += pte_ofs;
 			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
-			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
+			     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
 				pgprot_t prot = PAGE_KERNEL;
 				/*
 				 * first pass will use the same initial
@@ -351,7 +355,7 @@ repeat:
 				 */
 				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
 
-				if (is_kernel_text(addr))
+				if (is_kernel_text(address, address + PAGE_SIZE))
 					prot = PAGE_KERNEL_EXEC;
 
 				pages_4k++;
@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
 
 		pud = pud_offset(pgd, va);
 		pmd = pmd_offset(pud, va);
-		if (!pmd_present(*pmd))
+		if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
 			break;
 
 		/* should not be large page here */
@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_ran
 
 static void __init pagetable_init(void)
 {
-	pgd_t *pgd_base = swapper_pg_dir;
-
-	permanent_kmaps_init(pgd_base);
+	permanent_kmaps_init(swapper_pg_dir);
 }
 
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 /* user-defined highmem size */
@@ -787,10 +789,10 @@ void __init mem_init(void)
 		((unsigned long)&__init_end -
 		 (unsigned long)&__init_begin) >> 10,
 
-		(unsigned long)&_etext, (unsigned long)&_edata,
-		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+		(unsigned long)&_sdata, (unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
 
-		(unsigned long)&_text, (unsigned long)&_etext,
+		ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
 		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
 	/*
@@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
 	if (!kernel_set_to_readonly)
 		return;
 
+	start = ktla_ktva(start);
 	pr_debug("Set kernel text: %lx - %lx for read write\n",
 		 start, start+size);
 
@@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
 	if (!kernel_set_to_readonly)
 		return;
 
+	start = ktla_ktva(start);
 	pr_debug("Set kernel text: %lx - %lx for read only\n",
 		 start, start+size);
 
@@ -922,6 +926,7 @@ void mark_rodata_ro(void)
 	unsigned long start = PFN_ALIGN(_text);
 	unsigned long size = PFN_ALIGN(_etext) - start;
 
+	start = ktla_ktva(start);
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
 		size >> 10);
diff -ruNp linux-3.13.11/arch/x86/mm/init_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init_64.c
--- linux-3.13.11/arch/x86/mm/init_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/init_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpa
  * around without checking the pgd every time.
  */
 
-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 int force_personality32;
@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long star
 
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		unsigned long cpu;
+#else
 		struct page *page;
+#endif
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
 		spin_lock(&pgd_lock);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+			pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
+
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd)
+				       != pgd_page_vaddr(*pgd_ref));
+			pgd = pgd_offset_cpu(cpu, kernel, address);
+#else
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			spinlock_t *pgt_lock;
@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long star
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
+#endif
 
 			if (pgd_none(*pgd))
 				set_pgd(pgd, *pgd_ref);
@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long star
 				BUG_ON(pgd_page_vaddr(*pgd)
 				       != pgd_page_vaddr(*pgd_ref));
 
+#ifndef CONFIG_PAX_PER_CPU_PGD
 			spin_unlock(pgt_lock);
+#endif
+
 		}
 		spin_unlock(&pgd_lock);
 	}
@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsig
 {
 	if (pgd_none(*pgd)) {
 		pud_t *pud = (pud_t *)spp_getpage();
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate_kernel(&init_mm, pgd, pud);
 		if (pud != pud_offset(pgd, 0))
 			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 			       pud, pud_offset(pgd, 0));
@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsig
 {
 	if (pud_none(*pud)) {
 		pmd_t *pmd = (pmd_t *) spp_getpage();
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate_kernel(&init_mm, pud, pmd);
 		if (pmd != pmd_offset(pud, 0))
 			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 			       pmd, pmd_offset(pud, 0));
@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
 	pmd = fill_pmd(pud, vaddr);
 	pte = fill_pte(pmd, vaddr);
 
+	pax_open_kernel();
 	set_pte(pte, new_pte);
+	pax_close_kernel();
 
 	/*
 	 * It's enough to flush this one mapping.
@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(
 		pgd = pgd_offset_k((unsigned long)__va(phys));
 		if (pgd_none(*pgd)) {
 			pud = (pud_t *) spp_getpage();
-			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
-						_PAGE_USER));
+			set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
 		}
 		pud = pud_offset(pgd, (unsigned long)__va(phys));
 		if (pud_none(*pud)) {
 			pmd = (pmd_t *) spp_getpage();
-			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
-						_PAGE_USER));
+			set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
 		}
 		pmd = pmd_offset(pud, phys);
 		BUG_ON(!pmd_none(*pmd));
@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned
 					      prot);
 
 		spin_lock(&init_mm.page_table_lock);
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate_kernel(&init_mm, pud, pmd);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	__flush_tlb_all();
@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned lo
 						 page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate_kernel(&init_mm, pgd, pud);
 		spin_unlock(&init_mm.page_table_lock);
 		pgd_changed = true;
 	}
@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
 static struct vm_area_struct gate_vma = {
 	.vm_start	= VSYSCALL_START,
 	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
-	.vm_page_prot	= PAGE_READONLY_EXEC,
-	.vm_flags	= VM_READ | VM_EXEC
+	.vm_page_prot	= PAGE_READONLY,
+	.vm_flags	= VM_READ
 };
 
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long add
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
 		return "[vdso]";
 	if (vma == &gate_vma)
 		return "[vsyscall]";
diff -ruNp linux-3.13.11/arch/x86/mm/iomap_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/iomap_32.c
--- linux-3.13.11/arch/x86/mm/iomap_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/iomap_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+	pax_open_kernel();
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	pax_close_kernel();
+
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
diff -ruNp linux-3.13.11/arch/x86/mm/ioremap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/ioremap.c
--- linux-3.13.11/arch/x86/mm/ioremap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/ioremap.c	2014-07-09 12:00:15.000000000 +0200
@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
 	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
 		int is_ram = page_is_ram(pfn);
 
-		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+		if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
 			return NULL;
 		WARN_ON_ONCE(is_ram);
 	}
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
  *
  * Caller must ensure there is only one unmapping for the same pointer.
  */
-void iounmap(volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
 	struct vm_struct *p, *o;
 
@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long ph
 
 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
 	if (page_is_ram(start >> PAGE_SHIFT))
+#ifdef CONFIG_HIGHMEM
+	if ((start >> PAGE_SHIFT) < max_low_pfn)
+#endif
 		return __va(phys);
 
 	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long ph
 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 	if (page_is_ram(phys >> PAGE_SHIFT))
+#ifdef CONFIG_HIGHMEM
+	if ((phys >> PAGE_SHIFT) < max_low_pfn)
+#endif
 		return;
 
 	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_se
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
 		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
 
 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	memset(bm_pte, 0, sizeof(bm_pte));
-	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	pmd_populate_user(&init_mm, pmd, bm_pte);
 
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
diff -ruNp linux-3.13.11/arch/x86/mm/kmemcheck/kmemcheck.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/kmemcheck/kmemcheck.c
--- linux-3.13.11/arch/x86/mm/kmemcheck/kmemcheck.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/kmemcheck/kmemcheck.c	2014-07-09 12:00:15.000000000 +0200
@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
 	 * memory (e.g. tracked pages)? For now, we need this to avoid
 	 * invoking kmemcheck for PnP BIOS calls.
 	 */
-	if (regs->flags & X86_VM_MASK)
+	if (v8086_mode(regs))
 		return false;
-	if (regs->cs != __KERNEL_CS)
+	if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
 		return false;
 
 	pte = kmemcheck_pte_lookup(address);
diff -ruNp linux-3.13.11/arch/x86/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/mmap.c
--- linux-3.13.11/arch/x86/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size
  * Leave an at least ~128 MB hole with possible stack randomization.
  */
 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MAX_GAP (pax_task_size/6*5)
 
 static int mmap_is_legacy(void)
 {
@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
 	return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(struct mm_struct *mm)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
+	unsigned long pax_task_size = TASK_SIZE;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
 
 	if (gap < MIN_GAP)
 		gap = MIN_GAP;
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+	return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
 }
 
 /*
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(struct mm_struct *mm)
 {
-	if (mmap_is_ia32())
+	if (mmap_is_ia32()) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (mm->pax_flags & MF_PAX_SEGMEXEC)
+			return SEGMEXEC_TASK_UNMAPPED_BASE;
+		else
+#endif
+
 		return TASK_UNMAPPED_BASE;
-	else
+	} else
 		return TASK_UNMAPPED_BASE + mmap_rnd();
 }
 
@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(vo
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-	mm->mmap_legacy_base = mmap_legacy_base();
-	mm->mmap_base = mmap_base();
+	mm->mmap_legacy_base = mmap_legacy_base(mm);
+	mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP) {
+		mm->mmap_legacy_base += mm->delta_mmap;
+		mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+	}
+#endif
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mm->mmap_legacy_base;
diff -ruNp linux-3.13.11/arch/x86/mm/mmio-mod.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/mmio-mod.c
--- linux-3.13.11/arch/x86/mm/mmio-mod.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/mmio-mod.c	2014-07-09 12:00:15.000000000 +0200
@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, s
 		break;
 	default:
 		{
-			unsigned char *ip = (unsigned char *)instptr;
+			unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
 			my_trace->opcode = MMIO_UNKNOWN_OP;
 			my_trace->width = 0;
 			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p,
 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
 							void __iomem *addr)
 {
-	static atomic_t next_id;
+	static atomic_unchecked_t next_id;
 	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
 	/* These are page-unaligned. */
 	struct mmiotrace_map map = {
@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_
 			.private = trace
 		},
 		.phys = offset,
-		.id = atomic_inc_return(&next_id)
+		.id = atomic_inc_return_unchecked(&next_id)
 	};
 	map.map_id = trace->id;
 
@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t o
 	ioremap_trace_core(offset, size, addr);
 }
 
-static void iounmap_trace_core(volatile void __iomem *addr)
+static void iounmap_trace_core(const volatile void __iomem *addr)
 {
 	struct mmiotrace_map map = {
 		.phys = 0,
@@ -328,7 +328,7 @@ not_enabled:
 	}
 }
 
-void mmiotrace_iounmap(volatile void __iomem *addr)
+void mmiotrace_iounmap(const volatile void __iomem *addr)
 {
 	might_sleep();
 	if (is_enabled()) /* recheck and proper locking in *_core() */
diff -ruNp linux-3.13.11/arch/x86/mm/numa.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/numa.c
--- linux-3.13.11/arch/x86/mm/numa.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/numa.c	2014-07-09 12:00:15.000000000 +0200
@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_me
 	return true;
 }
 
-static int __init numa_register_memblks(struct numa_meminfo *mi)
+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
 {
 	unsigned long uninitialized_var(pfn_align);
 	int i, nid;
diff -ruNp linux-3.13.11/arch/x86/mm/pageattr-test.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pageattr-test.c
--- linux-3.13.11/arch/x86/mm/pageattr-test.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pageattr-test.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ enum {
 
 static int pte_testbit(pte_t pte)
 {
-	return pte_flags(pte) & _PAGE_UNUSED1;
+	return pte_flags(pte) & _PAGE_CPA_TEST;
 }
 
 struct split_state {
diff -ruNp linux-3.13.11/arch/x86/mm/pageattr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pageattr.c
--- linux-3.13.11/arch/x86/mm/pageattr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pageattr.c	2014-07-09 12:00:15.000000000 +0200
@@ -261,7 +261,7 @@ static inline pgprot_t static_protection
 	 */
 #ifdef CONFIG_PCI_BIOS
 	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_NX;
+		pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
 #endif
 
 	/*
@@ -269,9 +269,10 @@ static inline pgprot_t static_protection
 	 * Does not cover __inittext since that is gone later on. On
 	 * 64bit we do not enforce !NX on the low mapping
 	 */
-	if (within(address, (unsigned long)_text, (unsigned long)_etext))
-		pgprot_val(forbidden) |= _PAGE_NX;
+	if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
+		pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
 
+#ifdef CONFIG_DEBUG_RODATA
 	/*
 	 * The .rodata section needs to be read-only. Using the pfn
 	 * catches all aliases.
@@ -279,6 +280,7 @@ static inline pgprot_t static_protection
 	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
 		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
+#endif
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -317,6 +319,13 @@ static inline pgprot_t static_protection
 	}
 #endif
 
+#ifdef CONFIG_PAX_KERNEXEC
+	if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
+		pgprot_val(forbidden) |= _PAGE_RW;
+		pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
+	}
+#endif
+
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
 
 	return prot;
@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
 	/* change init_mm */
+	pax_open_kernel();
 	set_pte_atomic(kpte, pte);
+
 #ifdef CONFIG_X86_32
 	if (!SHARED_KERNEL_PMD) {
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		unsigned long cpu;
+#else
 		struct page *page;
+#endif
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+		for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
+			pgd_t *pgd = get_cpu_pgd(cpu, kernel);
+#else
 		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
+			pgd_t *pgd = (pgd_t *)page_address(page);
+#endif
+
 			pud_t *pud;
 			pmd_t *pmd;
 
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			pgd += pgd_index(address);
 			pud = pud_offset(pgd, address);
 			pmd = pmd_offset(pud, address);
 			set_pte_atomic((pte_t *)pmd, pte);
 		}
 	}
 #endif
+	pax_close_kernel();
 }
 
 static int
diff -ruNp linux-3.13.11/arch/x86/mm/pat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pat.c
--- linux-3.13.11/arch/x86/mm/pat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pat.c	2014-07-09 12:00:15.000000000 +0200
@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
 
 	if (!entry) {
 		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-		       current->comm, current->pid, start, end - 1);
+			current->comm, task_pid_nr(current), start, end - 1);
 		return -EINVAL;
 	}
 
@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsig
 
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
-			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
-				current->comm, from, to - 1);
+			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
+				current->comm, from, to - 1, cursor);
 			return 0;
 		}
 		cursor += PAGE_SIZE;
@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, un
 	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
-			current->comm, current->pid,
+			current->comm, task_pid_nr(current),
 			cattr_name(flags),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr,
 		flags = lookup_memtype(paddr);
 		if (want_flags != flags) {
 			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
-				current->comm, current->pid,
+				current->comm, task_pid_nr(current),
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr,
 			free_memtype(paddr, paddr + size);
 			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
 				" for [mem %#010Lx-%#010Lx], got %s\n",
-				current->comm, current->pid,
+				current->comm, task_pid_nr(current),
 				cattr_name(want_flags),
 				(unsigned long long)paddr,
 				(unsigned long long)(paddr + size - 1),
diff -ruNp linux-3.13.11/arch/x86/mm/pat_rbtree.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pat_rbtree.c
--- linux-3.13.11/arch/x86/mm/pat_rbtree.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pat_rbtree.c	2014-07-09 12:00:15.000000000 +0200
@@ -160,7 +160,7 @@ success:
 
 failure:
 	printk(KERN_INFO "%s:%d conflicting memory types "
-		"%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+		"%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
 		end, cattr_name(found_type), cattr_name(match->type));
 	return -EBUSY;
 }
diff -ruNp linux-3.13.11/arch/x86/mm/pf_in.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pf_in.c
--- linux-3.13.11/arch/x86/mm/pf_in.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pf_in.c	2014-07-09 12:00:15.000000000 +0200
@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
 	int i;
 	enum reason_type rv = OTHERS;
 
-	p = (unsigned char *)ins_addr;
+	p = (unsigned char *)ktla_ktva(ins_addr);
 	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
 	struct prefix_bits prf;
 	int i;
 
-	p = (unsigned char *)ins_addr;
+	p = (unsigned char *)ktla_ktva(ins_addr);
 	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
 	struct prefix_bits prf;
 	int i;
 
-	p = (unsigned char *)ins_addr;
+	p = (unsigned char *)ktla_ktva(ins_addr);
 	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 
@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned l
 	struct prefix_bits prf;
 	int i;
 
-	p = (unsigned char *)ins_addr;
+	p = (unsigned char *)ktla_ktva(ins_addr);
 	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 	for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned l
 	struct prefix_bits prf;
 	int i;
 
-	p = (unsigned char *)ins_addr;
+	p = (unsigned char *)ktla_ktva(ins_addr);
 	p += skip_prefix(p, &prf);
 	p += get_opcode(p, &opcode);
 	for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
diff -ruNp linux-3.13.11/arch/x86/mm/pgtable.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pgtable.c
--- linux-3.13.11/arch/x86/mm/pgtable.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pgtable.c	2014-07-09 12:00:15.000000000 +0200
@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *p
 	list_del(&page->lru);
 }
 
-#define UNSHARED_PTRS_PER_PGD				\
-	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
 
+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
+{
+	unsigned int count = USER_PGD_PTRS;
+
+	if (!pax_user_shadow_base)
+		return;
+
+	while (count--)
+		*dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
+}
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
+{
+	unsigned int count = USER_PGD_PTRS;
+
+	while (count--) {
+		pgd_t pgd;
 
+#ifdef CONFIG_X86_64
+		pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
+#else
+		pgd = *src++;
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+		pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
+#endif
+
+		*dst++ = pgd;
+	}
+
+}
+#endif
+
+#ifdef CONFIG_X86_64
+#define pxd_t				pud_t
+#define pyd_t				pgd_t
+#define paravirt_release_pxd(pfn)	paravirt_release_pud(pfn)
+#define pgtable_pxd_page_ctor(page)	true
+#define pgtable_pxd_page_dtor(page)
+#define pxd_free(mm, pud)		pud_free((mm), (pud))
+#define pyd_populate(mm, pgd, pud)	pgd_populate((mm), (pgd), (pud))
+#define pyd_offset(mm, address)		pgd_offset((mm), (address))
+#define PYD_SIZE			PGDIR_SIZE
+#else
+#define pxd_t				pmd_t
+#define pyd_t				pud_t
+#define paravirt_release_pxd(pfn)	paravirt_release_pmd(pfn)
+#define pgtable_pxd_page_ctor(page)	pgtable_pmd_page_ctor(page)
+#define pgtable_pxd_page_dtor(page)	pgtable_pmd_page_dtor(page)
+#define pxd_free(mm, pud)		pmd_free((mm), (pud))
+#define pyd_populate(mm, pgd, pud)	pud_populate((mm), (pgd), (pud))
+#define pyd_offset(mm, address)		pud_offset((mm), (address))
+#define PYD_SIZE			PUD_SIZE
+#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
+static inline void pgd_dtor(pgd_t *pgd) {}
+#else
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
 {
 	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
 	pgd_list_del(pgd);
 	spin_unlock(&pgd_lock);
 }
+#endif
 
 /*
  * List of all pgd's needed for non-PAE so it can invalidate entries
@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
  * -- nyc
  */
 
-#ifdef CONFIG_X86_PAE
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
 /*
  * In PAE mode, we need to do a cr3 reload (=tlb flush) when
  * updating the top-level pagetable entries to guarantee the
@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
  * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
  * and initialize the kernel pmds here.
  */
-#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
+#define PREALLOCATED_PXDS	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm,
 	 */
 	flush_tlb_mm(mm);
 }
+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
+#define PREALLOCATED_PXDS	USER_PGD_PTRS
 #else  /* !CONFIG_X86_PAE */
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
-#define PREALLOCATED_PMDS	0
+#define PREALLOCATED_PXDS	0
 
 #endif	/* CONFIG_X86_PAE */
 
-static void free_pmds(pmd_t *pmds[])
+static void free_pxds(pxd_t *pxds[])
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++)
-		if (pmds[i]) {
-			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
-			free_page((unsigned long)pmds[i]);
+	for(i = 0; i < PREALLOCATED_PXDS; i++)
+		if (pxds[i]) {
+			pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
+			free_page((unsigned long)pxds[i]);
 		}
 }
 
-static int preallocate_pmds(pmd_t *pmds[])
+static int preallocate_pxds(pxd_t *pxds[])
 {
 	int i;
 	bool failed = false;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
-		if (!pmd)
+	for(i = 0; i < PREALLOCATED_PXDS; i++) {
+		pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
+		if (!pxd)
 			failed = true;
-		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
-			free_page((unsigned long)pmd);
-			pmd = NULL;
+		if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
+			free_page((unsigned long)pxd);
+			pxd = NULL;
 			failed = true;
 		}
-		pmds[i] = pmd;
+		pxds[i] = pxd;
 	}
 
 	if (failed) {
-		free_pmds(pmds);
+		free_pxds(pxds);
 		return -ENOMEM;
 	}
 
@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[
  * preallocate which never got a corresponding vma will need to be
  * freed manually.
  */
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
 {
 	int i;
 
-	for(i = 0; i < PREALLOCATED_PMDS; i++) {
+	for(i = 0; i < PREALLOCATED_PXDS; i++) {
 		pgd_t pgd = pgdp[i];
 
 		if (pgd_val(pgd) != 0) {
-			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+			pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
 
-			pgdp[i] = native_make_pgd(0);
+			set_pgd(pgdp + i, native_make_pgd(0));
 
-			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
-			pmd_free(mm, pmd);
+			paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
+			pxd_free(mm, pxd);
 		}
 	}
 }
 
-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
 {
-	pud_t *pud;
+	pyd_t *pyd;
 	int i;
 
-	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+	if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
 		return;
 
-	pud = pud_offset(pgd, 0);
-
-	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
-		pmd_t *pmd = pmds[i];
+#ifdef CONFIG_X86_64
+	pyd = pyd_offset(mm, 0L);
+#else
+	pyd = pyd_offset(pgd, 0L);
+#endif
 
+	for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
+		pxd_t *pxd = pxds[i];
 		if (i >= KERNEL_PGD_BOUNDARY)
-			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
-			       sizeof(pmd_t) * PTRS_PER_PMD);
+			memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+			       sizeof(pxd_t) * PTRS_PER_PMD);
 
-		pud_populate(mm, pud, pmd);
+		pyd_populate(mm, pyd, pxd);
 	}
 }
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
-	pmd_t *pmds[PREALLOCATED_PMDS];
+	pxd_t *pxds[PREALLOCATED_PXDS];
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	mm->pgd = pgd;
 
-	if (preallocate_pmds(pmds) != 0)
+	if (preallocate_pxds(pxds) != 0)
 		goto out_free_pgd;
 
 	if (paravirt_pgd_alloc(mm) != 0)
-		goto out_free_pmds;
+		goto out_free_pxds;
 
 	/*
 	 * Make sure that pre-populating the pmds is atomic with
@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	spin_lock(&pgd_lock);
 
 	pgd_ctor(mm, pgd);
-	pgd_prepopulate_pmd(mm, pgd, pmds);
+	pgd_prepopulate_pxd(mm, pgd, pxds);
 
 	spin_unlock(&pgd_lock);
 
 	return pgd;
 
-out_free_pmds:
-	free_pmds(pmds);
+out_free_pxds:
+	free_pxds(pxds);
 out_free_pgd:
 	free_page((unsigned long)pgd);
 out:
@@ -313,7 +380,7 @@ out:
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pgd_mop_up_pmds(mm, pgd);
+	pgd_mop_up_pxds(mm, pgd);
 	pgd_dtor(pgd);
 	paravirt_pgd_free(mm, pgd);
 	free_page((unsigned long)pgd);
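The new __shadow_user_pgds()/__clone_user_pgds() helpers above boil down to per-entry flag juggling on the user half of the PGD. Below is a rough stand-alone sketch of that arithmetic, not part of the patch: the demo_* names and the sample entry are invented for illustration, only the flag bit positions are the usual x86 ones.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_PRESENT	(1ULL << 0)
#define DEMO_PAGE_USER		(1ULL << 2)
#define DEMO_PAGE_NX		(1ULL << 63)

/* kernel-side shadow copy: never executable, never reachable as user */
static uint64_t demo_shadow_entry(uint64_t e)
{
	return (e | DEMO_PAGE_NX) & ~DEMO_PAGE_USER;
}

/* per-CPU user copy: marked user-accessible; with the default clone_pgd_mask
 * of ~_PAGE_PRESENT seen above, the cloned entry comes out non-present */
static uint64_t demo_clone_entry(uint64_t e, uint64_t clone_mask)
{
	return (e | DEMO_PAGE_USER) & clone_mask;
}

int main(void)
{
	uint64_t pgd = 0x1000ULL | DEMO_PAGE_PRESENT | DEMO_PAGE_USER;

	printf("shadow: %#llx\n", (unsigned long long)demo_shadow_entry(pgd));
	printf("clone : %#llx\n", (unsigned long long)demo_clone_entry(pgd, ~DEMO_PAGE_PRESENT));
	return 0;
}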
diff -ruNp linux-3.13.11/arch/x86/mm/pgtable_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pgtable_32.c
--- linux-3.13.11/arch/x86/mm/pgtable_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/pgtable_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr,
 		return;
 	}
 	pte = pte_offset_kernel(pmd, vaddr);
+
+	pax_open_kernel();
 	if (pte_val(pteval))
 		set_pte_at(&init_mm, vaddr, pte, pteval);
 	else
 		pte_clear(&init_mm, vaddr, pte);
+	pax_close_kernel();
 
 	/*
 	 * It's enough to flush this one mapping.
diff -ruNp linux-3.13.11/arch/x86/mm/physaddr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/physaddr.c
--- linux-3.13.11/arch/x86/mm/physaddr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/physaddr.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,7 +10,7 @@
 #ifdef CONFIG_X86_64
 
 #ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
 {
 	unsigned long y = x - __START_KERNEL_map;
 
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
 #else
 
 #ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
 {
 	unsigned long phys_addr = x - PAGE_OFFSET;
 	/* VMALLOC_* aren't constants  */
diff -ruNp linux-3.13.11/arch/x86/mm/setup_nx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/setup_nx.c
--- linux-3.13.11/arch/x86/mm/setup_nx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/setup_nx.c	2014-07-09 12:00:15.000000000 +0200
@@ -5,8 +5,10 @@
 #include <asm/pgtable.h>
 #include <asm/proto.h>
 
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static int disable_nx;
 
+#ifndef CONFIG_PAX_PAGEEXEC
 /*
  * noexec = on|off
  *
@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
 	return 0;
 }
 early_param("noexec", noexec_setup);
+#endif
+
+#endif
 
 void x86_configure_nx(void)
 {
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 	if (cpu_has_nx && !disable_nx)
 		__supported_pte_mask |= _PAGE_NX;
 	else
+#endif
 		__supported_pte_mask &= ~_PAGE_NX;
 }
 
diff -ruNp linux-3.13.11/arch/x86/mm/tlb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/tlb.c
--- linux-3.13.11/arch/x86/mm/tlb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/tlb.c	2014-07-09 12:00:15.000000000 +0200
@@ -48,7 +48,11 @@ void leave_mm(int cpu)
 		BUG();
 	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
 		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
+
+#ifndef CONFIG_PAX_PER_CPU_PGD
 		load_cr3(swapper_pg_dir);
+#endif
+
 	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
diff -ruNp linux-3.13.11/arch/x86/mm/uderef_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/uderef_64.c
--- linux-3.13.11/arch/x86/mm/uderef_64.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/mm/uderef_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,37 @@
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+/* PaX: due to the special call convention these functions must
+ * - remain leaf functions under all configurations,
+ * - never be called directly, only dereferenced from the wrappers.
+ */
+void __pax_open_userland(void)
+{
+	unsigned int cpu;
+
+	if (unlikely(!segment_eq(get_fs(), USER_DS)))
+		return;
+
+	cpu = raw_get_cpu();
+	BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
+	write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
+	raw_put_cpu_no_resched();
+}
+EXPORT_SYMBOL(__pax_open_userland);
+
+void __pax_close_userland(void)
+{
+	unsigned int cpu;
+
+	if (unlikely(!segment_eq(get_fs(), USER_DS)))
+		return;
+
+	cpu = raw_get_cpu();
+	BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
+	write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
+	raw_put_cpu_no_resched();
+}
+EXPORT_SYMBOL(__pax_close_userland);
+#endif
diff -ruNp linux-3.13.11/arch/x86/net/bpf_jit.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/net/bpf_jit.S
--- linux-3.13.11/arch/x86/net/bpf_jit.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/net/bpf_jit.S	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 
 /*
  * Calling convention :
@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
 	jle	bpf_slow_path_word
 	mov     (SKBDATA,%rsi),%eax
 	bswap   %eax  			/* ntohl() */
+	pax_force_retaddr
 	ret
 
 sk_load_half:
@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
 	jle	bpf_slow_path_half
 	movzwl	(SKBDATA,%rsi),%eax
 	rol	$8,%ax			# ntohs()
+	pax_force_retaddr
 	ret
 
 sk_load_byte:
@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
 	cmp	%esi,%r9d   /* if (offset >= hlen) goto bpf_slow_path_byte */
 	jle	bpf_slow_path_byte
 	movzbl	(SKBDATA,%rsi),%eax
+	pax_force_retaddr
 	ret
 
 /**
@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
 	movzbl	(SKBDATA,%rsi),%ebx
 	and	$15,%bl
 	shl	$2,%bl
+	pax_force_retaddr
 	ret
 
 /* rsi contains offset and can be scratched */
@@ -109,6 +114,7 @@ bpf_slow_path_word:
 	js	bpf_error
 	mov	-12(%rbp),%eax
 	bswap	%eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_half:
@@ -117,12 +123,14 @@ bpf_slow_path_half:
 	mov	-12(%rbp),%ax
 	rol	$8,%ax
 	movzwl	%ax,%eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_byte:
 	bpf_slow_path_common(1)
 	js	bpf_error
 	movzbl	-12(%rbp),%eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_byte_msh:
@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
 	and	$15,%al
 	shl	$2,%al
 	xchg	%eax,%ebx
+	pax_force_retaddr
 	ret
 
 #define sk_negative_common(SIZE)				\
@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
 	sk_negative_common(4)
 	mov	(%rax), %eax
 	bswap	%eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_half_neg:
@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
 	mov	(%rax),%ax
 	rol	$8,%ax
 	movzwl	%ax,%eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_byte_neg:
@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
 	.globl	sk_load_byte_negative_offset
 	sk_negative_common(1)
 	movzbl	(%rax), %eax
+	pax_force_retaddr
 	ret
 
 bpf_slow_path_byte_msh_neg:
@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
 	and	$15,%al
 	shl	$2,%al
 	xchg	%eax,%ebx
+	pax_force_retaddr
 	ret
 
 bpf_error:
@@ -197,4 +210,5 @@ bpf_error:
 	xor		%eax,%eax
 	mov		-8(%rbp),%rbx
 	leaveq
+	pax_force_retaddr
 	ret
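The bpf_jit_comp.c hunks just below add CONFIG_GRKERNSEC_JIT_HARDEN, whose DILUTE_CONST_SEQUENCE never lets an attacker-supplied 32-bit immediate K appear verbatim in the emitted code: the JIT loads a per-instruction random key and xors it with key ^ K. A minimal sketch of what the emitted pair computes at run time follows; the demo function name and values are invented for the example and are not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t demo_dilute_const(uint32_t K, uint32_t randkey)
{
	uint32_t ecx = randkey;		/* mov ecx, randkey       */
	ecx ^= randkey ^ K;		/* xor ecx, randkey ^ K   */
	return ecx;			/* ecx now holds K        */
}

int main(void)
{
	uint32_t K = 0x0f05c3c9;	/* pretend attacker-chosen immediate */
	uint32_t randkey;

	srand((unsigned int)time(NULL));
	randkey = (uint32_t)rand();

	printf("recovered %#x (expected %#x)\n", demo_dilute_const(K, randkey), K);
	return 0;
}

The point is that the byte pattern of K itself never sits in the executable JIT buffer, only randkey and randkey ^ K do.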
diff -ruNp linux-3.13.11/arch/x86/net/bpf_jit_comp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/net/bpf_jit_comp.c
--- linux-3.13.11/arch/x86/net/bpf_jit_comp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/net/bpf_jit_comp.c	2014-07-09 12:00:15.000000000 +0200
@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32
 	return ptr + len;
 }
 
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+#define MAX_INSTR_CODE_SIZE 96
+#else
+#define MAX_INSTR_CODE_SIZE 64
+#endif
+
 #define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)
 
 #define EMIT1(b1)		EMIT(b1, 1)
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
+
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+/* original constant will appear in ecx */
+#define DILUTE_CONST_SEQUENCE(_off, _key) 	\
+do {						\
+	/* mov ecx, randkey */			\
+	EMIT1(0xb9);				\
+	EMIT(_key, 4);				\
+	/* xor ecx, randkey ^ off */		\
+	EMIT2(0x81, 0xf1);			\
+	EMIT((_key) ^ (_off), 4);		\
+} while (0)
+
+#define EMIT1_off32(b1, _off)								\
+do { 											\
+	switch (b1) {									\
+		case 0x05: /* add eax, imm32 */						\
+		case 0x2d: /* sub eax, imm32 */						\
+		case 0x25: /* and eax, imm32 */						\
+		case 0x0d: /* or eax, imm32 */						\
+		case 0xb8: /* mov eax, imm32 */						\
+		case 0x35: /* xor eax, imm32 */						\
+		case 0x3d: /* cmp eax, imm32 */						\
+		case 0xa9: /* test eax, imm32 */					\
+			DILUTE_CONST_SEQUENCE(_off, randkey);				\
+			EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
+			break;								\
+		case 0xbb: /* mov ebx, imm32 */						\
+			DILUTE_CONST_SEQUENCE(_off, randkey);				\
+			/* mov ebx, ecx */						\
+			EMIT2(0x89, 0xcb);						\
+			break;								\
+		case 0xbe: /* mov esi, imm32 */						\
+			DILUTE_CONST_SEQUENCE(_off, randkey);				\
+			/* mov esi, ecx	*/						\
+			EMIT2(0x89, 0xce);						\
+			break;								\
+		case 0xe8: /* call rel imm32, always to known funcs */			\
+			EMIT1(b1);							\
+			EMIT(_off, 4);							\
+			break;								\
+		case 0xe9: /* jmp rel imm32 */						\
+			EMIT1(b1);							\
+			EMIT(_off, 4);							\
+			/* prevent fall-through, we're not called if off = 0 */		\
+			EMIT(0xcccccccc, 4);						\
+			EMIT(0xcccccccc, 4);						\
+			break;								\
+		default:								\
+			BUILD_BUG();							\
+	}										\
+} while (0)
+
+#define EMIT2_off32(b1, b2, _off) 					\
+do { 									\
+	if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */	\
+		EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */		\
+		EMIT(randkey, 4);					\
+		EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */	\
+		EMIT((_off) - randkey, 4);				\
+	} else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
+		DILUTE_CONST_SEQUENCE(_off, randkey);			\
+		/* imul eax, ecx */					\
+		EMIT3(0x0f, 0xaf, 0xc1);				\
+	} else {							\
+		BUILD_BUG();						\
+	}								\
+} while (0)
+#else
 #define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4);} while (0)
+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
+#endif
 
 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
@@ -91,6 +168,24 @@ do {									\
 #define X86_JBE 0x76
 #define X86_JA  0x77
 
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+#define APPEND_FLOW_VERIFY()	\
+do {				\
+	/* mov ecx, randkey */	\
+	EMIT1(0xb9);		\
+	EMIT(randkey, 4);	\
+	/* cmp ecx, randkey */	\
+	EMIT2(0x81, 0xf9);	\
+	EMIT(randkey, 4);	\
+	/* jz after 8 int 3s */ \
+	EMIT2(0x74, 0x08);	\
+	EMIT(0xcccccccc, 4);	\
+	EMIT(0xcccccccc, 4);	\
+} while (0)
+#else
+#define APPEND_FLOW_VERIFY() do { } while (0)
+#endif
+
 #define EMIT_COND_JMP(op, offset)				\
 do {								\
 	if (is_near(offset))					\
@@ -98,6 +193,7 @@ do {								\
 	else {							\
 		EMIT2(0x0f, op + 0x10);				\
 		EMIT(offset, 4); /* jxx .+off32 */		\
+		APPEND_FLOW_VERIFY();				\
 	}							\
 } while (0)
 
@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
 	return -1;
 }
 
-struct bpf_binary_header {
-	unsigned int	pages;
-	/* Note : for security reasons, bpf code will follow a randomly
-	 * sized amount of int3 instructions
-	 */
-	u8		image[];
-};
-
-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
+/* Note : for security reasons, bpf code will follow a randomly
+ * sized amount of int3 instructions
+ */
+static u8 *bpf_alloc_binary(unsigned int proglen,
 						  u8 **image_ptr)
 {
 	unsigned int sz, hole;
-	struct bpf_binary_header *header;
+	u8 *header;
 
 	/* Most of BPF filters are really small,
 	 * but if some of them fill a page, allow at least
 	 * 128 extra bytes to insert a random section of int3
 	 */
-	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
-	header = module_alloc(sz);
+	sz = round_up(proglen + 128, PAGE_SIZE);
+	header = module_alloc_exec(sz);
 	if (!header)
 		return NULL;
 
+	pax_open_kernel();
 	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
+	pax_close_kernel();
 
-	header->pages = sz / PAGE_SIZE;
-	hole = sz - (proglen + sizeof(*header));
+	hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
 
 	/* insert a random number of int3 instructions before BPF code */
-	*image_ptr = &header->image[prandom_u32() % hole];
+	*image_ptr = &header[prandom_u32() % hole];
 	return header;
 }
 
 void bpf_jit_compile(struct sk_filter *fp)
 {
-	u8 temp[64];
+	u8 temp[MAX_INSTR_CODE_SIZE];
 	u8 *prog;
 	unsigned int proglen, oldproglen = 0;
 	int ilen, i;
 	int t_offset, f_offset;
 	u8 t_op, f_op, seen = 0, pass;
 	u8 *image = NULL;
-	struct bpf_binary_header *header = NULL;
+	u8 *header = NULL;
 	u8 *func;
 	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
 	unsigned int cleanup_addr; /* epilogue code offset */
 	unsigned int *addrs;
 	const struct sock_filter *filter = fp->insns;
 	int flen = fp->len;
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+	unsigned int randkey;
+#endif
 
 	if (!bpf_jit_enable)
 		return;
@@ -203,10 +298,10 @@ void bpf_jit_compile(struct sk_filter *f
 		return;
 
 	/* Before first pass, make a rough estimation of addrs[]
-	 * each bpf instruction is translated to less than 64 bytes
+	 * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
 	 */
 	for (proglen = 0, i = 0; i < flen; i++) {
-		proglen += 64;
+		proglen += MAX_INSTR_CODE_SIZE;
 		addrs[i] = proglen;
 	}
 	cleanup_addr = proglen; /* epilogue address */
@@ -285,6 +380,10 @@ void bpf_jit_compile(struct sk_filter *f
 		for (i = 0; i < flen; i++) {
 			unsigned int K = filter[i].k;
 
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+			randkey = prandom_u32();
+#endif
+
 			switch (filter[i].code) {
 			case BPF_S_ALU_ADD_X: /* A += X; */
 				seen |= SEEN_XREG;
@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *f
 			case BPF_S_ALU_MUL_K: /* A *= K */
 				if (is_imm8(K))
 					EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
-				else {
-					EMIT2(0x69, 0xc0);		/* imul imm32,%eax */
-					EMIT(K, 4);
-				}
+				else
+					EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
 				break;
 			case BPF_S_ALU_DIV_X: /* A /= X; */
 				seen |= SEEN_XREG;
@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *f
 					break;
 				}
 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+				DILUTE_CONST_SEQUENCE(K, randkey);
+#else
 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
+#endif
 				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
 				break;
@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *f
 				if (K == 1)
 					break;
 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
+				DILUTE_CONST_SEQUENCE(K, randkey);
+#else
 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
+#endif
 				EMIT2(0xf7, 0xf1);	/* div %ecx */
 				break;
 			case BPF_S_ALU_AND_X:
@@ -643,8 +748,7 @@ common_load_ind:		seen |= SEEN_DATAREF |
 					if (is_imm8(K)) {
 						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
 					} else {
-						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
-						EMIT(K, 4);
+						EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
 					}
 				} else {
 					EMIT2(0x89,0xde); /* mov %ebx,%esi */
@@ -734,10 +838,12 @@ cond_branch:			f_offset = addrs[i + filt
 				if (unlikely(proglen + ilen > oldproglen)) {
 					pr_err("bpb_jit_compile fatal error\n");
 					kfree(addrs);
-					module_free(NULL, header);
+					module_free_exec(NULL, image);
 					return;
 				}
+				pax_open_kernel();
 				memcpy(image + proglen, temp, ilen);
+				pax_close_kernel();
 			}
 			proglen += ilen;
 			addrs[i] = proglen;
@@ -770,7 +876,6 @@ cond_branch:			f_offset = addrs[i + filt
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
-		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *)image;
 	}
 out:
@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct
 {
 	struct sk_filter *fp = container_of(work, struct sk_filter, work);
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-	struct bpf_binary_header *header = (void *)addr;
 
-	set_memory_rw(addr, header->pages);
-	module_free(NULL, header);
+	set_memory_rw(addr, 1);
+	module_free_exec(NULL, (void *)addr);
 	kfree(fp);
 }
 
diff -ruNp linux-3.13.11/arch/x86/oprofile/backtrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/backtrace.c
--- linux-3.13.11/arch/x86/oprofile/backtrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/backtrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_fram
 	struct stack_frame_ia32 *fp;
 	unsigned long bytes;
 
-	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+	bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
 	if (bytes != 0)
 		return NULL;
 
-	fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
+	fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
 
 	oprofile_add_trace(bufhead[0].return_address);
 
@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_bac
 	struct stack_frame bufhead[2];
 	unsigned long bytes;
 
-	bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
+	bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
 	if (bytes != 0)
 		return NULL;
 
@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const reg
 {
 	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
 
-	if (!user_mode_vm(regs)) {
+	if (!user_mode(regs)) {
 		unsigned long stack = kernel_stack_pointer(regs);
 		if (depth)
 			dump_trace(NULL, regs, (unsigned long *)stack, 0,
diff -ruNp linux-3.13.11/arch/x86/oprofile/nmi_int.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/nmi_int.c
--- linux-3.13.11/arch/x86/oprofile/nmi_int.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/nmi_int.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
+#include <asm/pgtable.h>
 
 #include "op_counter.h"
 #include "op_x86_model.h"
@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_o
 	if (ret)
 		return ret;
 
-	if (!model->num_virt_counters)
-		model->num_virt_counters = model->num_counters;
+	if (!model->num_virt_counters) {
+		pax_open_kernel();
+		*(unsigned int *)&model->num_virt_counters = model->num_counters;
+		pax_close_kernel();
+	}
 
 	mux_init(ops);
 
diff -ruNp linux-3.13.11/arch/x86/oprofile/op_model_amd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_model_amd.c
--- linux-3.13.11/arch/x86/oprofile/op_model_amd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_model_amd.c	2014-07-09 12:00:15.000000000 +0200
@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_o
 		num_counters = AMD64_NUM_COUNTERS;
 	}
 
-	op_amd_spec.num_counters = num_counters;
-	op_amd_spec.num_controls = num_counters;
-	op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
+	pax_open_kernel();
+	*(unsigned int *)&op_amd_spec.num_counters = num_counters;
+	*(unsigned int *)&op_amd_spec.num_controls = num_counters;
+	*(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
+	pax_close_kernel();
 
 	return 0;
 }
diff -ruNp linux-3.13.11/arch/x86/oprofile/op_model_ppro.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_model_ppro.c
--- linux-3.13.11/arch/x86/oprofile/op_model_ppro.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_model_ppro.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/pgtable.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(
 
 	num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
 
-	op_arch_perfmon_spec.num_counters = num_counters;
-	op_arch_perfmon_spec.num_controls = num_counters;
+	pax_open_kernel();
+	*(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
+	*(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
+	pax_close_kernel();
 }
 
 static int arch_perfmon_init(struct oprofile_operations *ignore)
diff -ruNp linux-3.13.11/arch/x86/oprofile/op_x86_model.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_x86_model.h
--- linux-3.13.11/arch/x86/oprofile/op_x86_model.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/oprofile/op_x86_model.h	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ struct op_x86_model_spec {
 	void		(*switch_ctrl)(struct op_x86_model_spec const *model,
 				       struct op_msrs const * const msrs);
 #endif
-};
+} __do_const;
 
 struct op_counter_config;
 
diff -ruNp linux-3.13.11/arch/x86/pci/intel_mid_pci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/intel_mid_pci.c
--- linux-3.13.11/arch/x86/pci/intel_mid_pci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/intel_mid_pci.c	2014-07-09 12:00:15.000000000 +0200
@@ -241,7 +241,7 @@ int __init intel_mid_pci_init(void)
 	pr_info("Intel MID platform detected, using MID PCI ops\n");
 	pci_mmcfg_late_init();
 	pcibios_enable_irq = intel_mid_pci_irq_enable;
-	pci_root_ops = intel_mid_pci_ops;
+	memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
 	pci_soc_mode = 1;
 	/* Continue with standard init */
 	return 1;
diff -ruNp linux-3.13.11/arch/x86/pci/irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/irq.c
--- linux-3.13.11/arch/x86/pci/irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -50,7 +50,7 @@ struct irq_router {
 struct irq_router_handler {
 	u16 vendor;
 	int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-};
+} __do_const;
 
 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
@@ -794,7 +794,7 @@ static __init int pico_router_probe(stru
 	return 0;
 }
 
-static __initdata struct irq_router_handler pirq_routers[] = {
+static __initconst const struct irq_router_handler pirq_routers[] = {
 	{ PCI_VENDOR_ID_INTEL, intel_router_probe },
 	{ PCI_VENDOR_ID_AL, ali_router_probe },
 	{ PCI_VENDOR_ID_ITE, ite_router_probe },
@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
 static void __init pirq_find_router(struct irq_router *r)
 {
 	struct irq_routing_table *rt = pirq_table;
-	struct irq_router_handler *h;
+	const struct irq_router_handler *h;
 
 #ifdef CONFIG_PCI_BIOS
 	if (!rt->signature) {
@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrout
 	return 0;
 }
 
-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
 	{
 		.callback = fix_broken_hp_bios_irq9,
 		.ident = "HP Pavilion N5400 Series Laptop",
diff -ruNp linux-3.13.11/arch/x86/pci/pcbios.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/pcbios.c
--- linux-3.13.11/arch/x86/pci/pcbios.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/pci/pcbios.c	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ union bios32 {
 static struct {
 	unsigned long address;
 	unsigned short segment;
-} bios32_indirect = { 0, __KERNEL_CS };
+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
 
 /*
  * Returns the entry point for the given service, NULL on error
@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsi
 	unsigned long length;		/* %ecx */
 	unsigned long entry;		/* %edx */
 	unsigned long flags;
+	struct desc_struct d, *gdt;
 
 	local_irq_save(flags);
-	__asm__("lcall *(%%edi); cld"
+
+	gdt = get_cpu_gdt_table(smp_processor_id());
+
+	pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
+	write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
+	pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
+	write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
+
+	__asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
 		: "=a" (return_code),
 		  "=b" (address),
 		  "=c" (length),
 		  "=d" (entry)
 		: "0" (service),
 		  "1" (0),
-		  "D" (&bios32_indirect));
+		  "D" (&bios32_indirect),
+		  "r"(__PCIBIOS_DS)
+		: "memory");
+
+	pax_open_kernel();
+	gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
+	gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
+	gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
+	gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
+	pax_close_kernel();
+
 	local_irq_restore(flags);
 
 	switch (return_code) {
-		case 0:
-			return address + entry;
-		case 0x80:	/* Not present */
-			printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
-			return 0;
-		default: /* Shouldn't happen */
-			printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
-				service, return_code);
+	case 0: {
+		int cpu;
+		unsigned char flags;
+
+		printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
+		if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
+			printk(KERN_WARNING "bios32_service: not valid\n");
 			return 0;
+		}
+		address = address + PAGE_OFFSET;
+		length += 16UL; /* some BIOSs underreport this... */
+		flags = 4;
+		if (length >= 64*1024*1024) {
+			length >>= PAGE_SHIFT;
+			flags |= 8;
+		}
+
+		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+			gdt = get_cpu_gdt_table(cpu);
+			pack_descriptor(&d, address, length, 0x9b, flags);
+			write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
+			pack_descriptor(&d, address, length, 0x93, flags);
+			write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
+		}
+		return entry;
+	}
+	case 0x80:	/* Not present */
+		printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
+		return 0;
+	default: /* Shouldn't happen */
+		printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
+			service, return_code);
+		return 0;
 	}
 }
 
 static struct {
 	unsigned long address;
 	unsigned short segment;
-} pci_indirect = { 0, __KERNEL_CS };
+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
 
-static int pci_bios_present;
+static int pci_bios_present __read_only;
 
 static int check_pcibios(void)
 {
@@ -131,11 +174,13 @@ static int check_pcibios(void)
 	unsigned long flags, pcibios_entry;
 
 	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
-		pci_indirect.address = pcibios_entry + PAGE_OFFSET;
+		pci_indirect.address = pcibios_entry;
 
 		local_irq_save(flags);
-		__asm__(
-			"lcall *(%%edi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%edi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -144,7 +189,8 @@ static int check_pcibios(void)
 			  "=b" (ebx),
 			  "=c" (ecx)
 			: "1" (PCIBIOS_PCI_BIOS_PRESENT),
-			  "D" (&pci_indirect)
+			  "D" (&pci_indirect),
+			  "r" (__PCIBIOS_DS)
 			: "memory");
 		local_irq_restore(flags);
 
@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int se
 
 	switch (len) {
 	case 1:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int se
 			: "1" (PCIBIOS_READ_CONFIG_BYTE),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		/*
 		 * Zero-extend the result beyond 8 bits, do not trust the
 		 * BIOS having done it:
@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int se
 		*value &= 0xff;
 		break;
 	case 2:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int se
 			: "1" (PCIBIOS_READ_CONFIG_WORD),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		/*
 		 * Zero-extend the result beyond 16 bits, do not trust the
 		 * BIOS having done it:
@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int se
 		*value &= 0xffff;
 		break;
 	case 4:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int se
 			: "1" (PCIBIOS_READ_CONFIG_DWORD),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		break;
 	}
 
@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int s
 
 	switch (len) {
 	case 1:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int s
 			  "c" (value),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		break;
 	case 2:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int s
 			  "c" (value),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		break;
 	case 4:
-		__asm__("lcall *(%%esi); cld\n\t"
+		__asm__("movw %w6, %%ds\n\t"
+			"lcall *%%ss:(%%esi); cld\n\t"
+			"push %%ss\n\t"
+			"pop %%ds\n\t"
 			"jc 1f\n\t"
 			"xor %%ah, %%ah\n"
 			"1:"
@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int s
 			  "c" (value),
 			  "b" (bx),
 			  "D" ((long)reg),
-			  "S" (&pci_indirect));
+			  "S" (&pci_indirect),
+			  "r" (__PCIBIOS_DS));
 		break;
 	}
 
@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_i
 
 	DBG("PCI: Fetching IRQ routing table... ");
 	__asm__("push %%es\n\t"
+		"movw %w8, %%ds\n\t"
 		"push %%ds\n\t"
 		"pop  %%es\n\t"
-		"lcall *(%%esi); cld\n\t"
+		"lcall *%%ss:(%%esi); cld\n\t"
 		"pop %%es\n\t"
+		"push %%ss\n\t"
+		"pop %%ds\n"
 		"jc 1f\n\t"
 		"xor %%ah, %%ah\n"
 		"1:"
@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_i
 		  "1" (0),
 		  "D" ((long) &opt),
 		  "S" (&pci_indirect),
-		  "m" (opt)
+		  "m" (opt),
+		  "r" (__PCIBIOS_DS)
 		: "memory");
 	DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
 	if (ret & 0xff00)
@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_d
 {
 	int ret;
 
-	__asm__("lcall *(%%esi); cld\n\t"
+	__asm__("movw %w5, %%ds\n\t"
+		"lcall *%%ss:(%%esi); cld\n\t"
+		"push %%ss\n\t"
+		"pop %%ds\n"
 		"jc 1f\n\t"
 		"xor %%ah, %%ah\n"
 		"1:"
@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_d
 		: "0" (PCIBIOS_SET_PCI_HW_INT),
 		  "b" ((dev->bus->number << 8) | dev->devfn),
 		  "c" ((irq << 8) | (pin + 10)),
-		  "S" (&pci_indirect));
+		  "S" (&pci_indirect),
+		  "r" (__PCIBIOS_DS));
 	return !(ret & 0xff00);
 }
 EXPORT_SYMBOL(pcibios_set_irq_routing);
diff -ruNp linux-3.13.11/arch/x86/platform/efi/efi_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_32.c
--- linux-3.13.11/arch/x86/platform/efi/efi_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
 {
 	struct desc_ptr gdt_descr;
 
+#ifdef CONFIG_PAX_KERNEXEC
+	struct desc_struct d;
+#endif
+
 	local_irq_save(efi_rt_eflags);
 
 	load_cr3(initial_page_table);
 	__flush_tlb_all();
 
+#ifdef CONFIG_PAX_KERNEXEC
+	pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
+	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
+	pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
+	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
+#endif
+
 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
 {
 	struct desc_ptr gdt_descr;
 
+#ifdef CONFIG_PAX_KERNEXEC
+	struct desc_struct d;
+
+	memset(&d, 0, sizeof d);
+	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
+	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
+#endif
+
 	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
 
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+#else
 	load_cr3(swapper_pg_dir);
+#endif
+
 	__flush_tlb_all();
 
 	local_irq_restore(efi_rt_eflags);
diff -ruNp linux-3.13.11/arch/x86/platform/efi/efi_64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_64.c
--- linux-3.13.11/arch/x86/platform/efi/efi_64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_64.c	2014-07-09 12:00:15.000000000 +0200
@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
 		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
 	}
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	load_cr3(swapper_pg_dir);
+#endif
+
 	__flush_tlb_all();
 }
 
@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
 	for (pgd = 0; pgd < n_pgds; pgd++)
 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
 	kfree(save_pgd);
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
+#endif
+
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
diff -ruNp linux-3.13.11/arch/x86/platform/efi/efi_stub_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_stub_32.S
--- linux-3.13.11/arch/x86/platform/efi/efi_stub_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_stub_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -6,7 +6,9 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
 /*
  * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -20,7 +22,7 @@
  * service functions will comply with gcc calling convention, too.
  */
 
-.text
+__INIT
 ENTRY(efi_call_phys)
 	/*
 	 * 0. The function can only be called in Linux kernel. So CS has been
@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
 	 * The mapping of lower virtual memory has been created in prelog and
 	 * epilog.
 	 */
-	movl	$1f, %edx
-	subl	$__PAGE_OFFSET, %edx
-	jmp	*%edx
+#ifdef CONFIG_PAX_KERNEXEC
+	movl	$(__KERNEXEC_EFI_DS), %edx
+	mov	%edx, %ds
+	mov	%edx, %es
+	mov	%edx, %ss
+	addl	$2f,(1f)
+	ljmp	*(1f)
+
+__INITDATA
+1:	.long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
+.previous
+
+2:
+	subl	$2b,(1b)
+#else
+	jmp	1f-__PAGE_OFFSET
 1:
+#endif
 
 	/*
 	 * 2. Now on the top of stack is the return
@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
 	 * parameter 2, ..., param n. To make things easy, we save the return
 	 * address of efi_call_phys in a global variable.
 	 */
-	popl	%edx
-	movl	%edx, saved_return_addr
-	/* get the function pointer into ECX*/
-	popl	%ecx
-	movl	%ecx, efi_rt_function_ptr
-	movl	$2f, %edx
-	subl	$__PAGE_OFFSET, %edx
-	pushl	%edx
+	popl	(saved_return_addr)
+	popl	(efi_rt_function_ptr)
 
 	/*
 	 * 3. Clear PG bit in %CR0.
@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
 	/*
 	 * 5. Call the physical function.
 	 */
-	jmp	*%ecx
+	call	*(efi_rt_function_ptr-__PAGE_OFFSET)
 
-2:
 	/*
 	 * 6. After EFI runtime service returns, control will return to
 	 * following instruction. We'd better readjust stack pointer first.
@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
 	movl	%cr0, %edx
 	orl	$0x80000000, %edx
 	movl	%edx, %cr0
-	jmp	1f
-1:
+
 	/*
 	 * 8. Now restore the virtual mode from flat mode by
 	 * adding EIP with PAGE_OFFSET.
 	 */
-	movl	$1f, %edx
-	jmp	*%edx
+#ifdef CONFIG_PAX_KERNEXEC
+	movl	$(__KERNEL_DS), %edx
+	mov	%edx, %ds
+	mov	%edx, %es
+	mov	%edx, %ss
+	ljmp	$(__KERNEL_CS),$1f
+#else
+	jmp	1f+__PAGE_OFFSET
+#endif
 1:
 
 	/*
 	 * 9. Balance the stack. And because EAX contain the return value,
 	 * we'd better not clobber it.
 	 */
-	leal	efi_rt_function_ptr, %edx
-	movl	(%edx), %ecx
-	pushl	%ecx
+	pushl	(efi_rt_function_ptr)
 
 	/*
-	 * 10. Push the saved return address onto the stack and return.
+	 * 10. Return to the saved return address.
 	 */
-	leal	saved_return_addr, %edx
-	movl	(%edx), %ecx
-	pushl	%ecx
-	ret
+	jmpl	*(saved_return_addr)
 ENDPROC(efi_call_phys)
 .previous
 
-.data
+__INITDATA
 saved_return_addr:
 	.long 0
 efi_rt_function_ptr:
diff -ruNp linux-3.13.11/arch/x86/platform/efi/efi_stub_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_stub_64.S
--- linux-3.13.11/arch/x86/platform/efi/efi_stub_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/efi/efi_stub_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 
 #define SAVE_XMM			\
 	mov %rsp, %rax;			\
@@ -40,6 +41,7 @@ ENTRY(efi_call0)
 	call *%rdi
 	addq $32, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call0)
 
@@ -50,6 +52,7 @@ ENTRY(efi_call1)
 	call *%rdi
 	addq $32, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call1)
 
@@ -60,6 +63,7 @@ ENTRY(efi_call2)
 	call *%rdi
 	addq $32, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call2)
 
@@ -71,6 +75,7 @@ ENTRY(efi_call3)
 	call *%rdi
 	addq $32, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call3)
 
@@ -83,6 +88,7 @@ ENTRY(efi_call4)
 	call *%rdi
 	addq $32, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call4)
 
@@ -96,6 +102,7 @@ ENTRY(efi_call5)
 	call *%rdi
 	addq $48, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call5)
 
@@ -112,5 +119,6 @@ ENTRY(efi_call6)
 	call *%rdi
 	addq $48, %rsp
 	RESTORE_XMM
+	pax_force_retaddr 0, 1
 	ret
 ENDPROC(efi_call6)
diff -ruNp linux-3.13.11/arch/x86/platform/intel-mid/intel-mid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/intel-mid/intel-mid.c
--- linux-3.13.11/arch/x86/platform/intel-mid/intel-mid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/intel-mid/intel-mid.c	2014-07-09 12:00:15.000000000 +0200
@@ -65,9 +65,10 @@ static void intel_mid_power_off(void)
 {
 }
 
-static void intel_mid_reboot(void)
+static void __noreturn intel_mid_reboot(void)
 {
 	intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+	BUG();
 }
 
 static unsigned long __init intel_mid_calibrate_tsc(void)
diff -ruNp linux-3.13.11/arch/x86/platform/olpc/olpc_dt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/olpc/olpc_dt.c
--- linux-3.13.11/arch/x86/platform/olpc/olpc_dt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/platform/olpc/olpc_dt.c	2014-07-09 12:00:15.000000000 +0200
@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned
 	return res;
 }
 
-static struct of_pdt_ops prom_olpc_ops __initdata = {
+static struct of_pdt_ops prom_olpc_ops __initconst = {
 	.nextprop = olpc_dt_nextprop,
 	.getproplen = olpc_dt_getproplen,
 	.getproperty = olpc_dt_getproperty,
diff -ruNp linux-3.13.11/arch/x86/power/cpu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/power/cpu.c
--- linux-3.13.11/arch/x86/power/cpu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/power/cpu.c	2014-07-09 12:00:15.000000000 +0200
@@ -137,11 +137,8 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
 	int cpu = smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-#ifdef CONFIG_X86_64
-	struct desc_struct *desc = get_cpu_gdt_table(cpu);
-	tss_desc tss;
-#endif
+	struct tss_struct *t = init_tss + cpu;
+
 	set_tss_desc(cpu, t);	/*
 				 * This just modifies memory; should not be
 				 * necessary. But... This is necessary, because
@@ -150,10 +147,6 @@ static void fix_processor_context(void)
 				 */
 
 #ifdef CONFIG_X86_64
-	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
-	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
-	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
-
 	syscall_init();				/* This sets MSR_*STAR and related */
 #endif
 	load_TR_desc();				/* This does ltr */
diff -ruNp linux-3.13.11/arch/x86/realmode/init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/init.c
--- linux-3.13.11/arch/x86/realmode/init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/init.c	2014-07-09 12:00:15.000000000 +0200
@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
 		__va(real_mode_header->trampoline_header);
 
 #ifdef CONFIG_X86_32
-	trampoline_header->start = __pa_symbol(startup_32_smp);
+	trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
+
+#ifdef CONFIG_PAX_KERNEXEC
+	trampoline_header->start -= LOAD_PHYSICAL_ADDR;
+#endif
+
+	trampoline_header->boot_cs = __BOOT_CS;
 	trampoline_header->gdt_limit = __BOOT_DS + 7;
 	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
 #else
@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
 	*trampoline_cr4_features = read_cr4();
 
 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-	trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+	trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
 	trampoline_pgd[511] = init_level4_pgt[511].pgd;
 #endif
 }
diff -ruNp linux-3.13.11/arch/x86/realmode/rm/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/Makefile
--- linux-3.13.11/arch/x86/realmode/rm/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -79,5 +79,8 @@ KBUILD_CFLAGS	:= $(LINUXINCLUDE) -m32 -g
 		   $(call cc-option, -fno-unit-at-a-time)) \
 		   $(call cc-option, -fno-stack-protector) \
 		   $(call cc-option, -mpreferred-stack-boundary=2)
+ifdef CONSTIFY_PLUGIN
+KBUILD_CFLAGS	+= -fplugin-arg-constify_plugin-no-constify
+endif
 KBUILD_AFLAGS	:= $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
diff -ruNp linux-3.13.11/arch/x86/realmode/rm/header.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/header.S
--- linux-3.13.11/arch/x86/realmode/rm/header.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/header.S	2014-07-09 12:00:15.000000000 +0200
@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
 #endif
 	/* APM/BIOS reboot */
 	.long	pa_machine_real_restart_asm
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_32
+	.long	__KERNEL_CS
+#else
 	.long	__KERNEL32_CS
 #endif
 END(real_mode_header)
diff -ruNp linux-3.13.11/arch/x86/realmode/rm/trampoline_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/trampoline_32.S
--- linux-3.13.11/arch/x86/realmode/rm/trampoline_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/trampoline_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,12 @@
 #include <asm/page_types.h>
 #include "realmode.h"
 
+#ifdef CONFIG_PAX_KERNEXEC
+#define ta(X) (X)
+#else
+#define ta(X) (pa_ ## X)
+#endif
+
 	.text
 	.code16
 
@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
 
 	cli			# We should be safe anyway
 
-	movl	tr_start, %eax	# where we need to go
-
 	movl	$0xA5A5A5A5, trampoline_status
 				# write marker for master knows we're running
 
@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
 	movw	$1, %dx			# protected mode (PE) bit
 	lmsw	%dx			# into protected mode
 
-	ljmpl	$__BOOT_CS, $pa_startup_32
+	ljmpl *(trampoline_header)
 
 	.section ".text32","ax"
 	.code32
@@ -67,7 +71,7 @@ ENTRY(startup_32)			# note: also used fr
 	.balign 8
 GLOBAL(trampoline_header)
 	tr_start:		.space	4
-	tr_gdt_pad:		.space	2
+	tr_boot_cs:		.space	2
 	tr_gdt:			.space	6
 END(trampoline_header)
 	
diff -ruNp linux-3.13.11/arch/x86/realmode/rm/trampoline_64.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/trampoline_64.S
--- linux-3.13.11/arch/x86/realmode/rm/trampoline_64.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/realmode/rm/trampoline_64.S	2014-07-09 12:00:15.000000000 +0200
@@ -94,6 +94,7 @@ ENTRY(startup_32)
 	movl	%edx, %gs
 
 	movl	pa_tr_cr4, %eax
+	andl	$~X86_CR4_PCIDE, %eax
 	movl	%eax, %cr4		# Enable PAE mode
 
 	# Setup trampoline 4 level pagetables
@@ -107,7 +108,7 @@ ENTRY(startup_32)
 	wrmsr
 
 	# Enable paging and in turn activate Long Mode
-	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
+	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
 	movl	%eax, %cr0
 
 	/*
diff -ruNp linux-3.13.11/arch/x86/syscalls/syscall_32.tbl linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/syscalls/syscall_32.tbl
--- linux-3.13.11/arch/x86/syscalls/syscall_32.tbl	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/syscalls/syscall_32.tbl	2014-07-09 12:00:15.000000000 +0200
@@ -279,7 +279,7 @@
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
 272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
-273	i386	vserver
+273	i386	vserver			sys_vserver			sys32_vserver
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
 276	i386	set_mempolicy		sys_set_mempolicy
diff -ruNp linux-3.13.11/arch/x86/syscalls/syscall_64.tbl linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/syscalls/syscall_64.tbl
--- linux-3.13.11/arch/x86/syscalls/syscall_64.tbl	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/syscalls/syscall_64.tbl	2014-07-09 12:00:15.000000000 +0200
@@ -242,7 +242,7 @@
 233	common	epoll_ctl		sys_epoll_ctl
 234	common	tgkill			sys_tgkill
 235	common	utimes			sys_utimes
-236	64	vserver
+236	64	vserver			sys_vserver
 237	common	mbind			sys_mbind
 238	common	set_mempolicy		sys_set_mempolicy
 239	common	get_mempolicy		sys_get_mempolicy
diff -ruNp linux-3.13.11/arch/x86/tools/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/tools/Makefile
--- linux-3.13.11/arch/x86/tools/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/tools/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x
 
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
 
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
 hostprogs-y	+= relocs
 relocs-objs     := relocs_32.o relocs_64.o relocs_common.o
 relocs: $(obj)/relocs
diff -ruNp linux-3.13.11/arch/x86/tools/relocs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/tools/relocs.c
--- linux-3.13.11/arch/x86/tools/relocs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/tools/relocs.c	2014-07-09 12:00:15.000000000 +0200
@@ -1,5 +1,7 @@
 /* This is included from relocs_32/64.c */
 
+#include "../../../include/generated/autoconf.h"
+
 #define ElfW(type)		_ElfW(ELF_BITS, type)
 #define _ElfW(bits, type)	__ElfW(bits, type)
 #define __ElfW(bits, type)	Elf##bits##_##type
@@ -11,6 +13,7 @@
 #define Elf_Sym			ElfW(Sym)
 
 static Elf_Ehdr ehdr;
+static Elf_Phdr *phdr;
 
 struct relocs {
 	uint32_t	*offset;
@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
 	}
 }
 
+static void read_phdrs(FILE *fp)
+{
+	unsigned int i;
+
+	phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
+	if (!phdr) {
+		die("Unable to allocate %d program headers\n",
+		    ehdr.e_phnum);
+	}
+	if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
+		die("Seek to %d failed: %s\n",
+			ehdr.e_phoff, strerror(errno));
+	}
+	if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
+		die("Cannot read ELF program headers: %s\n",
+			strerror(errno));
+	}
+	for(i = 0; i < ehdr.e_phnum; i++) {
+		phdr[i].p_type      = elf_word_to_cpu(phdr[i].p_type);
+		phdr[i].p_offset    = elf_off_to_cpu(phdr[i].p_offset);
+		phdr[i].p_vaddr     = elf_addr_to_cpu(phdr[i].p_vaddr);
+		phdr[i].p_paddr     = elf_addr_to_cpu(phdr[i].p_paddr);
+		phdr[i].p_filesz    = elf_word_to_cpu(phdr[i].p_filesz);
+		phdr[i].p_memsz     = elf_word_to_cpu(phdr[i].p_memsz);
+		phdr[i].p_flags     = elf_word_to_cpu(phdr[i].p_flags);
+		phdr[i].p_align     = elf_word_to_cpu(phdr[i].p_align);
+	}
+
+}
+
 static void read_shdrs(FILE *fp)
 {
-	int i;
+	unsigned int i;
 	Elf_Shdr shdr;
 
 	secs = calloc(ehdr.e_shnum, sizeof(struct section));
@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
 
 static void read_strtabs(FILE *fp)
 {
-	int i;
+	unsigned int i;
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		if (sec->shdr.sh_type != SHT_STRTAB) {
@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
 
 static void read_symtabs(FILE *fp)
 {
-	int i,j;
+	unsigned int i,j;
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		if (sec->shdr.sh_type != SHT_SYMTAB) {
@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
 }
 
 
-static void read_relocs(FILE *fp)
+static void read_relocs(FILE *fp, int use_real_mode)
 {
-	int i,j;
+	unsigned int i,j;
+	uint32_t base;
+
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		if (sec->shdr.sh_type != SHT_REL_TYPE) {
@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
 			die("Cannot read symbol table: %s\n",
 				strerror(errno));
 		}
+		base = 0;
+
+#ifdef CONFIG_X86_32
+		for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
+			if (phdr[j].p_type != PT_LOAD )
+				continue;
+			if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
+				continue;
+			base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
+			break;
+		}
+#endif
+
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
 			Elf_Rel *rel = &sec->reltab[j];
-			rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+			rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
 			rel->r_info   = elf_xword_to_cpu(rel->r_info);
 #if (SHT_REL_TYPE == SHT_RELA)
 			rel->r_addend = elf_xword_to_cpu(rel->r_addend);
@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
 
 static void print_absolute_symbols(void)
 {
-	int i;
+	unsigned int i;
 	const char *format;
 
 	if (ELF_BITS == 64)
@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		char *sym_strtab;
-		int j;
+		unsigned int j;
 
 		if (sec->shdr.sh_type != SHT_SYMTAB) {
 			continue;
@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
 
 static void print_absolute_relocs(void)
 {
-	int i, printed = 0;
+	unsigned int i, printed = 0;
 	const char *format;
 
 	if (ELF_BITS == 64)
@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
 		struct section *sec_applies, *sec_symtab;
 		char *sym_strtab;
 		Elf_Sym *sh_symtab;
-		int j;
+		unsigned int j;
 		if (sec->shdr.sh_type != SHT_REL_TYPE) {
 			continue;
 		}
@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r,
 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
 			Elf_Sym *sym, const char *symname))
 {
-	int i;
+	unsigned int i;
 	/* Walk through the relocations */
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		char *sym_strtab;
 		Elf_Sym *sh_symtab;
 		struct section *sec_applies, *sec_symtab;
-		int j;
+		unsigned int j;
 		struct section *sec = &secs[i];
 
 		if (sec->shdr.sh_type != SHT_REL_TYPE) {
@@ -812,6 +860,23 @@ static int do_reloc32(struct section *se
 {
 	unsigned r_type = ELF32_R_TYPE(rel->r_info);
 	int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
+	char *sym_strtab = sec->link->link->strtab;
+
+	/* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
+	if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
+		return 0;
+
+#ifdef CONFIG_PAX_KERNEXEC
+	/* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
+	if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
+		return 0;
+	if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
+		return 0;
+	if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
+		return 0;
+	if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
+		return 0;
+#endif
 
 	switch (r_type) {
 	case R_386_NONE:
@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, F
 
 static void emit_relocs(int as_text, int use_real_mode)
 {
-	int i;
+	unsigned int i;
 	int (*write_reloc)(uint32_t, FILE *) = write32;
 	int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
 			const char *symname);
@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode
 {
 	regex_init(use_real_mode);
 	read_ehdr(fp);
+	read_phdrs(fp);
 	read_shdrs(fp);
 	read_strtabs(fp);
 	read_symtabs(fp);
-	read_relocs(fp);
+	read_relocs(fp, use_real_mode);
 	if (ELF_BITS == 64)
 		percpu_init();
 	if (show_absolute_syms) {
diff -ruNp linux-3.13.11/arch/x86/um/tls_32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/um/tls_32.c
--- linux-3.13.11/arch/x86/um/tls_32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/um/tls_32.c	2014-07-09 12:00:15.000000000 +0200
@@ -260,7 +260,7 @@ out:
 	if (unlikely(task == current &&
 		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
 		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
-				"without flushed TLS.", current->pid);
+				"without flushed TLS.", task_pid_nr(current));
 	}
 
 	return 0;
diff -ruNp linux-3.13.11/arch/x86/vdso/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/Makefile
--- linux-3.13.11/arch/x86/vdso/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO    $@
 		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 GCOV_PROFILE := n
 
 #
diff -ruNp linux-3.13.11/arch/x86/vdso/vdso32-setup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/vdso32-setup.c
--- linux-3.13.11/arch/x86/vdso/vdso32-setup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/vdso32-setup.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@
 #include <asm/tlbflush.h>
 #include <asm/vdso.h>
 #include <asm/proto.h>
+#include <asm/mman.h>
 
 enum {
 	VDSO_DISABLED = 0,
@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
 void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
-	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	struct tss_struct *tss = init_tss + cpu;
 
 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
 		put_cpu();
@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-	gate_vma.vm_page_prot = __P101;
+	gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 
 	return 0;
 }
@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct l
 	if (compat)
 		addr = VDSO_HIGH_BASE;
 	else {
-		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
 		if (IS_ERR_VALUE(addr)) {
 			ret = addr;
 			goto up_fail;
 		}
 	}
 
-	current->mm->context.vdso = (void *)addr;
+	current->mm->context.vdso = addr;
 
 	if (compat_uses_vma || !compat) {
 		/*
@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct l
 	}
 
 	current_thread_info()->sysenter_return =
-		VDSO32_SYMBOL(addr, SYSENTER_RETURN);
+		(__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
 
   up_fail:
 	if (ret)
-		current->mm->context.vdso = NULL;
+		current->mm->context.vdso = 0;
 
 	up_write(&mm->mmap_sem);
 
@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
 		return "[vdso]";
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
+		return "[vdso]";
+#endif
+
 	return NULL;
 }
 
@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(stru
 	 * Check to see if the corresponding task was created in compat vdso
 	 * mode.
 	 */
-	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
+	if (mm && mm->context.vdso == VDSO_HIGH_BASE)
 		return &gate_vma;
 	return NULL;
 }
diff -ruNp linux-3.13.11/arch/x86/vdso/vma.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/vma.c
--- linux-3.13.11/arch/x86/vdso/vma.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/vdso/vma.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,8 +16,6 @@
 #include <asm/vdso.h>
 #include <asm/page.h>
 
-unsigned int __read_mostly vdso_enabled = 1;
-
 extern char vdso_start[], vdso_end[];
 extern unsigned short vdso_sync_cpuid;
 
@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned
 	 * unaligned here as a result of stack start randomization.
 	 */
 	addr = PAGE_ALIGN(addr);
-	addr = align_vdso_addr(addr);
 
 	return addr;
 }
@@ -154,30 +151,31 @@ static int setup_additional_pages(struct
 				  unsigned size)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long addr;
+	unsigned long addr = 0;
 	int ret;
 
-	if (!vdso_enabled)
-		return 0;
-
 	down_write(&mm->mmap_sem);
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	addr = vdso_addr(mm->start_stack, size);
+	addr = align_vdso_addr(addr);
 	addr = get_unmapped_area(NULL, addr, size, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
 		goto up_fail;
 	}
 
-	current->mm->context.vdso = (void *)addr;
+	mm->context.vdso = addr;
 
 	ret = install_special_mapping(mm, addr, size,
 				      VM_READ|VM_EXEC|
 				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 				      pages);
-	if (ret) {
-		current->mm->context.vdso = NULL;
-		goto up_fail;
-	}
+	if (ret)
+		mm->context.vdso = 0;
 
 up_fail:
 	up_write(&mm->mmap_sem);
@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct li
 				      vdsox32_size);
 }
 #endif
-
-static __init int vdso_setup(char *s)
-{
-	vdso_enabled = simple_strtoul(s, NULL, 0);
-	return 0;
-}
-__setup("vdso=", vdso_setup);
diff -ruNp linux-3.13.11/arch/x86/xen/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/Kconfig
--- linux-3.13.11/arch/x86/xen/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,7 @@ config XEN
 	select XEN_HAVE_PVMMU
 	depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
 	depends on X86_TSC
+	depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
 	help
 	  This is the Linux Xen port.  Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the
diff -ruNp linux-3.13.11/arch/x86/xen/enlighten.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/enlighten.c
--- linux-3.13.11/arch/x86/xen/enlighten.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/enlighten.c	2014-07-09 12:00:15.000000000 +0200
@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
 
 struct shared_info xen_dummy_shared_info;
 
-void *xen_initial_gdt;
-
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -541,8 +539,7 @@ static void xen_load_gdt(const struct de
 {
 	unsigned long va = dtr->address;
 	unsigned int size = dtr->size + 1;
-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-	unsigned long frames[pages];
+	unsigned long frames[65536 / PAGE_SIZE];
 	int f;
 
 	/*
@@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(con
 {
 	unsigned long va = dtr->address;
 	unsigned int size = dtr->size + 1;
-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-	unsigned long frames[pages];
+	unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
 	int f;
 
 	/*
@@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(con
 	 * 8-byte entries, or 16 4k pages..
 	 */
 
-	BUG_ON(size > 65536);
+	BUG_ON(size > GDT_SIZE);
 	BUG_ON(va & ~PAGE_MASK);
 
 	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
@@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(v
         return 0;
 }
 
-static void set_xen_basic_apic_ops(void)
+static void __init set_xen_basic_apic_ops(void)
 {
 	apic->read = xen_apic_read;
 	apic->write = xen_apic_write;
@@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic
 #endif
 };
 
-static void xen_reboot(int reason)
+static __noreturn void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
 
-	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
-		BUG();
+	HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+	BUG();
 }
 
-static void xen_restart(char *msg)
+static __noreturn void xen_restart(char *msg)
 {
 	xen_reboot(SHUTDOWN_reboot);
 }
 
-static void xen_emergency_restart(void)
+static __noreturn void xen_emergency_restart(void)
 {
 	xen_reboot(SHUTDOWN_reboot);
 }
 
-static void xen_machine_halt(void)
+static __noreturn void xen_machine_halt(void)
 {
 	xen_reboot(SHUTDOWN_poweroff);
 }
 
-static void xen_machine_power_off(void)
+static __noreturn void xen_machine_power_off(void)
 {
 	if (pm_power_off)
 		pm_power_off();
@@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(
 	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
 	/* Work out if we support NX */
-	x86_configure_nx();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+	if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
+	    (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+		unsigned l, h;
+
+		__supported_pte_mask |= _PAGE_NX;
+		rdmsr(MSR_EFER, l, h);
+		l |= EFER_NX;
+		wrmsr(MSR_EFER, l, h);
+	}
+#endif
 
 	xen_setup_features();
 
@@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(
 
 	machine_ops = xen_machine_ops;
 
-	/*
-	 * The only reliable way to retain the initial address of the
-	 * percpu gdt_page is to remember it here, so we can go and
-	 * mark it RW later, when the initial percpu area is freed.
-	 */
-	xen_initial_gdt = &per_cpu(gdt_page, 0);
-
 	xen_smp_init();
 
 #ifdef CONFIG_ACPI_NUMA
diff -ruNp linux-3.13.11/arch/x86/xen/mmu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/mmu.c
--- linux-3.13.11/arch/x86/xen/mmu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/mmu.c	2014-07-09 12:00:15.000000000 +0200
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t
 	return val;
 }
 
-static pteval_t pte_pfn_to_mfn(pteval_t val)
+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
 {
 	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(p
 	/* L3_k[510] -> level2_kernel_pgt
 	 * L3_i[511] -> level2_fixmap_pgt */
 	convert_pfn_mfn(level3_kernel_pgt);
+	convert_pfn_mfn(level3_vmalloc_start_pgt);
+	convert_pfn_mfn(level3_vmalloc_end_pgt);
+	convert_pfn_mfn(level3_vmemmap_pgt);
 
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(p
 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
 	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 
@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_in
 	pv_mmu_ops.set_pud = xen_set_pud;
 #if PAGETABLE_LEVELS == 4
 	pv_mmu_ops.set_pgd = xen_set_pgd;
+	pv_mmu_ops.set_pgd_batched = xen_set_pgd;
 #endif
 
 	/* This will work as long as patching hasn't happened yet
@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_o
 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
 	.set_pgd = xen_set_pgd_hyper,
+	.set_pgd_batched = xen_set_pgd_hyper,
 
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
diff -ruNp linux-3.13.11/arch/x86/xen/smp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/smp.c
--- linux-3.13.11/arch/x86/xen/smp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/smp.c	2014-07-09 12:00:15.000000000 +0200
@@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_
 	native_smp_prepare_boot_cpu();
 
 	if (xen_pv_domain()) {
-		/* We've switched to the "real" per-cpu gdt, so make sure the
-		   old memory can be recycled */
-		make_lowmem_page_readwrite(xen_initial_gdt);
-
 #ifdef CONFIG_X86_32
 		/*
 		 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
 		 * expects __USER_DS
 		 */
-		loadsegment(ds, __USER_DS);
-		loadsegment(es, __USER_DS);
+		loadsegment(ds, __KERNEL_DS);
+		loadsegment(es, __KERNEL_DS);
 #endif
 
 		xen_filter_cpu_maps();
@@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu,
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+	savesegment(gs, ctxt->user_regs.gs);
 #else
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
@@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu,
 
 	{
 		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
-		ctxt->user_regs.ds = __USER_DS;
-		ctxt->user_regs.es = __USER_DS;
+		ctxt->user_regs.ds = __KERNEL_DS;
+		ctxt->user_regs.es = __KERNEL_DS;
 
 		xen_copy_trap_info(ctxt->trap_ctxt);
 
@@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu,
 	int rc;
 
 	per_cpu(current_task, cpu) = idle;
+	per_cpu(current_tinfo, cpu) = &idle->tinfo;
 #ifdef CONFIG_X86_32
 	irq_ctx_init(cpu);
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
-	per_cpu(kernel_stack, cpu) =
-		(unsigned long)task_stack_page(idle) -
-		KERNEL_STACK_OFFSET + THREAD_SIZE;
+	per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
 #endif
 	xen_setup_runstate_info(cpu);
 	xen_setup_timer(cpu);
@@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops
 
 void __init xen_smp_init(void)
 {
-	smp_ops = xen_smp_ops;
+	memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
 	xen_fill_possible_map();
 }
 
diff -ruNp linux-3.13.11/arch/x86/xen/xen-asm_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-asm_32.S
--- linux-3.13.11/arch/x86/xen/xen-asm_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-asm_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -84,14 +84,14 @@ ENTRY(xen_iret)
 	ESP_OFFSET=4	# bytes pushed onto stack
 
 	/*
-	 * Store vcpu_info pointer for easy access.  Do it this way to
-	 * avoid having to reload %fs
+	 * Store vcpu_info pointer for easy access.
 	 */
 #ifdef CONFIG_SMP
-	GET_THREAD_INFO(%eax)
-	movl %ss:TI_cpu(%eax), %eax
-	movl %ss:__per_cpu_offset(,%eax,4), %eax
-	mov %ss:xen_vcpu(%eax), %eax
+	push %fs
+	mov $(__KERNEL_PERCPU), %eax
+	mov %eax, %fs
+	mov PER_CPU_VAR(xen_vcpu), %eax
+	pop %fs
 #else
 	movl %ss:xen_vcpu, %eax
 #endif
diff -ruNp linux-3.13.11/arch/x86/xen/xen-head.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-head.S
--- linux-3.13.11/arch/x86/xen/xen-head.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-head.S	2014-07-09 12:00:15.000000000 +0200
@@ -19,6 +19,17 @@ ENTRY(startup_xen)
 #ifdef CONFIG_X86_32
 	mov %esi,xen_start_info
 	mov $init_thread_union+THREAD_SIZE,%esp
+#ifdef CONFIG_SMP
+	movl $cpu_gdt_table,%edi
+	movl $__per_cpu_load,%eax
+	movw %ax,__KERNEL_PERCPU + 2(%edi)
+	rorl $16,%eax
+	movb %al,__KERNEL_PERCPU + 4(%edi)
+	movb %ah,__KERNEL_PERCPU + 7(%edi)
+	movl $__per_cpu_end - 1,%eax
+	subl $__per_cpu_start,%eax
+	movw %ax,__KERNEL_PERCPU + 0(%edi)
+#endif
 #else
 	mov %rsi,xen_start_info
 	mov $init_thread_union+THREAD_SIZE,%rsp
diff -ruNp linux-3.13.11/arch/x86/xen/xen-ops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-ops.h
--- linux-3.13.11/arch/x86/xen/xen-ops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/x86/xen/xen-ops.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,8 +10,6 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
-extern void *xen_initial_gdt;
-
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
diff -ruNp linux-3.13.11/arch/xtensa/variants/dc232b/include/variant/core.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/dc232b/include/variant/core.h
--- linux-3.13.11/arch/xtensa/variants/dc232b/include/variant/core.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/dc232b/include/variant/core.h	2014-07-09 12:00:15.000000000 +0200
@@ -119,9 +119,9 @@
   ----------------------------------------------------------------------*/
 
 #define XCHAL_ICACHE_LINESIZE		32	/* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE		32	/* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH		5	/* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH		5	/* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE		(_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)	/* D-cache line size in bytes */
 
 #define XCHAL_ICACHE_SIZE		16384	/* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE		16384	/* D-cache size in bytes or 0 */
diff -ruNp linux-3.13.11/arch/xtensa/variants/fsf/include/variant/core.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/fsf/include/variant/core.h
--- linux-3.13.11/arch/xtensa/variants/fsf/include/variant/core.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/fsf/include/variant/core.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_CORE_H
 #define _XTENSA_CORE_H
 
+#include <linux/const.h>
 
 /****************************************************************************
 	    Parameters Useful for Any Code, USER or PRIVILEGED
@@ -112,9 +113,9 @@
   ----------------------------------------------------------------------*/
 
 #define XCHAL_ICACHE_LINESIZE		16	/* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE		16	/* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH		4	/* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH		4	/* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE		(_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
 
 #define XCHAL_ICACHE_SIZE		8192	/* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE		8192	/* D-cache size in bytes or 0 */
diff -ruNp linux-3.13.11/arch/xtensa/variants/s6000/include/variant/core.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/s6000/include/variant/core.h
--- linux-3.13.11/arch/xtensa/variants/s6000/include/variant/core.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/arch/xtensa/variants/s6000/include/variant/core.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_CORE_CONFIGURATION_H
 #define _XTENSA_CORE_CONFIGURATION_H
 
+#include <linux/const.h>
 
 /****************************************************************************
 	    Parameters Useful for Any Code, USER or PRIVILEGED
@@ -118,9 +119,9 @@
   ----------------------------------------------------------------------*/
 
 #define XCHAL_ICACHE_LINESIZE		16	/* I-cache line size in bytes */
-#define XCHAL_DCACHE_LINESIZE		16	/* D-cache line size in bytes */
 #define XCHAL_ICACHE_LINEWIDTH		4	/* log2(I line size in bytes) */
 #define XCHAL_DCACHE_LINEWIDTH		4	/* log2(D line size in bytes) */
+#define XCHAL_DCACHE_LINESIZE		(_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)	/* D-cache line size in bytes */
 
 #define XCHAL_ICACHE_SIZE		32768	/* I-cache size in bytes or 0 */
 #define XCHAL_DCACHE_SIZE		32768	/* D-cache size in bytes or 0 */
diff -ruNp linux-3.13.11/block/blk-cgroup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-cgroup.c
--- linux-3.13.11/block/blk-cgroup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-cgroup.c	2014-07-09 12:00:15.000000000 +0200
@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup
 static struct cgroup_subsys_state *
 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-	static atomic64_t id_seq = ATOMIC64_INIT(0);
+	static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
 
 	if (!parent_css) {
@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_sta
 
 	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
 	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
-	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
+	blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
 done:
 	spin_lock_init(&blkcg->lock);
 	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
diff -ruNp linux-3.13.11/block/blk-iopoll.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-iopoll.c
--- linux-3.13.11/block/blk-iopoll.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-iopoll.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
 }
 EXPORT_SYMBOL(blk_iopoll_complete);
 
-static void blk_iopoll_softirq(struct softirq_action *h)
+static __latent_entropy void blk_iopoll_softirq(void)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = blk_iopoll_budget;
diff -ruNp linux-3.13.11/block/blk-map.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-map.c
--- linux-3.13.11/block/blk-map.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-map.c	2014-07-09 12:00:15.000000000 +0200
@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+	do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
diff -ruNp linux-3.13.11/block/blk-softirq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-softirq.c
--- linux-3.13.11/block/blk-softirq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/blk-softirq.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head,
  * Softirq action handler - move entries to local list and loop over them
  * while passing them to the queue registered handler.
  */
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(void)
 {
 	struct list_head *cpu_list, local_list;
 
diff -ruNp linux-3.13.11/block/bsg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/bsg.c
--- linux-3.13.11/block/bsg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/bsg.c	2014-07-09 12:00:15.000000000 +0200
@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
 				struct sg_io_v4 *hdr, struct bsg_device *bd,
 				fmode_t has_write_perm)
 {
+	unsigned char tmpcmd[sizeof(rq->__cmd)];
+	unsigned char *cmdptr;
+
 	if (hdr->request_len > BLK_MAX_CDB) {
 		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
 		if (!rq->cmd)
 			return -ENOMEM;
-	}
+		cmdptr = rq->cmd;
+	} else
+		cmdptr = tmpcmd;
 
-	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+	if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
 			   hdr->request_len))
 		return -EFAULT;
 
+	if (cmdptr != rq->cmd)
+		memcpy(rq->cmd, cmdptr, hdr->request_len);
+
 	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
 		if (blk_verify_command(rq->cmd, has_write_perm))
 			return -EPERM;
diff -ruNp linux-3.13.11/block/compat_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/compat_ioctl.c
--- linux-3.13.11/block/compat_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/compat_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(
 	cgc = compat_alloc_user_space(sizeof(*cgc));
 	cgc32 = compat_ptr(arg);
 
-	if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
+	if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
 	    get_user(data, &cgc32->buffer) ||
 	    put_user(compat_ptr(data), &cgc->buffer) ||
 	    copy_in_user(&cgc->buflen, &cgc32->buflen,
@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_
 		err |= __get_user(f->spec1, &uf->spec1);
 		err |= __get_user(f->fmt_gap, &uf->fmt_gap);
 		err |= __get_user(name, &uf->name);
-		f->name = compat_ptr(name);
+		f->name = (void __force_kernel *)compat_ptr(name);
 		if (err) {
 			err = -EFAULT;
 			goto out;
diff -ruNp linux-3.13.11/block/genhd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/genhd.c
--- linux-3.13.11/block/genhd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/genhd.c	2014-07-09 12:00:15.000000000 +0200
@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char
 
 /*
  * Register device numbers dev..(dev+range-1)
- * range must be nonzero
+ * Noop if @range is zero.
  * The hash chain is sorted on range, so that subranges can override.
  */
 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
 			 struct kobject *(*probe)(dev_t, int *, void *),
 			 int (*lock)(dev_t, void *), void *data)
 {
-	kobj_map(bdev_map, devt, range, module, probe, lock, data);
+	if (range)
+		kobj_map(bdev_map, devt, range, module, probe, lock, data);
 }
 
 EXPORT_SYMBOL(blk_register_region);
 
+/* undo blk_register_region(), noop if @range is zero */
 void blk_unregister_region(dev_t devt, unsigned long range)
 {
-	kobj_unmap(bdev_map, devt, range);
+	if (range)
+		kobj_unmap(bdev_map, devt, range);
 }
 
 EXPORT_SYMBOL(blk_unregister_region);
diff -ruNp linux-3.13.11/block/partitions/efi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/partitions/efi.c
--- linux-3.13.11/block/partitions/efi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/partitions/efi.c	2014-07-09 12:00:15.000000000 +0200
@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries
 	if (!gpt)
 		return NULL;
 
-	count = le32_to_cpu(gpt->num_partition_entries) *
-                le32_to_cpu(gpt->sizeof_partition_entry);
-	if (!count)
+	if (!le32_to_cpu(gpt->num_partition_entries))
 		return NULL;
-	pte = kmalloc(count, GFP_KERNEL);
+	pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
 	if (!pte)
 		return NULL;
 
+	count = le32_to_cpu(gpt->num_partition_entries) *
+                le32_to_cpu(gpt->sizeof_partition_entry);
 	if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
 			(u8 *) pte, count) < count) {
 		kfree(pte);
diff -ruNp linux-3.13.11/block/scsi_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/scsi_ioctl.c
--- linux-3.13.11/block/scsi_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/block/scsi_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_q
 	return put_user(0, p);
 }
 
-static int sg_get_timeout(struct request_queue *q)
+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
 {
 	return jiffies_to_clock_t(q->sg_timeout);
 }
@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, fmode_t mode)
 {
-	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+	unsigned char tmpcmd[sizeof(rq->__cmd)];
+	unsigned char *cmdptr;
+
+	if (rq->cmd != rq->__cmd)
+		cmdptr = rq->cmd;
+	else
+		cmdptr = tmpcmd;
+
+	if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
+
+	if (cmdptr != rq->cmd)
+		memcpy(rq->cmd, cmdptr, hdr->cmd_len);
+
 	if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
 		return -EPERM;
 
@@ -415,6 +427,8 @@ int sg_scsi_ioctl(struct request_queue *
 	int err;
 	unsigned int in_len, out_len, bytes, opcode, cmdlen;
 	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char tmpcmd[sizeof(rq->__cmd)];
+	unsigned char *cmdptr;
 
 	if (!sic)
 		return -EINVAL;
@@ -448,9 +462,18 @@ int sg_scsi_ioctl(struct request_queue *
 	 */
 	err = -EFAULT;
 	rq->cmd_len = cmdlen;
-	if (copy_from_user(rq->cmd, sic->data, cmdlen))
+
+	if (rq->cmd != rq->__cmd)
+		cmdptr = rq->cmd;
+	else
+		cmdptr = tmpcmd;
+
+	if (copy_from_user(cmdptr, sic->data, cmdlen))
 		goto error;
 
+	if (rq->cmd != cmdptr)
+		memcpy(rq->cmd, cmdptr, cmdlen);
+
 	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 		goto error;
 
diff -ruNp linux-3.13.11/crypto/cryptd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/crypto/cryptd.c
--- linux-3.13.11/crypto/cryptd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/crypto/cryptd.c	2014-07-09 12:00:15.000000000 +0200
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
 
 struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
-};
+} __no_const;
 
 struct cryptd_hash_ctx {
 	struct crypto_shash *child;
@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
 
 struct cryptd_aead_request_ctx {
 	crypto_completion_t complete;
-};
+} __no_const;
 
 static void cryptd_queue_worker(struct work_struct *work);
 
diff -ruNp linux-3.13.11/crypto/pcrypt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/crypto/pcrypt.c
--- linux-3.13.11/crypto/pcrypt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/crypto/pcrypt.c	2014-07-09 12:00:15.000000000 +0200
@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padat
 	int ret;
 
 	pinst->kobj.kset = pcrypt_kset;
-	ret = kobject_add(&pinst->kobj, NULL, name);
+	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
 	if (!ret)
 		kobject_uevent(&pinst->kobj, KOBJ_ADD);
 
diff -ruNp linux-3.13.11/drivers/acpi/acpica/hwxfsleep.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/acpica/hwxfsleep.c
--- linux-3.13.11/drivers/acpi/acpica/hwxfsleep.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/acpica/hwxfsleep.c	2014-07-09 12:00:15.000000000 +0200
@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatc
 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
 
 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
-	 acpi_hw_extended_sleep},
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
-	 acpi_hw_extended_wake_prep},
-	{ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
+	{.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
+	 .extended_function = acpi_hw_extended_sleep},
+	{.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
+	 .extended_function = acpi_hw_extended_wake_prep},
+	{.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
+	 .extended_function = acpi_hw_extended_wake}
 };
 
 /*
diff -ruNp linux-3.13.11/drivers/acpi/apei/apei-internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/apei/apei-internal.h
--- linux-3.13.11/drivers/acpi/apei/apei-internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/apei/apei-internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(stru
 struct apei_exec_ins_type {
 	u32 flags;
 	apei_exec_ins_func_t run;
-};
+} __do_const;
 
 struct apei_exec_context {
 	u32 ip;
diff -ruNp linux-3.13.11/drivers/acpi/apei/ghes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/apei/ghes.c
--- linux-3.13.11/drivers/acpi/apei/ghes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/apei/ghes.c	2014-07-09 12:00:15.000000000 +0200
@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const c
 				 const struct acpi_hest_generic *generic,
 				 const struct acpi_generic_status *estatus)
 {
-	static atomic_t seqno;
+	static atomic_unchecked_t seqno;
 	unsigned int curr_seqno;
 	char pfx_seq[64];
 
@@ -509,7 +509,7 @@ static void __ghes_print_estatus(const c
 		else
 			pfx = KERN_ERR;
 	}
-	curr_seqno = atomic_inc_return(&seqno);
+	curr_seqno = atomic_inc_return_unchecked(&seqno);
 	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 	       pfx_seq, generic->header.source_id);
diff -ruNp linux-3.13.11/drivers/acpi/bgrt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/bgrt.c
--- linux-3.13.11/drivers/acpi/bgrt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/bgrt.c	2014-07-09 12:00:15.000000000 +0200
@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
 	if (!bgrt_image)
 		return -ENODEV;
 
-	bin_attr_image.private = bgrt_image;
-	bin_attr_image.size = bgrt_image_size;
+	pax_open_kernel();
+	*(void **)&bin_attr_image.private = bgrt_image;
+	*(size_t *)&bin_attr_image.size = bgrt_image_size;
+	pax_close_kernel();
 
 	bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
 	if (!bgrt_kobj)
diff -ruNp linux-3.13.11/drivers/acpi/blacklist.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/blacklist.c
--- linux-3.13.11/drivers/acpi/blacklist.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/blacklist.c	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
 	u32 is_critical_error;
 };
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
 
 /*
  * POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -164,7 +164,7 @@ static int __init dmi_disable_osi_win8(c
 	return 0;
 }
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
 	{
 	.callback = dmi_disable_osi_vista,
 	.ident = "Fujitsu Siemens",
diff -ruNp linux-3.13.11/drivers/acpi/custom_method.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/custom_method.c
--- linux-3.13.11/drivers/acpi/custom_method.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/custom_method.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *fil
 	struct acpi_table_header table;
 	acpi_status status;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	return -EPERM;
+#endif
+
 	if (!(*ppos)) {
 		/* parse the table header to get the table length */
 		if (count <= sizeof(struct acpi_table_header))
diff -ruNp linux-3.13.11/drivers/acpi/processor_idle.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/processor_idle.c
--- linux-3.13.11/drivers/acpi/processor_idle.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/processor_idle.c	2014-07-09 12:00:15.000000000 +0200
@@ -963,7 +963,7 @@ static int acpi_processor_setup_cpuidle_
 {
 	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state *state;
+	cpuidle_state_no_const *state;
 	struct cpuidle_driver *drv = &acpi_idle_driver;
 
 	if (!pr->flags.power_setup_done)
diff -ruNp linux-3.13.11/drivers/acpi/sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/sysfs.c
--- linux-3.13.11/drivers/acpi/sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/acpi/sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -425,11 +425,11 @@ static u32 num_counters;
 static struct attribute **all_attrs;
 static u32 acpi_gpe_count;
 
-static struct attribute_group interrupt_stats_attr_group = {
+static attribute_group_no_const interrupt_stats_attr_group = {
 	.name = "interrupts",
 };
 
-static struct kobj_attribute *counter_attrs;
+static kobj_attribute_no_const *counter_attrs;
 
 static void delete_gpe_attr_array(void)
 {
diff -ruNp linux-3.13.11/drivers/ata/libahci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libahci.c
--- linux-3.13.11/drivers/ata/libahci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libahci.c	2014-07-09 12:00:15.000000000 +0200
@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap
 }
 EXPORT_SYMBOL_GPL(ahci_kick_engine);
 
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 				struct ata_taskfile *tf, int is_cmd, u16 flags,
 				unsigned long timeout_msec)
 {
diff -ruNp linux-3.13.11/drivers/ata/libata-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata-core.c
--- linux-3.13.11/drivers/ata/libata-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata-core.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
-atomic_t ata_print_id = ATOMIC_INIT(0);
+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
 
 struct ata_force_param {
 	const char	*name;
@@ -4851,7 +4851,7 @@ void ata_qc_free(struct ata_queued_cmd *
 	struct ata_port *ap;
 	unsigned int tag;
 
-	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 	ap = qc->ap;
 
 	qc->flags = 0;
@@ -4867,7 +4867,7 @@ void __ata_qc_complete(struct ata_queued
 	struct ata_port *ap;
 	struct ata_link *link;
 
-	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+	BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
 	ap = qc->ap;
 	link = qc->dev->link;
@@ -5986,6 +5986,7 @@ static void ata_finalize_port_ops(struct
 		return;
 
 	spin_lock(&lock);
+	pax_open_kernel();
 
 	for (cur = ops->inherits; cur; cur = cur->inherits) {
 		void **inherit = (void **)cur;
@@ -5999,8 +6000,9 @@ static void ata_finalize_port_ops(struct
 		if (IS_ERR(*pp))
 			*pp = NULL;
 
-	ops->inherits = NULL;
+	*(struct ata_port_operations **)&ops->inherits = NULL;
 
+	pax_close_kernel();
 	spin_unlock(&lock);
 }
 
@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *h
 
 	/* give ports names and add SCSI hosts */
 	for (i = 0; i < host->n_ports; i++) {
-		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
+		host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
 		host->ports[i]->local_port_no = i + 1;
 	}
 
diff -ruNp linux-3.13.11/drivers/ata/libata-scsi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata-scsi.c
--- linux-3.13.11/drivers/ata/libata-scsi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata-scsi.c	2014-07-09 12:00:15.000000000 +0200
@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *a
 
 	if (rc)
 		return rc;
-	ap->print_id = atomic_inc_return(&ata_print_id);
+	ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
diff -ruNp linux-3.13.11/drivers/ata/libata.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata.h
--- linux-3.13.11/drivers/ata/libata.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/libata.h	2014-07-09 12:00:15.000000000 +0200
@@ -53,7 +53,7 @@ enum {
 	ATA_DNXFER_QUIET	= (1 << 31),
 };
 
-extern atomic_t ata_print_id;
+extern atomic_unchecked_t ata_print_id;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
diff -ruNp linux-3.13.11/drivers/ata/pata_arasan_cf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/pata_arasan_cf.c
--- linux-3.13.11/drivers/ata/pata_arasan_cf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ata/pata_arasan_cf.c	2014-07-09 12:00:15.000000000 +0200
@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platfo
 	/* Handle platform specific quirks */
 	if (quirk) {
 		if (quirk & CF_BROKEN_PIO) {
-			ap->ops->set_piomode = NULL;
+			pax_open_kernel();
+			*(void **)&ap->ops->set_piomode = NULL;
+			pax_close_kernel();
 			ap->pio_mask = 0;
 		}
 		if (quirk & CF_BROKEN_MWDMA)
diff -ruNp linux-3.13.11/drivers/atm/adummy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/adummy.c
--- linux-3.13.11/drivers/atm/adummy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/adummy.c	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
 		vcc->pop(vcc, skb);
 	else
 		dev_kfree_skb_any(skb);
-	atomic_inc(&vcc->stats->tx);
+	atomic_inc_unchecked(&vcc->stats->tx);
 
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/atm/ambassador.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/ambassador.c
--- linux-3.13.11/drivers/atm/ambassador.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/ambassador.c	2014-07-09 12:00:15.000000000 +0200
@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
   PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
   
   // VC layer stats
-  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+  atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
   
   // free the descriptor
   kfree (tx_descr);
@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
 	  dump_skb ("<<<", vc, skb);
 	  
 	  // VC layer stats
-	  atomic_inc(&atm_vcc->stats->rx);
+	  atomic_inc_unchecked(&atm_vcc->stats->rx);
 	  __net_timestamp(skb);
 	  // end of our responsibility
 	  atm_vcc->push (atm_vcc, skb);
@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
       } else {
       	PRINTK (KERN_INFO, "dropped over-size frame");
 	// should we count this?
-	atomic_inc(&atm_vcc->stats->rx_drop);
+	atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
       }
       
     } else {
@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * at
   }
   
   if (check_area (skb->data, skb->len)) {
-    atomic_inc(&atm_vcc->stats->tx_err);
+    atomic_inc_unchecked(&atm_vcc->stats->tx_err);
     return -ENOMEM; // ?
   }
   
diff -ruNp linux-3.13.11/drivers/atm/atmtcp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/atmtcp.c
--- linux-3.13.11/drivers/atm/atmtcp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/atmtcp.c	2014-07-09 12:00:15.000000000 +0200
@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
 		if (vcc->pop) vcc->pop(vcc,skb);
 		else dev_kfree_skb(skb);
 		if (dev_data) return 0;
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		return -ENOLINK;
 	}
 	size = skb->len+sizeof(struct atmtcp_hdr);
@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
 	if (!new_skb) {
 		if (vcc->pop) vcc->pop(vcc,skb);
 		else dev_kfree_skb(skb);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		return -ENOBUFS;
 	}
 	hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
 	if (vcc->pop) vcc->pop(vcc,skb);
 	else dev_kfree_skb(skb);
 	out_vcc->push(out_vcc,new_skb);
-	atomic_inc(&vcc->stats->tx);
-	atomic_inc(&out_vcc->stats->rx);
+	atomic_inc_unchecked(&vcc->stats->tx);
+	atomic_inc_unchecked(&out_vcc->stats->rx);
 	return 0;
 }
 
@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc
 	out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
 	read_unlock(&vcc_sklist_lock);
 	if (!out_vcc) {
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		goto done;
 	}
 	skb_pull(skb,sizeof(struct atmtcp_hdr));
@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc
 	__net_timestamp(new_skb);
 	skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
 	out_vcc->push(out_vcc,new_skb);
-	atomic_inc(&vcc->stats->tx);
-	atomic_inc(&out_vcc->stats->rx);
+	atomic_inc_unchecked(&vcc->stats->tx);
+	atomic_inc_unchecked(&out_vcc->stats->rx);
 done:
 	if (vcc->pop) vcc->pop(vcc,skb);
 	else dev_kfree_skb(skb);
diff -ruNp linux-3.13.11/drivers/atm/eni.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/eni.c
--- linux-3.13.11/drivers/atm/eni.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/eni.c	2014-07-09 12:00:15.000000000 +0200
@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
 		DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
 		    vcc->dev->number);
 		length = 0;
-		atomic_inc(&vcc->stats->rx_err);
+		atomic_inc_unchecked(&vcc->stats->rx_err);
 	}
 	else {
 		length = ATM_CELL_SIZE-1; /* no HEC */
@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
 			    size);
 		}
 		eff = length = 0;
-		atomic_inc(&vcc->stats->rx_err);
+		atomic_inc_unchecked(&vcc->stats->rx_err);
 	}
 	else {
 		size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
 			    "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
 			    vcc->dev->number,vcc->vci,length,size << 2,descr);
 			length = eff = 0;
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 		}
 	}
 	skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
@@ -767,7 +767,7 @@ rx_dequeued++;
 			vcc->push(vcc,skb);
 			pushed++;
 		}
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 	}
 	wake_up(&eni_dev->rx_wait);
 }
@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
 		    PCI_DMA_TODEVICE);
 		if (vcc->pop) vcc->pop(vcc,skb);
 		else dev_kfree_skb_irq(skb);
-		atomic_inc(&vcc->stats->tx);
+		atomic_inc_unchecked(&vcc->stats->tx);
 		wake_up(&eni_dev->tx_wait);
 dma_complete++;
 	}
diff -ruNp linux-3.13.11/drivers/atm/firestream.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/firestream.c
--- linux-3.13.11/drivers/atm/firestream.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/firestream.c	2014-07-09 12:00:15.000000000 +0200
@@ -749,7 +749,7 @@ static void process_txdone_queue (struct
 				}
 			}
 
-			atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+			atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
 
 			fs_dprintk (FS_DEBUG_TXMEM, "i");
 			fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
@@ -816,7 +816,7 @@ static void process_incoming (struct fs_
 #endif
 				skb_put (skb, qe->p1 & 0xffff); 
 				ATM_SKB(skb)->vcc = atm_vcc;
-				atomic_inc(&atm_vcc->stats->rx);
+				atomic_inc_unchecked(&atm_vcc->stats->rx);
 				__net_timestamp(skb);
 				fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
 				atm_vcc->push (atm_vcc, skb);
@@ -837,12 +837,12 @@ static void process_incoming (struct fs_
 				kfree (pe);
 			}
 			if (atm_vcc)
-				atomic_inc(&atm_vcc->stats->rx_drop);
+				atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
 			break;
 		case 0x1f: /*  Reassembly abort: no buffers. */
 			/* Silently increment error counter. */
 			if (atm_vcc)
-				atomic_inc(&atm_vcc->stats->rx_drop);
+				atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
 			break;
 		default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
 			printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", 
diff -ruNp linux-3.13.11/drivers/atm/fore200e.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/fore200e.c
--- linux-3.13.11/drivers/atm/fore200e.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/fore200e.c	2014-07-09 12:00:15.000000000 +0200
@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
 #endif
 		/* check error condition */
 		if (*entry->status & STATUS_ERROR)
-		    atomic_inc(&vcc->stats->tx_err);
+		    atomic_inc_unchecked(&vcc->stats->tx_err);
 		else
-		    atomic_inc(&vcc->stats->tx);
+		    atomic_inc_unchecked(&vcc->stats->tx);
 	    }
 	}
 
@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
     if (skb == NULL) {
 	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
 
-	atomic_inc(&vcc->stats->rx_drop);
+	atomic_inc_unchecked(&vcc->stats->rx_drop);
 	return -ENOMEM;
     } 
 
@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
 
 	dev_kfree_skb_any(skb);
 
-	atomic_inc(&vcc->stats->rx_drop);
+	atomic_inc_unchecked(&vcc->stats->rx_drop);
 	return -ENOMEM;
     }
 
     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 
     vcc->push(vcc, skb);
-    atomic_inc(&vcc->stats->rx);
+    atomic_inc_unchecked(&vcc->stats->rx);
 
     ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
 
@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
 		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
 			fore200e->atm_dev->number,
 			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
-		atomic_inc(&vcc->stats->rx_err);
+		atomic_inc_unchecked(&vcc->stats->rx_err);
 	    }
 	}
 
@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
 		goto retry_here;
 	    }
 
-	    atomic_inc(&vcc->stats->tx_err);
+	    atomic_inc_unchecked(&vcc->stats->tx_err);
 
 	    fore200e->tx_sat++;
 	    DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
diff -ruNp linux-3.13.11/drivers/atm/he.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/he.c
--- linux-3.13.11/drivers/atm/he.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/he.c	2014-07-09 12:00:15.000000000 +0200
@@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, i
 
 		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
 			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
-				atomic_inc(&vcc->stats->rx_drop);
+				atomic_inc_unchecked(&vcc->stats->rx_drop);
 			goto return_host_buffers;
 		}
 
@@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, i
 				RBRQ_LEN_ERR(he_dev->rbrq_head)
 							? "LEN_ERR" : "",
 							vcc->vpi, vcc->vci);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			goto return_host_buffers;
 		}
 
@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, i
 		vcc->push(vcc, skb);
 		spin_lock(&he_dev->global_lock);
 
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 
 return_host_buffers:
 		++pdus_assembled;
@@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
 					tpd->vcc->pop(tpd->vcc, tpd->skb);
 				else
 					dev_kfree_skb_any(tpd->skb);
-				atomic_inc(&tpd->vcc->stats->tx_err);
+				atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
 			}
 			pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
 			return;
@@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
 			vcc->pop(vcc, skb);
 		else
 			dev_kfree_skb_any(skb);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		return -EINVAL;
 	}
 
@@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
 			vcc->pop(vcc, skb);
 		else
 			dev_kfree_skb_any(skb);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		return -EINVAL;
 	}
 #endif
@@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
 			vcc->pop(vcc, skb);
 		else
 			dev_kfree_skb_any(skb);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		spin_unlock_irqrestore(&he_dev->global_lock, flags);
 		return -ENOMEM;
 	}
@@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
 					vcc->pop(vcc, skb);
 				else
 					dev_kfree_skb_any(skb);
-				atomic_inc(&vcc->stats->tx_err);
+				atomic_inc_unchecked(&vcc->stats->tx_err);
 				spin_unlock_irqrestore(&he_dev->global_lock, flags);
 				return -ENOMEM;
 			}
@@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
 	__enqueue_tpd(he_dev, tpd, cid);
 	spin_unlock_irqrestore(&he_dev->global_lock, flags);
 
-	atomic_inc(&vcc->stats->tx);
+	atomic_inc_unchecked(&vcc->stats->tx);
 
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/atm/horizon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/horizon.c
--- linux-3.13.11/drivers/atm/horizon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/horizon.c	2014-07-09 12:00:15.000000000 +0200
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
 	{
 	  struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
 	  // VC layer stats
-	  atomic_inc(&vcc->stats->rx);
+	  atomic_inc_unchecked(&vcc->stats->rx);
 	  __net_timestamp(skb);
 	  // end of our responsibility
 	  vcc->push (vcc, skb);
@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
 	dev->tx_iovec = NULL;
 	
 	// VC layer stats
-	atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
+	atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
 	
 	// free the skb
 	hrz_kfree_skb (skb);
diff -ruNp linux-3.13.11/drivers/atm/idt77252.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/idt77252.c
--- linux-3.13.11/drivers/atm/idt77252.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/idt77252.c	2014-07-09 12:00:15.000000000 +0200
@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, str
 		else
 			dev_kfree_skb(skb);
 
-		atomic_inc(&vcc->stats->tx);
+		atomic_inc_unchecked(&vcc->stats->tx);
 	}
 
 	atomic_dec(&scq->used);
@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, st
 			if ((sb = dev_alloc_skb(64)) == NULL) {
 				printk("%s: Can't allocate buffers for aal0.\n",
 				       card->name);
-				atomic_add(i, &vcc->stats->rx_drop);
+				atomic_add_unchecked(i, &vcc->stats->rx_drop);
 				break;
 			}
 			if (!atm_charge(vcc, sb->truesize)) {
 				RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
 					 card->name);
-				atomic_add(i - 1, &vcc->stats->rx_drop);
+				atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
 				dev_kfree_skb(sb);
 				break;
 			}
@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, st
 			ATM_SKB(sb)->vcc = vcc;
 			__net_timestamp(sb);
 			vcc->push(vcc, sb);
-			atomic_inc(&vcc->stats->rx);
+			atomic_inc_unchecked(&vcc->stats->rx);
 
 			cell += ATM_CELL_PAYLOAD;
 		}
@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, st
 			         "(CDC: %08x)\n",
 			         card->name, len, rpp->len, readl(SAR_REG_CDC));
 			recycle_rx_pool_skb(card, rpp);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			return;
 		}
 		if (stat & SAR_RSQE_CRC) {
 			RXPRINTK("%s: AAL5 CRC error.\n", card->name);
 			recycle_rx_pool_skb(card, rpp);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			return;
 		}
 		if (skb_queue_len(&rpp->queue) > 1) {
@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, st
 				RXPRINTK("%s: Can't alloc RX skb.\n",
 					 card->name);
 				recycle_rx_pool_skb(card, rpp);
-				atomic_inc(&vcc->stats->rx_err);
+				atomic_inc_unchecked(&vcc->stats->rx_err);
 				return;
 			}
 			if (!atm_charge(vcc, skb->truesize)) {
@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, st
 			__net_timestamp(skb);
 
 			vcc->push(vcc, skb);
-			atomic_inc(&vcc->stats->rx);
+			atomic_inc_unchecked(&vcc->stats->rx);
 
 			return;
 		}
@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, st
 		__net_timestamp(skb);
 
 		vcc->push(vcc, skb);
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 
 		if (skb->truesize > SAR_FB_SIZE_3)
 			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
 		if (vcc->qos.aal != ATM_AAL0) {
 			RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
 				card->name, vpi, vci);
-			atomic_inc(&vcc->stats->rx_drop);
+			atomic_inc_unchecked(&vcc->stats->rx_drop);
 			goto drop;
 		}
 	
 		if ((sb = dev_alloc_skb(64)) == NULL) {
 			printk("%s: Can't allocate buffers for AAL0.\n",
 			       card->name);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			goto drop;
 		}
 
@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
 		ATM_SKB(sb)->vcc = vcc;
 		__net_timestamp(sb);
 		vcc->push(vcc, sb);
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 
 drop:
 		skb_pull(queue, 64);
@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
 
 	if (vc == NULL) {
 		printk("%s: NULL connection in send().\n", card->name);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb(skb);
 		return -EINVAL;
 	}
 	if (!test_bit(VCF_TX, &vc->flags)) {
 		printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
 		break;
 	default:
 		printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb(skb);
 		return -EINVAL;
 	}
 
 	if (skb_shinfo(skb)->nr_frags != 0) {
 		printk("%s: No scatter-gather yet.\n", card->name);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
 
 	err = queue_skb(card, vc, skb, oam);
 	if (err) {
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb(skb);
 		return err;
 	}
@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
 	skb = dev_alloc_skb(64);
 	if (!skb) {
 		printk("%s: Out of memory in send_oam().\n", card->name);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		return -ENOMEM;
 	}
 	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
diff -ruNp linux-3.13.11/drivers/atm/iphase.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/iphase.c
--- linux-3.13.11/drivers/atm/iphase.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/iphase.c	2014-07-09 12:00:15.000000000 +0200
@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
 	status = (u_short) (buf_desc_ptr->desc_mode);  
 	if (status & (RX_CER | RX_PTE | RX_OFL))  
 	{  
-                atomic_inc(&vcc->stats->rx_err);
+                atomic_inc_unchecked(&vcc->stats->rx_err);
 		IF_ERR(printk("IA: bad packet, dropping it");)  
                 if (status & RX_CER) { 
                     IF_ERR(printk(" cause: packet CRC error\n");)
@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
 	len = dma_addr - buf_addr;  
         if (len > iadev->rx_buf_sz) {
            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
-           atomic_inc(&vcc->stats->rx_err);
+           atomic_inc_unchecked(&vcc->stats->rx_err);
 	   goto out_free_desc;
         }
 		  
@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *
           ia_vcc = INPH_IA_VCC(vcc);
           if (ia_vcc == NULL)
           {
-             atomic_inc(&vcc->stats->rx_err);
+             atomic_inc_unchecked(&vcc->stats->rx_err);
              atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *
           if ((length > iadev->rx_buf_sz) || (length > 
                               (skb->len - sizeof(struct cpcs_trailer))))
           {
-             atomic_inc(&vcc->stats->rx_err);
+             atomic_inc_unchecked(&vcc->stats->rx_err);
              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
                                                             length, skb->len);)
              atm_return(vcc, skb->truesize);
@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *
 
 	  IF_RX(printk("rx_dle_intr: skb push");)  
 	  vcc->push(vcc,skb);  
-	  atomic_inc(&vcc->stats->rx);
+	  atomic_inc_unchecked(&vcc->stats->rx);
           iadev->rx_pkt_cnt++;
       }  
 INCR_DLE:
@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev,
          {
              struct k_sonet_stats *stats;
              stats = &PRIV(_ia_dev[board])->sonet_stats;
-             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
-             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
-             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
-             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
-             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
-             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
-             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
-             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
-             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
+             printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
+             printk("line_bip   : %d\n", atomic_read_unchecked(&stats->line_bip));
+             printk("path_bip   : %d\n", atomic_read_unchecked(&stats->path_bip));
+             printk("line_febe  : %d\n", atomic_read_unchecked(&stats->line_febe));
+             printk("path_febe  : %d\n", atomic_read_unchecked(&stats->path_febe));
+             printk("corr_hcs   : %d\n", atomic_read_unchecked(&stats->corr_hcs));
+             printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
+             printk("tx_cells   : %d\n", atomic_read_unchecked(&stats->tx_cells));
+             printk("rx_cells   : %d\n", atomic_read_unchecked(&stats->rx_cells));
          }
             ia_cmds.status = 0;
             break;
@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
 	if ((desc == 0) || (desc > iadev->num_tx_desc))  
 	{  
 		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) 
-                atomic_inc(&vcc->stats->tx);
+                atomic_inc_unchecked(&vcc->stats->tx);
 		if (vcc->pop)   
 		    vcc->pop(vcc, skb);   
 		else  
@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
         ATM_DESC(skb) = vcc->vci;
         skb_queue_tail(&iadev->tx_dma_q, skb);
 
-        atomic_inc(&vcc->stats->tx);
+        atomic_inc_unchecked(&vcc->stats->tx);
         iadev->tx_pkt_cnt++;
 	/* Increment transaction counter */  
 	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
         
 #if 0        
         /* add flow control logic */ 
-        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
+        if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
           if (iavcc->vc_desc_cnt > 10) {
              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
diff -ruNp linux-3.13.11/drivers/atm/lanai.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/lanai.c
--- linux-3.13.11/drivers/atm/lanai.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/lanai.c	2014-07-09 12:00:15.000000000 +0200
@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
 	vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
 	lanai_endtx(lanai, lvcc);
 	lanai_free_skb(lvcc->tx.atmvcc, skb);
-	atomic_inc(&lvcc->tx.atmvcc->stats->tx);
+	atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
 }
 
 /* Try to fill the buffer - don't call unless there is backlog */
@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
 	ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
 	__net_timestamp(skb);
 	lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
-	atomic_inc(&lvcc->rx.atmvcc->stats->rx);
+	atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
     out:
 	lvcc->rx.buf.ptr = end;
 	cardvcc_write(lvcc, endptr, vcc_rxreadptr);
@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_d
 		DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
 		    "vcc %d\n", lanai->number, (unsigned int) s, vci);
 		lanai->stats.service_rxnotaal5++;
-		atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+		atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 		return 0;
 	}
 	if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_d
 		int bytes;
 		read_unlock(&vcc_sklist_lock);
 		DPRINTK("got trashed rx pdu on vci %d\n", vci);
-		atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+		atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 		lvcc->stats.x.aal5.service_trash++;
 		bytes = (SERVICE_GET_END(s) * 16) -
 		    (((unsigned long) lvcc->rx.buf.ptr) -
@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_d
 	}
 	if (s & SERVICE_STREAM) {
 		read_unlock(&vcc_sklist_lock);
-		atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+		atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 		lvcc->stats.x.aal5.service_stream++;
 		printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
 		    "PDU on VCI %d!\n", lanai->number, vci);
@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_d
 		return 0;
 	}
 	DPRINTK("got rx crc error on vci %d\n", vci);
-	atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
+	atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
 	lvcc->stats.x.aal5.service_rxcrc++;
 	lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
 	cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
diff -ruNp linux-3.13.11/drivers/atm/nicstar.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/nicstar.c
--- linux-3.13.11/drivers/atm/nicstar.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/nicstar.c	2014-07-09 12:00:15.000000000 +0200
@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc,
 	if ((vc = (vc_map *) vcc->dev_data) == NULL) {
 		printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
 		       card->index);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc,
 	if (!vc->tx) {
 		printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
 		       card->index);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc,
 	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
 		printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
 		       card->index);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
 
 	if (skb_shinfo(skb)->nr_frags != 0) {
 		printk("nicstar%d: No scatter-gather yet.\n", card->index);
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc,
 	}
 
 	if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
-		atomic_inc(&vcc->stats->tx_err);
+		atomic_inc_unchecked(&vcc->stats->tx_err);
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
-	atomic_inc(&vcc->stats->tx);
+	atomic_inc_unchecked(&vcc->stats->tx);
 
 	return 0;
 }
@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns
 				printk
 				    ("nicstar%d: Can't allocate buffers for aal0.\n",
 				     card->index);
-				atomic_add(i, &vcc->stats->rx_drop);
+				atomic_add_unchecked(i, &vcc->stats->rx_drop);
 				break;
 			}
 			if (!atm_charge(vcc, sb->truesize)) {
 				RXPRINTK
 				    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
 				     card->index);
-				atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
+				atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */
 				dev_kfree_skb_any(sb);
 				break;
 			}
@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns
 			ATM_SKB(sb)->vcc = vcc;
 			__net_timestamp(sb);
 			vcc->push(vcc, sb);
-			atomic_inc(&vcc->stats->rx);
+			atomic_inc_unchecked(&vcc->stats->rx);
 			cell += ATM_CELL_PAYLOAD;
 		}
 
@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns
 			if (iovb == NULL) {
 				printk("nicstar%d: Out of iovec buffers.\n",
 				       card->index);
-				atomic_inc(&vcc->stats->rx_drop);
+				atomic_inc_unchecked(&vcc->stats->rx_drop);
 				recycle_rx_buf(card, skb);
 				return;
 			}
@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns
 		   small or large buffer itself. */
 	} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
 		printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
-		atomic_inc(&vcc->stats->rx_err);
+		atomic_inc_unchecked(&vcc->stats->rx_err);
 		recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 				      NS_MAX_IOVECS);
 		NS_PRV_IOVCNT(iovb) = 0;
@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns
 			    ("nicstar%d: Expected a small buffer, and this is not one.\n",
 			     card->index);
 			which_list(card, skb);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			recycle_rx_buf(card, skb);
 			vc->rx_iov = NULL;
 			recycle_iov_buf(card, iovb);
@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns
 			    ("nicstar%d: Expected a large buffer, and this is not one.\n",
 			     card->index);
 			which_list(card, skb);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 					      NS_PRV_IOVCNT(iovb));
 			vc->rx_iov = NULL;
@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns
 				printk(" - PDU size mismatch.\n");
 			else
 				printk(".\n");
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
 					      NS_PRV_IOVCNT(iovb));
 			vc->rx_iov = NULL;
@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
 			/* skb points to a small buffer */
 			if (!atm_charge(vcc, skb->truesize)) {
 				push_rxbufs(card, skb);
-				atomic_inc(&vcc->stats->rx_drop);
+				atomic_inc_unchecked(&vcc->stats->rx_drop);
 			} else {
 				skb_put(skb, len);
 				dequeue_sm_buf(card, skb);
@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns
 				ATM_SKB(skb)->vcc = vcc;
 				__net_timestamp(skb);
 				vcc->push(vcc, skb);
-				atomic_inc(&vcc->stats->rx);
+				atomic_inc_unchecked(&vcc->stats->rx);
 			}
 		} else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */
 			struct sk_buff *sb;
@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns
 			if (len <= NS_SMBUFSIZE) {
 				if (!atm_charge(vcc, sb->truesize)) {
 					push_rxbufs(card, sb);
-					atomic_inc(&vcc->stats->rx_drop);
+					atomic_inc_unchecked(&vcc->stats->rx_drop);
 				} else {
 					skb_put(sb, len);
 					dequeue_sm_buf(card, sb);
@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns
 					ATM_SKB(sb)->vcc = vcc;
 					__net_timestamp(sb);
 					vcc->push(vcc, sb);
-					atomic_inc(&vcc->stats->rx);
+					atomic_inc_unchecked(&vcc->stats->rx);
 				}
 
 				push_rxbufs(card, skb);
@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns
 
 				if (!atm_charge(vcc, skb->truesize)) {
 					push_rxbufs(card, skb);
-					atomic_inc(&vcc->stats->rx_drop);
+					atomic_inc_unchecked(&vcc->stats->rx_drop);
 				} else {
 					dequeue_lg_buf(card, skb);
 #ifdef NS_USE_DESTRUCTORS
@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns
 					ATM_SKB(skb)->vcc = vcc;
 					__net_timestamp(skb);
 					vcc->push(vcc, skb);
-					atomic_inc(&vcc->stats->rx);
+					atomic_inc_unchecked(&vcc->stats->rx);
 				}
 
 				push_rxbufs(card, sb);
@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns
 					printk
 					    ("nicstar%d: Out of huge buffers.\n",
 					     card->index);
-					atomic_inc(&vcc->stats->rx_drop);
+					atomic_inc_unchecked(&vcc->stats->rx_drop);
 					recycle_iovec_rx_bufs(card,
 							      (struct iovec *)
 							      iovb->data,
@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns
 					card->hbpool.count++;
 				} else
 					dev_kfree_skb_any(hb);
-				atomic_inc(&vcc->stats->rx_drop);
+				atomic_inc_unchecked(&vcc->stats->rx_drop);
 			} else {
 				/* Copy the small buffer to the huge buffer */
 				sb = (struct sk_buff *)iov->iov_base;
@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns
 #endif /* NS_USE_DESTRUCTORS */
 				__net_timestamp(hb);
 				vcc->push(vcc, hb);
-				atomic_inc(&vcc->stats->rx);
+				atomic_inc_unchecked(&vcc->stats->rx);
 			}
 		}
 
diff -ruNp linux-3.13.11/drivers/atm/solos-pci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/solos-pci.c
--- linux-3.13.11/drivers/atm/solos-pci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/solos-pci.c	2014-07-09 12:00:15.000000000 +0200
@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
 				}
 				atm_charge(vcc, skb->truesize);
 				vcc->push(vcc, skb);
-				atomic_inc(&vcc->stats->rx);
+				atomic_inc_unchecked(&vcc->stats->rx);
 				break;
 
 			case PKT_STATUS:
@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_car
 			vcc = SKB_CB(oldskb)->vcc;
 
 			if (vcc) {
-				atomic_inc(&vcc->stats->tx);
+				atomic_inc_unchecked(&vcc->stats->tx);
 				solos_pop(vcc, oldskb);
 			} else {
 				dev_kfree_skb_irq(oldskb);
diff -ruNp linux-3.13.11/drivers/atm/suni.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/suni.c
--- linux-3.13.11/drivers/atm/suni.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/suni.c	2014-07-09 12:00:15.000000000 +0200
@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
 
 
 #define ADD_LIMITED(s,v) \
-    atomic_add((v),&stats->s); \
-    if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
+    atomic_add_unchecked((v),&stats->s); \
+    if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
 
 
 static void suni_hz(unsigned long from_timer)
diff -ruNp linux-3.13.11/drivers/atm/uPD98402.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/uPD98402.c
--- linux-3.13.11/drivers/atm/uPD98402.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/uPD98402.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
 	struct sonet_stats tmp;
  	int error = 0;
 
-	atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
+	atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
 	sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
 	if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
 	if (zero && !error) {
@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
 
 
 #define ADD_LIMITED(s,v) \
-    { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
-    if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
-	atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
+    { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
+    if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
+	atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
 
 
 static void stat_event(struct atm_dev *dev)
@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
 		if (reason & uPD98402_INT_PFM) stat_event(dev);
 		if (reason & uPD98402_INT_PCO) {
 			(void) GET(PCOCR); /* clear interrupt cause */
-			atomic_add(GET(HECCT),
+			atomic_add_unchecked(GET(HECCT),
 			    &PRIV(dev)->sonet_stats.uncorr_hcs);
 		}
 		if ((reason & uPD98402_INT_RFO) && 
@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
 	PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
 	  uPD98402_INT_LOS),PIMR); /* enable them */
 	(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
-	atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
-	atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
-	atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
+	atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
+	atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
+	atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
 	return 0;
 }
 
diff -ruNp linux-3.13.11/drivers/atm/zatm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/zatm.c
--- linux-3.13.11/drivers/atm/zatm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/atm/zatm.c	2014-07-09 12:00:15.000000000 +0200
@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
 		}
 		if (!size) {
 			dev_kfree_skb_irq(skb);
-			if (vcc) atomic_inc(&vcc->stats->rx_err);
+			if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
 			continue;
 		}
 		if (!atm_charge(vcc,skb->truesize)) {
@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
 		skb->len = size;
 		ATM_SKB(skb)->vcc = vcc;
 		vcc->push(vcc,skb);
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 	}
 	zout(pos & 0xffff,MTA(mbx));
 #if 0 /* probably a stupid idea */
@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
 			skb_queue_head(&zatm_vcc->backlog,skb);
 			break;
 		}
-	atomic_inc(&vcc->stats->tx);
+	atomic_inc_unchecked(&vcc->stats->tx);
 	wake_up(&zatm_vcc->tx_wait);
 }
 
diff -ruNp linux-3.13.11/drivers/base/bus.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/bus.c
--- linux-3.13.11/drivers/base/bus.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/bus.c	2014-07-09 12:00:15.000000000 +0200
@@ -1115,7 +1115,7 @@ int subsys_interface_register(struct sub
 		return -EINVAL;
 
 	mutex_lock(&subsys->p->mutex);
-	list_add_tail(&sif->node, &subsys->p->interfaces);
+	pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
 	if (sif->add_dev) {
 		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
 		while ((dev = subsys_dev_iter_next(&iter)))
@@ -1140,7 +1140,7 @@ void subsys_interface_unregister(struct
 	subsys = sif->subsys;
 
 	mutex_lock(&subsys->p->mutex);
-	list_del_init(&sif->node);
+	pax_list_del_init((struct list_head *)&sif->node);
 	if (sif->remove_dev) {
 		subsys_dev_iter_init(&iter, subsys, NULL, NULL);
 		while ((dev = subsys_dev_iter_next(&iter)))
diff -ruNp linux-3.13.11/drivers/base/devtmpfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/devtmpfs.c
--- linux-3.13.11/drivers/base/devtmpfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/devtmpfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
 	if (!thread)
 		return 0;
 
-	err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
+	err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
 	if (err)
 		printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
 	else
@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
 	*err = sys_unshare(CLONE_NEWNS);
 	if (*err)
 		goto out;
-	*err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+	*err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
 	if (*err)
 		goto out;
-	sys_chdir("/.."); /* will traverse into overmounted root */
-	sys_chroot(".");
+	sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
+	sys_chroot((char __force_user *)".");
 	complete(&setup_done);
 	while (1) {
 		spin_lock(&req_lock);
diff -ruNp linux-3.13.11/drivers/base/node.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/node.c
--- linux-3.13.11/drivers/base/node.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/node.c	2014-07-09 12:00:15.000000000 +0200
@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum no
 struct node_attr {
 	struct device_attribute attr;
 	enum node_states state;
-};
+} __do_const;
 
 static ssize_t show_node_state(struct device *dev,
 			       struct device_attribute *attr, char *buf)
diff -ruNp linux-3.13.11/drivers/base/power/domain.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/domain.c
--- linux-3.13.11/drivers/base/power/domain.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/domain.c	2014-07-09 12:00:15.000000000 +0200
@@ -1809,9 +1809,9 @@ int __pm_genpd_remove_callbacks(struct d
 
 	if (dev->power.subsys_data->domain_data) {
 		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
-		gpd_data->ops = (struct gpd_dev_ops){ NULL };
+		memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
 		if (clear_td)
-			gpd_data->td = (struct gpd_timing_data){ 0 };
+			memset(&gpd_data->td, 0, sizeof(gpd_data->td));
 
 		if (--gpd_data->refcount == 0) {
 			dev->power.subsys_data->domain_data = NULL;
@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct gener
 {
 	struct cpuidle_driver *cpuidle_drv;
 	struct gpd_cpu_data *cpu_data;
-	struct cpuidle_state *idle_state;
+	cpuidle_state_no_const *idle_state;
 	int ret = 0;
 
 	if (IS_ERR_OR_NULL(genpd) || state < 0)
@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const c
 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
 	struct gpd_cpu_data *cpu_data;
-	struct cpuidle_state *idle_state;
+	cpuidle_state_no_const *idle_state;
 	int ret = 0;
 
 	if (IS_ERR_OR_NULL(genpd))
diff -ruNp linux-3.13.11/drivers/base/power/sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/sysfs.c
--- linux-3.13.11/drivers/base/power/sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct d
 			return -EIO;
 		}
 	}
-	return sprintf(buf, p);
+	return sprintf(buf, "%s", p);
 }
 
 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
diff -ruNp linux-3.13.11/drivers/base/power/wakeup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/wakeup.c
--- linux-3.13.11/drivers/base/power/wakeup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/power/wakeup.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
  * They need to be modified together atomically, so it's better to use one
  * atomic variable to hold them both.
  */
-static atomic_t combined_event_count = ATOMIC_INIT(0);
+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
 
 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
 
 static void split_counters(unsigned int *cnt, unsigned int *inpr)
 {
-	unsigned int comb = atomic_read(&combined_event_count);
+	unsigned int comb = atomic_read_unchecked(&combined_event_count);
 
 	*cnt = (comb >> IN_PROGRESS_BITS);
 	*inpr = comb & MAX_IN_PROGRESS;
@@ -395,7 +395,7 @@ static void wakeup_source_activate(struc
 		ws->start_prevent_time = ws->last_time;
 
 	/* Increment the counter of events in progress. */
-	cec = atomic_inc_return(&combined_event_count);
+	cec = atomic_inc_return_unchecked(&combined_event_count);
 
 	trace_wakeup_source_activate(ws->name, cec);
 }
@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(str
 	 * Increment the counter of registered wakeup events and decrement the
 	 * couter of wakeup events in progress simultaneously.
 	 */
-	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+	cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
 	trace_wakeup_source_deactivate(ws->name, cec);
 
 	split_counters(&cnt, &inpr);
diff -ruNp linux-3.13.11/drivers/base/syscore.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/syscore.c
--- linux-3.13.11/drivers/base/syscore.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/base/syscore.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
 void register_syscore_ops(struct syscore_ops *ops)
 {
 	mutex_lock(&syscore_ops_lock);
-	list_add_tail(&ops->node, &syscore_ops_list);
+	pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
 	mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(register_syscore_ops);
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
 void unregister_syscore_ops(struct syscore_ops *ops)
 {
 	mutex_lock(&syscore_ops_lock);
-	list_del(&ops->node);
+	pax_list_del((struct list_head *)&ops->node);
 	mutex_unlock(&syscore_ops_lock);
 }
 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
diff -ruNp linux-3.13.11/drivers/block/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/Kconfig
--- linux-3.13.11/drivers/block/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -281,6 +281,13 @@ config BLK_DEV_CRYPTOLOOP
 
 source "drivers/block/drbd/Kconfig"
 
+config BLK_DEV_VROOT
+	tristate "Virtual Root device support"
+	depends on QUOTACTL
+	---help---
+	  Saying Y here will allow you to use quota/fs ioctls on a shared
+	  partition within a virtual server without compromising security.
+
 config BLK_DEV_NBD
 	tristate "Network block device support"
 	depends on NET
diff -ruNp linux-3.13.11/drivers/block/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/Makefile
--- linux-3.13.11/drivers/block/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,7 @@ obj-$(CONFIG_VIRTIO_BLK)	+= virtio_blk.o
 obj-$(CONFIG_VIODASD)		+= viodasd.o
 obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
 obj-$(CONFIG_BLK_DEV_HD)	+= hd.o
+obj-$(CONFIG_BLK_DEV_VROOT)	+= vroot.o
 
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
 obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= xen-blkback/
diff -ruNp linux-3.13.11/drivers/block/cciss.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cciss.c
--- linux-3.13.11/drivers/block/cciss.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cciss.c	2014-07-09 12:00:15.000000000 +0200
@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, CommandList_struct, list);
 		/* can't do anything if fifo is full */
-		if ((h->access.fifo_full(h))) {
+		if ((h->access->fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
 			break;
 		}
@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
 		h->Qdepth--;
 
 		/* Tell the controller execute command */
-		h->access.submit_command(h, c);
+		h->access->submit_command(h, c);
 
 		/* Put job onto the completed Q */
 		addQ(&h->cmpQ, c);
@@ -3447,17 +3447,17 @@ startio:
 
 static inline unsigned long get_next_completion(ctlr_info_t *h)
 {
-	return h->access.command_completed(h);
+	return h->access->command_completed(h);
 }
 
 static inline int interrupt_pending(ctlr_info_t *h)
 {
-	return h->access.intr_pending(h);
+	return h->access->intr_pending(h);
 }
 
 static inline long interrupt_not_for_us(ctlr_info_t *h)
 {
-	return ((h->access.intr_pending(h) == 0) ||
+	return ((h->access->intr_pending(h) == 0) ||
 		(h->interrupts_enabled == 0));
 }
 
@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info
 	u32 a;
 
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-		return h->access.command_completed(h);
+		return h->access->command_completed(h);
 
 	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
 		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_pe
 		trans_support & CFGTBL_Trans_use_short_tags);
 
 	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
+	h->access = &SA5_performant_access;
 	h->transMethod = CFGTBL_Trans_Performant;
 
 	return;
@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h
 	if (prod_index < 0)
 		return -ENODEV;
 	h->product_name = products[prod_index].product_name;
-	h->access = *(products[prod_index].access);
+	h->access = products[prod_index].access;
 
 	if (cciss_board_disabled(h)) {
 		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
 	}
 
 	/* make sure the board interrupts are off */
-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
+	h->access->set_intr_mask(h, CCISS_INTR_OFF);
 	rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
 	if (rc)
 		goto clean2;
@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
 		 * fake ones to scoop up any residual completions.
 		 */
 		spin_lock_irqsave(&h->lock, flags);
-		h->access.set_intr_mask(h, CCISS_INTR_OFF);
+		h->access->set_intr_mask(h, CCISS_INTR_OFF);
 		spin_unlock_irqrestore(&h->lock, flags);
 		free_irq(h->intr[h->intr_mode], h);
 		rc = cciss_request_irq(h, cciss_msix_discard_completions,
@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
 		dev_info(&h->pdev->dev, "Board READY.\n");
 		dev_info(&h->pdev->dev,
 			"Waiting for stale completions to drain.\n");
-		h->access.set_intr_mask(h, CCISS_INTR_ON);
+		h->access->set_intr_mask(h, CCISS_INTR_ON);
 		msleep(10000);
-		h->access.set_intr_mask(h, CCISS_INTR_OFF);
+		h->access->set_intr_mask(h, CCISS_INTR_OFF);
 
 		rc = controller_reset_failed(h->cfgtable);
 		if (rc)
@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
 	cciss_scsi_setup(h);
 
 	/* Turn the interrupts on so we can service requests */
-	h->access.set_intr_mask(h, CCISS_INTR_ON);
+	h->access->set_intr_mask(h, CCISS_INTR_ON);
 
 	/* Get the firmware version */
 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_de
 	kfree(flush_buf);
 	if (return_code != IO_OK)
 		dev_warn(&h->pdev->dev, "Error flushing cache\n");
-	h->access.set_intr_mask(h, CCISS_INTR_OFF);
+	h->access->set_intr_mask(h, CCISS_INTR_OFF);
 	free_irq(h->intr[h->intr_mode], h);
 }
 
diff -ruNp linux-3.13.11/drivers/block/cciss.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cciss.h
--- linux-3.13.11/drivers/block/cciss.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cciss.h	2014-07-09 12:00:15.000000000 +0200
@@ -101,7 +101,7 @@ struct ctlr_info
 	/* information about each logical volume */
 	drive_info_struct *drv[CISS_MAX_LUN];
 
-	struct access_method access;
+	struct access_method *access;
 
 	/* queue and queue Info */ 
 	struct list_head reqQ;
@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(
 }
 
 static struct access_method SA5_access = {
-	SA5_submit_command,
-	SA5_intr_mask,
-	SA5_fifo_full,
-	SA5_intr_pending,
-	SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5B_access = {
-        SA5_submit_command,
-        SA5B_intr_mask,
-        SA5_fifo_full,
-        SA5B_intr_pending,
-        SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5B_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5B_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	SA5_submit_command,
-	SA5_performant_intr_mask,
-	SA5_fifo_full,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 struct board_type {
diff -ruNp linux-3.13.11/drivers/block/cpqarray.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cpqarray.c
--- linux-3.13.11/drivers/block/cpqarray.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cpqarray.c	2014-07-09 12:00:15.000000000 +0200
@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i,
 	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
 		goto Enomem4;
 	}
-	hba[i]->access.set_intr_mask(hba[i], 0);
+	hba[i]->access->set_intr_mask(hba[i], 0);
 	if (request_irq(hba[i]->intr, do_ida_intr,
 		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
 	{
@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i,
 	add_timer(&hba[i]->timer);
 
 	/* Enable IRQ now that spinlock and rate limit timer are set up */
-	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+	hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
 
 	for(j=0; j<NWD; j++) {
 		struct gendisk *disk = ida_gendisk[i][j];
@@ -694,7 +694,7 @@ DBGINFO(
 	for(i=0; i<NR_PRODUCTS; i++) {
 		if (board_id == products[i].board_id) {
 			c->product_name = products[i].product_name;
-			c->access = *(products[i].access);
+			c->access = products[i].access;
 			break;
 		}
 	}
@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
 		hba[ctlr]->intr = intr;
 		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
 		hba[ctlr]->product_name = products[j].product_name;
-		hba[ctlr]->access = *(products[j].access);
+		hba[ctlr]->access = products[j].access;
 		hba[ctlr]->ctlr = ctlr;
 		hba[ctlr]->board_id = board_id;
 		hba[ctlr]->pci_dev = NULL; /* not PCI */
@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
 
 	while((c = h->reqQ) != NULL) {
 		/* Can't do anything if we're busy */
-		if (h->access.fifo_full(h) == 0)
+		if (h->access->fifo_full(h) == 0)
 			return;
 
 		/* Get the first entry from the request Q */
@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
 		h->Qdepth--;
 	
 		/* Tell the controller to do our bidding */
-		h->access.submit_command(h, c);
+		h->access->submit_command(h, c);
 
 		/* Get onto the completion Q */
 		addQ(&h->cmpQ, c);
@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq,
 	unsigned long flags;
 	__u32 a,a1;
 
-	istat = h->access.intr_pending(h);
+	istat = h->access->intr_pending(h);
 	/* Is this interrupt for us? */
 	if (istat == 0)
 		return IRQ_NONE;
@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq,
 	 */
 	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
 	if (istat & FIFO_NOT_EMPTY) {
-		while((a = h->access.command_completed(h))) {
+		while((a = h->access->command_completed(h))) {
 			a1 = a; a &= ~3;
 			if ((c = h->cmpQ) == NULL)
 			{  
@@ -1448,11 +1448,11 @@ static int sendcmd(
 	/*
 	 * Disable interrupt
 	 */
-	info_p->access.set_intr_mask(info_p, 0);
+	info_p->access->set_intr_mask(info_p, 0);
 	/* Make sure there is room in the command FIFO */
 	/* Actually it should be completely empty at this time. */
 	for (i = 200000; i > 0; i--) {
-		temp = info_p->access.fifo_full(info_p);
+		temp = info_p->access->fifo_full(info_p);
 		if (temp != 0) {
 			break;
 		}
@@ -1465,7 +1465,7 @@ DBG(
 	/*
 	 * Send the cmd
 	 */
-	info_p->access.submit_command(info_p, c);
+	info_p->access->submit_command(info_p, c);
 	complete = pollcomplete(ctlr);
 	
 	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t
 	 * we check the new geometry.  Then turn interrupts back on when
 	 * we're done.
 	 */
-	host->access.set_intr_mask(host, 0);
+	host->access->set_intr_mask(host, 0);
 	getgeometry(ctlr);
-	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
+	host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
 
 	for(i=0; i<NWD; i++) {
 		struct gendisk *disk = ida_gendisk[ctlr][i];
@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
 	/* Wait (up to 2 seconds) for a command to complete */
 
 	for (i = 200000; i > 0; i--) {
-		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+		done = hba[ctlr]->access->command_completed(hba[ctlr]);
 		if (done == 0) {
 			udelay(10);	/* a short fixed delay */
 		} else
diff -ruNp linux-3.13.11/drivers/block/cpqarray.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cpqarray.h
--- linux-3.13.11/drivers/block/cpqarray.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/cpqarray.h	2014-07-09 12:00:15.000000000 +0200
@@ -99,7 +99,7 @@ struct ctlr_info {
 	drv_info_t	drv[NWD];
 	struct proc_dir_entry *proc;
 
-	struct access_method access;
+	struct access_method *access;
 
 	cmdlist_t *reqQ;
 	cmdlist_t *cmpQ;
diff -ruNp linux-3.13.11/drivers/block/drbd/drbd_int.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_int.h
--- linux-3.13.11/drivers/block/drbd/drbd_int.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_int.h	2014-07-09 12:00:15.000000000 +0200
@@ -582,7 +582,7 @@ struct drbd_epoch {
 	struct drbd_tconn *tconn;
 	struct list_head list;
 	unsigned int barrier_nr;
-	atomic_t epoch_size; /* increased on every request added. */
+	atomic_unchecked_t epoch_size; /* increased on every request added. */
 	atomic_t active;     /* increased on every req. added, and dec on every finished. */
 	unsigned long flags;
 };
@@ -1022,7 +1022,7 @@ struct drbd_conf {
 	unsigned int al_tr_number;
 	int al_tr_cycle;
 	wait_queue_head_t seq_wait;
-	atomic_t packet_seq;
+	atomic_unchecked_t packet_seq;
 	unsigned int peer_seq;
 	spinlock_t peer_seq_lock;
 	unsigned int minor;
@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct
 	char __user *uoptval;
 	int err;
 
-	uoptval = (char __user __force *)optval;
+	uoptval = (char __force_user *)optval;
 
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
diff -ruNp linux-3.13.11/drivers/block/drbd/drbd_interval.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_interval.c
--- linux-3.13.11/drivers/block/drbd/drbd_interval.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_interval.c	2014-07-09 12:00:15.000000000 +0200
@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_nod
 }
 
 static const struct rb_augment_callbacks augment_callbacks = {
-	augment_propagate,
-	augment_copy,
-	augment_rotate,
+	.propagate = augment_propagate,
+	.copy = augment_copy,
+	.rotate = augment_rotate,
 };
 
 /**
diff -ruNp linux-3.13.11/drivers/block/drbd/drbd_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_main.c
--- linux-3.13.11/drivers/block/drbd/drbd_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_co
 	p->sector = sector;
 	p->block_id = block_id;
 	p->blksize = blksize;
-	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+	p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
 	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
 }
 
@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *m
 		return -EIO;
 	p->sector = cpu_to_be64(req->i.sector);
 	p->block_id = (unsigned long)req;
-	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
+	p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
 	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
 	if (mdev->state.conn >= C_SYNC_SOURCE &&
 	    mdev->state.conn <= C_PAUSED_SYNC_T)
@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
 {
 	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
 
-	if (atomic_read(&tconn->current_epoch->epoch_size) !=  0)
-		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
+	if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) !=  0)
+		conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
 	kfree(tconn->current_epoch);
 
 	idr_destroy(&tconn->volumes);
diff -ruNp linux-3.13.11/drivers/block/drbd/drbd_nl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_nl.c
--- linux-3.13.11/drivers/block/drbd/drbd_nl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_nl.c	2014-07-09 12:00:15.000000000 +0200
@@ -3440,7 +3440,7 @@ out:
 
 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
 {
-	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
+	static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
 	struct sk_buff *msg;
 	struct drbd_genlmsghdr *d_out;
 	unsigned seq;
@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *
 			return;
 	}
 
-	seq = atomic_inc_return(&drbd_genl_seq);
+	seq = atomic_inc_return_unchecked(&drbd_genl_seq);
 	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
 	if (!msg)
 		goto failed;
diff -ruNp linux-3.13.11/drivers/block/drbd/drbd_receiver.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_receiver.c
--- linux-3.13.11/drivers/block/drbd/drbd_receiver.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/drbd/drbd_receiver.c	2014-07-09 12:00:15.000000000 +0200
@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mde
 {
 	int err;
 
-	atomic_set(&mdev->packet_seq, 0);
+	atomic_set_unchecked(&mdev->packet_seq, 0);
 	mdev->peer_seq = 0;
 
 	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish
 	do {
 		next_epoch = NULL;
 
-		epoch_size = atomic_read(&epoch->epoch_size);
+		epoch_size = atomic_read_unchecked(&epoch->epoch_size);
 
 		switch (ev & ~EV_CLEANUP) {
 		case EV_PUT:
@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish
 					rv = FE_DESTROYED;
 			} else {
 				epoch->flags = 0;
-				atomic_set(&epoch->epoch_size, 0);
+				atomic_set_unchecked(&epoch->epoch_size, 0);
 				/* atomic_set(&epoch->active, 0); is already zero */
 				if (rv == FE_STILL_LIVE)
 					rv = FE_RECYCLED;
@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_t
 		conn_wait_active_ee_empty(tconn);
 		drbd_flush(tconn);
 
-		if (atomic_read(&tconn->current_epoch->epoch_size)) {
+		if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
 			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
 			if (epoch)
 				break;
@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_t
 	}
 
 	epoch->flags = 0;
-	atomic_set(&epoch->epoch_size, 0);
+	atomic_set_unchecked(&epoch->epoch_size, 0);
 	atomic_set(&epoch->active, 0);
 
 	spin_lock(&tconn->epoch_lock);
-	if (atomic_read(&tconn->current_epoch->epoch_size)) {
+	if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
 		list_add(&epoch->list, &tconn->current_epoch->list);
 		tconn->current_epoch = epoch;
 		tconn->epochs++;
@@ -2163,7 +2163,7 @@ static int receive_Data(struct drbd_tcon
 
 		err = wait_for_and_update_peer_seq(mdev, peer_seq);
 		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
-		atomic_inc(&tconn->current_epoch->epoch_size);
+		atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
 		err2 = drbd_drain_block(mdev, pi->size);
 		if (!err)
 			err = err2;
@@ -2197,7 +2197,7 @@ static int receive_Data(struct drbd_tcon
 
 	spin_lock(&tconn->epoch_lock);
 	peer_req->epoch = tconn->current_epoch;
-	atomic_inc(&peer_req->epoch->epoch_size);
+	atomic_inc_unchecked(&peer_req->epoch->epoch_size);
 	atomic_inc(&peer_req->epoch->active);
 	spin_unlock(&tconn->epoch_lock);
 
@@ -4344,7 +4344,7 @@ struct data_cmd {
 	int expect_payload;
 	size_t pkt_size;
 	int (*fn)(struct drbd_tconn *, struct packet_info *);
-};
+} __do_const;
 
 static struct data_cmd drbd_cmd_handler[] = {
 	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
@@ -4464,7 +4464,7 @@ static void conn_disconnect(struct drbd_
 	if (!list_empty(&tconn->current_epoch->list))
 		conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
-	atomic_set(&tconn->current_epoch->epoch_size, 0);
+	atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
 	tconn->send.seen_any_write_yet = false;
 
 	conn_info(tconn, "Connection closed\n");
@@ -5220,7 +5220,7 @@ static int tconn_finish_peer_reqs(struct
 struct asender_cmd {
 	size_t pkt_size;
 	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
-};
+} __do_const;
 
 static struct asender_cmd asender_tbl[] = {
 	[P_PING]	    = { 0, got_Ping },
diff -ruNp linux-3.13.11/drivers/block/loop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/loop.c
--- linux-3.13.11/drivers/block/loop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/loop.c	2014-07-09 12:00:15.000000000 +0200
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/vs_context.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -232,7 +233,7 @@ static int __do_lo_send_write(struct fil
 
 	file_start_write(file);
 	set_fs(get_ds());
-	bw = file->f_op->write(file, buf, len, &pos);
+	bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
 	set_fs(old_fs);
 	file_end_write(file);
 	if (likely(bw == len))
@@ -884,6 +885,7 @@ static int loop_set_fd(struct loop_devic
 	lo->lo_blocksize = lo_blocksize;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
+	lo->lo_xid = vx_current_xid();
 	lo->lo_backing_file = file;
 	lo->transfer = transfer_none;
 	lo->ioctl = NULL;
@@ -1028,6 +1030,7 @@ static int loop_clr_fd(struct loop_devic
 	lo->lo_sizelimit = 0;
 	lo->lo_encrypt_key_size = 0;
 	lo->lo_thread = NULL;
+	lo->lo_xid = 0;
 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1071,7 +1074,7 @@ loop_set_status(struct loop_device *lo,
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
-	    !capable(CAP_SYS_ADMIN))
+	    !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP))
 		return -EPERM;
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
@@ -1161,7 +1164,8 @@ loop_get_status(struct loop_device *lo,
 	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
 	info->lo_encrypt_type =
 		lo->lo_encryption ? lo->lo_encryption->number : 0;
-	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
+	if (lo->lo_encrypt_key_size &&
+		vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) {
 		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
 		       lo->lo_encrypt_key_size);
@@ -1503,6 +1507,11 @@ static int lo_open(struct block_device *
 		goto out;
 	}
 
+	if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P)) {
+		err = -EACCES;
+		goto out;
+	}
+
 	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
 	mutex_unlock(&lo->lo_ctl_mutex);
diff -ruNp linux-3.13.11/drivers/block/loop.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/loop.h
--- linux-3.13.11/drivers/block/loop.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/loop.h	2014-07-09 12:00:15.000000000 +0200
@@ -41,6 +41,7 @@ struct loop_device {
 	struct loop_func_table *lo_encryption;
 	__u32           lo_init[2];
 	kuid_t		lo_key_owner;	/* Who set the key */
+	vxid_t		lo_xid;
 	int		(*ioctl)(struct loop_device *, int cmd, 
 				 unsigned long arg); 
 
diff -ruNp linux-3.13.11/drivers/block/null_blk.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/null_blk.c
--- linux-3.13.11/drivers/block/null_blk.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/null_blk.c	2014-07-09 12:00:15.000000000 +0200
@@ -407,14 +407,24 @@ static int null_init_hctx(struct blk_mq_
 	return 0;
 }
 
-static struct blk_mq_ops null_mq_ops = {
-	.queue_rq       = null_queue_rq,
-	.map_queue      = blk_mq_map_queue,
+static struct blk_mq_ops null_mq_single_ops = {
+	.queue_rq	= null_queue_rq,
+	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
+};
+
+static struct blk_mq_ops null_mq_per_node_ops = {
+	.queue_rq	= null_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.init_hctx	= null_init_hctx,
+	.alloc_hctx	= null_alloc_hctx,
+	.free_hctx	= null_free_hctx,
 };
 
 static struct blk_mq_reg null_mq_reg = {
-	.ops		= &null_mq_ops,
+	.ops		= &null_mq_single_ops,
 	.queue_depth	= 64,
 	.cmd_size	= sizeof(struct nullb_cmd),
 	.flags		= BLK_MQ_F_SHOULD_MERGE,
@@ -545,13 +555,8 @@ static int null_add_dev(void)
 		null_mq_reg.queue_depth = hw_queue_depth;
 		null_mq_reg.nr_hw_queues = submit_queues;
 
-		if (use_per_node_hctx) {
-			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
-			null_mq_reg.ops->free_hctx = null_free_hctx;
-		} else {
-			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
-			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
-		}
+		if (use_per_node_hctx)
+			null_mq_reg.ops = &null_mq_per_node_ops;
 
 		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
 	} else if (queue_mode == NULL_Q_BIO) {
diff -ruNp linux-3.13.11/drivers/block/pktcdvd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/pktcdvd.c
--- linux-3.13.11/drivers/block/pktcdvd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/pktcdvd.c	2014-07-09 12:00:15.000000000 +0200
@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file
 
 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
 {
-	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
 }
 
 /*
@@ -1883,7 +1883,7 @@ static noinline_for_stack int pkt_probe_
 		return -EROFS;
 	}
 	pd->settings.fp = ti.fp;
-	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
 
 	if (ti.nwa_v) {
 		pd->nwa = be32_to_cpu(ti.next_writable);
diff -ruNp linux-3.13.11/drivers/block/smart1,2.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/smart1,2.h
--- linux-3.13.11/drivers/block/smart1,2.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/smart1,2.h	2014-07-09 12:00:15.000000000 +0200
@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending
 }
 
 static struct access_method smart4_access = {
-	smart4_submit_command,
-	smart4_intr_mask,
-	smart4_fifo_full,
-	smart4_intr_pending,
-	smart4_completed,
+	.submit_command = smart4_submit_command,
+	.set_intr_mask = smart4_intr_mask,
+	.fifo_full = smart4_fifo_full,
+	.intr_pending = smart4_intr_pending,
+	.command_completed = smart4_completed,
 };
 
 /*
@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending
 }
 
 static struct access_method smart2_access = {
-	smart2_submit_command,
-	smart2_intr_mask,
-	smart2_fifo_full,
-	smart2_intr_pending,
-	smart2_completed,
+	.submit_command = smart2_submit_command,
+	.set_intr_mask = smart2_intr_mask,
+	.fifo_full = smart2_fifo_full,
+	.intr_pending = smart2_intr_pending,
+	.command_completed = smart2_completed,
 };
 
 /*
@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pendin
 }
 
 static struct access_method smart2e_access = {
-	smart2e_submit_command,
-	smart2e_intr_mask,
-	smart2e_fifo_full,
-	smart2e_intr_pending,
-	smart2e_completed,
+	.submit_command = smart2e_submit_command,
+	.set_intr_mask = smart2e_intr_mask,
+	.fifo_full = smart2e_fifo_full,
+	.intr_pending = smart2e_intr_pending,
+	.command_completed = smart2e_completed,
 };
 
 /*
@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending
 }
 
 static struct access_method smart1_access = {
-	smart1_submit_command,
-	smart1_intr_mask,
-	smart1_fifo_full,
-	smart1_intr_pending,
-	smart1_completed,
+	.submit_command = smart1_submit_command,
+	.set_intr_mask = smart1_intr_mask,
+	.fifo_full = smart1_fifo_full,
+	.intr_pending = smart1_intr_pending,
+	.command_completed = smart1_completed,
 };
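
Note on the smart1,2.h changes above: they are purely a conversion from positional to C99 designated initializers; behaviour is unchanged, but the initializers no longer depend on field order. A minimal stand-alone illustration (struct and function names below are made up for the example):

#include <stdio.h>

struct access_method_demo {
	void (*submit_command)(int);
	int  (*fifo_full)(void);
};

static void demo_submit(int cmd) { printf("submit %d\n", cmd); }
static int  demo_fifo_full(void) { return 0; }

/* old style: order must match the struct declaration exactly */
static struct access_method_demo positional = { demo_submit, demo_fifo_full };

/* new style: each member is named, so ordering no longer matters */
static struct access_method_demo designated = {
	.fifo_full      = demo_fifo_full,
	.submit_command = demo_submit,
};

int main(void)
{
	positional.submit_command(1);
	designated.submit_command(2);
	return 0;
}
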
diff -ruNp linux-3.13.11/drivers/block/vroot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/vroot.c
--- linux-3.13.11/drivers/block/vroot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/block/vroot.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,290 @@
+/*
+ *  linux/drivers/block/vroot.c
+ *
+ *  written by Herbert Pötzl, 9/11/2002
+ *  ported to 2.6.10 by Herbert Pötzl, 30/12/2004
+ *
+ *  based on the loop.c code by Theodore Ts'o.
+ *
+ * Copyright (C) 2002-2007 by Herbert Pötzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/file.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+
+#include <linux/vroot.h>
+#include <linux/vs_context.h>
+
+
+static int max_vroot = 8;
+
+static struct vroot_device *vroot_dev;
+static struct gendisk **disks;
+
+
+static int vroot_set_dev(
+	struct vroot_device *vr,
+	struct block_device *bdev,
+	unsigned int arg)
+{
+	struct block_device *real_bdev;
+	struct file *file;
+	struct inode *inode;
+	int error;
+
+	error = -EBUSY;
+	if (vr->vr_state != Vr_unbound)
+		goto out;
+
+	error = -EBADF;
+	file = fget(arg);
+	if (!file)
+		goto out;
+
+	error = -EINVAL;
+	inode = file->f_dentry->d_inode;
+
+
+	if (S_ISBLK(inode->i_mode)) {
+		real_bdev = inode->i_bdev;
+		vr->vr_device = real_bdev;
+		__iget(real_bdev->bd_inode);
+	} else
+		goto out_fput;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_set_dev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	vr->vr_state = Vr_bound;
+	error = 0;
+
+ out_fput:
+	fput(file);
+ out:
+	return error;
+}
+
+static int vroot_clr_dev(
+	struct vroot_device *vr,
+	struct block_device *bdev)
+{
+	struct block_device *real_bdev;
+
+	if (vr->vr_state != Vr_bound)
+		return -ENXIO;
+	if (vr->vr_refcnt > 1)	/* we needed one fd for the ioctl */
+		return -EBUSY;
+
+	real_bdev = vr->vr_device;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_clr_dev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	bdput(real_bdev);
+	vr->vr_state = Vr_unbound;
+	vr->vr_device = NULL;
+	return 0;
+}
+
+
+static int vr_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	struct vroot_device *vr = bdev->bd_disk->private_data;
+	int err;
+
+	down(&vr->vr_ctl_mutex);
+	switch (cmd) {
+	case VROOT_SET_DEV:
+		err = vroot_set_dev(vr, bdev, arg);
+		break;
+	case VROOT_CLR_DEV:
+		err = vroot_clr_dev(vr, bdev);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+	up(&vr->vr_ctl_mutex);
+	return err;
+}
+
+static int vr_open(struct block_device *bdev, fmode_t mode)
+{
+	struct vroot_device *vr = bdev->bd_disk->private_data;
+
+	down(&vr->vr_ctl_mutex);
+	vr->vr_refcnt++;
+	up(&vr->vr_ctl_mutex);
+	return 0;
+}
+
+static void vr_release(struct gendisk *disk, fmode_t mode)
+{
+	struct vroot_device *vr = disk->private_data;
+
+	down(&vr->vr_ctl_mutex);
+	--vr->vr_refcnt;
+	up(&vr->vr_ctl_mutex);
+}
+
+static struct block_device_operations vr_fops = {
+	.owner =	THIS_MODULE,
+	.open =		vr_open,
+	.release =	vr_release,
+	.ioctl =	vr_ioctl,
+};
+
+static void vroot_make_request(struct request_queue *q, struct bio *bio)
+{
+	printk("vroot_make_request %p, %p\n", q, bio);
+	bio_io_error(bio);
+}
+
+struct block_device *__vroot_get_real_bdev(struct block_device *bdev)
+{
+	struct inode *inode = bdev->bd_inode;
+	struct vroot_device *vr;
+	struct block_device *real_bdev;
+	int minor = iminor(inode);
+
+	vr = &vroot_dev[minor];
+	real_bdev = vr->vr_device;
+
+	vxdprintk(VXD_CBIT(misc, 0),
+		"vroot[%d]_get_real_bdev: dev=" VXF_DEV,
+		vr->vr_number, VXD_DEV(real_bdev));
+
+	if (vr->vr_state != Vr_bound)
+		return ERR_PTR(-ENXIO);
+
+	__iget(real_bdev->bd_inode);
+	return real_bdev;
+}
+
+
+
+/*
+ * And now the modules code and kernel interface.
+ */
+
+module_param(max_vroot, int, 0);
+
+MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR);
+
+MODULE_AUTHOR ("Herbert Pötzl");
+MODULE_DESCRIPTION ("Virtual Root Device Mapper");
+
+
+int __init vroot_init(void)
+{
+	int err, i;
+
+	if (max_vroot < 1 || max_vroot > 256) {
+		max_vroot = MAX_VROOT_DEFAULT;
+		printk(KERN_WARNING "vroot: invalid max_vroot "
+			"(must be between 1 and 256), "
+			"using default (%d)\n", max_vroot);
+	}
+
+	if (register_blkdev(VROOT_MAJOR, "vroot"))
+		return -EIO;
+
+	err = -ENOMEM;
+	vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL);
+	if (!vroot_dev)
+		goto out_mem1;
+	memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device));
+
+	disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL);
+	if (!disks)
+		goto out_mem2;
+
+	for (i = 0; i < max_vroot; i++) {
+		disks[i] = alloc_disk(1);
+		if (!disks[i])
+			goto out_mem3;
+		disks[i]->queue = blk_alloc_queue(GFP_KERNEL);
+		if (!disks[i]->queue)
+			goto out_mem3;
+		blk_queue_make_request(disks[i]->queue, vroot_make_request);
+	}
+
+	for (i = 0; i < max_vroot; i++) {
+		struct vroot_device *vr = &vroot_dev[i];
+		struct gendisk *disk = disks[i];
+
+		memset(vr, 0, sizeof(*vr));
+		sema_init(&vr->vr_ctl_mutex, 1);
+		vr->vr_number = i;
+		disk->major = VROOT_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &vr_fops;
+		sprintf(disk->disk_name, "vroot%d", i);
+		disk->private_data = vr;
+	}
+
+	err = register_vroot_grb(&__vroot_get_real_bdev);
+	if (err)
+		goto out_mem3;
+
+	for (i = 0; i < max_vroot; i++)
+		add_disk(disks[i]);
+	printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot);
+	return 0;
+
+out_mem3:
+	while (i--)
+		put_disk(disks[i]);
+	kfree(disks);
+out_mem2:
+	kfree(vroot_dev);
+out_mem1:
+	unregister_blkdev(VROOT_MAJOR, "vroot");
+	printk(KERN_ERR "vroot: ran out of memory\n");
+	return err;
+}
+
+void vroot_exit(void)
+{
+	int i;
+
+	if (unregister_vroot_grb(&__vroot_get_real_bdev))
+		printk(KERN_WARNING "vroot: cannot unregister grb\n");
+
+	for (i = 0; i < max_vroot; i++) {
+		del_gendisk(disks[i]);
+		put_disk(disks[i]);
+	}
+	unregister_blkdev(VROOT_MAJOR, "vroot");
+
+	kfree(disks);
+	kfree(vroot_dev);
+}
+
+module_init(vroot_init);
+module_exit(vroot_exit);
+
+#ifndef MODULE
+
+static int __init max_vroot_setup(char *str)
+{
+	max_vroot = simple_strtol(str, NULL, 0);
+	return 1;
+}
+
+__setup("max_vroot=", max_vroot_setup);
+
+#endif
+
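
Note on vroot.c above: vroot_init() follows the usual kernel error-unwinding idiom, where each setup step gets an out_* label and a failure jumps to the label that releases everything acquired so far, with the labels falling through in reverse acquisition order. A small userspace sketch of the same shape (malloc stands in for the register/alloc steps; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int demo_init(int fail_last)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	if (fail_last)			/* pretend the final step fails */
		goto out_free_b;
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	fprintf(stderr, "init failed, cleaned up in reverse order\n");
	return -1;
}

int main(void)
{
	return demo_init(1) ? 1 : 0;
}
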
diff -ruNp linux-3.13.11/drivers/bluetooth/btwilink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/bluetooth/btwilink.c
--- linux-3.13.11/drivers/bluetooth/btwilink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/bluetooth/btwilink.c	2014-07-09 12:00:15.000000000 +0200
@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_d
 
 static int bt_ti_probe(struct platform_device *pdev)
 {
-	static struct ti_st *hst;
+	struct ti_st *hst;
 	struct hci_dev *hdev;
 	int err;
 
diff -ruNp linux-3.13.11/drivers/bus/arm-cci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/bus/arm-cci.c
--- linux-3.13.11/drivers/bus/arm-cci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/bus/arm-cci.c	2014-07-09 12:00:15.000000000 +0200
@@ -979,7 +979,7 @@ static int cci_probe(void)
 
 	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
 
-	ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
+	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
 	if (!ports)
 		return -ENOMEM;
 
diff -ruNp linux-3.13.11/drivers/cdrom/cdrom.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cdrom/cdrom.c
--- linux-3.13.11/drivers/cdrom/cdrom.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cdrom/cdrom.c	2014-07-09 12:00:15.000000000 +0200
@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_i
 	ENSURE(reset, CDC_RESET);
 	ENSURE(generic_packet, CDC_GENERIC_PACKET);
 	cdi->mc_flags = 0;
-	cdo->n_minors = 0;
         cdi->options = CDO_USE_FFLAGS;
 	
 	if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_i
 	else
 		cdi->cdda_method = CDDA_OLD;
 
-	if (!cdo->generic_packet)
-		cdo->generic_packet = cdrom_dummy_generic_packet;
+	if (!cdo->generic_packet) {
+		pax_open_kernel();
+		*(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
+		pax_close_kernel();
+	}
 
 	cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
 	mutex_lock(&cdrom_mutex);
@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_devic
 	if (cdi->exit)
 		cdi->exit(cdi);
 
-	cdi->ops->n_minors--;
 	cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
 }
 
@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cd
 	 */
 	nr = nframes;
 	do {
-		cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+		cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
 		if (cgc.buffer)
 			break;
 
@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *
 	struct cdrom_device_info *cdi;
 	int ret;
 
-	ret = scnprintf(info + *pos, max_size - *pos, header);
+	ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
 	if (!ret)
 		return 1;
 
diff -ruNp linux-3.13.11/drivers/cdrom/gdrom.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cdrom/gdrom.c
--- linux-3.13.11/drivers/cdrom/gdrom.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cdrom/gdrom.c	2014-07-09 12:00:15.000000000 +0200
@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops
 	.audio_ioctl		= gdrom_audio_ioctl,
 	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
 				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-	.n_minors		= 1,
 };
 
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
diff -ruNp linux-3.13.11/drivers/char/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/Kconfig
--- linux-3.13.11/drivers/char/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
 
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
-	default y
+	default n
+	depends on !GRKERNSEC_KMEM
 	help
 	  Say Y here if you want to support the /dev/kmem device. The
 	  /dev/kmem device is rarely used, but can be used for certain
@@ -576,6 +577,7 @@ config DEVPORT
 	bool
 	depends on !M68K
 	depends on ISA || PCI
+	depends on !GRKERNSEC_KMEM
 	default y
 
 source "drivers/s390/char/Kconfig"
diff -ruNp linux-3.13.11/drivers/char/agp/compat_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/agp/compat_ioctl.c
--- linux-3.13.11/drivers/char/agp/compat_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/agp/compat_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(st
 			return -ENOMEM;
 		}
 
-		if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
+		if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
 				   sizeof(*usegment) * ureserve.seg_count)) {
 			kfree(usegment);
 			kfree(ksegment);
diff -ruNp linux-3.13.11/drivers/char/agp/frontend.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/agp/frontend.c
--- linux-3.13.11/drivers/char/agp/frontend.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/agp/frontend.c	2014-07-09 12:00:15.000000000 +0200
@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct ag
 	if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
 		return -EFAULT;
 
-	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
+	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
 		return -EFAULT;
 
 	client = agp_find_client_by_pid(reserve.pid);
@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct ag
 		if (segment == NULL)
 			return -ENOMEM;
 
-		if (copy_from_user(segment, (void __user *) reserve.seg_list,
+		if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
 				   sizeof(struct agp_segment) * reserve.seg_count)) {
 			kfree(segment);
 			return -EFAULT;
diff -ruNp linux-3.13.11/drivers/char/genrtc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/genrtc.c
--- linux-3.13.11/drivers/char/genrtc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/genrtc.c	2014-07-09 12:00:15.000000000 +0200
@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
 	switch (cmd) {
 
 	case RTC_PLL_GET:
+	    memset(&pll, 0, sizeof(pll));
 	    if (get_rtc_pll(&pll))
 	 	    return -EINVAL;
 	    else
diff -ruNp linux-3.13.11/drivers/char/hpet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/hpet.c
--- linux-3.13.11/drivers/char/hpet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/hpet.c	2014-07-09 12:00:15.000000000 +0200
@@ -578,7 +578,7 @@ static inline unsigned long hpet_time_di
 }
 
 static int
-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
 		  struct hpet_info *info)
 {
 	struct hpet_timer __iomem *timer;
diff -ruNp linux-3.13.11/drivers/char/hw_random/intel-rng.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/hw_random/intel-rng.c
--- linux-3.13.11/drivers/char/hw_random/intel-rng.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/hw_random/intel-rng.c	2014-07-09 12:00:15.000000000 +0200
@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect'
 
 		if (no_fwh_detect)
 			return -ENODEV;
-		printk(warning);
+		printk("%s", warning);
 		return -EBUSY;
 	}
 
diff -ruNp linux-3.13.11/drivers/char/ipmi/ipmi_msghandler.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/ipmi/ipmi_msghandler.c
--- linux-3.13.11/drivers/char/ipmi/ipmi_msghandler.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/ipmi/ipmi_msghandler.c	2014-07-09 12:00:15.000000000 +0200
@@ -420,7 +420,7 @@ struct ipmi_smi {
 	struct proc_dir_entry *proc_dir;
 	char                  proc_dir_name[10];
 
-	atomic_t stats[IPMI_NUM_STATS];
+	atomic_unchecked_t stats[IPMI_NUM_STATS];
 
 	/*
 	 * run_to_completion duplicate of smb_info, smi_info
@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 #define ipmi_inc_stat(intf, stat) \
-	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+	atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
-	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+	((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
 
 static int is_lan_addr(struct ipmi_addr *addr)
 {
@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
 	init_waitqueue_head(&intf->waitq);
 	for (i = 0; i < IPMI_NUM_STATS; i++)
-		atomic_set(&intf->stats[i], 0);
+		atomic_set_unchecked(&intf->stats[i], 0);
 
 	intf->proc_dir = NULL;
 
diff -ruNp linux-3.13.11/drivers/char/ipmi/ipmi_si_intf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/ipmi/ipmi_si_intf.c
--- linux-3.13.11/drivers/char/ipmi/ipmi_si_intf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/ipmi/ipmi_si_intf.c	2014-07-09 12:00:15.000000000 +0200
@@ -280,7 +280,7 @@ struct smi_info {
 	unsigned char slave_addr;
 
 	/* Counters and things for the proc filesystem. */
-	atomic_t stats[SI_NUM_STATS];
+	atomic_unchecked_t stats[SI_NUM_STATS];
 
 	struct task_struct *thread;
 
@@ -289,9 +289,9 @@ struct smi_info {
 };
 
 #define smi_inc_stat(smi, stat) \
-	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+	atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
 #define smi_get_stat(smi, stat) \
-	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+	((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
 
 #define SI_MAX_PARMS 4
 
@@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info
 	atomic_set(&new_smi->req_events, 0);
 	new_smi->run_to_completion = 0;
 	for (i = 0; i < SI_NUM_STATS; i++)
-		atomic_set(&new_smi->stats[i], 0);
+		atomic_set_unchecked(&new_smi->stats[i], 0);
 
 	new_smi->interrupt_disabled = 1;
 	atomic_set(&new_smi->stop_operation, 0);
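
Note on the ipmi_msghandler.c and ipmi_si_intf.c hunks above: pure statistics counters are switched from atomic_t to grsecurity's atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t increments are instrumented to catch overflow (a reference-count defence), and counters whose wraparound is harmless opt out via the *_unchecked variants. A very loose userspace analogy of the two behaviours (not the real implementation):

#include <limits.h>
#include <stdio.h>

/* analogue of an instrumented atomic_inc(): refuse to wrap */
static int checked_inc(unsigned int *v)
{
	if (*v == UINT_MAX) {
		fprintf(stderr, "overflow caught, counter pinned\n");
		return -1;
	}
	(*v)++;
	return 0;
}

/* analogue of atomic_inc_unchecked(): wraparound is acceptable */
static void unchecked_inc(unsigned int *v)
{
	(*v)++;
}

int main(void)
{
	unsigned int refcount = UINT_MAX, stat = UINT_MAX;

	checked_inc(&refcount);		/* detected */
	unchecked_inc(&stat);		/* wraps silently */
	printf("stat counter wrapped to %u\n", stat);
	return 0;
}
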
diff -ruNp linux-3.13.11/drivers/char/mem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/mem.c
--- linux-3.13.11/drivers/char/mem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/mem.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <linux/raw.h>
 #include <linux/tty.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/device.h>
 #include <linux/highmem.h>
@@ -37,6 +38,10 @@
 
 #define DEVPORT_MINOR	4
 
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+extern const struct file_operations grsec_fops;
+#endif
+
 static inline unsigned long size_inside_page(unsigned long start,
 					     unsigned long size)
 {
@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsig
 
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
+#ifdef CONFIG_GRKERNSEC_KMEM
+			gr_handle_mem_readwrite(from, to);
+#else
 			printk(KERN_INFO
 		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
 				current->comm, from, to);
+#endif
 			return 0;
 		}
 		cursor += PAGE_SIZE;
@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsig
 	}
 	return 1;
 }
+#elif defined(CONFIG_GRKERNSEC_KMEM)
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+	return 0;
+}
 #else
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *fil
 
 	while (count > 0) {
 		unsigned long remaining;
+		char *temp;
 
 		sz = size_inside_page(p, count);
 
@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *fil
 		if (!ptr)
 			return -EFAULT;
 
-		remaining = copy_to_user(buf, ptr, sz);
+#ifdef CONFIG_PAX_USERCOPY
+		temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+		if (!temp) {
+			unxlate_dev_mem_ptr(p, ptr);
+			return -ENOMEM;
+		}
+		memcpy(temp, ptr, sz);
+#else
+		temp = ptr;
+#endif
+
+		remaining = copy_to_user(buf, temp, sz);
+
+#ifdef CONFIG_PAX_USERCOPY
+		kfree(temp);
+#endif
+
 		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
@@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *fi
 			 size_t count, loff_t *ppos)
 {
 	unsigned long p = *ppos;
-	ssize_t low_count, read, sz;
+	ssize_t low_count, read, sz, err = 0;
 	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
-	int err = 0;
 
 	read = 0;
 	if (p < (unsigned long) high_memory) {
@@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *fi
 		}
 #endif
 		while (low_count > 0) {
+			char *temp;
+
 			sz = size_inside_page(p, low_count);
 
 			/*
@@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *fi
 			 */
 			kbuf = xlate_dev_kmem_ptr((char *)p);
 
-			if (copy_to_user(buf, kbuf, sz))
+#ifdef CONFIG_PAX_USERCOPY
+			temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
+			if (!temp)
+				return -ENOMEM;
+			memcpy(temp, kbuf, sz);
+#else
+			temp = kbuf;
+#endif
+
+			err = copy_to_user(buf, temp, sz);
+
+#ifdef CONFIG_PAX_USERCOPY
+			kfree(temp);
+#endif
+
+			if (err)
 				return -EFAULT;
 			buf += sz;
 			p += sz;
@@ -822,6 +869,9 @@ static const struct memdev {
 #ifdef CONFIG_PRINTK
 	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
 #endif
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+	[13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
+#endif
 };
 
 static int memory_open(struct inode *inode, struct file *filp)
@@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
 			continue;
 
 		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
-			      NULL, devlist[minor].name);
+			      NULL, "%s", devlist[minor].name);
 	}
 
 	return tty_init();
diff -ruNp linux-3.13.11/drivers/char/nvram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/nvram.c
--- linux-3.13.11/drivers/char/nvram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/nvram.c	2014-07-09 12:00:15.000000000 +0200
@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *f
 
 	spin_unlock_irq(&rtc_lock);
 
-	if (copy_to_user(buf, contents, tmp - contents))
+	if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
 		return -EFAULT;
 
 	*ppos = i;
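
Note on the nvram_read() change above: it adds a defensive check so the length handed to copy_to_user() can never exceed the local contents[] buffer. The same pattern in plain C (buffer name and sizes below are only illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define NVRAM_BYTES 128

static ssize_t read_into(char *dst, size_t dst_len)
{
	char contents[NVRAM_BYTES];
	char *tmp = contents;

	/* ... fill the buffer, advancing tmp ... */
	tmp += snprintf(contents, sizeof(contents), "demo payload");

	/* reject impossible lengths before copying them anywhere */
	if ((size_t)(tmp - contents) > sizeof(contents))
		return -1;		/* -EFAULT in the driver */
	if ((size_t)(tmp - contents) > dst_len)
		return -1;

	memcpy(dst, contents, tmp - contents);
	return tmp - contents;
}

int main(void)
{
	char out[64];
	ssize_t n = read_into(out, sizeof(out));

	if (n > 0)
		printf("copied %zd bytes: %.*s\n", n, (int)n, out);
	return 0;
}
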
diff -ruNp linux-3.13.11/drivers/char/pcmcia/synclink_cs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/pcmcia/synclink_cs.c
--- linux-3.13.11/drivers/char/pcmcia/synclink_cs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/pcmcia/synclink_cs.c	2014-07-09 12:00:15.000000000 +0200
@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_stru
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
-			 __FILE__, __LINE__, info->device_name, port->count);
+			 __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
 
-	WARN_ON(!port->count);
+	WARN_ON(!atomic_read(&port->count));
 
 	if (tty_port_close_start(port, tty, filp) == 0)
 		goto cleanup;
@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_stru
 cleanup:
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
-			tty->driver->name, port->count);
+			tty->driver->name, atomic_read(&port->count));
 }
 
 /* Wait until the transmitter is empty.
@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
-			 __FILE__, __LINE__, tty->driver->name, port->count);
+			 __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
 	/* If port is closing, signal caller to try again */
 	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct
 		goto cleanup;
 	}
 	spin_lock(&port->lock);
-	port->count++;
+	atomic_inc(&port->count);
 	spin_unlock(&port->lock);
 	spin_unlock_irqrestore(&info->netlock, flags);
 
-	if (port->count == 1) {
+	if (atomic_read(&port->count) == 1) {
 		/* 1st open on this device, init hardware */
 		retval = startup(info, tty);
 		if (retval < 0)
@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_dev
 	unsigned short new_crctype;
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	switch (encoding)
@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_devic
 
 	/* arbitrate between network and tty opens */
 	spin_lock_irqsave(&info->netlock, flags);
-	if (info->port.count != 0 || info->netcount != 0) {
+	if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
 		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
 		spin_unlock_irqrestore(&info->netlock, flags);
 		return -EBUSY;
@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_devi
 		printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	if (cmd != SIOCWANDEV)
diff -ruNp linux-3.13.11/drivers/char/random.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/random.c
--- linux-3.13.11/drivers/char/random.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/random.c	2014-07-09 12:00:15.000000000 +0200
@@ -270,10 +270,17 @@
 /*
  * Configuration information
  */
+#ifdef CONFIG_GRKERNSEC_RANDNET
+#define INPUT_POOL_SHIFT	14
+#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
+#define OUTPUT_POOL_SHIFT	12
+#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
+#else
 #define INPUT_POOL_SHIFT	12
 #define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
 #define OUTPUT_POOL_SHIFT	10
 #define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
+#endif
 #define SEC_XFER_SIZE		512
 #define EXTRACT_SIZE		10
 
@@ -284,9 +291,6 @@
 /*
  * To allow fractional bits to be tracked, the entropy_count field is
  * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
  */
 #define ENTROPY_SHIFT 3
 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
@@ -361,12 +365,19 @@ static struct poolinfo {
 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
 	int tap1, tap2, tap3, tap4, tap5;
 } poolinfo_table[] = {
+#ifdef CONFIG_GRKERNSEC_RANDNET
+	/* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
+	{ S(512),	411,	308,	208,	104,	1 },
+	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
+	{ S(128),	104,	76,	51,	25,	1 },
+#else
 	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
 	/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
 	{ S(128),	104,	76,	51,	25,	1 },
 	/* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
 	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
 	{ S(32),	26,	19,	14,	7,	1 },
+#endif
 #if 0
 	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
 	{ S(2048),	1638,	1231,	819,	411,	1 },
@@ -433,9 +444,9 @@ struct entropy_store {
 };
 
 static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
@@ -524,8 +535,8 @@ static void _mix_pool_bytes(struct entro
 		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
 	}
 
-	ACCESS_ONCE(r->input_rotate) = input_rotate;
-	ACCESS_ONCE(r->add_ptr) = i;
+	ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
+	ACCESS_ONCE_RW(r->add_ptr) = i;
 	smp_wmb();
 
 	if (out)
@@ -632,7 +643,7 @@ retry:
 		/* The +2 corresponds to the /4 in the denominator */
 
 		do {
-			unsigned int anfrac = min(pnfrac, pool_size/2);
+			u64 anfrac = min(pnfrac, pool_size/2);
 			unsigned int add =
 				((pool_size - entropy_count)*anfrac*3) >> s;
 
@@ -1151,7 +1162,7 @@ static ssize_t extract_entropy_user(stru
 
 		extract_buf(r, tmp);
 		i = min_t(int, nbytes, EXTRACT_SIZE);
-		if (copy_to_user(buf, tmp, i)) {
+		if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -1507,7 +1518,7 @@ EXPORT_SYMBOL(generate_random_uuid);
 #include <linux/sysctl.h>
 
 static int min_read_thresh = 8, min_write_thresh;
-static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
 static int max_write_thresh = INPUT_POOL_WORDS * 32;
 static char sysctl_bootid[16];
 
@@ -1523,7 +1534,7 @@ static char sysctl_bootid[16];
 static int proc_do_uuid(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table fake_table;
+	ctl_table_no_const fake_table;
 	unsigned char buf[64], tmp_uuid[16], *uuid;
 
 	uuid = table->data;
@@ -1553,7 +1564,7 @@ static int proc_do_uuid(struct ctl_table
 static int proc_do_entropy(ctl_table *table, int write,
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	ctl_table fake_table;
+	ctl_table_no_const fake_table;
 	int entropy_count;
 
 	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
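
Note on the GRKERNSEC_RANDNET block near the top of the random.c diff above: it only enlarges the entropy pools. A pool with shift S is stored as 1 << (S-5) 32-bit words, i.e. 2^S bits. A quick check of the sizes involved:

#include <stdio.h>

static void pool(const char *name, int shift)
{
	int words = 1 << (shift - 5);

	printf("%-12s shift=%2d  words=%4d  bytes=%5d  bits=%6d\n",
	       name, shift, words, words * 4, words * 32);
}

int main(void)
{
	pool("input (std)", 12);	/* 128 words =  4096 bits */
	pool("input (grs)", 14);	/* 512 words = 16384 bits */
	pool("output(std)", 10);	/*  32 words =  1024 bits */
	pool("output(grs)", 12);	/* 128 words =  4096 bits */
	return 0;
}
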
diff -ruNp linux-3.13.11/drivers/char/sonypi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/sonypi.c
--- linux-3.13.11/drivers/char/sonypi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/sonypi.c	2014-07-09 12:00:15.000000000 +0200
@@ -54,6 +54,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
+#include <asm/local.h>
 
 #include <linux/sonypi.h>
 
@@ -490,7 +491,7 @@ static struct sonypi_device {
 	spinlock_t fifo_lock;
 	wait_queue_head_t fifo_proc_list;
 	struct fasync_struct *fifo_async;
-	int open_count;
+	local_t open_count;
 	int model;
 	struct input_dev *input_jog_dev;
 	struct input_dev *input_key_dev;
@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, st
 static int sonypi_misc_release(struct inode *inode, struct file *file)
 {
 	mutex_lock(&sonypi_device.lock);
-	sonypi_device.open_count--;
+	local_dec(&sonypi_device.open_count);
 	mutex_unlock(&sonypi_device.lock);
 	return 0;
 }
@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode
 {
 	mutex_lock(&sonypi_device.lock);
 	/* Flush input queue on first open */
-	if (!sonypi_device.open_count)
+	if (!local_read(&sonypi_device.open_count))
 		kfifo_reset(&sonypi_device.fifo);
-	sonypi_device.open_count++;
+	local_inc(&sonypi_device.open_count);
 	mutex_unlock(&sonypi_device.lock);
 
 	return 0;
diff -ruNp linux-3.13.11/drivers/char/tpm/tpm_acpi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/tpm/tpm_acpi.c
--- linux-3.13.11/drivers/char/tpm/tpm_acpi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/tpm/tpm_acpi.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
 	virt = acpi_os_map_memory(start, len);
 	if (!virt) {
 		kfree(log->bios_event_log);
+		log->bios_event_log = NULL;
 		printk("%s: ERROR - Unable to map memory\n", __func__);
 		return -EIO;
 	}
 
-	memcpy_fromio(log->bios_event_log, virt, len);
+	memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
 
 	acpi_os_unmap_memory(virt, len);
 	return 0;
diff -ruNp linux-3.13.11/drivers/char/tpm/tpm_eventlog.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/tpm/tpm_eventlog.c
--- linux-3.13.11/drivers/char/tpm/tpm_eventlog.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/tpm/tpm_eventlog.c	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start
 	event = addr;
 
 	if ((event->event_type == 0 && event->event_size == 0) ||
-	    ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+	    (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
 		return NULL;
 
 	return addr;
@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(
 		return NULL;
 
 	if ((event->event_type == 0 && event->event_size == 0) ||
-	    ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+	    (event->event_size >= limit - v - sizeof(struct tcpa_event)))
 		return NULL;
 
 	(*pos)++;
@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_
 	int i;
 
 	for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
-		seq_putc(m, data[i]);
+		if (!seq_putc(m, data[i]))
+			return -EFAULT;
 
 	return 0;
 }
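
Note on the tpm_eventlog.c hunks above: the bounds check is rearranged so the untrusted event_size is never first added to a pointer (where the sum could wrap) but is instead compared against the space remaining below limit. The same idea with plain 32-bit integers (names below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 16u

/* old shape: base + HDR_SIZE + len may wrap and compare "small" */
static int in_bounds_buggy(uint32_t base, uint32_t len, uint32_t limit)
{
	return (base + HDR_SIZE + len) < limit;
}

/* new shape: compare len against the room actually left */
static int in_bounds_fixed(uint32_t base, uint32_t len, uint32_t limit)
{
	if (base > limit || limit - base < HDR_SIZE)
		return 0;
	return len < limit - base - HDR_SIZE;
}

int main(void)
{
	uint32_t base = 1000, limit = 2000, huge = UINT32_MAX - 500;

	printf("buggy: %d (wraps, wrongly accepted)\n",
	       in_bounds_buggy(base, huge, limit));
	printf("fixed: %d (rejected)\n",
	       in_bounds_fixed(base, huge, limit));
	return 0;
}
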
diff -ruNp linux-3.13.11/drivers/char/virtio_console.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/virtio_console.c
--- linux-3.13.11/drivers/char/virtio_console.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/char/virtio_console.c	2014-07-09 12:00:15.000000000 +0200
@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port
 	if (to_user) {
 		ssize_t ret;
 
-		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+		ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
 		if (ret)
 			return -EFAULT;
 	} else {
@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct fil
 	if (!port_has_data(port) && !port->host_connected)
 		return 0;
 
-	return fill_readbuf(port, ubuf, count, true);
+	return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
 }
 
 static int wait_port_writable(struct port *port, bool nonblock)
diff -ruNp linux-3.13.11/drivers/clk/clk-composite.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/clk/clk-composite.c
--- linux-3.13.11/drivers/clk/clk-composite.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/clk/clk-composite.c	2014-07-09 12:00:15.000000000 +0200
@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struc
 	struct clk *clk;
 	struct clk_init_data init;
 	struct clk_composite *composite;
-	struct clk_ops *clk_composite_ops;
+	clk_ops_no_const *clk_composite_ops;
 
 	composite = kzalloc(sizeof(*composite), GFP_KERNEL);
 	if (!composite) {
diff -ruNp linux-3.13.11/drivers/clk/socfpga/clk.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/clk/socfpga/clk.c
--- linux-3.13.11/drivers/clk/socfpga/clk.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/clk/socfpga/clk.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,7 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <asm/pgtable.h>
 
 /* Clock Manager offsets */
 #define CLKMGR_CTRL	0x0
@@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_in
 		streq(clk_name, "periph_pll") ||
 		streq(clk_name, "sdram_pll")) {
 		socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
-		clk_pll_ops.enable = clk_gate_ops.enable;
-		clk_pll_ops.disable = clk_gate_ops.disable;
+		pax_open_kernel();
+		*(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
+		*(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
+		pax_close_kernel();
 	}
 
 	clk = clk_register(NULL, &socfpga_clk->hw.hw);
@@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_
 	return parent_rate / div;
 }
 
-static struct clk_ops gateclk_ops = {
+static clk_ops_no_const gateclk_ops __read_only = {
 	.recalc_rate = socfpga_clk_recalc_rate,
 	.get_parent = socfpga_clk_get_parent,
 	.set_parent = socfpga_clk_set_parent,
diff -ruNp linux-3.13.11/drivers/cpufreq/acpi-cpufreq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/acpi-cpufreq.c
--- linux-3.13.11/drivers/cpufreq/acpi-cpufreq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/acpi-cpufreq.c	2014-07-09 12:00:15.000000000 +0200
@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct
 	return sprintf(buf, "%u\n", boost_enabled);
 }
 
-static struct global_attr global_boost = __ATTR(boost, 0644,
+static global_attr_no_const global_boost = __ATTR(boost, 0644,
 						show_global_boost,
 						store_global_boost);
 
@@ -693,8 +693,11 @@ static int acpi_cpufreq_cpu_init(struct
 	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(acfreq_data, cpu) = data;
 
-	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
-		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+		pax_open_kernel();
+		*(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+		pax_close_kernel();
+	}
 
 	result = acpi_processor_register_performance(data->acpi_data, cpu);
 	if (result)
@@ -827,7 +830,9 @@ static int acpi_cpufreq_cpu_init(struct
 		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
-		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+		pax_open_kernel();
+		*(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+		pax_close_kernel();
 		break;
 	default:
 		break;
diff -ruNp linux-3.13.11/drivers/cpufreq/cpufreq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq.c
--- linux-3.13.11/drivers/cpufreq/cpufreq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq.c	2014-07-09 12:00:15.000000000 +0200
@@ -1878,7 +1878,7 @@ void cpufreq_unregister_governor(struct
 #endif
 
 	mutex_lock(&cpufreq_governor_mutex);
-	list_del(&governor->governor_list);
+	pax_list_del(&governor->governor_list);
 	mutex_unlock(&cpufreq_governor_mutex);
 	return;
 }
@@ -2108,7 +2108,7 @@ static int cpufreq_cpu_callback(struct n
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata cpufreq_cpu_notifier = {
+static struct notifier_block cpufreq_cpu_notifier = {
 	.notifier_call = cpufreq_cpu_callback,
 };
 
@@ -2141,8 +2141,11 @@ int cpufreq_register_driver(struct cpufr
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
 
-	if (driver_data->setpolicy)
-		driver_data->flags |= CPUFREQ_CONST_LOOPS;
+	if (driver_data->setpolicy) {
+		pax_open_kernel();
+		*(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
+		pax_close_kernel();
+	}
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
diff -ruNp linux-3.13.11/drivers/cpufreq/cpufreq_governor.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_governor.c
--- linux-3.13.11/drivers/cpufreq/cpufreq_governor.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_governor.c	2014-07-09 12:00:15.000000000 +0200
@@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_
 	struct dbs_data *dbs_data;
 	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
 	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-	struct od_ops *od_ops = NULL;
+	const struct od_ops *od_ops = NULL;
 	struct od_dbs_tuners *od_tuners = NULL;
 	struct cs_dbs_tuners *cs_tuners = NULL;
 	struct cpu_dbs_common_info *cpu_cdbs;
@@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_
 
 		if ((cdata->governor == GOV_CONSERVATIVE) &&
 				(!policy->governor->initialized)) {
-			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+			const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
 			cpufreq_register_notifier(cs_ops->notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
@@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_
 
 			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
 				(policy->governor->initialized == 1)) {
-				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+				const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
 				cpufreq_unregister_notifier(cs_ops->notifier_block,
 						CPUFREQ_TRANSITION_NOTIFIER);
diff -ruNp linux-3.13.11/drivers/cpufreq/cpufreq_governor.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_governor.h
--- linux-3.13.11/drivers/cpufreq/cpufreq_governor.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_governor.h	2014-07-09 12:00:15.000000000 +0200
@@ -205,7 +205,7 @@ struct common_dbs_data {
 	void (*exit)(struct dbs_data *dbs_data);
 
 	/* Governor specific ops, see below */
-	void *gov_ops;
+	const void *gov_ops;
 };
 
 /* Governor Per policy data */
@@ -225,7 +225,7 @@ struct od_ops {
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
 			unsigned int freq_next, unsigned int relation);
 	void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
-};
+} __no_const;
 
 struct cs_ops {
 	struct notifier_block *notifier_block;
diff -ruNp linux-3.13.11/drivers/cpufreq/cpufreq_ondemand.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_ondemand.c
--- linux-3.13.11/drivers/cpufreq/cpufreq_ondemand.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_ondemand.c	2014-07-09 12:00:15.000000000 +0200
@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs
 
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
 
-static struct od_ops od_ops = {
+static struct od_ops od_ops __read_only = {
 	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
 	.powersave_bias_target = generic_powersave_bias_target,
 	.freq_increase = dbs_freq_increase,
@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias)
 {
-	od_ops.powersave_bias_target = f;
+	pax_open_kernel();
+	*(void **)&od_ops.powersave_bias_target = f;
+	pax_close_kernel();
 	od_set_powersave_bias(powersave_bias);
 }
 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
 
 void od_unregister_powersave_bias_handler(void)
 {
-	od_ops.powersave_bias_target = generic_powersave_bias_target;
+	pax_open_kernel();
+	*(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
+	pax_close_kernel();
 	od_set_powersave_bias(0);
 }
 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff -ruNp linux-3.13.11/drivers/cpufreq/cpufreq_stats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_stats.c
--- linux-3.13.11/drivers/cpufreq/cpufreq_stats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/cpufreq_stats.c	2014-07-09 12:00:15.000000000 +0200
@@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(str
 }
 
 /* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
+static struct notifier_block cpufreq_stat_cpu_notifier = {
 	.notifier_call = cpufreq_stat_cpu_callback,
 	.priority = 1,
 };
diff -ruNp linux-3.13.11/drivers/cpufreq/intel_pstate.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/intel_pstate.c
--- linux-3.13.11/drivers/cpufreq/intel_pstate.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/intel_pstate.c	2014-07-09 12:00:15.000000000 +0200
@@ -123,10 +123,10 @@ struct pstate_funcs {
 struct cpu_defaults {
 	struct pstate_adjust_policy pid_policy;
 	struct pstate_funcs funcs;
-};
+} __do_const;
 
 static struct pstate_adjust_policy pid_params;
-static struct pstate_funcs pstate_funcs;
+static struct pstate_funcs *pstate_funcs;
 
 struct perf_limits {
 	int no_turbo;
@@ -517,7 +517,7 @@ static void intel_pstate_set_pstate(stru
 
 	cpu->pstate.current_pstate = pstate;
 
-	pstate_funcs.set(cpu, pstate);
+	pstate_funcs->set(cpu, pstate);
 }
 
 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -539,12 +539,12 @@ static void intel_pstate_get_cpu_pstates
 {
 	sprintf(cpu->name, "Intel 2nd generation core");
 
-	cpu->pstate.min_pstate = pstate_funcs.get_min();
-	cpu->pstate.max_pstate = pstate_funcs.get_max();
-	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+	cpu->pstate.min_pstate = pstate_funcs->get_min();
+	cpu->pstate.max_pstate = pstate_funcs->get_max();
+	cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
 
-	if (pstate_funcs.get_vid)
-		pstate_funcs.get_vid(cpu);
+	if (pstate_funcs->get_vid)
+		pstate_funcs->get_vid(cpu);
 
 	/*
 	 * goto max pstate so we don't slow up boot if we are built-in if we are
@@ -808,9 +808,9 @@ static int intel_pstate_msrs_not_valid(v
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
 
-	if (!pstate_funcs.get_max() ||
-		!pstate_funcs.get_min() ||
-		!pstate_funcs.get_turbo())
+	if (!pstate_funcs->get_max() ||
+		!pstate_funcs->get_min() ||
+		!pstate_funcs->get_turbo())
 		return -ENODEV;
 
 	rdmsrl(MSR_IA32_APERF, tmp);
@@ -824,7 +824,7 @@ static int intel_pstate_msrs_not_valid(v
 	return 0;
 }
 
-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void copy_pid_params(const struct pstate_adjust_policy *policy)
 {
 	pid_params.sample_rate_ms = policy->sample_rate_ms;
 	pid_params.p_gain_pct = policy->p_gain_pct;
@@ -836,11 +836,7 @@ static void copy_pid_params(struct pstat
 
 static void copy_cpu_funcs(struct pstate_funcs *funcs)
 {
-	pstate_funcs.get_max   = funcs->get_max;
-	pstate_funcs.get_min   = funcs->get_min;
-	pstate_funcs.get_turbo = funcs->get_turbo;
-	pstate_funcs.set       = funcs->set;
-	pstate_funcs.get_vid   = funcs->get_vid;
+	pstate_funcs = funcs;
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
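
Note on the intel_pstate hunks above: the member-by-member copy of the CPU-specific callback table is replaced by a single pointer to the table chosen at init time, so the table itself can stay read-only. A minimal illustration of the two styles (names below are stand-ins, not the driver's real API):

#include <stdio.h>

struct funcs_demo {
	int (*get_max)(void);
	int (*get_min)(void);
};

static int core_get_max(void) { return 35; }
static int core_get_min(void) { return 8;  }

static const struct funcs_demo core_funcs = {
	.get_max = core_get_max,
	.get_min = core_get_min,
};

/* after the change: one pointer, the table itself stays const */
static const struct funcs_demo *pstate_funcs;

int main(void)
{
	pstate_funcs = &core_funcs;	/* was: a field-by-field copy */
	printf("max=%d min=%d\n",
	       pstate_funcs->get_max(), pstate_funcs->get_min());
	return 0;
}
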
diff -ruNp linux-3.13.11/drivers/cpufreq/p4-clockmod.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/p4-clockmod.c
--- linux-3.13.11/drivers/cpufreq/p4-clockmod.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/p4-clockmod.c	2014-07-09 12:00:15.000000000 +0200
@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequ
 		case 0x0F: /* Core Duo */
 		case 0x16: /* Celeron Core */
 		case 0x1C: /* Atom */
-			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			pax_open_kernel();
+			*(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			pax_close_kernel();
 			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 		case 0x0D: /* Pentium M (Dothan) */
-			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			pax_open_kernel();
+			*(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			pax_close_kernel();
 			/* fall through */
 		case 0x09: /* Pentium M (Banias) */
 			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequ
 
 	/* on P-4s, the TSC runs with constant frequency independent whether
 	 * throttling is active or not. */
-	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+	pax_open_kernel();
+	*(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+	pax_close_kernel();
 
 	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
 		printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
diff -ruNp linux-3.13.11/drivers/cpufreq/sparc-us3-cpufreq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/sparc-us3-cpufreq.c
--- linux-3.13.11/drivers/cpufreq/sparc-us3-cpufreq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/sparc-us3-cpufreq.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,14 +18,12 @@
 #include <asm/head.h>
 #include <asm/timer.h>
 
-static struct cpufreq_driver *cpufreq_us3_driver;
-
 struct us3_freq_percpu_info {
 	struct cpufreq_frequency_table table[4];
 };
 
 /* Indexed by cpu number. */
-static struct us3_freq_percpu_info *us3_freq_table;
+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
 
 /* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
  * in the Safari config register.
@@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(stru
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-	if (cpufreq_us3_driver) {
-		cpufreq_frequency_table_put_attr(policy->cpu);
-		us3_freq_target(policy, 0);
-	}
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	us3_freq_target(policy, 0);
 
 	return 0;
 }
 
+static int __init us3_freq_init(void);
+static void __exit us3_freq_exit(void);
+
+static struct cpufreq_driver cpufreq_us3_driver = {
+	.init		= us3_freq_cpu_init,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= us3_freq_target,
+	.get		= us3_freq_get,
+	.exit		= us3_freq_cpu_exit,
+	.owner		= THIS_MODULE,
+	.name		= "UltraSPARC-III",
+
+};
+
 static int __init us3_freq_init(void)
 {
 	unsigned long manuf, impl, ver;
@@ -180,55 +190,15 @@ static int __init us3_freq_init(void)
 	    (impl == CHEETAH_IMPL ||
 	     impl == CHEETAH_PLUS_IMPL ||
 	     impl == JAGUAR_IMPL ||
-	     impl == PANTHER_IMPL)) {
-		struct cpufreq_driver *driver;
-
-		ret = -ENOMEM;
-		driver = kzalloc(sizeof(*driver), GFP_KERNEL);
-		if (!driver)
-			goto err_out;
-
-		us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
-			GFP_KERNEL);
-		if (!us3_freq_table)
-			goto err_out;
-
-		driver->init = us3_freq_cpu_init;
-		driver->verify = cpufreq_generic_frequency_table_verify;
-		driver->target_index = us3_freq_target;
-		driver->get = us3_freq_get;
-		driver->exit = us3_freq_cpu_exit;
-		strcpy(driver->name, "UltraSPARC-III");
-
-		cpufreq_us3_driver = driver;
-		ret = cpufreq_register_driver(driver);
-		if (ret)
-			goto err_out;
-
-		return 0;
-
-err_out:
-		if (driver) {
-			kfree(driver);
-			cpufreq_us3_driver = NULL;
-		}
-		kfree(us3_freq_table);
-		us3_freq_table = NULL;
-		return ret;
-	}
+	     impl == PANTHER_IMPL))
+		return cpufreq_register_driver(&cpufreq_us3_driver);
 
 	return -ENODEV;
 }
 
 static void __exit us3_freq_exit(void)
 {
-	if (cpufreq_us3_driver) {
-		cpufreq_unregister_driver(cpufreq_us3_driver);
-		kfree(cpufreq_us3_driver);
-		cpufreq_us3_driver = NULL;
-		kfree(us3_freq_table);
-		us3_freq_table = NULL;
-	}
+	cpufreq_unregister_driver(&cpufreq_us3_driver);
 }
 
 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
diff -ruNp linux-3.13.11/drivers/cpufreq/speedstep-centrino.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/speedstep-centrino.c
--- linux-3.13.11/drivers/cpufreq/speedstep-centrino.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpufreq/speedstep-centrino.c	2014-07-09 12:00:15.000000000 +0200
@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpuf
 	    !cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
-	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
-		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+	if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
+		pax_open_kernel();
+		*(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+		pax_close_kernel();
+	}
 
 	if (policy->cpu != 0)
 		return -ENODEV;
diff -ruNp linux-3.13.11/drivers/cpuidle/driver.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/driver.c
--- linux-3.13.11/drivers/cpuidle/driver.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/driver.c	2014-07-09 12:00:15.000000000 +0200
@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_devi
 
 static void poll_idle_init(struct cpuidle_driver *drv)
 {
-	struct cpuidle_state *state = &drv->states[0];
+	cpuidle_state_no_const *state = &drv->states[0];
 
 	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
 	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
diff -ruNp linux-3.13.11/drivers/cpuidle/governor.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/governor.c
--- linux-3.13.11/drivers/cpuidle/governor.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/governor.c	2014-07-09 12:00:15.000000000 +0200
@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpu
 	mutex_lock(&cpuidle_lock);
 	if (__cpuidle_find_governor(gov->name) == NULL) {
 		ret = 0;
-		list_add_tail(&gov->governor_list, &cpuidle_governors);
+		pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
 		if (!cpuidle_curr_governor ||
 		    cpuidle_curr_governor->rating < gov->rating)
 			cpuidle_switch_governor(gov);
diff -ruNp linux-3.13.11/drivers/cpuidle/sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/sysfs.c
--- linux-3.13.11/drivers/cpuidle/sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/cpuidle/sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_
 	NULL
 };
 
-static struct attribute_group cpuidle_attr_group = {
+static attribute_group_no_const cpuidle_attr_group = {
 	.attrs = cpuidle_default_attrs,
 	.name = "cpuidle",
 };
diff -ruNp linux-3.13.11/drivers/crypto/hifn_795x.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/crypto/hifn_795x.c
--- linux-3.13.11/drivers/crypto/hifn_795x.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/crypto/hifn_795x.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_p
 MODULE_PARM_DESC(hifn_pll_ref,
 		 "PLL reference clock (pci[freq] or ext[freq], default ext)");
 
-static atomic_t hifn_dev_number;
+static atomic_unchecked_t hifn_dev_number;
 
 #define ACRYPTO_OP_DECRYPT	0
 #define ACRYPTO_OP_ENCRYPT	1
@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pd
 		goto err_out_disable_pci_device;
 
 	snprintf(name, sizeof(name), "hifn%d",
-			atomic_inc_return(&hifn_dev_number)-1);
+			atomic_inc_return_unchecked(&hifn_dev_number)-1);
 
 	err = pci_request_regions(pdev, name);
 	if (err)
diff -ruNp linux-3.13.11/drivers/devfreq/devfreq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/devfreq/devfreq.c
--- linux-3.13.11/drivers/devfreq/devfreq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/devfreq/devfreq.c	2014-07-09 12:00:15.000000000 +0200
@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_
 		goto err_out;
 	}
 
-	list_add(&governor->node, &devfreq_governor_list);
+	pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
 
 	list_for_each_entry(devfreq, &devfreq_list, node) {
 		int ret = 0;
@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfr
 		}
 	}
 
-	list_del(&governor->node);
+	pax_list_del((struct list_head *)&governor->node);
 err_out:
 	mutex_unlock(&devfreq_list_lock);
 
diff -ruNp linux-3.13.11/drivers/dma/sh/shdmac.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/dma/sh/shdmac.c
--- linux-3.13.11/drivers/dma/sh/shdmac.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/dma/sh/shdmac.c	2014-07-09 12:00:15.000000000 +0200
@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct no
 	return ret;
 }
 
-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+static struct notifier_block sh_dmae_nmi_notifier = {
 	.notifier_call	= sh_dmae_nmi_handler,
 
 	/* Run before NMI debug handler and KGDB */
diff -ruNp linux-3.13.11/drivers/edac/edac_device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_device.c
--- linux-3.13.11/drivers/edac/edac_device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_device.c	2014-07-09 12:00:15.000000000 +0200
@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(stru
  */
 int edac_device_alloc_index(void)
 {
-	static atomic_t device_indexes = ATOMIC_INIT(0);
+	static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
 
-	return atomic_inc_return(&device_indexes) - 1;
+	return atomic_inc_return_unchecked(&device_indexes) - 1;
 }
 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
 
diff -ruNp linux-3.13.11/drivers/edac/edac_mc_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_mc_sysfs.c
--- linux-3.13.11/drivers/edac/edac_mc_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_mc_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,7 +152,7 @@ static const char * const edac_caps[] =
 struct dev_ch_attribute {
 	struct device_attribute attr;
 	int channel;
-};
+} __do_const;
 
 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
 	struct dev_ch_attribute dev_attr_legacy_##_name = \
@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct
 	}
 
 	if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
+		pax_open_kernel();
 		if (mci->get_sdram_scrub_rate) {
-			dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
-			dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
+			*(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
+			*(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
 		}
 		if (mci->set_sdram_scrub_rate) {
-			dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
-			dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
+			*(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
+			*(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
 		}
+		pax_close_kernel();
 		err = device_create_file(&mci->dev,
 					 &dev_attr_sdram_scrub_rate);
 		if (err) {
diff -ruNp linux-3.13.11/drivers/edac/edac_pci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_pci.c
--- linux-3.13.11/drivers/edac/edac_pci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_pci.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,7 +29,7 @@
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
 static LIST_HEAD(edac_pci_list);
-static atomic_t pci_indexes = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
 
 /*
  * edac_pci_alloc_ctl_info
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_p
  */
 int edac_pci_alloc_index(void)
 {
-	return atomic_inc_return(&pci_indexes) - 1;
+	return atomic_inc_return_unchecked(&pci_indexes) - 1;
 }
 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
 
diff -ruNp linux-3.13.11/drivers/edac/edac_pci_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_pci_sysfs.c
--- linux-3.13.11/drivers/edac/edac_pci_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/edac_pci_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1;		/* log
 static int edac_pci_log_npe = 1;	/* log PCI non-parity error errors */
 static int edac_pci_poll_msec = 1000;	/* one second workq period */
 
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
 
 static struct kobject *edac_pci_top_main_kobj;
 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
 	void *value;
 	 ssize_t(*show) (void *, char *);
 	 ssize_t(*store) (void *, const char *, size_t);
-};
+} __do_const;
 
 /* Set of show/store abstract level functions for PCI Parity object */
 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
 			edac_printk(KERN_CRIT, EDAC_PCI,
 				"Signaled System Error on %s\n",
 				pci_name(dev));
-			atomic_inc(&pci_nonparity_count);
+			atomic_inc_unchecked(&pci_nonparity_count);
 		}
 
 		if (status & (PCI_STATUS_PARITY)) {
@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
 				"Master Data Parity Error on %s\n",
 				pci_name(dev));
 
-			atomic_inc(&pci_parity_count);
+			atomic_inc_unchecked(&pci_parity_count);
 		}
 
 		if (status & (PCI_STATUS_DETECTED_PARITY)) {
@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
 				"Detected Parity Error on %s\n",
 				pci_name(dev));
 
-			atomic_inc(&pci_parity_count);
+			atomic_inc_unchecked(&pci_parity_count);
 		}
 	}
 
@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(str
 				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
 					"Signaled System Error on %s\n",
 					pci_name(dev));
-				atomic_inc(&pci_nonparity_count);
+				atomic_inc_unchecked(&pci_nonparity_count);
 			}
 
 			if (status & (PCI_STATUS_PARITY)) {
@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(str
 					"Master Data Parity Error on "
 					"%s\n", pci_name(dev));
 
-				atomic_inc(&pci_parity_count);
+				atomic_inc_unchecked(&pci_parity_count);
 			}
 
 			if (status & (PCI_STATUS_DETECTED_PARITY)) {
@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(str
 					"Detected Parity Error on %s\n",
 					pci_name(dev));
 
-				atomic_inc(&pci_parity_count);
+				atomic_inc_unchecked(&pci_parity_count);
 			}
 		}
 	}
@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
 	if (!check_pci_errors)
 		return;
 
-	before_count = atomic_read(&pci_parity_count);
+	before_count = atomic_read_unchecked(&pci_parity_count);
 
 	/* scan all PCI devices looking for a Parity Error on devices and
 	 * bridges.
@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
 	/* Only if operator has selected panic on PCI Error */
 	if (edac_pci_get_panic_on_pe()) {
 		/* If the count is different 'after' from 'before' */
-		if (before_count != atomic_read(&pci_parity_count))
+		if (before_count != atomic_read_unchecked(&pci_parity_count))
 			panic("EDAC: PCI Parity Error");
 	}
 }
diff -ruNp linux-3.13.11/drivers/edac/mce_amd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/mce_amd.h
--- linux-3.13.11/drivers/edac/mce_amd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/edac/mce_amd.h	2014-07-09 12:00:15.000000000 +0200
@@ -77,7 +77,7 @@ struct amd_decoder_ops {
 	bool (*mc0_mce)(u16, u8);
 	bool (*mc1_mce)(u16, u8);
 	bool (*mc2_mce)(u16, u8);
-};
+} __no_const;
 
 void amd_report_gart_errors(bool);
 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
diff -ruNp linux-3.13.11/drivers/firewire/core-card.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-card.c
--- linux-3.13.11/drivers/firewire/core-card.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-card.c	2014-07-09 12:00:15.000000000 +0200
@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *
 			const struct fw_card_driver *driver,
 			struct device *device)
 {
-	static atomic_t index = ATOMIC_INIT(-1);
+	static atomic_unchecked_t index = ATOMIC_INIT(-1);
 
-	card->index = atomic_inc_return(&index);
+	card->index = atomic_inc_return_unchecked(&index);
 	card->driver = driver;
 	card->device = device;
 	card->current_tlabel = 0;
@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
 
 void fw_core_remove_card(struct fw_card *card)
 {
-	struct fw_card_driver dummy_driver = dummy_driver_template;
+	fw_card_driver_no_const dummy_driver = dummy_driver_template;
 
 	card->driver->update_phy_reg(card, 4,
 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
diff -ruNp linux-3.13.11/drivers/firewire/core-device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-device.c
--- linux-3.13.11/drivers/firewire/core-device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-device.c	2014-07-09 12:00:15.000000000 +0200
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma)
 struct config_rom_attribute {
 	struct device_attribute attr;
 	u32 key;
-};
+} __do_const;
 
 static ssize_t show_immediate(struct device *dev,
 			      struct device_attribute *dattr, char *buf)
diff -ruNp linux-3.13.11/drivers/firewire/core-transaction.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-transaction.c
--- linux-3.13.11/drivers/firewire/core-transaction.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core-transaction.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,6 +38,7 @@
 #include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/byteorder.h>
 
diff -ruNp linux-3.13.11/drivers/firewire/core.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core.h
--- linux-3.13.11/drivers/firewire/core.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firewire/core.h	2014-07-09 12:00:15.000000000 +0200
@@ -111,6 +111,7 @@ struct fw_card_driver {
 
 	int (*stop_iso)(struct fw_iso_context *ctx);
 };
+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
 
 void fw_card_initialize(struct fw_card *card,
 		const struct fw_card_driver *driver, struct device *device);
diff -ruNp linux-3.13.11/drivers/firmware/dmi-id.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/dmi-id.c
--- linux-3.13.11/drivers/firmware/dmi-id.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/dmi-id.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,7 +16,7 @@
 struct dmi_device_attribute{
 	struct device_attribute dev_attr;
 	int field;
-};
+} __do_const;
 #define to_dmi_dev_attr(_dev_attr) \
 	container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
 
diff -ruNp linux-3.13.11/drivers/firmware/dmi_scan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/dmi_scan.c
--- linux-3.13.11/drivers/firmware/dmi_scan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/dmi_scan.c	2014-07-09 12:00:15.000000000 +0200
@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct
 	if (buf == NULL)
 		return -1;
 
-	dmi_table(buf, dmi_len, dmi_num, decode, private_data);
+	dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
 
 	iounmap(buf);
 	return 0;
diff -ruNp linux-3.13.11/drivers/firmware/efi/cper.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/cper.c
--- linux-3.13.11/drivers/firmware/efi/cper.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/cper.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,12 +41,12 @@
  */
 u64 cper_next_record_id(void)
 {
-	static atomic64_t seq;
+	static atomic64_unchecked_t seq;
 
-	if (!atomic64_read(&seq))
-		atomic64_set(&seq, ((u64)get_seconds()) << 32);
+	if (!atomic64_read_unchecked(&seq))
+		atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
 
-	return atomic64_inc_return(&seq);
+	return atomic64_inc_return_unchecked(&seq);
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
diff -ruNp linux-3.13.11/drivers/firmware/efi/efi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/efi.c
--- linux-3.13.11/drivers/firmware/efi/efi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/efi.c	2014-07-09 12:00:15.000000000 +0200
@@ -81,14 +81,16 @@ static struct attribute_group efi_subsys
 };
 
 static struct efivars generic_efivars;
-static struct efivar_operations generic_ops;
+static efivar_operations_no_const generic_ops __read_only;
 
 static int generic_ops_register(void)
 {
-	generic_ops.get_variable = efi.get_variable;
-	generic_ops.set_variable = efi.set_variable;
-	generic_ops.get_next_variable = efi.get_next_variable;
-	generic_ops.query_variable_store = efi_query_variable_store;
+	pax_open_kernel();
+	*(void **)&generic_ops.get_variable = efi.get_variable;
+	*(void **)&generic_ops.set_variable = efi.set_variable;
+	*(void **)&generic_ops.get_next_variable = efi.get_next_variable;
+	*(void **)&generic_ops.query_variable_store = efi_query_variable_store;
+	pax_close_kernel();
 
 	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
 }
diff -ruNp linux-3.13.11/drivers/firmware/efi/efivars.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/efivars.c
--- linux-3.13.11/drivers/firmware/efi/efivars.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/efi/efivars.c	2014-07-09 12:00:15.000000000 +0200
@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_
 static int
 create_efivars_bin_attributes(void)
 {
-	struct bin_attribute *attr;
+	bin_attribute_no_const *attr;
 	int error;
 
 	/* new_var */
diff -ruNp linux-3.13.11/drivers/firmware/google/memconsole.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/google/memconsole.c
--- linux-3.13.11/drivers/firmware/google/memconsole.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/firmware/google/memconsole.c	2014-07-09 12:00:15.000000000 +0200
@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
 	if (!found_memconsole())
 		return -ENODEV;
 
-	memconsole_bin_attr.size = memconsole_length;
+	pax_open_kernel();
+	*(size_t *)&memconsole_bin_attr.size = memconsole_length;
+	pax_close_kernel();
 
 	ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
 
diff -ruNp linux-3.13.11/drivers/gpio/gpio-em.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-em.c
--- linux-3.13.11/drivers/gpio/gpio-em.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-em.c	2014-07-09 12:00:15.000000000 +0200
@@ -257,7 +257,7 @@ static int em_gio_probe(struct platform_
 	struct em_gio_priv *p;
 	struct resource *io[2], *irq[2];
 	struct gpio_chip *gpio_chip;
-	struct irq_chip *irq_chip;
+	irq_chip_no_const *irq_chip;
 	const char *name = dev_name(&pdev->dev);
 	int ret;
 
diff -ruNp linux-3.13.11/drivers/gpio/gpio-ich.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-ich.c
--- linux-3.13.11/drivers/gpio/gpio-ich.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-ich.c	2014-07-09 12:00:15.000000000 +0200
@@ -71,7 +71,7 @@ struct ichx_desc {
 	/* Some chipsets have quirks, let these use their own request/get */
 	int (*request)(struct gpio_chip *chip, unsigned offset);
 	int (*get)(struct gpio_chip *chip, unsigned offset);
-};
+} __do_const;
 
 static struct {
 	spinlock_t lock;
diff -ruNp linux-3.13.11/drivers/gpio/gpio-rcar.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-rcar.c
--- linux-3.13.11/drivers/gpio/gpio-rcar.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-rcar.c	2014-07-09 12:00:15.000000000 +0200
@@ -316,7 +316,7 @@ static int gpio_rcar_probe(struct platfo
 	struct gpio_rcar_priv *p;
 	struct resource *io, *irq;
 	struct gpio_chip *gpio_chip;
-	struct irq_chip *irq_chip;
+	irq_chip_no_const *irq_chip;
 	const char *name = dev_name(&pdev->dev);
 	int ret;
 
diff -ruNp linux-3.13.11/drivers/gpio/gpio-vr41xx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-vr41xx.c
--- linux-3.13.11/drivers/gpio/gpio-vr41xx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpio/gpio-vr41xx.c	2014-07-09 12:00:15.000000000 +0200
@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
 	printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
 	       maskl, pendl, maskh, pendh);
 
-	atomic_inc(&irq_err_count);
+	atomic_inc_unchecked(&irq_err_count);
 
 	return -EINVAL;
 }
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_crtc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_crtc.c
--- linux-3.13.11/drivers/gpu/drm/drm_crtc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_crtc.c	2014-07-09 12:00:15.000000000 +0200
@@ -3102,7 +3102,7 @@ int drm_mode_getproperty_ioctl(struct dr
 					goto done;
 				}
 
-				if (copy_to_user(&enum_ptr[copied].name,
+				if (copy_to_user(enum_ptr[copied].name,
 						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
 					ret = -EFAULT;
 					goto done;
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_crtc_helper.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_crtc_helper.c
--- linux-3.13.11/drivers/gpu/drm/drm_crtc_helper.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_crtc_helper.c	2014-07-09 12:00:15.000000000 +0200
@@ -338,7 +338,7 @@ static bool drm_encoder_crtc_ok(struct d
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?\n");
+	BUG_ON(!crtc);
 
 	dev = crtc->dev;
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_drv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_drv.c
--- linux-3.13.11/drivers/gpu/drm/drm_drv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_drv.c	2014-07-09 12:00:15.000000000 +0200
@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
 /**
  * Copy and IOCTL return string to user space
  */
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
 {
 	int len;
 
@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev;
 	const struct drm_ioctl_desc *ioctl = NULL;
-	drm_ioctl_t *func;
+	drm_ioctl_no_const_t func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	int retcode = -EINVAL;
 	char stack_kdata[128];
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_fops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_fops.c
--- linux-3.13.11/drivers/gpu/drm/drm_fops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_fops.c	2014-07-09 12:00:15.000000000 +0200
@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct
 	if (drm_device_is_unplugged(dev))
 		return -ENODEV;
 
-	if (!dev->open_count++)
+	if (local_inc_return(&dev->open_count) == 1)
 		need_setup = 1;
 	mutex_lock(&dev->struct_mutex);
 	old_imapping = inode->i_mapping;
@@ -127,7 +127,7 @@ err_undo:
 	iput(container_of(dev->dev_mapping, struct inode, i_data));
 	dev->dev_mapping = old_mapping;
 	mutex_unlock(&dev->struct_mutex);
-	dev->open_count--;
+	local_dec(&dev->open_count);
 	return retcode;
 }
 EXPORT_SYMBOL(drm_open);
@@ -467,7 +467,7 @@ int drm_release(struct inode *inode, str
 
 	mutex_lock(&drm_global_mutex);
 
-	DRM_DEBUG("open_count = %d\n", dev->open_count);
+	DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
 
 	if (dev->driver->preclose)
 		dev->driver->preclose(dev, file_priv);
@@ -476,10 +476,10 @@ int drm_release(struct inode *inode, str
 	 * Begin inline drm_release
 	 */
 
-	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
 		  task_pid_nr(current),
 		  (long)old_encode_dev(file_priv->minor->device),
-		  dev->open_count);
+		  local_read(&dev->open_count));
 
 	/* Release any auth tokens that might point to this file_priv,
 	   (do that under the drm_global_mutex) */
@@ -577,7 +577,7 @@ int drm_release(struct inode *inode, str
 	 * End inline drm_release
 	 */
 
-	if (!--dev->open_count) {
+	if (local_dec_and_test(&dev->open_count)) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",
 				  atomic_read(&dev->ioctl_count));
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_global.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_global.c
--- linux-3.13.11/drivers/gpu/drm/drm_global.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_global.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@
 struct drm_global_item {
 	struct mutex mutex;
 	void *object;
-	int refcount;
+	atomic_t refcount;
 };
 
 static struct drm_global_item glob[DRM_GLOBAL_NUM];
@@ -49,7 +49,7 @@ void drm_global_init(void)
 		struct drm_global_item *item = &glob[i];
 		mutex_init(&item->mutex);
 		item->object = NULL;
-		item->refcount = 0;
+		atomic_set(&item->refcount, 0);
 	}
 }
 
@@ -59,7 +59,7 @@ void drm_global_release(void)
 	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
 		struct drm_global_item *item = &glob[i];
 		BUG_ON(item->object != NULL);
-		BUG_ON(item->refcount != 0);
+		BUG_ON(atomic_read(&item->refcount) != 0);
 	}
 }
 
@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_globa
 	struct drm_global_item *item = &glob[ref->global_type];
 
 	mutex_lock(&item->mutex);
-	if (item->refcount == 0) {
+	if (atomic_read(&item->refcount) == 0) {
 		item->object = kzalloc(ref->size, GFP_KERNEL);
 		if (unlikely(item->object == NULL)) {
 			ret = -ENOMEM;
@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_globa
 			goto out_err;
 
 	}
-	++item->refcount;
+	atomic_inc(&item->refcount);
 	ref->object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_gl
 	struct drm_global_item *item = &glob[ref->global_type];
 
 	mutex_lock(&item->mutex);
-	BUG_ON(item->refcount == 0);
+	BUG_ON(atomic_read(&item->refcount) == 0);
 	BUG_ON(ref->object != item->object);
-	if (--item->refcount == 0) {
+	if (atomic_dec_and_test(&item->refcount)) {
 		ref->release(ref);
 		item->object = NULL;
 	}
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_info.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_info.c
--- linux-3.13.11/drivers/gpu/drm/drm_info.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_info.c	2014-07-09 12:00:15.000000000 +0200
@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
 	struct drm_local_map *map;
 	struct drm_map_list *r_list;
 
-	/* Hardcoded from _DRM_FRAME_BUFFER,
-	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+	static const char * const types[] = {
+		[_DRM_FRAME_BUFFER] = "FB",
+		[_DRM_REGISTERS] = "REG",
+		[_DRM_SHM] = "SHM",
+		[_DRM_AGP] = "AGP",
+		[_DRM_SCATTER_GATHER] = "SG",
+		[_DRM_CONSISTENT] = "PCI",
+		[_DRM_GEM] = "GEM" };
 	const char *type;
 	int i;
 
@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
 		map = r_list->map;
 		if (!map)
 			continue;
-		if (map->type < 0 || map->type > 5)
+		if (map->type >= ARRAY_SIZE(types))
 			type = "??";
 		else
 			type = types[map->type];
@@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, voi
 			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
 			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
 			   vma->vm_flags & VM_IO ? 'i' : '-',
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+			   0);
+#else
 			   vma->vm_pgoff);
+#endif
 
 #if defined(__i386__)
 		pgprot = pgprot_val(vma->vm_page_prot);
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/drm_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct fi
 	request = compat_alloc_user_space(nbytes);
 	if (!access_ok(VERIFY_WRITE, request, nbytes))
 		return -EFAULT;
-	list = (struct drm_buf_desc *) (request + 1);
+	list = (struct drm_buf_desc __user *) (request + 1);
 
 	if (__put_user(count, &request->count)
 	    || __put_user(list, &request->list))
@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct fil
 	request = compat_alloc_user_space(nbytes);
 	if (!access_ok(VERIFY_WRITE, request, nbytes))
 		return -EFAULT;
-	list = (struct drm_buf_pub *) (request + 1);
+	list = (struct drm_buf_pub __user *) (request + 1);
 
 	if (__put_user(count, &request->count)
 	    || __put_user(list, &request->list))
@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct
 	return 0;
 }
 
-drm_ioctl_compat_t *drm_compat_ioctls[] = {
+drm_ioctl_compat_t drm_compat_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[]
 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn;
 	int ret;
 
 	/* Assume that ioctls without an explicit compat routine will just
@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp,
 	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
 		return drm_ioctl(filp, cmd, arg);
 
-	fn = drm_compat_ioctls[nr];
-
-	if (fn != NULL)
-		ret = (*fn) (filp, cmd, arg);
+	if (drm_compat_ioctls[nr] != NULL)
+		ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
 	else
 		ret = drm_ioctl(filp, cmd, arg);
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_stub.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_stub.c
--- linux-3.13.11/drivers/gpu/drm/drm_stub.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_stub.c	2014-07-09 12:00:15.000000000 +0200
@@ -403,7 +403,7 @@ void drm_unplug_dev(struct drm_device *d
 
 	drm_device_set_unplugged(dev);
 
-	if (dev->open_count == 0) {
+	if (local_read(&dev->open_count) == 0) {
 		drm_put_dev(dev);
 	}
 	mutex_unlock(&drm_global_mutex);
diff -ruNp linux-3.13.11/drivers/gpu/drm/drm_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_sysfs.c
--- linux-3.13.11/drivers/gpu/drm/drm_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/drm_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct dev
  */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
-	char *minor_str;
+	const char *minor_str;
 	int r;
 
 	if (minor->type == DRM_MINOR_CONTROL)
diff -ruNp linux-3.13.11/drivers/gpu/drm/i810/i810_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i810/i810_drv.h
--- linux-3.13.11/drivers/gpu/drm/i810/i810_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i810/i810_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
 	int page_flipping;
 
 	wait_queue_head_t irq_queue;
-	atomic_t irq_received;
-	atomic_t irq_emitted;
+	atomic_unchecked_t irq_received;
+	atomic_unchecked_t irq_emitted;
 
 	int front_offset;
 } drm_i810_private_t;
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_debugfs.c
--- linux-3.13.11/drivers/gpu/drm/i915/i915_debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_debugfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -702,7 +702,7 @@ static int i915_interrupt_info(struct se
 			   I915_READ(GTIMR));
 	}
 	seq_printf(m, "Interrupts received: %d\n",
-		   atomic_read(&dev_priv->irq_received));
+		   atomic_read_unchecked(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_dma.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_dma.c
--- linux-3.13.11/drivers/gpu/drm/i915/i915_dma.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_dma.c	2014-07-09 12:00:15.000000000 +0200
@@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(s
 	bool can_switch;
 
 	spin_lock(&dev->count_lock);
-	can_switch = (dev->open_count == 0);
+	can_switch = (local_read(&dev->open_count) == 0);
 	spin_unlock(&dev->count_lock);
 	return can_switch;
 }
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_drv.h
--- linux-3.13.11/drivers/gpu/drm/i915/i915_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -1326,7 +1326,7 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
 
-	atomic_t irq_received;
+	atomic_unchecked_t irq_received;
 
 	/* protects the irq masks */
 	spinlock_t irq_lock;
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_gem_execbuffer.c
--- linux-3.13.11/drivers/gpu/drm/i915/i915_gem_execbuffer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_gem_execbuffer.c	2014-07-09 12:00:15.000000000 +0200
@@ -861,9 +861,9 @@ i915_gem_check_execbuffer(struct drm_i91
 
 static int
 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
-		   int count)
+		   unsigned int count)
 {
-	int i;
+	unsigned int i;
 	unsigned relocs_total = 0;
 	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/i915/i915_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file
 			 (unsigned long)request);
 }
 
-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t i915_compat_ioctls[] = {
 	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
 	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_i
 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
-		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-	if (fn != NULL)
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
+		drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
 		ret = (*fn) (filp, cmd, arg);
-	else
+	} else
 		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/i915_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_irq.c
--- linux-3.13.11/drivers/gpu/drm/i915/i915_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/i915_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -1419,7 +1419,7 @@ static irqreturn_t valleyview_irq_handle
 	int pipe;
 	u32 pipe_stats[I915_MAX_PIPES];
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	while (true) {
 		iir = I915_READ(VLV_IIR);
@@ -1729,7 +1729,7 @@ static irqreturn_t ironlake_irq_handler(
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	/* We get interrupts on unclaimed registers, so check for this before we
 	 * do any I915_{READ,WRITE}. */
@@ -1799,7 +1799,7 @@ static irqreturn_t gen8_irq_handler(int
 	uint32_t tmp = 0;
 	enum pipe pipe;
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	master_ctl = I915_READ(GEN8_MASTER_IRQ);
 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2623,7 +2623,7 @@ static void ironlake_irq_preinstall(stru
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	I915_WRITE(HWSTAM, 0xeffe);
 
@@ -2641,7 +2641,7 @@ static void valleyview_irq_preinstall(st
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	/* VLV magic */
 	I915_WRITE(VLV_IMR, 0);
@@ -2672,7 +2672,7 @@ static void gen8_irq_preinstall(struct d
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
 	POSTING_READ(GEN8_MASTER_IRQ);
@@ -2996,7 +2996,7 @@ static void gen8_irq_uninstall(struct dr
 	if (!dev_priv)
 		return;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	I915_WRITE(GEN8_MASTER_IRQ, 0);
 
@@ -3090,7 +3090,7 @@ static void i8xx_irq_preinstall(struct d
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0);
@@ -3176,7 +3176,7 @@ static irqreturn_t i8xx_irq_handler(int
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	iir = I915_READ16(IIR);
 	if (iir == 0)
@@ -3251,7 +3251,7 @@ static void i915_irq_preinstall(struct d
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3358,7 +3358,7 @@ static irqreturn_t i915_irq_handler(int
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 	int pipe, ret = IRQ_NONE;
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	iir = I915_READ(IIR);
 	do {
@@ -3485,7 +3485,7 @@ static void i965_irq_preinstall(struct d
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int pipe;
 
-	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set_unchecked(&dev_priv->irq_received, 0);
 
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -3601,7 +3601,7 @@ static irqreturn_t i965_irq_handler(int
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
-	atomic_inc(&dev_priv->irq_received);
+	atomic_inc_unchecked(&dev_priv->irq_received);
 
 	iir = I915_READ(IIR);
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/i915/intel_display.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/intel_display.c
--- linux-3.13.11/drivers/gpu/drm/i915/intel_display.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/i915/intel_display.c	2014-07-09 12:00:15.000000000 +0200
@@ -10506,13 +10506,13 @@ struct intel_quirk {
 	int subsystem_vendor;
 	int subsystem_device;
 	void (*hook)(struct drm_device *dev);
-};
+} __do_const;
 
 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
 struct intel_dmi_quirk {
 	void (*hook)(struct drm_device *dev);
 	const struct dmi_system_id (*dmi_id_list)[];
-};
+} __do_const;
 
 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
 {
@@ -10520,18 +10520,20 @@ static int intel_dmi_reverse_brightness(
 	return 1;
 }
 
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+static const struct dmi_system_id intel_dmi_quirks_table[] = {
 	{
-		.dmi_id_list = &(const struct dmi_system_id[]) {
-			{
-				.callback = intel_dmi_reverse_brightness,
-				.ident = "NCR Corporation",
-				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
-					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
-				},
-			},
-			{ }  /* terminating entry */
+		.callback = intel_dmi_reverse_brightness,
+		.ident = "NCR Corporation",
+		.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+			    DMI_MATCH(DMI_PRODUCT_NAME, ""),
 		},
+	},
+	{ }  /* terminating entry */
+};
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+	{
+		.dmi_id_list = &intel_dmi_quirks_table,
 		.hook = quirk_invert_brightness,
 	},
 };
diff -ruNp linux-3.13.11/drivers/gpu/drm/mga/mga_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_drv.h
--- linux-3.13.11/drivers/gpu/drm/mga/mga_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
 	u32 clear_cmd;
 	u32 maccess;
 
-	atomic_t vbl_received;          /**< Number of vblanks received. */
+	atomic_unchecked_t vbl_received;          /**< Number of vblanks received. */
 	wait_queue_head_t fence_queue;
-	atomic_t last_fence_retired;
+	atomic_unchecked_t last_fence_retired;
 	u32 next_fence_to_post;
 
 	unsigned int fb_cpp;
diff -ruNp linux-3.13.11/drivers/gpu/drm/mga/mga_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/mga/mga_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(stru
 	return 0;
 }
 
-drm_ioctl_compat_t *mga_compat_ioctls[] = {
+drm_ioctl_compat_t mga_compat_ioctls[] = {
 	[DRM_MGA_INIT] = compat_mga_init,
 	[DRM_MGA_GETPARAM] = compat_mga_getparam,
 	[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[]
 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
-		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-	if (fn != NULL)
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
+		drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
 		ret = (*fn) (filp, cmd, arg);
-	else
+	} else
 		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
diff -ruNp linux-3.13.11/drivers/gpu/drm/mga/mga_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_irq.c
--- linux-3.13.11/drivers/gpu/drm/mga/mga_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/mga/mga_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_de
 	if (crtc != 0)
 		return 0;
 
-	return atomic_read(&dev_priv->vbl_received);
+	return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 
@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
 	/* VBLANK interrupt */
 	if (status & MGA_VLINEPEN) {
 		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-		atomic_inc(&dev_priv->vbl_received);
+		atomic_inc_unchecked(&dev_priv->vbl_received);
 		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
 		if ((prim_start & ~0x03) != (prim_end & ~0x03))
 			MGA_WRITE(MGA_PRIMEND, prim_end);
 
-		atomic_inc(&dev_priv->last_fence_retired);
+		atomic_inc_unchecked(&dev_priv->last_fence_retired);
 		DRM_WAKEUP(&dev_priv->fence_queue);
 		handled = 1;
 	}
@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_dev
 	 * using fences.
 	 */
 	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
-		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+		    (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
 		      - *sequence) <= (1 << 23)));
 
 	*sequence = cur_fence;
diff -ruNp linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_bios.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_bios.c
--- linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_bios.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_bios.c	2014-07-09 12:00:15.000000000 +0200
@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(stru
 struct bit_table {
 	const char id;
 	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
-};
+} __no_const;
 
 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_drm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_drm.h
--- linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_drm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_drm.h	2014-07-09 12:00:15.000000000 +0200
@@ -96,7 +96,6 @@ struct nouveau_drm {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 		int (*move)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
diff -ruNp linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *f
 			 unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn = NULL;
+	drm_ioctl_compat_t fn = NULL;
 	int ret;
 
 	if (nr < DRM_COMMAND_BASE)
diff -ruNp linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_ttm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_ttm.c
--- linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_ttm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_ttm.c	2014-07-09 12:00:15.000000000 +0200
@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_me
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
-	nouveau_vram_manager_init,
-	nouveau_vram_manager_fini,
-	nouveau_vram_manager_new,
-	nouveau_vram_manager_del,
-	nouveau_vram_manager_debug
+	.init = nouveau_vram_manager_init,
+	.takedown = nouveau_vram_manager_fini,
+	.get_node = nouveau_vram_manager_new,
+	.put_node = nouveau_vram_manager_del,
+	.debug = nouveau_vram_manager_debug
 };
 
 static int
@@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_me
 }
 
 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
-	nouveau_gart_manager_init,
-	nouveau_gart_manager_fini,
-	nouveau_gart_manager_new,
-	nouveau_gart_manager_del,
-	nouveau_gart_manager_debug
+	.init = nouveau_gart_manager_init,
+	.takedown = nouveau_gart_manager_fini,
+	.get_node = nouveau_gart_manager_new,
+	.put_node = nouveau_gart_manager_del,
+	.debug = nouveau_gart_manager_debug
 };
 
 #include <core/subdev/vm/nv04.h>
@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_t
 }
 
 const struct ttm_mem_type_manager_func nv04_gart_manager = {
-	nv04_gart_manager_init,
-	nv04_gart_manager_fini,
-	nv04_gart_manager_new,
-	nv04_gart_manager_del,
-	nv04_gart_manager_debug
+	.init = nv04_gart_manager_init,
+	.takedown = nv04_gart_manager_fini,
+	.get_node = nv04_gart_manager_new,
+	.put_node = nv04_gart_manager_del,
+	.debug = nv04_gart_manager_debug
 };
 
 int
diff -ruNp linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_vga.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_vga.c
--- linux-3.13.11/drivers/gpu/drm/nouveau/nouveau_vga.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/nouveau/nouveau_vga.c	2014-07-09 12:00:15.000000000 +0200
@@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci
 	bool can_switch;
 
 	spin_lock(&dev->count_lock);
-	can_switch = (dev->open_count == 0);
+	can_switch = (local_read(&dev->open_count) == 0);
 	spin_unlock(&dev->count_lock);
 	return can_switch;
 }
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_cmd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_cmd.c
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_cmd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_cmd.c	2014-07-09 12:00:15.000000000 +0200
@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct q
 	int ret;
 
 	mutex_lock(&qdev->async_io_mutex);
-	irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
 	if (qdev->last_sent_io_cmd > irq_num) {
 		if (intr)
 			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
-							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+							       atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 		else
 			ret = wait_event_timeout(qdev->io_cmd_event,
-						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+						 atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 		/* 0 is timeout, just bail the "hw" has gone away */
 		if (ret <= 0)
 			goto out;
-		irq_num = atomic_read(&qdev->irq_received_io_cmd);
+		irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
 	}
 	outb(val, addr);
 	qdev->last_sent_io_cmd = irq_num + 1;
 	if (intr)
 		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
-						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+						       atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 	else
 		ret = wait_event_timeout(qdev->io_cmd_event,
-					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+					 atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
 	if (ret > 0)
 		ret = 0;
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_debugfs.c
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_debugfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct qxl_device *qdev = node->minor->dev->dev_private;
 
-	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
-	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
-	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
-	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+	seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
+	seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
+	seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
+	seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
 	seq_printf(m, "%d\n", qdev->irq_received_error);
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_drv.h
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -290,10 +290,10 @@ struct qxl_device {
 	unsigned int last_sent_io_cmd;
 
 	/* interrupt handling */
-	atomic_t irq_received;
-	atomic_t irq_received_display;
-	atomic_t irq_received_cursor;
-	atomic_t irq_received_io_cmd;
+	atomic_unchecked_t irq_received;
+	atomic_unchecked_t irq_received_display;
+	atomic_unchecked_t irq_received_cursor;
+	atomic_unchecked_t irq_received_io_cmd;
 	unsigned irq_received_error;
 	wait_queue_head_t display_event;
 	wait_queue_head_t cursor_event;
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_ioctl.c
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -181,7 +181,7 @@ static int qxl_process_single_command(st
 
 	/* TODO copy slow path code from i915 */
 	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
-	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
+	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
 
 	{
 		struct qxl_drawable *draw = fb_cmd;
@@ -201,7 +201,7 @@ static int qxl_process_single_command(st
 		struct drm_qxl_reloc reloc;
 
 		if (DRM_COPY_FROM_USER(&reloc,
-				       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+				       &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
 				       sizeof(reloc))) {
 			ret = -EFAULT;
 			goto out_free_bos;
@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct d
 		struct drm_qxl_command *commands =
 			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
 
-		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+		if (DRM_COPY_FROM_USER(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
 				       sizeof(user_cmd)))
 			return -EFAULT;
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_irq.c
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS
 
 	pending = xchg(&qdev->ram_header->int_pending, 0);
 
-	atomic_inc(&qdev->irq_received);
+	atomic_inc_unchecked(&qdev->irq_received);
 
 	if (pending & QXL_INTERRUPT_DISPLAY) {
-		atomic_inc(&qdev->irq_received_display);
+		atomic_inc_unchecked(&qdev->irq_received_display);
 		wake_up_all(&qdev->display_event);
 		qxl_queue_garbage_collect(qdev, false);
 	}
 	if (pending & QXL_INTERRUPT_CURSOR) {
-		atomic_inc(&qdev->irq_received_cursor);
+		atomic_inc_unchecked(&qdev->irq_received_cursor);
 		wake_up_all(&qdev->cursor_event);
 	}
 	if (pending & QXL_INTERRUPT_IO_CMD) {
-		atomic_inc(&qdev->irq_received_io_cmd);
+		atomic_inc_unchecked(&qdev->irq_received_io_cmd);
 		wake_up_all(&qdev->io_cmd_event);
 	}
 	if (pending & QXL_INTERRUPT_ERROR) {
@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev
 	init_waitqueue_head(&qdev->io_cmd_event);
 	INIT_WORK(&qdev->client_monitors_config_work,
 		  qxl_client_monitors_config_work_func);
-	atomic_set(&qdev->irq_received, 0);
-	atomic_set(&qdev->irq_received_display, 0);
-	atomic_set(&qdev->irq_received_cursor, 0);
-	atomic_set(&qdev->irq_received_io_cmd, 0);
+	atomic_set_unchecked(&qdev->irq_received, 0);
+	atomic_set_unchecked(&qdev->irq_received_display, 0);
+	atomic_set_unchecked(&qdev->irq_received_cursor, 0);
+	atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
 	qdev->irq_received_error = 0;
 	ret = drm_irq_install(qdev->ddev);
 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
diff -ruNp linux-3.13.11/drivers/gpu/drm/qxl/qxl_ttm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_ttm.c
--- linux-3.13.11/drivers/gpu/drm/qxl/qxl_ttm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/qxl/qxl_ttm.c	2014-07-09 12:00:15.000000000 +0200
@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct q
 	}
 }
 
-static struct vm_operations_struct qxl_ttm_vm_ops;
+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops;
 
 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct v
 		return r;
 	if (unlikely(ttm_vm_ops == NULL)) {
 		ttm_vm_ops = vma->vm_ops;
+		pax_open_kernel();
 		qxl_ttm_vm_ops = *ttm_vm_ops;
 		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+		pax_close_kernel();
 	}
 	vma->vm_ops = &qxl_ttm_vm_ops;
 	return 0;
@@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_
 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
-	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
-	unsigned i;
-
-	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
-		if (i == 0)
-			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
-		else
-			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
-		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
-		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
-		qxl_mem_types_list[i].driver_features = 0;
-		if (i == 0)
-			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
-		else
-			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
+		{
+			.name = "qxl_mem_mm",
+			.show = &qxl_mm_dump_table,
+		},
+		{
+			.name = "qxl_surf_mm",
+			.show = &qxl_mm_dump_table,
+		}
+	};
 
-	}
-	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+	pax_open_kernel();
+	*(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+	*(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+	pax_close_kernel();
+
+	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
 #else
 	return 0;
 #endif
diff -ruNp linux-3.13.11/drivers/gpu/drm/r128/r128_cce.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_cce.c
--- linux-3.13.11/drivers/gpu/drm/r128/r128_cce.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_cce.c	2014-07-09 12:00:15.000000000 +0200
@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
 
 	/* GH: Simple idle check.
 	 */
-	atomic_set(&dev_priv->idle_count, 0);
+	atomic_set_unchecked(&dev_priv->idle_count, 0);
 
 	/* We don't support anything other than bus-mastering ring mode,
 	 * but the ring can be in either AGP or PCI space for the ring
diff -ruNp linux-3.13.11/drivers/gpu/drm/r128/r128_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_drv.h
--- linux-3.13.11/drivers/gpu/drm/r128/r128_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
 	int is_pci;
 	unsigned long cce_buffers_offset;
 
-	atomic_t idle_count;
+	atomic_unchecked_t idle_count;
 
 	int page_flipping;
 	int current_page;
 	u32 crtc_offset;
 	u32 crtc_offset_cntl;
 
-	atomic_t vbl_received;
+	atomic_unchecked_t vbl_received;
 
 	u32 color_fmt;
 	unsigned int front_offset;
diff -ruNp linux-3.13.11/drivers/gpu/drm/r128/r128_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/r128/r128_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct f
 	return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
 }
 
-drm_ioctl_compat_t *r128_compat_ioctls[] = {
+drm_ioctl_compat_t r128_compat_ioctls[] = {
 	[DRM_R128_INIT] = compat_r128_init,
 	[DRM_R128_DEPTH] = compat_r128_depth,
 	[DRM_R128_STIPPLE] = compat_r128_stipple,
@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[]
 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
-		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-	if (fn != NULL)
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
+		drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
 		ret = (*fn) (filp, cmd, arg);
-	else
+	} else
 		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
diff -ruNp linux-3.13.11/drivers/gpu/drm/r128/r128_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_irq.c
--- linux-3.13.11/drivers/gpu/drm/r128/r128_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_d
 	if (crtc != 0)
 		return 0;
 
-	return atomic_read(&dev_priv->vbl_received);
+	return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
 	/* VBLANK interrupt */
 	if (status & R128_CRTC_VBLANK_INT) {
 		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-		atomic_inc(&dev_priv->vbl_received);
+		atomic_inc_unchecked(&dev_priv->vbl_received);
 		drm_handle_vblank(dev, 0);
 		return IRQ_HANDLED;
 	}
diff -ruNp linux-3.13.11/drivers/gpu/drm/r128/r128_state.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_state.c
--- linux-3.13.11/drivers/gpu/drm/r128/r128_state.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/r128/r128_state.c	2014-07-09 12:00:15.000000000 +0200
@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_priv
 
 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
 {
-	if (atomic_read(&dev_priv->idle_count) == 0)
+	if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
 		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
 	else
-		atomic_set(&dev_priv->idle_count, 0);
+		atomic_set_unchecked(&dev_priv->idle_count, 0);
 }
 
 #endif
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/mkregtable.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/mkregtable.c
--- linux-3.13.11/drivers/gpu/drm/radeon/mkregtable.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/mkregtable.c	2014-07-09 12:00:15.000000000 +0200
@@ -624,14 +624,14 @@ static int parser_auth(struct table *t,
 	regex_t mask_rex;
 	regmatch_t match[4];
 	char buf[1024];
-	size_t end;
+	long end;
 	int len;
 	int done = 0;
 	int r;
 	unsigned o;
 	struct offset *offset;
 	char last_reg_s[10];
-	int last_reg;
+	unsigned long last_reg;
 
 	if (regcomp
 	    (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_device.c
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_device.c	2014-07-09 12:00:15.000000000 +0200
@@ -1120,7 +1120,7 @@ static bool radeon_switcheroo_can_switch
 	bool can_switch;
 
 	spin_lock(&dev->count_lock);
-	can_switch = (dev->open_count == 0);
+	can_switch = (local_read(&dev->open_count) == 0);
 	spin_unlock(&dev->count_lock);
 	return can_switch;
 }
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_drv.h
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
 
 	/* SW interrupt */
 	wait_queue_head_t swi_queue;
-	atomic_t swi_emitted;
+	atomic_unchecked_t swi_emitted;
 	int vblank_crtc;
 	uint32_t irq_enable_reg;
 	uint32_t r500_disp_irq_reg;
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_ioc32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_ioc32.c
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_ioc32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_ioc32.c	2014-07-09 12:00:15.000000000 +0200
@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(str
 	request = compat_alloc_user_space(sizeof(*request));
 	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
 	    || __put_user(req32.param, &request->param)
-	    || __put_user((void __user *)(unsigned long)req32.value,
+	    || __put_user((unsigned long)req32.value,
 			  &request->value))
 		return -EFAULT;
 
@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
 #define compat_radeon_cp_setparam NULL
 #endif /* X86_64 || IA64 */
 
-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
 	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
 	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
 	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat
 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
-	drm_ioctl_compat_t *fn = NULL;
 	int ret;
 
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
-		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
-
-	if (fn != NULL)
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
+		drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
 		ret = (*fn) (filp, cmd, arg);
-	else
+	} else
 		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_irq.c
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_de
 	unsigned int ret;
 	RING_LOCALS;
 
-	atomic_inc(&dev_priv->swi_emitted);
-	ret = atomic_read(&dev_priv->swi_emitted);
+	atomic_inc_unchecked(&dev_priv->swi_emitted);
+	ret = atomic_read_unchecked(&dev_priv->swi_emitted);
 
 	BEGIN_RING(4);
 	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 
-	atomic_set(&dev_priv->swi_emitted, 0);
+	atomic_set_unchecked(&dev_priv->swi_emitted, 0);
 	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 
 	dev->max_vblank_count = 0x001fffff;
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_state.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_state.c
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_state.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_state.c	2014-07-09 12:00:15.000000000 +0200
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
 	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
 		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
-	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
 			       sarea_priv->nbox * sizeof(depth_boxes[0])))
 		return -EFAULT;
 
@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	drm_radeon_getparam_t *param = data;
-	int value;
+	int value = 0;
 
 	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/radeon/radeon_ttm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_ttm.c
--- linux-3.13.11/drivers/gpu/drm/radeon/radeon_ttm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/radeon/radeon_ttm.c	2014-07-09 12:00:15.000000000 +0200
@@ -787,7 +787,7 @@ void radeon_ttm_set_active_vram_size(str
 	man->size = size >> PAGE_SHIFT;
 }
 
-static struct vm_operations_struct radeon_ttm_vm_ops;
+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -828,8 +828,10 @@ int radeon_mmap(struct file *filp, struc
 	}
 	if (unlikely(ttm_vm_ops == NULL)) {
 		ttm_vm_ops = vma->vm_ops;
+		pax_open_kernel();
 		radeon_ttm_vm_ops = *ttm_vm_ops;
 		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+		pax_close_kernel();
 	}
 	vma->vm_ops = &radeon_ttm_vm_ops;
 	return 0;
@@ -858,38 +860,33 @@ static int radeon_mm_dump_table(struct s
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
-	unsigned i;
-
-	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
-		if (i == 0)
-			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
-		else
-			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
-		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
-		radeon_mem_types_list[i].driver_features = 0;
-		if (i == 0)
-			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
-		else
-			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
-
-	}
-	/* Add ttm page pool to debugfs */
-	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
-	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
-	radeon_mem_types_list[i].driver_features = 0;
-	radeon_mem_types_list[i++].data = NULL;
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
+		{
+			.name = "radeon_vram_mm",
+			.show = &radeon_mm_dump_table,
+		},
+		{
+			.name = "radeon_gtt_mm",
+			.show = &radeon_mm_dump_table,
+		},
+		{
+			.name = "ttm_page_pool",
+			.show = &ttm_page_alloc_debugfs,
+		},
+		{
+			.name = "ttm_dma_page_pool",
+			.show = &ttm_dma_page_alloc_debugfs,
+		},
+	};
+	unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
+
+	pax_open_kernel();
+	*(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+	*(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+	pax_close_kernel();
 #ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) {
-		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
-		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
-		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
-		radeon_mem_types_list[i].driver_features = 0;
-		radeon_mem_types_list[i++].data = NULL;
-	}
+	if (swiotlb_nr_tbl())
+		i++;
 #endif
 	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
 
diff -ruNp linux-3.13.11/drivers/gpu/drm/tegra/dc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/tegra/dc.c
--- linux-3.13.11/drivers/gpu/drm/tegra/dc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/tegra/dc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1064,7 +1064,7 @@ static int tegra_dc_debugfs_init(struct
 	}
 
 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
-		dc->debugfs_files[i].data = dc;
+		*(void **)&dc->debugfs_files[i].data = dc;
 
 	err = drm_debugfs_create_files(dc->debugfs_files,
 				       ARRAY_SIZE(debugfs_files),
diff -ruNp linux-3.13.11/drivers/gpu/drm/tegra/hdmi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/tegra/hdmi.c
--- linux-3.13.11/drivers/gpu/drm/tegra/hdmi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/tegra/hdmi.c	2014-07-09 12:00:15.000000000 +0200
@@ -57,7 +57,7 @@ struct tegra_hdmi {
 	bool stereo;
 	bool dvi;
 
-	struct drm_info_list *debugfs_files;
+	drm_info_list_no_const *debugfs_files;
 	struct drm_minor *minor;
 	struct dentry *debugfs;
 };
diff -ruNp linux-3.13.11/drivers/gpu/drm/ttm/ttm_bo_manager.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_bo_manager.c
--- linux-3.13.11/drivers/gpu/drm/ttm/ttm_bo_manager.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_bo_manager.c	2014-07-09 12:00:15.000000000 +0200
@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
-	ttm_bo_man_init,
-	ttm_bo_man_takedown,
-	ttm_bo_man_get_node,
-	ttm_bo_man_put_node,
-	ttm_bo_man_debug
+	.init = ttm_bo_man_init,
+	.takedown = ttm_bo_man_takedown,
+	.get_node = ttm_bo_man_get_node,
+	.put_node = ttm_bo_man_put_node,
+	.debug = ttm_bo_man_debug
 };
 EXPORT_SYMBOL(ttm_bo_manager_func);
diff -ruNp linux-3.13.11/drivers/gpu/drm/ttm/ttm_memory.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_memory.c
--- linux-3.13.11/drivers/gpu/drm/ttm/ttm_memory.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_memory.c	2014-07-09 12:00:15.000000000 +0200
@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(stru
 	zone->glob = glob;
 	glob->zone_kernel = zone;
 	ret = kobject_init_and_add(
-		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struc
 	zone->glob = glob;
 	glob->zone_dma32 = zone;
 	ret = kobject_init_and_add(
-		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
 	if (unlikely(ret != 0)) {
 		kobject_put(&zone->kobj);
 		return ret;
diff -ruNp linux-3.13.11/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_page_alloc.c
--- linux-3.13.11/drivers/gpu/drm/ttm/ttm_page_alloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/ttm/ttm_page_alloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -391,9 +391,9 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
diff -ruNp linux-3.13.11/drivers/gpu/drm/udl/udl_fb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/udl/udl_fb.c
--- linux-3.13.11/drivers/gpu/drm/udl/udl_fb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/udl/udl_fb.c	2014-07-09 12:00:15.000000000 +0200
@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = udl_fb_mmap;
 	}
 
 	pr_warn("released /dev/fb%d user=%d count=%d\n",
diff -ruNp linux-3.13.11/drivers/gpu/drm/via/via_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/via/via_drv.h
--- linux-3.13.11/drivers/gpu/drm/via/via_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/via/via_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
 typedef uint32_t maskarray_t[5];
 
 typedef struct drm_via_irq {
-	atomic_t irq_received;
+	atomic_unchecked_t irq_received;
 	uint32_t pending_mask;
 	uint32_t enable_mask;
 	wait_queue_head_t irq_queue;
@@ -75,7 +75,7 @@ typedef struct drm_via_private {
 	struct timeval last_vblank;
 	int last_vblank_valid;
 	unsigned usec_per_vblank;
-	atomic_t vbl_received;
+	atomic_unchecked_t vbl_received;
 	drm_via_state_t hc_state;
 	char pci_buf[VIA_PCI_BUF_SIZE];
 	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
diff -ruNp linux-3.13.11/drivers/gpu/drm/via/via_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/via/via_irq.c
--- linux-3.13.11/drivers/gpu/drm/via/via_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/via/via_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_de
 	if (crtc != 0)
 		return 0;
 
-	return atomic_read(&dev_priv->vbl_received);
+	return atomic_read_unchecked(&dev_priv->vbl_received);
 }
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
 
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	if (status & VIA_IRQ_VBLANK_PENDING) {
-		atomic_inc(&dev_priv->vbl_received);
-		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+		atomic_inc_unchecked(&dev_priv->vbl_received);
+		if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
 			do_gettimeofday(&cur_vblank);
 			if (dev_priv->last_vblank_valid) {
 				dev_priv->usec_per_vblank =
@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
 			dev_priv->last_vblank = cur_vblank;
 			dev_priv->last_vblank_valid = 1;
 		}
-		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+		if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
 				  dev_priv->usec_per_vblank);
 		}
@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
 
 	for (i = 0; i < dev_priv->num_irqs; ++i) {
 		if (status & cur_irq->pending_mask) {
-			atomic_inc(&cur_irq->irq_received);
+			atomic_inc_unchecked(&cur_irq->irq_received);
 			DRM_WAKEUP(&cur_irq->irq_queue);
 			handled = 1;
 			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *d
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
 			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 			     masks[irq][4]));
-		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+		cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
 	} else {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
 			    (((cur_irq_sequence =
-			       atomic_read(&cur_irq->irq_received)) -
+			       atomic_read_unchecked(&cur_irq->irq_received)) -
 			      *sequence) <= (1 << 23)));
 	}
 	*sequence = cur_irq_sequence;
@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct dr
 		}
 
 		for (i = 0; i < dev_priv->num_irqs; ++i) {
-			atomic_set(&cur_irq->irq_received, 0);
+			atomic_set_unchecked(&cur_irq->irq_received, 0);
 			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
 			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
 			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev,
 	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
 	case VIA_IRQ_RELATIVE:
 		irqwait->request.sequence +=
-			atomic_read(&cur_irq->irq_received);
+			atomic_read_unchecked(&cur_irq->irq_received);
 		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
 	case VIA_IRQ_ABSOLUTE:
 		break;
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	2014-07-09 12:00:15.000000000 +0200
@@ -341,7 +341,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	atomic_t marker_seq;
+	atomic_unchecked_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	int fence_queue_waiters; /* Protected by hw_mutex */
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	2014-07-09 12:00:15.000000000 +0200
@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
 	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_privat
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
 			} else {
 				need_bounce = true;
 			}
@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_priva
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		*seqno = atomic_read(&dev_priv->marker_seq);
+		*seqno = atomic_read_unchecked(&dev_priv->marker_seq);
 		ret = -ENOMEM;
 		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
 					false, 3*HZ);
@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_priva
 	}
 
 	do {
-		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+		*seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
 	} while (*seqno == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c	2014-07-09 12:00:15.000000000 +0200
@@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct t
 }
 
 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
-	vmw_gmrid_man_init,
-	vmw_gmrid_man_takedown,
-	vmw_gmrid_man_get_node,
-	vmw_gmrid_man_put_node,
-	vmw_gmrid_man_debug
+	.init = vmw_gmrid_man_init,
+	.takedown = vmw_gmrid_man_takedown,
+	.get_node = vmw_gmrid_man_get_node,
+	.put_node = vmw_gmrid_man_put_node,
+	.debug = vmw_gmrid_man_debug
 };
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -141,7 +141,7 @@ int vmw_present_ioctl(struct drm_device
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -225,7 +225,7 @@ int vmw_present_readback_ioctl(struct dr
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
+	ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
 	       > VMW_FENCE_WRAP);
 
 	return ret;
@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = atomic_read(&dev_priv->marker_seq);
+	signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
 	ret = 0;
 
 	for (;;) {
diff -ruNp linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
--- linux-3.13.11/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c	2014-07-09 12:00:15.000000000 +0200
@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
 	while (!vmw_lag_lt(queue, us)) {
 		spin_lock(&queue->lock);
 		if (list_empty(&queue->head))
-			seqno = atomic_read(&dev_priv->marker_seq);
+			seqno = atomic_read_unchecked(&dev_priv->marker_seq);
 		else {
 			marker = list_first_entry(&queue->head,
 						 struct vmw_marker, head);
diff -ruNp linux-3.13.11/drivers/gpu/vga/vga_switcheroo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/vga/vga_switcheroo.c
--- linux-3.13.11/drivers/gpu/vga/vga_switcheroo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/gpu/vga/vga_switcheroo.c	2014-07-09 12:00:15.000000000 +0200
@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume
 
 /* this version is for the case where the power switch is separate
    to the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume
 	return ret;
 }
 
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
diff -ruNp linux-3.13.11/drivers/hid/hid-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/hid-core.c
--- linux-3.13.11/drivers/hid/hid-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/hid-core.c	2014-07-09 12:00:15.000000000 +0200
@@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
 
 int hid_add_device(struct hid_device *hdev)
 {
-	static atomic_t id = ATOMIC_INIT(0);
+	static atomic_unchecked_t id = ATOMIC_INIT(0);
 	int ret;
 
 	if (WARN_ON(hdev->status & HID_STAT_ADDED))
@@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hd
 	/* XXX hack, any other cleaner solution after the driver core
 	 * is converted to allow more than 20 bytes as the device name? */
 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
-		     hdev->vendor, hdev->product, atomic_inc_return(&id));
+		     hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
 
 	hid_debug_register(hdev, dev_name(&hdev->dev));
 	ret = device_add(&hdev->dev);
diff -ruNp linux-3.13.11/drivers/hid/hid-wiimote-debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/hid-wiimote-debug.c
--- linux-3.13.11/drivers/hid/hid-wiimote-debug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/hid-wiimote-debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(stru
 	else if (size == 0)
 		return -EIO;
 
-	if (copy_to_user(u, buf, size))
+	if (size > sizeof(buf) || copy_to_user(u, buf, size))
 		return -EFAULT;
 
 	*off += size;
diff -ruNp linux-3.13.11/drivers/hid/uhid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/uhid.c
--- linux-3.13.11/drivers/hid/uhid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hid/uhid.c	2014-07-09 12:00:15.000000000 +0200
@@ -47,7 +47,7 @@ struct uhid_device {
 	struct mutex report_lock;
 	wait_queue_head_t report_wait;
 	atomic_t report_done;
-	atomic_t report_id;
+	atomic_unchecked_t report_id;
 	struct uhid_event report_buf;
 };
 
@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_d
 
 	spin_lock_irqsave(&uhid->qlock, flags);
 	ev->type = UHID_FEATURE;
-	ev->u.feature.id = atomic_inc_return(&uhid->report_id);
+	ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
 	ev->u.feature.rnum = rnum;
 	ev->u.feature.rtype = report_type;
 
@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struc
 	spin_lock_irqsave(&uhid->qlock, flags);
 
 	/* id for old report; drop it silently */
-	if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
+	if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
 		goto unlock;
 	if (atomic_read(&uhid->report_done))
 		goto unlock;
diff -ruNp linux-3.13.11/drivers/hv/channel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/channel.c
--- linux-3.13.11/drivers/hv/channel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/channel.c	2014-07-09 12:00:15.000000000 +0200
@@ -362,8 +362,8 @@ int vmbus_establish_gpadl(struct vmbus_c
 	int ret = 0;
 	int t;
 
-	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
-	atomic_inc(&vmbus_connection.next_gpadl_handle);
+	next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
+	atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
 
 	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
 	if (ret)
diff -ruNp linux-3.13.11/drivers/hv/hv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hv.c
--- linux-3.13.11/drivers/hv/hv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hv.c	2014-07-09 12:00:15.000000000 +0200
@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, voi
 	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	u32 output_address_hi = output_address >> 32;
 	u32 output_address_lo = output_address & 0xFFFFFFFF;
-	void *hypercall_page = hv_context.hypercall_page;
+	void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
 
 	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
@@ -154,7 +154,7 @@ int hv_init(void)
 	/* See if the hypercall page is already set */
 	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
 
 	if (!virtaddr)
 		goto cleanup;
diff -ruNp linux-3.13.11/drivers/hv/hv_balloon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hv_balloon.c
--- linux-3.13.11/drivers/hv/hv_balloon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hv_balloon.c	2014-07-09 12:00:15.000000000 +0200
@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attemp
 
 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
-static atomic_t trans_id = ATOMIC_INIT(0);
+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
 
 static int dm_ring_size = (5 * PAGE_SIZE);
 
@@ -886,7 +886,7 @@ static void hot_add_req(struct work_stru
 		pr_info("Memory hot add failed\n");
 
 	dm->state = DM_INITIALIZED;
-	resp.hdr.trans_id = atomic_inc_return(&trans_id);
+	resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 	vmbus_sendpacket(dm->dev->channel, &resp,
 			sizeof(struct dm_hot_add_response),
 			(unsigned long)NULL,
@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem
 	memset(&status, 0, sizeof(struct dm_status));
 	status.hdr.type = DM_STATUS_REPORT;
 	status.hdr.size = sizeof(struct dm_status);
-	status.hdr.trans_id = atomic_inc_return(&trans_id);
+	status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 
 	/*
 	 * The host expects the guest to report free memory.
@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem
 	 * send the status. This can happen if we were interrupted
 	 * after we picked our transaction ID.
 	 */
-	if (status.hdr.trans_id != atomic_read(&trans_id))
+	if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
 		return;
 
 	vmbus_sendpacket(dm->dev->channel, &status,
@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struc
 		 */
 
 		do {
-			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
+			bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 			ret = vmbus_sendpacket(dm_device.dev->channel,
 						bl_resp,
 						bl_resp->hdr.size,
@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynme
 
 	memset(&resp, 0, sizeof(struct dm_unballoon_response));
 	resp.hdr.type = DM_UNBALLOON_RESPONSE;
-	resp.hdr.trans_id = atomic_inc_return(&trans_id);
+	resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 	resp.hdr.size = sizeof(struct dm_unballoon_response);
 
 	vmbus_sendpacket(dm_device.dev->channel, &resp,
@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynme
 	memset(&version_req, 0, sizeof(struct dm_version_request));
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
-	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+	version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
 	version_req.is_last_attempt = 1;
 
@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_devic
 	memset(&version_req, 0, sizeof(struct dm_version_request));
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
-	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
+	version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
 	version_req.is_last_attempt = 0;
 
@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_devic
 	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
 	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
 	cap_msg.hdr.size = sizeof(struct dm_capabilities);
-	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+	cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
 
 	cap_msg.caps.cap_bits.balloon = 1;
 	cap_msg.caps.cap_bits.hot_add = 1;
diff -ruNp linux-3.13.11/drivers/hv/hyperv_vmbus.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hyperv_vmbus.h
--- linux-3.13.11/drivers/hv/hyperv_vmbus.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/hyperv_vmbus.h	2014-07-09 12:00:15.000000000 +0200
@@ -602,7 +602,7 @@ enum vmbus_connect_state {
 struct vmbus_connection {
 	enum vmbus_connect_state conn_state;
 
-	atomic_t next_gpadl_handle;
+	atomic_unchecked_t next_gpadl_handle;
 
 	/*
 	 * Represents channel interrupts. Each bit position represents a
diff -ruNp linux-3.13.11/drivers/hv/vmbus_drv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/vmbus_drv.c
--- linux-3.13.11/drivers/hv/vmbus_drv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hv/vmbus_drv.c	2014-07-09 12:00:15.000000000 +0200
@@ -846,10 +846,10 @@ int vmbus_device_register(struct hv_devi
 {
 	int ret = 0;
 
-	static atomic_t device_num = ATOMIC_INIT(0);
+	static atomic_unchecked_t device_num = ATOMIC_INIT(0);
 
 	dev_set_name(&child_device_obj->device, "vmbus_0_%d",
-		     atomic_inc_return(&device_num));
+		     atomic_inc_return_unchecked(&device_num));
 
 	child_device_obj->device.bus = &hv_bus;
 	child_device_obj->device.parent = &hv_acpi_dev->dev;
diff -ruNp linux-3.13.11/drivers/hwmon/acpi_power_meter.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/acpi_power_meter.c
--- linux-3.13.11/drivers/hwmon/acpi_power_meter.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/acpi_power_meter.c	2014-07-09 12:00:15.000000000 +0200
@@ -117,7 +117,7 @@ struct sensor_template {
 		       struct device_attribute *devattr,
 		       const char *buf, size_t count);
 	int index;
-};
+} __do_const;
 
 /* Averaging interval */
 static int update_avg_interval(struct acpi_power_meter_resource *resource)
@@ -632,7 +632,7 @@ static int register_attrs(struct acpi_po
 			  struct sensor_template *attrs)
 {
 	struct device *dev = &resource->acpi_dev->dev;
-	struct sensor_device_attribute *sensors =
+	sensor_device_attribute_no_const *sensors =
 		&resource->sensors[resource->num_sensors];
 	int res = 0;
 
diff -ruNp linux-3.13.11/drivers/hwmon/applesmc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/applesmc.c
--- linux-3.13.11/drivers/hwmon/applesmc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/applesmc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct
 {
 	struct applesmc_node_group *grp;
 	struct applesmc_dev_attr *node;
-	struct attribute *attr;
+	attribute_no_const *attr;
 	int ret, i;
 
 	for (grp = groups; grp->format; grp++) {
diff -ruNp linux-3.13.11/drivers/hwmon/asus_atk0110.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/asus_atk0110.c
--- linux-3.13.11/drivers/hwmon/asus_atk0110.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/asus_atk0110.c	2014-07-09 12:00:15.000000000 +0200
@@ -151,10 +151,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
 struct atk_sensor_data {
 	struct list_head list;
 	struct atk_data *data;
-	struct device_attribute label_attr;
-	struct device_attribute input_attr;
-	struct device_attribute limit1_attr;
-	struct device_attribute limit2_attr;
+	device_attribute_no_const label_attr;
+	device_attribute_no_const input_attr;
+	device_attribute_no_const limit1_attr;
+	device_attribute_no_const limit2_attr;
 	char label_attr_name[ATTR_NAME_SIZE];
 	char input_attr_name[ATTR_NAME_SIZE];
 	char limit1_attr_name[ATTR_NAME_SIZE];
@@ -274,7 +274,7 @@ static ssize_t atk_name_show(struct devi
 static struct device_attribute atk_name_attr =
 		__ATTR(name, 0444, atk_name_show, NULL);
 
-static void atk_init_attribute(struct device_attribute *attr, char *name,
+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
 		sysfs_show_func show)
 {
 	sysfs_attr_init(&attr->attr);
diff -ruNp linux-3.13.11/drivers/hwmon/coretemp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/coretemp.c
--- linux-3.13.11/drivers/hwmon/coretemp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/coretemp.c	2014-07-09 12:00:15.000000000 +0200
@@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct
 	return NOTIFY_OK;
 }
 
-static struct notifier_block coretemp_cpu_notifier __refdata = {
+static struct notifier_block coretemp_cpu_notifier = {
 	.notifier_call = coretemp_cpu_callback,
 };
 
diff -ruNp linux-3.13.11/drivers/hwmon/ibmaem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/ibmaem.c
--- linux-3.13.11/drivers/hwmon/ibmaem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/ibmaem.c	2014-07-09 12:00:15.000000000 +0200
@@ -926,7 +926,7 @@ static int aem_register_sensors(struct a
 				struct aem_rw_sensor_template *rw)
 {
 	struct device *dev = &data->pdev->dev;
-	struct sensor_device_attribute *sensors = data->sensors;
+	sensor_device_attribute_no_const *sensors = data->sensors;
 	int err;
 
 	/* Set up read-only sensors */
diff -ruNp linux-3.13.11/drivers/hwmon/iio_hwmon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/iio_hwmon.c
--- linux-3.13.11/drivers/hwmon/iio_hwmon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/iio_hwmon.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platfo
 {
 	struct device *dev = &pdev->dev;
 	struct iio_hwmon_state *st;
-	struct sensor_device_attribute *a;
+	sensor_device_attribute_no_const *a;
 	int ret, i;
 	int in_i = 1, temp_i = 1, curr_i = 1;
 	enum iio_chan_type type;
diff -ruNp linux-3.13.11/drivers/hwmon/nct6775.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/nct6775.c
--- linux-3.13.11/drivers/hwmon/nct6775.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/nct6775.c	2014-07-09 12:00:15.000000000 +0200
@@ -944,10 +944,10 @@ static struct attribute_group *
 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
 			  int repeat)
 {
-	struct attribute_group *group;
+	attribute_group_no_const *group;
 	struct sensor_device_attr_u *su;
-	struct sensor_device_attribute *a;
-	struct sensor_device_attribute_2 *a2;
+	sensor_device_attribute_no_const *a;
+	sensor_device_attribute_2_no_const *a2;
 	struct attribute **attrs;
 	struct sensor_device_template **t;
 	int i, count;
diff -ruNp linux-3.13.11/drivers/hwmon/pmbus/pmbus_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/pmbus/pmbus_core.c
--- linux-3.13.11/drivers/hwmon/pmbus/pmbus_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/pmbus/pmbus_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -782,7 +782,7 @@ static int pmbus_add_attribute(struct pm
 	return 0;
 }
 
-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
 				const char *name,
 				umode_t mode,
 				ssize_t (*show)(struct device *dev,
@@ -799,7 +799,7 @@ static void pmbus_dev_attr_init(struct d
 	dev_attr->store = store;
 }
 
-static void pmbus_attr_init(struct sensor_device_attribute *a,
+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
 			    const char *name,
 			    umode_t mode,
 			    ssize_t (*show)(struct device *dev,
@@ -821,7 +821,7 @@ static int pmbus_add_boolean(struct pmbu
 			     u16 reg, u8 mask)
 {
 	struct pmbus_boolean *boolean;
-	struct sensor_device_attribute *a;
+	sensor_device_attribute_no_const *a;
 
 	boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
 	if (!boolean)
@@ -846,7 +846,7 @@ static struct pmbus_sensor *pmbus_add_se
 					     bool update, bool readonly)
 {
 	struct pmbus_sensor *sensor;
-	struct device_attribute *a;
+	device_attribute_no_const *a;
 
 	sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
 	if (!sensor)
@@ -877,7 +877,7 @@ static int pmbus_add_label(struct pmbus_
 			   const char *lstring, int index)
 {
 	struct pmbus_label *label;
-	struct device_attribute *a;
+	device_attribute_no_const *a;
 
 	label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
 	if (!label)
diff -ruNp linux-3.13.11/drivers/hwmon/sht15.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/sht15.c
--- linux-3.13.11/drivers/hwmon/sht15.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/sht15.c	2014-07-09 12:00:15.000000000 +0200
@@ -169,7 +169,7 @@ struct sht15_data {
 	int				supply_uv;
 	bool				supply_uv_valid;
 	struct work_struct		update_supply_work;
-	atomic_t			interrupt_handled;
+	atomic_unchecked_t		interrupt_handled;
 };
 
 /**
@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht1
 	ret = gpio_direction_input(data->pdata->gpio_data);
 	if (ret)
 		return ret;
-	atomic_set(&data->interrupt_handled, 0);
+	atomic_set_unchecked(&data->interrupt_handled, 0);
 
 	enable_irq(gpio_to_irq(data->pdata->gpio_data));
 	if (gpio_get_value(data->pdata->gpio_data) == 0) {
 		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
 		/* Only relevant if the interrupt hasn't occurred. */
-		if (!atomic_read(&data->interrupt_handled))
+		if (!atomic_read_unchecked(&data->interrupt_handled))
 			schedule_work(&data->read_work);
 	}
 	ret = wait_event_timeout(data->wait_queue,
@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired
 
 	/* First disable the interrupt */
 	disable_irq_nosync(irq);
-	atomic_inc(&data->interrupt_handled);
+	atomic_inc_unchecked(&data->interrupt_handled);
 	/* Then schedule a reading work struct */
 	if (data->state != SHT15_READING_NOTHING)
 		schedule_work(&data->read_work);
@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct wo
 		 * If not, then start the interrupt again - care here as could
 		 * have gone low in meantime so verify it hasn't!
 		 */
-		atomic_set(&data->interrupt_handled, 0);
+		atomic_set_unchecked(&data->interrupt_handled, 0);
 		enable_irq(gpio_to_irq(data->pdata->gpio_data));
 		/* If still not occurred or another handler was scheduled */
 		if (gpio_get_value(data->pdata->gpio_data)
-		    || atomic_read(&data->interrupt_handled))
+		    || atomic_read_unchecked(&data->interrupt_handled))
 			return;
 	}
 
diff -ruNp linux-3.13.11/drivers/hwmon/via-cputemp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/via-cputemp.c
--- linux-3.13.11/drivers/hwmon/via-cputemp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/hwmon/via-cputemp.c	2014-07-09 12:00:15.000000000 +0200
@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(stru
 	return NOTIFY_OK;
 }
 
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
+static struct notifier_block via_cputemp_cpu_notifier = {
 	.notifier_call = via_cputemp_cpu_callback,
 };
 
diff -ruNp linux-3.13.11/drivers/i2c/busses/i2c-amd756-s4882.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-amd756-s4882.c
--- linux-3.13.11/drivers/i2c/busses/i2c-amd756-s4882.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-amd756-s4882.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@
 extern struct i2c_adapter amd756_smbus;
 
 static struct i2c_adapter *s4882_adapter;
-static struct i2c_algorithm *s4882_algo;
+static i2c_algorithm_no_const *s4882_algo;
 
 /* Wrapper access functions for multiplexed SMBus */
 static DEFINE_MUTEX(amd756_lock);
diff -ruNp linux-3.13.11/drivers/i2c/busses/i2c-diolan-u2c.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-diolan-u2c.c
--- linux-3.13.11/drivers/i2c/busses/i2c-diolan-u2c.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-diolan-u2c.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock f
 /* usb layer */
 
 /* Send command to device, and get response. */
-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
 {
 	int ret = 0;
 	int actual;
diff -ruNp linux-3.13.11/drivers/i2c/busses/i2c-nforce2-s4985.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-nforce2-s4985.c
--- linux-3.13.11/drivers/i2c/busses/i2c-nforce2-s4985.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/busses/i2c-nforce2-s4985.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,7 +41,7 @@
 extern struct i2c_adapter *nforce2_smbus;
 
 static struct i2c_adapter *s4985_adapter;
-static struct i2c_algorithm *s4985_algo;
+static i2c_algorithm_no_const *s4985_algo;
 
 /* Wrapper access functions for multiplexed SMBus */
 static DEFINE_MUTEX(nforce2_lock);
diff -ruNp linux-3.13.11/drivers/i2c/i2c-dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/i2c-dev.c
--- linux-3.13.11/drivers/i2c/i2c-dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/i2c/i2c-dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(st
 			break;
 		}
 
-		data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
+		data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
 		rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
 		if (IS_ERR(rdwr_pa[i].buf)) {
 			res = PTR_ERR(rdwr_pa[i].buf);
diff -ruNp linux-3.13.11/drivers/ide/ide-cd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ide/ide-cd.c
--- linux-3.13.11/drivers/ide/ide-cd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/ide/ide-cd.c	2014-07-09 12:00:15.000000000 +0200
@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_
 		alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 		if ((unsigned long)buf & alignment
 		    || blk_rq_bytes(rq) & q->dma_pad_mask
-		    || object_is_on_stack(buf))
+		    || object_starts_on_stack(buf))
 			drive->dma = 0;
 	}
 }
diff -ruNp linux-3.13.11/drivers/iio/industrialio-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iio/industrialio-core.c
--- linux-3.13.11/drivers/iio/industrialio-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iio/industrialio-core.c	2014-07-09 12:00:15.000000000 +0200
@@ -521,7 +521,7 @@ static ssize_t iio_write_channel_info(st
 }
 
 static
-int __iio_device_attr_init(struct device_attribute *dev_attr,
+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
 			   const char *postfix,
 			   struct iio_chan_spec const *chan,
 			   ssize_t (*readfunc)(struct device *dev,
diff -ruNp linux-3.13.11/drivers/infiniband/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/Kconfig
--- linux-3.13.11/drivers/infiniband/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -39,7 +39,7 @@ config INFINIBAND_USER_MEM
 config INFINIBAND_ADDR_TRANS
 	bool
 	depends on INET
-	depends on !(INFINIBAND = y && IPV6 = m)
+	depends on !(INFINIBAND = y && IPV6 = y)
 	default y
 
 source "drivers/infiniband/hw/mthca/Kconfig"
diff -ruNp linux-3.13.11/drivers/infiniband/core/addr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/addr.c
--- linux-3.13.11/drivers/infiniband/core/addr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/addr.c	2014-07-09 12:00:15.000000000 +0200
@@ -277,7 +277,7 @@ static int addr6_resolve(struct sockaddr
 
 	if (ipv6_addr_any(&fl6.saddr)) {
 		ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
-					 &fl6.daddr, 0, &fl6.saddr);
+					 &fl6.daddr, 0, &fl6.saddr, NULL);
 		if (ret)
 			goto put;
 
diff -ruNp linux-3.13.11/drivers/infiniband/core/cm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/cm.c
--- linux-3.13.11/drivers/infiniband/core/cm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/cm.c	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,7 @@ static char const counter_group_names[CM
 
 struct cm_counter_group {
 	struct kobject obj;
-	atomic_long_t counter[CM_ATTR_COUNT];
+	atomic_long_unchecked_t counter[CM_ATTR_COUNT];
 };
 
 struct cm_counter_attribute {
@@ -1392,7 +1392,7 @@ static void cm_dup_req_handler(struct cm
 	struct ib_mad_send_buf *msg = NULL;
 	int ret;
 
-	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+	atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 			counter[CM_REQ_COUNTER]);
 
 	/* Quick state check to discard duplicate REQs. */
@@ -1776,7 +1776,7 @@ static void cm_dup_rep_handler(struct cm
 	if (!cm_id_priv)
 		return;
 
-	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+	atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 			counter[CM_REP_COUNTER]);
 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
 	if (ret)
@@ -1943,7 +1943,7 @@ static int cm_rtu_handler(struct cm_work
 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
 		spin_unlock_irq(&cm_id_priv->lock);
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_RTU_COUNTER]);
 		goto out;
 	}
@@ -2126,7 +2126,7 @@ static int cm_dreq_handler(struct cm_wor
 	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
 				   dreq_msg->local_comm_id);
 	if (!cm_id_priv) {
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_DREQ_COUNTER]);
 		cm_issue_drep(work->port, work->mad_recv_wc);
 		return -EINVAL;
@@ -2151,7 +2151,7 @@ static int cm_dreq_handler(struct cm_wor
 	case IB_CM_MRA_REP_RCVD:
 		break;
 	case IB_CM_TIMEWAIT:
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_DREQ_COUNTER]);
 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
 			goto unlock;
@@ -2165,7 +2165,7 @@ static int cm_dreq_handler(struct cm_wor
 			cm_free_msg(msg);
 		goto deref;
 	case IB_CM_DREQ_RCVD:
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_DREQ_COUNTER]);
 		goto unlock;
 	default:
@@ -2532,7 +2532,7 @@ static int cm_mra_handler(struct cm_work
 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
 				  cm_id_priv->msg, timeout)) {
 			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-				atomic_long_inc(&work->port->
+				atomic_long_inc_unchecked(&work->port->
 						counter_group[CM_RECV_DUPLICATES].
 						counter[CM_MRA_COUNTER]);
 			goto out;
@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work
 		break;
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_MRA_REP_RCVD:
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_MRA_COUNTER]);
 		/* fall through */
 	default:
@@ -2703,7 +2703,7 @@ static int cm_lap_handler(struct cm_work
 	case IB_CM_LAP_IDLE:
 		break;
 	case IB_CM_MRA_LAP_SENT:
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_LAP_COUNTER]);
 		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
 			goto unlock;
@@ -2719,7 +2719,7 @@ static int cm_lap_handler(struct cm_work
 			cm_free_msg(msg);
 		goto deref;
 	case IB_CM_LAP_RCVD:
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_LAP_COUNTER]);
 		goto unlock;
 	default:
@@ -3003,7 +3003,7 @@ static int cm_sidr_req_handler(struct cm
 	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
 	if (cur_cm_id_priv) {
 		spin_unlock_irq(&cm.lock);
-		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+		atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
 				counter[CM_SIDR_REQ_COUNTER]);
 		goto out; /* Duplicate message. */
 	}
@@ -3215,10 +3215,10 @@ static void cm_send_handler(struct ib_ma
 	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
 		msg->retries = 1;
 
-	atomic_long_add(1 + msg->retries,
+	atomic_long_add_unchecked(1 + msg->retries,
 			&port->counter_group[CM_XMIT].counter[attr_index]);
 	if (msg->retries)
-		atomic_long_add(msg->retries,
+		atomic_long_add_unchecked(msg->retries,
 				&port->counter_group[CM_XMIT_RETRIES].
 				counter[attr_index]);
 
@@ -3428,7 +3428,7 @@ static void cm_recv_handler(struct ib_ma
 	}
 
 	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
-	atomic_long_inc(&port->counter_group[CM_RECV].
+	atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
 			counter[attr_id - CM_ATTR_ID_OFFSET]);
 
 	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
@@ -3633,7 +3633,7 @@ static ssize_t cm_show_counter(struct ko
 	cm_attr = container_of(attr, struct cm_counter_attribute, attr);
 
 	return sprintf(buf, "%ld\n",
-		       atomic_long_read(&group->counter[cm_attr->index]));
+		       atomic_long_read_unchecked(&group->counter[cm_attr->index]));
 }
 
 static const struct sysfs_ops cm_counter_ops = {
diff -ruNp linux-3.13.11/drivers/infiniband/core/fmr_pool.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/fmr_pool.c
--- linux-3.13.11/drivers/infiniband/core/fmr_pool.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/core/fmr_pool.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,8 +98,8 @@ struct ib_fmr_pool {
 
 	struct task_struct       *thread;
 
-	atomic_t                  req_ser;
-	atomic_t                  flush_ser;
+	atomic_unchecked_t        req_ser;
+	atomic_unchecked_t        flush_ser;
 
 	wait_queue_head_t         force_wait;
 };
@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
 	struct ib_fmr_pool *pool = pool_ptr;
 
 	do {
-		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+		if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
 			ib_fmr_batch_release(pool);
 
-			atomic_inc(&pool->flush_ser);
+			atomic_inc_unchecked(&pool->flush_ser);
 			wake_up_interruptible(&pool->force_wait);
 
 			if (pool->flush_function)
@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+		if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
 		    !kthread_should_stop())
 			schedule();
 		__set_current_state(TASK_RUNNING);
@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
 	pool->dirty_watermark = params->dirty_watermark;
 	pool->dirty_len       = 0;
 	spin_lock_init(&pool->pool_lock);
-	atomic_set(&pool->req_ser,   0);
-	atomic_set(&pool->flush_ser, 0);
+	atomic_set_unchecked(&pool->req_ser,   0);
+	atomic_set_unchecked(&pool->flush_ser, 0);
 	init_waitqueue_head(&pool->force_wait);
 
 	pool->thread = kthread_run(ib_fmr_cleanup_thread,
@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
 	}
 	spin_unlock_irq(&pool->pool_lock);
 
-	serial = atomic_inc_return(&pool->req_ser);
+	serial = atomic_inc_return_unchecked(&pool->req_ser);
 	wake_up_process(pool->thread);
 
 	if (wait_event_interruptible(pool->force_wait,
-				     atomic_read(&pool->flush_ser) - serial >= 0))
+				     atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
 		return -EINTR;
 
 	return 0;
@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
 		} else {
 			list_add_tail(&fmr->list, &pool->dirty_list);
 			if (++pool->dirty_len >= pool->dirty_watermark) {
-				atomic_inc(&pool->req_ser);
+				atomic_inc_unchecked(&pool->req_ser);
 				wake_up_process(pool->thread);
 			}
 		}
diff -ruNp linux-3.13.11/drivers/infiniband/hw/cxgb4/mem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/cxgb4/mem.c
--- linux-3.13.11/drivers/infiniband/hw/cxgb4/mem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/cxgb4/mem.c	2014-07-09
12:00:15.000000000 +0200
@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_r
 	int err;
 	struct fw_ri_tpte tpt;
 	u32 stag_idx;
-	static atomic_t key;
+	static atomic_unchecked_t key;
 
 	if (c4iw_fatal_error(rdev))
 		return -EIO;
@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_r
 		if (rdev->stats.stag.cur > rdev->stats.stag.max)
 			rdev->stats.stag.max = rdev->stats.stag.cur;
 		mutex_unlock(&rdev->stats.lock);
-		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+		*stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
 	     __func__, stag_state, type, pdid, stag_idx);
diff -ruNp linux-3.13.11/drivers/infiniband/hw/ipath/ipath_dma.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_dma.c
--- linux-3.13.11/drivers/infiniband/hw/ipath/ipath_dma.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_dma.c	2014-07-09
12:00:15.000000000 +0200
@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(stru
 }
 
 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-	ipath_mapping_error,
-	ipath_dma_map_single,
-	ipath_dma_unmap_single,
-	ipath_dma_map_page,
-	ipath_dma_unmap_page,
-	ipath_map_sg,
-	ipath_unmap_sg,
-	ipath_sg_dma_address,
-	ipath_sg_dma_len,
-	ipath_sync_single_for_cpu,
-	ipath_sync_single_for_device,
-	ipath_dma_alloc_coherent,
-	ipath_dma_free_coherent
+	.mapping_error = ipath_mapping_error,
+	.map_single = ipath_dma_map_single,
+	.unmap_single = ipath_dma_unmap_single,
+	.map_page = ipath_dma_map_page,
+	.unmap_page = ipath_dma_unmap_page,
+	.map_sg = ipath_map_sg,
+	.unmap_sg = ipath_unmap_sg,
+	.dma_address = ipath_sg_dma_address,
+	.dma_len = ipath_sg_dma_len,
+	.sync_single_for_cpu = ipath_sync_single_for_cpu,
+	.sync_single_for_device = ipath_sync_single_for_device,
+	.alloc_coherent = ipath_dma_alloc_coherent,
+	.free_coherent = ipath_dma_free_coherent
 };
diff -ruNp linux-3.13.11/drivers/infiniband/hw/ipath/ipath_rc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_rc.c
--- linux-3.13.11/drivers/infiniband/hw/ipath/ipath_rc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_rc.c	2014-07-09
12:00:15.000000000 +0200
@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
 		struct ib_atomic_eth *ateth;
 		struct ipath_ack_entry *e;
 		u64 vaddr;
-		atomic64_t *maddr;
+		atomic64_unchecked_t *maddr;
 		u64 sdata;
 		u32 rkey;
 		u8 next;
@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
 					    IB_ACCESS_REMOTE_ATOMIC)))
 			goto nack_acc_unlck;
 		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+		maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
 		sdata = be64_to_cpu(ateth->swap_data);
 		e = &qp->s_ack_queue[qp->r_head_ack_queue];
 		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-			(u64) atomic64_add_return(sdata, maddr) - sdata :
+			(u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
 			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
 				      be64_to_cpu(ateth->compare_data),
 				      sdata);
diff -ruNp linux-3.13.11/drivers/infiniband/hw/ipath/ipath_ruc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_ruc.c
--- linux-3.13.11/drivers/infiniband/hw/ipath/ipath_ruc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/ipath/ipath_ruc.c	2014-07-09
12:00:15.000000000 +0200
@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
 	unsigned long flags;
 	struct ib_wc wc;
 	u64 sdata;
-	atomic64_t *maddr;
+	atomic64_unchecked_t *maddr;
 	enum ib_wc_status send_status;
 
 	/*
@@ -382,11 +382,11 @@ again:
 					    IB_ACCESS_REMOTE_ATOMIC)))
 			goto acc_err;
 		/* Perform atomic OP and save result. */
-		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+		maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
 		sdata = wqe->wr.wr.atomic.compare_add;
 		*(u64 *) sqp->s_sge.sge.vaddr =
 			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-			(u64) atomic64_add_return(sdata, maddr) - sdata :
+			(u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
 			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
 				      sdata, wqe->wr.wr.atomic.swap);
 		goto send_comp;
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mlx4/mad.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mad.c
--- linux-3.13.11/drivers/infiniband/hw/mlx4/mad.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mad.c	2014-07-09
12:00:15.000000000 +0200
@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
 {
-	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
+	return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
 		cpu_to_be64(0xff00000000000000LL);
 }
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mlx4/mcg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mcg.c
--- linux-3.13.11/drivers/infiniband/hw/mlx4/mcg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mcg.c	2014-07-09
12:00:15.000000000 +0200
@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib
 {
 	char name[20];
 
-	atomic_set(&ctx->tid, 0);
+	atomic_set_unchecked(&ctx->tid, 0);
 	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
 	ctx->mcg_wq = create_singlethread_workqueue(name);
 	if (!ctx->mcg_wq)
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mlx4/mlx4_ib.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mlx4_ib.h
--- linux-3.13.11/drivers/infiniband/hw/mlx4/mlx4_ib.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mlx4/mlx4_ib.h	2014-07-09
12:00:15.000000000 +0200
@@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
 	struct list_head	mcg_mgid0_list;
 	struct workqueue_struct	*mcg_wq;
 	struct mlx4_ib_demux_pv_ctx **tun;
-	atomic_t tid;
+	atomic_unchecked_t tid;
 	int    flushing; /* flushing the work queue */
 };
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mthca/mthca_cmd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_cmd.c
--- linux-3.13.11/drivers/infiniband/hw/mthca/mthca_cmd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_cmd.c	2014-07-09
12:00:15.000000000 +0200
@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(st
 	mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
 }
 
-int mthca_QUERY_FW(struct mthca_dev *dev)
+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
 {
 	struct mthca_mailbox *mailbox;
 	u32 *outbox;
@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *de
 			     CMD_TIME_CLASS_B);
 }
 
-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    int num_mtt)
 {
 	return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev,
 			 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
 }
 
-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int eq_num)
 {
 	return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_d
 			 CMD_TIME_CLASS_B);
 }
 
-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 		  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 		  void *in_mad, void *response_mad)
 {
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mthca/mthca_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_main.c
--- linux-3.13.11/drivers/infiniband/hw/mthca/mthca_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_main.c	2014-07-09
12:00:15.000000000 +0200
@@ -692,7 +692,7 @@ err_close:
 	return err;
 }
 
-static int mthca_setup_hca(struct mthca_dev *dev)
+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
 {
 	int err;
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mthca/mthca_mr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_mr.c
--- linux-3.13.11/drivers/infiniband/hw/mthca/mthca_mr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_mr.c	2014-07-09
12:00:15.000000000 +0200
@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
  * through the bitmaps)
  */
 
-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
 {
 	int o;
 	int m;
@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthc
 		return key;
 }
 
-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
 		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
 {
 	struct mthca_mailbox *mailbox;
@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_
 	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
 }
 
-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
 			u64 *buffer_list, int buffer_size_shift,
 			int list_len, u64 iova, u64 total_size,
 			u32 access, struct mthca_mr *mr)
diff -ruNp linux-3.13.11/drivers/infiniband/hw/mthca/mthca_provider.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_provider.c
--- linux-3.13.11/drivers/infiniband/hw/mthca/mthca_provider.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/mthca/mthca_provider.c	2014-07-09
12:00:15.000000000 +0200
@@ -763,7 +763,7 @@ unlock:
 	return 0;
 }
 
-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mthca_dev *dev = to_mdev(ibcq->device);
 	struct mthca_cq *cq = to_mcq(ibcq);
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes.c
--- linux-3.13.11/drivers/infiniband/hw/nes/nes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes.c	2014-07-09
12:00:15.000000000 +0200
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
-atomic_t qps_destroyed;
+atomic_unchecked_t qps_destroyed;
 
 static unsigned int ee_flsh_adapter;
 static unsigned int sysfs_nonidx_addr;
@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(str
 	struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 
-	atomic_inc(&qps_destroyed);
+	atomic_inc_unchecked(&qps_destroyed);
 
 	/* Free the control structures */
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes.h
--- linux-3.13.11/drivers/infiniband/hw/nes/nes.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes.h	2014-07-09
12:00:15.000000000 +0200
@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
 extern unsigned int wqm_quanta;
 extern struct list_head nes_adapter_list;
 
-extern atomic_t cm_connects;
-extern atomic_t cm_accepts;
-extern atomic_t cm_disconnects;
-extern atomic_t cm_closes;
-extern atomic_t cm_connecteds;
-extern atomic_t cm_connect_reqs;
-extern atomic_t cm_rejects;
-extern atomic_t mod_qp_timouts;
-extern atomic_t qps_created;
-extern atomic_t qps_destroyed;
-extern atomic_t sw_qps_destroyed;
+extern atomic_unchecked_t cm_connects;
+extern atomic_unchecked_t cm_accepts;
+extern atomic_unchecked_t cm_disconnects;
+extern atomic_unchecked_t cm_closes;
+extern atomic_unchecked_t cm_connecteds;
+extern atomic_unchecked_t cm_connect_reqs;
+extern atomic_unchecked_t cm_rejects;
+extern atomic_unchecked_t mod_qp_timouts;
+extern atomic_unchecked_t qps_created;
+extern atomic_unchecked_t qps_destroyed;
+extern atomic_unchecked_t sw_qps_destroyed;
 extern u32 mh_detected;
 extern u32 mh_pauses_sent;
 extern u32 cm_packets_sent;
@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
 extern u32 cm_packets_received;
 extern u32 cm_packets_dropped;
 extern u32 cm_packets_retrans;
-extern atomic_t cm_listens_created;
-extern atomic_t cm_listens_destroyed;
+extern atomic_unchecked_t cm_listens_created;
+extern atomic_unchecked_t cm_listens_destroyed;
 extern u32 cm_backlog_drops;
-extern atomic_t cm_loopbacks;
-extern atomic_t cm_nodes_created;
-extern atomic_t cm_nodes_destroyed;
-extern atomic_t cm_accel_dropped_pkts;
-extern atomic_t cm_resets_recvd;
-extern atomic_t pau_qps_created;
-extern atomic_t pau_qps_destroyed;
+extern atomic_unchecked_t cm_loopbacks;
+extern atomic_unchecked_t cm_nodes_created;
+extern atomic_unchecked_t cm_nodes_destroyed;
+extern atomic_unchecked_t cm_accel_dropped_pkts;
+extern atomic_unchecked_t cm_resets_recvd;
+extern atomic_unchecked_t pau_qps_created;
+extern atomic_unchecked_t pau_qps_destroyed;
 
 extern u32 int_mod_timer_init;
 extern u32 int_mod_cq_depth_256;
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes_cm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_cm.c
--- linux-3.13.11/drivers/infiniband/hw/nes/nes_cm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_cm.c	2014-07-09
12:00:15.000000000 +0200
@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
 u32 cm_packets_retrans;
 u32 cm_packets_created;
 u32 cm_packets_received;
-atomic_t cm_listens_created;
-atomic_t cm_listens_destroyed;
+atomic_unchecked_t cm_listens_created;
+atomic_unchecked_t cm_listens_destroyed;
 u32 cm_backlog_drops;
-atomic_t cm_loopbacks;
-atomic_t cm_nodes_created;
-atomic_t cm_nodes_destroyed;
-atomic_t cm_accel_dropped_pkts;
-atomic_t cm_resets_recvd;
+atomic_unchecked_t cm_loopbacks;
+atomic_unchecked_t cm_nodes_created;
+atomic_unchecked_t cm_nodes_destroyed;
+atomic_unchecked_t cm_accel_dropped_pkts;
+atomic_unchecked_t cm_resets_recvd;
 
 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_cor
 /* instance of function pointers for client API */
 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
 static struct nes_cm_ops nes_cm_api = {
-	mini_cm_accelerated,
-	mini_cm_listen,
-	mini_cm_del_listen,
-	mini_cm_connect,
-	mini_cm_close,
-	mini_cm_accept,
-	mini_cm_reject,
-	mini_cm_recv_pkt,
-	mini_cm_dealloc_core,
-	mini_cm_get,
-	mini_cm_set
+	.accelerated = mini_cm_accelerated,
+	.listen = mini_cm_listen,
+	.stop_listener = mini_cm_del_listen,
+	.connect = mini_cm_connect,
+	.close = mini_cm_close,
+	.accept = mini_cm_accept,
+	.reject = mini_cm_reject,
+	.recv_pkt = mini_cm_recv_pkt,
+	.destroy_cm_core = mini_cm_dealloc_core,
+	.get = mini_cm_get,
+	.set = mini_cm_set
 };
 
 static struct nes_cm_core *g_cm_core;
 
-atomic_t cm_connects;
-atomic_t cm_accepts;
-atomic_t cm_disconnects;
-atomic_t cm_closes;
-atomic_t cm_connecteds;
-atomic_t cm_connect_reqs;
-atomic_t cm_rejects;
+atomic_unchecked_t cm_connects;
+atomic_unchecked_t cm_accepts;
+atomic_unchecked_t cm_disconnects;
+atomic_unchecked_t cm_closes;
+atomic_unchecked_t cm_connecteds;
+atomic_unchecked_t cm_connect_reqs;
+atomic_unchecked_t cm_rejects;
 
 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
 {
@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(str
 		kfree(listener);
 		listener = NULL;
 		ret = 0;
-		atomic_inc(&cm_listens_destroyed);
+		atomic_inc_unchecked(&cm_listens_destroyed);
 	} else {
 		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 	}
@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(
 		  cm_node->rem_mac);
 
 	add_hte_node(cm_core, cm_node);
-	atomic_inc(&cm_nodes_created);
+	atomic_inc_unchecked(&cm_nodes_created);
 
 	return cm_node;
 }
@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm
 	}
 
 	atomic_dec(&cm_core->node_cnt);
-	atomic_inc(&cm_nodes_destroyed);
+	atomic_inc_unchecked(&cm_nodes_destroyed);
 	nesqp = cm_node->nesqp;
 	if (nesqp) {
 		nesqp->cm_node = NULL;
@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm
 
 static void drop_packet(struct sk_buff *skb)
 {
-	atomic_inc(&cm_accel_dropped_pkts);
+	atomic_inc_unchecked(&cm_accel_dropped_pkts);
 	dev_kfree_skb_any(skb);
 }
 
@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm
 {
 
 	int	reset = 0;	/* whether to send reset in case of err.. */
-	atomic_inc(&cm_resets_recvd);
+	atomic_inc_unchecked(&cm_resets_recvd);
 	nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
 			" refcnt=%d\n", cm_node, cm_node->state,
 			atomic_read(&cm_node->ref_count));
@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_conne
 				rem_ref_cm_node(cm_node->cm_core, cm_node);
 				return NULL;
 			}
-			atomic_inc(&cm_loopbacks);
+			atomic_inc_unchecked(&cm_loopbacks);
 			loopbackremotenode->loopbackpartner = cm_node;
 			loopbackremotenode->tcp_cntxt.rcv_wscale =
 				NES_CM_DEFAULT_RCV_WND_SCALE;
@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_c
 				nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
 			else {
 				rem_ref_cm_node(cm_core, cm_node);
-				atomic_inc(&cm_accel_dropped_pkts);
+				atomic_inc_unchecked(&cm_accel_dropped_pkts);
 				dev_kfree_skb_any(skb);
 			}
 			break;
@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct ne
 
 	if ((cm_id) && (cm_id->event_handler)) {
 		if (issue_disconn) {
-			atomic_inc(&cm_disconnects);
+			atomic_inc_unchecked(&cm_disconnects);
 			cm_event.event = IW_CM_EVENT_DISCONNECT;
 			cm_event.status = disconn_status;
 			cm_event.local_addr = cm_id->local_addr;
@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct ne
 		}
 
 		if (issue_close) {
-			atomic_inc(&cm_closes);
+			atomic_inc_unchecked(&cm_closes);
 			nes_disconnect(nesqp, 1);
 
 			cm_id->provider_data = nesqp;
@@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
 
 	nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
 		nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
-	atomic_inc(&cm_accepts);
+	atomic_inc_unchecked(&cm_accepts);
 
 	nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
 			netdev_refcnt_read(nesvnic->netdev));
@@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
 	struct nes_cm_core *cm_core;
 	u8 *start_buff;
 
-	atomic_inc(&cm_rejects);
+	atomic_inc_unchecked(&cm_rejects);
 	cm_node = (struct nes_cm_node *)cm_id->provider_data;
 	loopback = cm_node->loopbackpartner;
 	cm_core = cm_node->cm_core;
@@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id,
 		  ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
 		  ntohs(laddr->sin_port));
 
-	atomic_inc(&cm_connects);
+	atomic_inc_unchecked(&cm_connects);
 	nesqp->active_conn = 1;
 
 	/* cache the cm_id in the qp */
@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *c
 			g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
 			return err;
 		}
-		atomic_inc(&cm_listens_created);
+		atomic_inc_unchecked(&cm_listens_created);
 	}
 
 	cm_id->add_ref(cm_id);
@@ -3505,7 +3505,7 @@ static void cm_event_connected(struct ne
 
 	if (nesqp->destroyed)
 		return;
-	atomic_inc(&cm_connecteds);
+	atomic_inc_unchecked(&cm_connecteds);
 	nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
 		  " local port 0x%04X. jiffies = %lu.\n",
 		  nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
@@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm
 
 	cm_id->add_ref(cm_id);
 	ret = cm_id->event_handler(cm_id, &cm_event);
-	atomic_inc(&cm_closes);
+	atomic_inc_unchecked(&cm_closes);
 	cm_event.event = IW_CM_EVENT_CLOSE;
 	cm_event.status = 0;
 	cm_event.provider_data = cm_id->provider_data;
@@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_
 		return;
 	cm_id = cm_node->cm_id;
 
-	atomic_inc(&cm_connect_reqs);
+	atomic_inc_unchecked(&cm_connect_reqs);
 	nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
 		  cm_node, cm_id, jiffies);
 
@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct n
 		return;
 	cm_id = cm_node->cm_id;
 
-	atomic_inc(&cm_connect_reqs);
+	atomic_inc_unchecked(&cm_connect_reqs);
 	nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
 		  cm_node, cm_id, jiffies);
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes_mgt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_mgt.c
--- linux-3.13.11/drivers/infiniband/hw/nes/nes_mgt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_mgt.c	2014-07-09
12:00:15.000000000 +0200
@@ -40,8 +40,8 @@
 #include "nes.h"
 #include "nes_mgt.h"
 
-atomic_t pau_qps_created;
-atomic_t pau_qps_destroyed;
+atomic_unchecked_t pau_qps_created;
+atomic_unchecked_t pau_qps_destroyed;
 
 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
 {
@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_devic
 {
 	struct sk_buff *skb;
 	unsigned long flags;
-	atomic_inc(&pau_qps_destroyed);
+	atomic_inc_unchecked(&pau_qps_destroyed);
 
 	/* Free packets that have not yet been forwarded */
 	/* Lock is acquired by skb_dequeue when removing the skb */
@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct ne
 					cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
 				skb_queue_head_init(&nesqp->pau_list);
 				spin_lock_init(&nesqp->pau_lock);
-				atomic_inc(&pau_qps_created);
+				atomic_inc_unchecked(&pau_qps_created);
 				nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
 			}
 
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes_nic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_nic.c
--- linux-3.13.11/drivers/infiniband/hw/nes/nes_nic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_nic.c	2014-07-09
12:00:15.000000000 +0200
@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats
 	target_stat_values[++index] = mh_detected;
 	target_stat_values[++index] = mh_pauses_sent;
 	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
-	target_stat_values[++index] = atomic_read(&cm_connects);
-	target_stat_values[++index] = atomic_read(&cm_accepts);
-	target_stat_values[++index] = atomic_read(&cm_disconnects);
-	target_stat_values[++index] = atomic_read(&cm_connecteds);
-	target_stat_values[++index] = atomic_read(&cm_connect_reqs);
-	target_stat_values[++index] = atomic_read(&cm_rejects);
-	target_stat_values[++index] = atomic_read(&mod_qp_timouts);
-	target_stat_values[++index] = atomic_read(&qps_created);
-	target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
-	target_stat_values[++index] = atomic_read(&qps_destroyed);
-	target_stat_values[++index] = atomic_read(&cm_closes);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
+	target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
+	target_stat_values[++index] = atomic_read_unchecked(&qps_created);
+	target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
+	target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
 	target_stat_values[++index] = cm_packets_sent;
 	target_stat_values[++index] = cm_packets_bounced;
 	target_stat_values[++index] = cm_packets_created;
 	target_stat_values[++index] = cm_packets_received;
 	target_stat_values[++index] = cm_packets_dropped;
 	target_stat_values[++index] = cm_packets_retrans;
-	target_stat_values[++index] = atomic_read(&cm_listens_created);
-	target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
 	target_stat_values[++index] = cm_backlog_drops;
-	target_stat_values[++index] = atomic_read(&cm_loopbacks);
-	target_stat_values[++index] = atomic_read(&cm_nodes_created);
-	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
-	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
-	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
+	target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
 	target_stat_values[++index] = nesadapter->free_4kpbl;
 	target_stat_values[++index] = nesadapter->free_256pbl;
 	target_stat_values[++index] = int_mod_timer_init;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-	target_stat_values[++index] = atomic_read(&pau_qps_created);
-	target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
+	target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
+	target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
 }
 
 /**
diff -ruNp linux-3.13.11/drivers/infiniband/hw/nes/nes_verbs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_verbs.c
--- linux-3.13.11/drivers/infiniband/hw/nes/nes_verbs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/nes/nes_verbs.c	2014-07-09
12:00:15.000000000 +0200
@@ -46,9 +46,9 @@
 
 #include <rdma/ib_umem.h>
 
-atomic_t mod_qp_timouts;
-atomic_t qps_created;
-atomic_t sw_qps_destroyed;
+atomic_unchecked_t mod_qp_timouts;
+atomic_unchecked_t qps_created;
+atomic_unchecked_t sw_qps_destroyed;
 
 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
 
@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struc
 	if (init_attr->create_flags)
 		return ERR_PTR(-EINVAL);
 
-	atomic_inc(&qps_created);
+	atomic_inc_unchecked(&qps_created);
 	switch (init_attr->qp_type) {
 		case IB_QPT_RC:
 			if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *
 	struct iw_cm_event cm_event;
 	int ret = 0;
 
-	atomic_inc(&sw_qps_destroyed);
+	atomic_inc_unchecked(&sw_qps_destroyed);
 	nesqp->destroyed = 1;
 
 	/* Blow away the connection if it exists. */
diff -ruNp linux-3.13.11/drivers/infiniband/hw/qib/qib.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/qib/qib.h
--- linux-3.13.11/drivers/infiniband/hw/qib/qib.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/infiniband/hw/qib/qib.h	2014-07-09
12:00:15.000000000 +0200
@@ -52,6 +52,7 @@
 #include <linux/kref.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 
 #include "qib_common.h"
 #include "qib_verbs.h"
diff -ruNp linux-3.13.11/drivers/input/gameport/gameport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/gameport/gameport.c
--- linux-3.13.11/drivers/input/gameport/gameport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/gameport/gameport.c	2014-07-09
12:00:15.000000000 +0200
@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
  */
 static void gameport_init_port(struct gameport *gameport)
 {
-	static atomic_t gameport_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
 
 	__module_get(THIS_MODULE);
 
 	mutex_init(&gameport->drv_mutex);
 	device_initialize(&gameport->dev);
 	dev_set_name(&gameport->dev, "gameport%lu",
-			(unsigned long)atomic_inc_return(&gameport_no) - 1);
+			(unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
 	gameport->dev.bus = &gameport_bus;
 	gameport->dev.release = gameport_release_port;
 	if (gameport->parent)
diff -ruNp linux-3.13.11/drivers/input/input.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/input.c
--- linux-3.13.11/drivers/input/input.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/input.c	2014-07-09
12:00:15.000000000 +0200
@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
  */
 struct input_dev *input_allocate_device(void)
 {
-	static atomic_t input_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t input_no = ATOMIC_INIT(0);
 	struct input_dev *dev;
 
 	dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(
 		INIT_LIST_HEAD(&dev->node);
 
 		dev_set_name(&dev->dev, "input%ld",
-			     (unsigned long) atomic_inc_return(&input_no) - 1);
+			     (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
 
 		__module_get(THIS_MODULE);
 	}
diff -ruNp linux-3.13.11/drivers/input/joystick/sidewinder.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/joystick/sidewinder.c
--- linux-3.13.11/drivers/input/joystick/sidewinder.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/joystick/sidewinder.c	2014-07-09
12:00:15.000000000 +0200
@@ -30,6 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/input.h>
 #include <linux/gameport.h>
diff -ruNp linux-3.13.11/drivers/input/joystick/xpad.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/joystick/xpad.c
--- linux-3.13.11/drivers/input/joystick/xpad.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/joystick/xpad.c	2014-07-09
12:00:15.000000000 +0200
@@ -736,7 +736,7 @@ static void xpad_led_set(struct led_clas
 
 static int xpad_led_probe(struct usb_xpad *xpad)
 {
-	static atomic_t led_seq	= ATOMIC_INIT(0);
+	static atomic_unchecked_t led_seq	= ATOMIC_INIT(0);
 	long led_no;
 	struct xpad_led *led;
 	struct led_classdev *led_cdev;
@@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpa
 	if (!led)
 		return -ENOMEM;
 
-	led_no = (long)atomic_inc_return(&led_seq) - 1;
+	led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
 
 	snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
 	led->xpad = xpad;
diff -ruNp linux-3.13.11/drivers/input/misc/ims-pcu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/misc/ims-pcu.c
--- linux-3.13.11/drivers/input/misc/ims-pcu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/misc/ims-pcu.c	2014-07-09
12:00:15.000000000 +0200
@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct
 
 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
 {
-	static atomic_t device_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t device_no = ATOMIC_INIT(0);
 
 	const struct ims_pcu_device_info *info;
 	u8 device_id;
@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode
 	}
 
 	/* Device appears to be operable, complete initialization */
-	pcu->device_no = atomic_inc_return(&device_no) - 1;
+	pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
 
 	error = ims_pcu_setup_backlight(pcu);
 	if (error)
diff -ruNp linux-3.13.11/drivers/input/mouse/psmouse.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/mouse/psmouse.h
--- linux-3.13.11/drivers/input/mouse/psmouse.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/mouse/psmouse.h	2014-07-09
12:00:15.000000000 +0200
@@ -116,7 +116,7 @@ struct psmouse_attribute {
 	ssize_t (*set)(struct psmouse *psmouse, void *data,
 			const char *buf, size_t count);
 	bool protect;
-};
+} __do_const;
 #define to_psmouse_attr(a)	container_of((a), struct psmouse_attribute, dattr)
 
 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
diff -ruNp linux-3.13.11/drivers/input/mousedev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/mousedev.c
--- linux-3.13.11/drivers/input/mousedev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/mousedev.c	2014-07-09
12:00:15.000000000 +0200
@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file
 
 	spin_unlock_irq(&client->packet_lock);
 
-	if (copy_to_user(buffer, data, count))
+	if (count > sizeof(data) || copy_to_user(buffer, data, count))
 		return -EFAULT;
 
 	return count;
diff -ruNp linux-3.13.11/drivers/input/serio/serio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/serio/serio.c
--- linux-3.13.11/drivers/input/serio/serio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/serio/serio.c	2014-07-09
12:00:15.000000000 +0200
@@ -505,7 +505,7 @@ static void serio_release_port(struct de
  */
 static void serio_init_port(struct serio *serio)
 {
-	static atomic_t serio_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
 
 	__module_get(THIS_MODULE);
 
@@ -516,7 +516,7 @@ static void serio_init_port(struct serio
 	mutex_init(&serio->drv_mutex);
 	device_initialize(&serio->dev);
 	dev_set_name(&serio->dev, "serio%ld",
-			(long)atomic_inc_return(&serio_no) - 1);
+			(long)atomic_inc_return_unchecked(&serio_no) - 1);
 	serio->dev.bus = &serio_bus;
 	serio->dev.release = serio_release_port;
 	serio->dev.groups = serio_device_attr_groups;
diff -ruNp linux-3.13.11/drivers/input/serio/serio_raw.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/serio/serio_raw.c
--- linux-3.13.11/drivers/input/serio/serio_raw.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/input/serio/serio_raw.c	2014-07-09
12:00:15.000000000 +0200
@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(s
 
 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
 {
-	static atomic_t serio_raw_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
 	struct serio_raw *serio_raw;
 	int err;
 
@@ -304,7 +304,7 @@ static int serio_raw_connect(struct seri
 	}
 
 	snprintf(serio_raw->name, sizeof(serio_raw->name),
-		 "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
+		 "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
 	kref_init(&serio_raw->kref);
 	INIT_LIST_HEAD(&serio_raw->client_list);
 	init_waitqueue_head(&serio_raw->wait);
diff -ruNp linux-3.13.11/drivers/iommu/iommu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iommu/iommu.c
--- linux-3.13.11/drivers/iommu/iommu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iommu/iommu.c	2014-07-09
12:00:15.000000000 +0200
@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_n
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
 	bus_register_notifier(bus, &iommu_bus_nb);
-	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
+	bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
 }
 
 /**
diff -ruNp linux-3.13.11/drivers/iommu/irq_remapping.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iommu/irq_remapping.c
--- linux-3.13.11/drivers/iommu/irq_remapping.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/iommu/irq_remapping.c	2014-07-09
12:00:15.000000000 +0200
@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int
 void panic_if_irq_remap(const char *msg)
 {
 	if (irq_remapping_enabled)
-		panic(msg);
+		panic("%s", msg);
 }
 
 static void ir_ack_apic_edge(struct irq_data *data)
@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_d
 
 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 {
-	chip->irq_print_chip = ir_print_prefix;
-	chip->irq_ack = ir_ack_apic_edge;
-	chip->irq_eoi = ir_ack_apic_level;
-	chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+	pax_open_kernel();
+	*(void **)&chip->irq_print_chip = ir_print_prefix;
+	*(void **)&chip->irq_ack = ir_ack_apic_edge;
+	*(void **)&chip->irq_eoi = ir_ack_apic_level;
+	*(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+	pax_close_kernel();
 }
 
 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
diff -ruNp linux-3.13.11/drivers/irqchip/irq-gic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/irqchip/irq-gic.c
--- linux-3.13.11/drivers/irqchip/irq-gic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/irqchip/irq-gic.c	2014-07-09
12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __r
  * Supported arch specific GIC irq extension.
  * Default make them NULL.
  */
-struct irq_chip gic_arch_extn = {
+irq_chip_no_const gic_arch_extn = {
 	.irq_eoi	= NULL,
 	.irq_mask	= NULL,
 	.irq_unmask	= NULL,
@@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsig
 	chained_irq_exit(chip, desc);
 }
 
-static struct irq_chip gic_chip = {
+static irq_chip_no_const gic_chip __read_only = {
 	.name			= "GIC",
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
diff -ruNp linux-3.13.11/drivers/isdn/capi/capi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/capi/capi.c
--- linux-3.13.11/drivers/isdn/capi/capi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/capi/capi.c	2014-07-09
12:00:15.000000000 +0200
@@ -81,8 +81,8 @@ struct capiminor {
 
 	struct capi20_appl	*ap;
 	u32			ncci;
-	atomic_t		datahandle;
-	atomic_t		msgid;
+	atomic_unchecked_t	datahandle;
+	atomic_unchecked_t	msgid;
 
 	struct tty_port port;
 	int                ttyinstop;
@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *m
 		capimsg_setu16(s, 2, mp->ap->applid);
 		capimsg_setu8 (s, 4, CAPI_DATA_B3);
 		capimsg_setu8 (s, 5, CAPI_RESP);
-		capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
+		capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
 		capimsg_setu32(s, 8, mp->ncci);
 		capimsg_setu16(s, 12, datahandle);
 	}
@@ -512,14 +512,14 @@ static void handle_minor_send(struct cap
 		mp->outbytes -= len;
 		spin_unlock_bh(&mp->outlock);
 
-		datahandle = atomic_inc_return(&mp->datahandle);
+		datahandle = atomic_inc_return_unchecked(&mp->datahandle);
 		skb_push(skb, CAPI_DATA_B3_REQ_LEN);
 		memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
 		capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
 		capimsg_setu16(skb->data, 2, mp->ap->applid);
 		capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
 		capimsg_setu8 (skb->data, 5, CAPI_REQ);
-		capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
+		capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
 		capimsg_setu32(skb->data, 8, mp->ncci);	/* NCCI */
 		capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
 		capimsg_setu16(skb->data, 16, len);	/* Data length */
diff -ruNp linux-3.13.11/drivers/isdn/gigaset/bas-gigaset.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/bas-gigaset.c
--- linux-3.13.11/drivers/isdn/gigaset/bas-gigaset.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/bas-gigaset.c	2014-07-09
12:00:15.000000000 +0200
@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb
 
 
 static const struct gigaset_ops gigops = {
-	gigaset_write_cmd,
-	gigaset_write_room,
-	gigaset_chars_in_buffer,
-	gigaset_brkchars,
-	gigaset_init_bchannel,
-	gigaset_close_bchannel,
-	gigaset_initbcshw,
-	gigaset_freebcshw,
-	gigaset_reinitbcshw,
-	gigaset_initcshw,
-	gigaset_freecshw,
-	gigaset_set_modem_ctrl,
-	gigaset_baud_rate,
-	gigaset_set_line_ctrl,
-	gigaset_isoc_send_skb,
-	gigaset_isoc_input,
+	.write_cmd = gigaset_write_cmd,
+	.write_room = gigaset_write_room,
+	.chars_in_buffer = gigaset_chars_in_buffer,
+	.brkchars = gigaset_brkchars,
+	.init_bchannel = gigaset_init_bchannel,
+	.close_bchannel = gigaset_close_bchannel,
+	.initbcshw = gigaset_initbcshw,
+	.freebcshw = gigaset_freebcshw,
+	.reinitbcshw = gigaset_reinitbcshw,
+	.initcshw = gigaset_initcshw,
+	.freecshw = gigaset_freecshw,
+	.set_modem_ctrl = gigaset_set_modem_ctrl,
+	.baud_rate = gigaset_baud_rate,
+	.set_line_ctrl = gigaset_set_line_ctrl,
+	.send_skb = gigaset_isoc_send_skb,
+	.handle_input = gigaset_isoc_input,
 };
 
 /* bas_gigaset_init
diff -ruNp linux-3.13.11/drivers/isdn/gigaset/interface.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/interface.c
--- linux-3.13.11/drivers/isdn/gigaset/interface.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/interface.c	2014-07-09
12:00:15.000000000 +0200
@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tt
 	}
 	tty->driver_data = cs;
 
-	++cs->port.count;
+	atomic_inc(&cs->port.count);
 
-	if (cs->port.count == 1) {
+	if (atomic_read(&cs->port.count) == 1) {
 		tty_port_tty_set(&cs->port, tty);
 		cs->port.low_latency = 1;
 	}
@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *
 
 	if (!cs->connected)
 		gig_dbg(DEBUG_IF, "not connected");	/* nothing to do */
-	else if (!cs->port.count)
+	else if (!atomic_read(&cs->port.count))
 		dev_warn(cs->dev, "%s: device not opened\n", __func__);
-	else if (!--cs->port.count)
+	else if (!atomic_dec_return(&cs->port.count))
 		tty_port_tty_set(&cs->port, NULL);
 
 	mutex_unlock(&cs->mutex);
diff -ruNp linux-3.13.11/drivers/isdn/gigaset/ser-gigaset.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/ser-gigaset.c
--- linux-3.13.11/drivers/isdn/gigaset/ser-gigaset.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/ser-gigaset.c	2014-07-09
12:00:15.000000000 +0200
@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct
 }
 
 static const struct gigaset_ops ops = {
-	gigaset_write_cmd,
-	gigaset_write_room,
-	gigaset_chars_in_buffer,
-	gigaset_brkchars,
-	gigaset_init_bchannel,
-	gigaset_close_bchannel,
-	gigaset_initbcshw,
-	gigaset_freebcshw,
-	gigaset_reinitbcshw,
-	gigaset_initcshw,
-	gigaset_freecshw,
-	gigaset_set_modem_ctrl,
-	gigaset_baud_rate,
-	gigaset_set_line_ctrl,
-	gigaset_m10x_send_skb,	/* asyncdata.c */
-	gigaset_m10x_input,	/* asyncdata.c */
+	.write_cmd = gigaset_write_cmd,
+	.write_room = gigaset_write_room,
+	.chars_in_buffer = gigaset_chars_in_buffer,
+	.brkchars = gigaset_brkchars,
+	.init_bchannel = gigaset_init_bchannel,
+	.close_bchannel = gigaset_close_bchannel,
+	.initbcshw = gigaset_initbcshw,
+	.freebcshw = gigaset_freebcshw,
+	.reinitbcshw = gigaset_reinitbcshw,
+	.initcshw = gigaset_initcshw,
+	.freecshw = gigaset_freecshw,
+	.set_modem_ctrl = gigaset_set_modem_ctrl,
+	.baud_rate = gigaset_baud_rate,
+	.set_line_ctrl = gigaset_set_line_ctrl,
+	.send_skb = gigaset_m10x_send_skb,	/* asyncdata.c */
+	.handle_input = gigaset_m10x_input,	/* asyncdata.c */
 };
 
 
diff -ruNp linux-3.13.11/drivers/isdn/gigaset/usb-gigaset.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/usb-gigaset.c
--- linux-3.13.11/drivers/isdn/gigaset/usb-gigaset.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/gigaset/usb-gigaset.c	2014-07-09
12:00:15.000000000 +0200
@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cards
 	gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
 	memcpy(cs->hw.usb->bchars, buf, 6);
 	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
-			       0, 0, &buf, 6, 2000);
+			       0, 0, buf, 6, 2000);
 }
 
 static void gigaset_freebcshw(struct bc_state *bcs)
@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_
 }
 
 static const struct gigaset_ops ops = {
-	gigaset_write_cmd,
-	gigaset_write_room,
-	gigaset_chars_in_buffer,
-	gigaset_brkchars,
-	gigaset_init_bchannel,
-	gigaset_close_bchannel,
-	gigaset_initbcshw,
-	gigaset_freebcshw,
-	gigaset_reinitbcshw,
-	gigaset_initcshw,
-	gigaset_freecshw,
-	gigaset_set_modem_ctrl,
-	gigaset_baud_rate,
-	gigaset_set_line_ctrl,
-	gigaset_m10x_send_skb,
-	gigaset_m10x_input,
+	.write_cmd = gigaset_write_cmd,
+	.write_room = gigaset_write_room,
+	.chars_in_buffer = gigaset_chars_in_buffer,
+	.brkchars = gigaset_brkchars,
+	.init_bchannel = gigaset_init_bchannel,
+	.close_bchannel = gigaset_close_bchannel,
+	.initbcshw = gigaset_initbcshw,
+	.freebcshw = gigaset_freebcshw,
+	.reinitbcshw = gigaset_reinitbcshw,
+	.initcshw = gigaset_initcshw,
+	.freecshw = gigaset_freecshw,
+	.set_modem_ctrl = gigaset_set_modem_ctrl,
+	.baud_rate = gigaset_baud_rate,
+	.set_line_ctrl = gigaset_set_line_ctrl,
+	.send_skb = gigaset_m10x_send_skb,
+	.handle_input = gigaset_m10x_input,
 };
 
 /*
diff -ruNp linux-3.13.11/drivers/isdn/hardware/avm/b1.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/hardware/avm/b1.c
--- linux-3.13.11/drivers/isdn/hardware/avm/b1.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/hardware/avm/b1.c	2014-07-09
12:00:15.000000000 +0200
@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
 	}
 	if (left) {
 		if (t4file->user) {
-			if (copy_from_user(buf, dp, left))
+			if (left > sizeof buf || copy_from_user(buf, dp, left))
 				return -EFAULT;
 		} else {
 			memcpy(buf, dp, left);
@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
 	}
 	if (left) {
 		if (config->user) {
-			if (copy_from_user(buf, dp, left))
+			if (left > sizeof buf || copy_from_user(buf, dp, left))
 				return -EFAULT;
 		} else {
 			memcpy(buf, dp, left);
diff -ruNp linux-3.13.11/drivers/isdn/i4l/isdn_common.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_common.c
--- linux-3.13.11/drivers/isdn/i4l/isdn_common.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_common.c	2014-07-09
12:00:15.000000000 +0200
@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd,
 			} else
 				return -EINVAL;
 		case IIOCDBGVAR:
+			if (!capable(CAP_SYS_RAWIO))
+				return -EPERM;
 			if (arg) {
 				if (copy_to_user(argp, &dev, sizeof(ulong)))
 					return -EFAULT;
diff -ruNp linux-3.13.11/drivers/isdn/i4l/isdn_concap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_concap.c
--- linux-3.13.11/drivers/isdn/i4l/isdn_concap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_concap.c	2014-07-09
12:00:15.000000000 +0200
@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(st
 }
 
 struct concap_device_ops isdn_concap_reliable_dl_dops = {
-	&isdn_concap_dl_data_req,
-	&isdn_concap_dl_connect_req,
-	&isdn_concap_dl_disconn_req
+	.data_req = &isdn_concap_dl_data_req,
+	.connect_req = &isdn_concap_dl_connect_req,
+	.disconn_req = &isdn_concap_dl_disconn_req
 };
 
 /* The following should better go into a dedicated source file such that
diff -ruNp linux-3.13.11/drivers/isdn/i4l/isdn_tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_tty.c
--- linux-3.13.11/drivers/isdn/i4l/isdn_tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_tty.c	2014-07-09
12:00:15.000000000 +0200
@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, st
 
 #ifdef ISDN_DEBUG_MODEM_OPEN
 	printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
-	       port->count);
+	       atomic_read(&port->count));
 #endif
-	port->count++;
+	atomic_inc(&port->count);
 	port->tty = tty;
 	/*
 	 * Start up serial port
@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, s
 #endif
 		return;
 	}
-	if ((tty->count == 1) && (port->count != 1)) {
+	if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
 		/*
 		 * Uh, oh.  tty->count is 1, which means that the tty
 		 * structure will be freed.  Info->count should always
@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, s
 		 * serial port won't be shutdown.
 		 */
 		printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
-		       "info->count is %d\n", port->count);
-		port->count = 1;
+		       "info->count is %d\n", atomic_read(&port->count));
+		atomic_set(&port->count, 1);
 	}
-	if (--port->count < 0) {
+	if (atomic_dec_return(&port->count) < 0) {
 		printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
-		       info->line, port->count);
-		port->count = 0;
+		       info->line, atomic_read(&port->count));
+		atomic_set(&port->count, 0);
 	}
-	if (port->count) {
+	if (atomic_read(&port->count)) {
 #ifdef ISDN_DEBUG_MODEM_OPEN
 		printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
 #endif
@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
 	if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
 		return;
 	isdn_tty_shutdown(info);
-	port->count = 0;
+	atomic_set(&port->count, 0);
 	port->flags &= ~ASYNC_NORMAL_ACTIVE;
 	port->tty = NULL;
 	wake_up_interruptible(&port->open_wait);
@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setu
 	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
 		modem_info *info = &dev->mdm.info[i];
 
-		if (info->port.count == 0)
+		if (atomic_read(&info->port.count) == 0)
 			continue;
 		if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) &&  /* SI1 is matching */
 		    (info->emu.mdmreg[REG_SI2] == si2))	{         /* SI2 is matching */
diff -ruNp linux-3.13.11/drivers/isdn/i4l/isdn_x25iface.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_x25iface.c
--- linux-3.13.11/drivers/isdn/i4l/isdn_x25iface.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/i4l/isdn_x25iface.c	2014-07-09
12:00:15.000000000 +0200
@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(str
 
 
 static struct concap_proto_ops ix25_pops = {
-	&isdn_x25iface_proto_new,
-	&isdn_x25iface_proto_del,
-	&isdn_x25iface_proto_restart,
-	&isdn_x25iface_proto_close,
-	&isdn_x25iface_xmit,
-	&isdn_x25iface_receive,
-	&isdn_x25iface_connect_ind,
-	&isdn_x25iface_disconn_ind
+	.proto_new = &isdn_x25iface_proto_new,
+	.proto_del = &isdn_x25iface_proto_del,
+	.restart = &isdn_x25iface_proto_restart,
+	.close = &isdn_x25iface_proto_close,
+	.encap_and_xmit = &isdn_x25iface_xmit,
+	.data_ind = &isdn_x25iface_receive,
+	.connect_ind = &isdn_x25iface_connect_ind,
+	.disconn_ind = &isdn_x25iface_disconn_ind
 };
 
 /* error message helper function */
diff -ruNp linux-3.13.11/drivers/isdn/icn/icn.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/icn/icn.c
--- linux-3.13.11/drivers/isdn/icn/icn.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/icn/icn.c	2014-07-09
12:00:15.000000000 +0200
@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len,
 		if (count > len)
 			count = len;
 		if (user) {
-			if (copy_from_user(msg, buf, count))
+			if (count > sizeof msg || copy_from_user(msg, buf, count))
 				return -EFAULT;
 		} else
 			memcpy(msg, buf, count);
diff -ruNp linux-3.13.11/drivers/isdn/mISDN/dsp_cmx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/mISDN/dsp_cmx.c
--- linux-3.13.11/drivers/isdn/mISDN/dsp_cmx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/isdn/mISDN/dsp_cmx.c	2014-07-09
12:00:15.000000000 +0200
@@ -1628,7 +1628,7 @@ unsigned long	dsp_spl_jiffies; /* calcul
 static u16	dsp_count; /* last sample count */
 static int	dsp_count_valid; /* if we have last sample count */
 
-void
+void __intentional_overflow(-1)
 dsp_cmx_send(void *arg)
 {
 	struct dsp_conf *conf;
diff -ruNp linux-3.13.11/drivers/leds/leds-clevo-mail.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/leds/leds-clevo-mail.c
--- linux-3.13.11/drivers/leds/leds-clevo-mail.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/leds/leds-clevo-mail.c	2014-07-09
12:00:15.000000000 +0200
@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_cal
  * detected as working, but in reality it is not) as low as
  * possible.
  */
-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
 	{
 		.callback = clevo_mail_led_dmi_callback,
 		.ident = "Clevo D410J",
diff -ruNp linux-3.13.11/drivers/leds/leds-ss4200.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/leds/leds-ss4200.c
--- linux-3.13.11/drivers/leds/leds-ss4200.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/leds/leds-ss4200.c	2014-07-09
12:00:15.000000000 +0200
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-bas
  * detected as working, but in reality it is not) as low as
  * possible.
  */
-static struct dmi_system_id nas_led_whitelist[] __initdata = {
+static struct dmi_system_id nas_led_whitelist[] __initconst = {
 	{
 		.callback = ss4200_led_dmi_callback,
 		.ident = "Intel SS4200-E",
diff -ruNp linux-3.13.11/drivers/lguest/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/core.c
--- linux-3.13.11/drivers/lguest/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/core.c	2014-07-09
12:00:15.000000000 +0200
@@ -97,9 +97,17 @@ static __init int map_switcher(void)
 	 * The end address needs +1 because __get_vm_area allocates an
 	 * extra guard page, so we need space for that.
 	 */
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+				     VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
+				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
+#else
 	switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
 				     VM_ALLOC, switcher_addr, switcher_addr
 				     + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
+#endif
+
 	if (!switcher_vma) {
 		err = -ENOMEM;
 		printk("lguest: could not map switcher pages high\n");
@@ -124,7 +132,7 @@ static __init int map_switcher(void)
 	 * Now the Switcher is mapped at the right address, we can't fail!
 	 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
 	 */
-	memcpy(switcher_vma->addr, start_switcher_text,
+	memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
 	       end_switcher_text - start_switcher_text);
 
 	printk(KERN_INFO "lguest: mapped switcher at %p\n",
diff -ruNp linux-3.13.11/drivers/lguest/page_tables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/page_tables.c
--- linux-3.13.11/drivers/lguest/page_tables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/page_tables.c	2014-07-09 12:00:15.000000000 +0200
@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsign
 /*:*/
 
 #ifdef CONFIG_X86_PAE
-static void release_pmd(pmd_t *spmd)
+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
 {
 	/* If the entry's not present, there's nothing to release. */
 	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
diff -ruNp linux-3.13.11/drivers/lguest/x86/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/x86/core.c
--- linux-3.13.11/drivers/lguest/x86/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/x86/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,7 +59,7 @@ static struct {
 /* Offset from where switcher.S was compiled to where we've copied it */
 static unsigned long switcher_offset(void)
 {
-	return switcher_addr - (unsigned long)start_switcher_text;
+	return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
 }
 
 /* This cpu's struct lguest_pages (after the Switcher text page) */
@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg
 	 * These copies are pretty cheap, so we do them unconditionally: */
 	/* Save the current Host top-level page directory.
 	 */
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+	pages->state.host_cr3 = read_cr3();
+#else
 	pages->state.host_cr3 = __pa(current->mm->pgd);
+#endif
+
 	/*
 	 * Set up the Guest's page tables to see this CPU's pages (and no
 	 * other CPU's pages).
@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
 	 * compiled-in switcher code and the high-mapped copy we just made.
 	 */
 	for (i = 0; i < IDT_ENTRIES; i++)
-		default_idt_entries[i] += switcher_offset();
+		default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
 
 	/*
 	 * Set up the Switcher's per-cpu areas.
@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
 	 * it will be undisturbed when we switch.  To change %cs and jump we
 	 * need this structure to feed to Intel's "lcall" instruction.
 	 */
-	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+	lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
 	lguest_entry.segment = LGUEST_CS;
 
 	/*
diff -ruNp linux-3.13.11/drivers/lguest/x86/switcher_32.S linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/x86/switcher_32.S
--- linux-3.13.11/drivers/lguest/x86/switcher_32.S	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/lguest/x86/switcher_32.S	2014-07-09 12:00:15.000000000 +0200
@@ -87,6 +87,7 @@
 #include <asm/page.h>
 #include <asm/segment.h>
 #include <asm/lguest.h>
+#include <asm/processor-flags.h>
 
 // We mark the start of the code to copy
 // It's placed in .text tho it's never run here
@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
 	// Changes type when we load it: damn Intel!
 	// For after we switch over our page tables
 	// That entry will be read-only: we'd crash.
+
+#ifdef CONFIG_PAX_KERNEXEC
+	mov	%cr0, %edx
+	xor	$X86_CR0_WP, %edx
+	mov	%edx, %cr0
+#endif
+
 	movl	$(GDT_ENTRY_TSS*8), %edx
 	ltr	%dx
 
@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
 	// Let's clear it again for our return.
 	// The GDT descriptor of the Host
 	// Points to the table after two "size" bytes
-	movl	(LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+	movl	(LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
 	// Clear "used" from type field (byte 5, bit 2)
-	andb	$0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
+	andb	$0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
+
+#ifdef CONFIG_PAX_KERNEXEC
+	mov	%cr0, %eax
+	xor	$X86_CR0_WP, %eax
+	mov	%eax, %cr0
+#endif
 
 	// Once our page table's switched, the Guest is live!
 	// The Host fades as we run this final step.
@@ -295,13 +309,12 @@ deliver_to_host:
 	// I consulted gcc, and it gave
 	// These instructions, which I gladly credit:
 	leal	(%edx,%ebx,8), %eax
-	movzwl	(%eax),%edx
-	movl	4(%eax), %eax
-	xorw	%ax, %ax
-	orl	%eax, %edx
+	movl	4(%eax), %edx
+	movw	(%eax), %dx
 	// Now the address of the handler's in %edx
 	// We call it now: its "iret" drops us home.
-	jmp	*%edx
+	ljmp	$__KERNEL_CS, $1f
+1:	jmp	*%edx
 
 // Every interrupt can come to us here
 // But we must truly tell each apart.
diff -ruNp linux-3.13.11/drivers/md/bcache/closure.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/bcache/closure.h
--- linux-3.13.11/drivers/md/bcache/closure.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/bcache/closure.h	2014-07-09 12:00:15.000000000 +0200
@@ -483,7 +483,7 @@ static inline void closure_queue(struct
 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 				  struct workqueue_struct *wq)
 {
-	BUG_ON(object_is_on_stack(cl));
+	BUG_ON(object_starts_on_stack(cl));
 	closure_set_ip(cl);
 	cl->fn = fn;
 	cl->wq = wq;
diff -ruNp linux-3.13.11/drivers/md/bitmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/bitmap.c
--- linux-3.13.11/drivers/md/bitmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/bitmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq,
 		   chunk_kb ? "KB" : "B");
 	if (bitmap->storage.file) {
 		seq_printf(seq, ", file: ");
-		seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
+		seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
 	}
 
 	seq_printf(seq, "\n");
diff -ruNp linux-3.13.11/drivers/md/dm-ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-ioctl.c
--- linux-3.13.11/drivers/md/dm-ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
 #include <linux/compat.h>
+#include <linux/vs_context.h>
 
 #include <asm/uaccess.h>
 
@@ -114,7 +115,8 @@ static struct hash_cell *__get_name_cell
 	unsigned int h = hash_str(str);
 
 	list_for_each_entry (hc, _name_buckets + h, name_list)
-		if (!strcmp(hc->name, str)) {
+		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+			!strcmp(hc->name, str)) {
 			dm_get(hc->md);
 			return hc;
 		}
@@ -128,7 +130,8 @@ static struct hash_cell *__get_uuid_cell
 	unsigned int h = hash_str(str);
 
 	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
-		if (!strcmp(hc->uuid, str)) {
+		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+			!strcmp(hc->uuid, str)) {
 			dm_get(hc->md);
 			return hc;
 		}
@@ -139,13 +142,15 @@ static struct hash_cell *__get_uuid_cell
 static struct hash_cell *__get_dev_cell(uint64_t dev)
 {
 	struct mapped_device *md;
-	struct hash_cell *hc;
+	struct hash_cell *hc = NULL;
 
 	md = dm_get_md(huge_decode_dev(dev));
 	if (!md)
 		return NULL;
 
-	hc = dm_get_mdptr(md);
+	if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT))
+		hc = dm_get_mdptr(md);
+
 	if (!hc) {
 		dm_put(md);
 		return NULL;
@@ -467,6 +472,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl
 
 static int remove_all(struct dm_ioctl *param, size_t param_size)
 {
+	if (!vx_check(0, VS_ADMIN))
+		return -EPERM;
+
 	dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
 	param->data_size = 0;
 	return 0;
@@ -514,6 +522,8 @@ static int list_devices(struct dm_ioctl
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+				continue;
 			needed += sizeof(struct dm_name_list);
 			needed += strlen(hc->name) + 1;
 			needed += ALIGN_MASK;
@@ -537,6 +547,8 @@ static int list_devices(struct dm_ioctl
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
+			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+				continue;
 			if (old_nl)
 				old_nl->next = (uint32_t) ((void *) nl -
 							   (void *) old_nl);
@@ -1769,7 +1781,7 @@ static int validate_params(uint cmd, str
 	    cmd == DM_LIST_VERSIONS_CMD)
 		return 0;
 
-	if ((cmd == DM_DEV_CREATE_CMD)) {
+	if (cmd == DM_DEV_CREATE_CMD) {
 		if (!*param->name) {
 			DMWARN("name not supplied when creating device");
 			return -EINVAL;
@@ -1797,8 +1809,8 @@ static int ctl_ioctl(uint command, struc
 	size_t input_param_size;
 	struct dm_ioctl param_kernel;
 
-	/* only root can play with this */
-	if (!capable(CAP_SYS_ADMIN))
+	/* only root and certain contexts can play with this */
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER))
 		return -EACCES;
 
 	if (_IOC_TYPE(command) != DM_IOCTL)
diff -ruNp linux-3.13.11/drivers/md/dm-raid1.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-raid1.c
--- linux-3.13.11/drivers/md/dm-raid1.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-raid1.c	2014-07-09 12:00:15.000000000 +0200
@@ -40,7 +40,7 @@ enum dm_raid1_error {
 
 struct mirror {
 	struct mirror_set *ms;
-	atomic_t error_count;
+	atomic_unchecked_t error_count;
 	unsigned long error_type;
 	struct dm_dev *dev;
 	sector_t offset;
@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(s
 	struct mirror *m;
 
 	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
-		if (!atomic_read(&m->error_count))
+		if (!atomic_read_unchecked(&m->error_count))
 			return m;
 
 	return NULL;
@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m
 	 * simple way to tell if a device has encountered
 	 * errors.
 	 */
-	atomic_inc(&m->error_count);
+	atomic_inc_unchecked(&m->error_count);
 
 	if (test_and_set_bit(error_type, &m->error_type))
 		return;
@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(stru
 	struct mirror *m = get_default_mirror(ms);
 
 	do {
-		if (likely(!atomic_read(&m->error_count)))
+		if (likely(!atomic_read_unchecked(&m->error_count)))
 			return m;
 
 		if (m-- == ms->mirror)
@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
 {
 	struct mirror *default_mirror = get_default_mirror(m->ms);
 
-	return !atomic_read(&default_mirror->error_count);
+	return !atomic_read_unchecked(&default_mirror->error_count);
 }
 
 static int mirror_available(struct mirror_set *ms, struct bio *bio)
@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *
 		 */
 		if (likely(region_in_sync(ms, region, 1)))
 			m = choose_mirror(ms, bio->bi_sector);
-		else if (m && atomic_read(&m->error_count))
+		else if (m && atomic_read_unchecked(&m->error_count))
 			m = NULL;
 
 		if (likely(m))
@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set
 	}
 
 	ms->mirror[mirror].ms = ms;
-	atomic_set(&(ms->mirror[mirror].error_count), 0);
+	atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
 	ms->mirror[mirror].error_type = 0;
 	ms->mirror[mirror].offset = offset;
 
@@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_targ
  */
 static char device_status_char(struct mirror *m)
 {
-	if (!atomic_read(&(m->error_count)))
+	if (!atomic_read_unchecked(&(m->error_count)))
 		return 'A';
 
 	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
diff -ruNp linux-3.13.11/drivers/md/dm-stats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-stats.c
--- linux-3.13.11/drivers/md/dm-stats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-stats.c	2014-07-09 12:00:15.000000000 +0200
@@ -382,7 +382,7 @@ do_sync_free:
 		synchronize_rcu_expedited();
 		dm_stat_free(&s->rcu_head);
 	} else {
-		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+		ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
 		call_rcu(&s->rcu_head, dm_stat_free);
 	}
 	return 0;
@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats
 				       ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
 					(ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
 				       ));
-		ACCESS_ONCE(last->last_sector) = end_sector;
-		ACCESS_ONCE(last->last_rw) = bi_rw;
+		ACCESS_ONCE_RW(last->last_sector) = end_sector;
+		ACCESS_ONCE_RW(last->last_rw) = bi_rw;
 	}
 
 	rcu_read_lock();
diff -ruNp linux-3.13.11/drivers/md/dm-stripe.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-stripe.c
--- linux-3.13.11/drivers/md/dm-stripe.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-stripe.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,7 +21,7 @@ struct stripe {
 	struct dm_dev *dev;
 	sector_t physical_start;
 
-	atomic_t error_count;
+	atomic_unchecked_t error_count;
 };
 
 struct stripe_c {
@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *
 			kfree(sc);
 			return r;
 		}
-		atomic_set(&(sc->stripe[i].error_count), 0);
+		atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
 	}
 
 	ti->private = sc;
@@ -327,7 +327,7 @@ static void stripe_status(struct dm_targ
 		DMEMIT("%d ", sc->stripes);
 		for (i = 0; i < sc->stripes; i++)  {
 			DMEMIT("%s ", sc->stripe[i].dev->name);
-			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
+			buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
 				'D' : 'A';
 		}
 		buffer[i] = '\0';
@@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_targe
 	 */
 	for (i = 0; i < sc->stripes; i++)
 		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
-			atomic_inc(&(sc->stripe[i].error_count));
-			if (atomic_read(&(sc->stripe[i].error_count)) <
+			atomic_inc_unchecked(&(sc->stripe[i].error_count));
+			if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
 			    DM_IO_ERROR_THRESHOLD)
 				schedule_work(&sc->trigger_event);
 		}
diff -ruNp linux-3.13.11/drivers/md/dm-table.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-table.c
--- linux-3.13.11/drivers/md/dm-table.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-table.c	2014-07-09 12:00:15.000000000 +0200
@@ -291,7 +291,7 @@ static struct dm_dev_internal *find_devi
 static int open_dev(struct dm_dev_internal *d, dev_t dev,
 		    struct mapped_device *md)
 {
-	static char *_claim_ptr = "I belong to device-mapper";
+	static char _claim_ptr[] = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
 	if (!dev_size)
 		return 0;
 
-	if ((start >= dev_size) || (start + len > dev_size)) {
+	if ((start >= dev_size) || (len > dev_size - start)) {
 		DMWARN("%s: %s too small for target: "
 		       "start=%llu, len=%llu, dev_size=%llu",
 		       dm_device_name(ti->table->md), bdevname(bdev, b),
diff -ruNp linux-3.13.11/drivers/md/dm-thin-metadata.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-thin-metadata.c
--- linux-3.13.11/drivers/md/dm-thin-metadata.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm-thin-metadata.c	2014-07-09 12:00:15.000000000 +0200
@@ -397,7 +397,7 @@ static void __setup_btree_details(struct
 {
 	pmd->info.tm = pmd->tm;
 	pmd->info.levels = 2;
-	pmd->info.value_type.context = pmd->data_sm;
+	pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
 	pmd->info.value_type.size = sizeof(__le64);
 	pmd->info.value_type.inc = data_block_inc;
 	pmd->info.value_type.dec = data_block_dec;
@@ -416,7 +416,7 @@ static void __setup_btree_details(struct
 
 	pmd->bl_info.tm = pmd->tm;
 	pmd->bl_info.levels = 1;
-	pmd->bl_info.value_type.context = pmd->data_sm;
+	pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
 	pmd->bl_info.value_type.size = sizeof(__le64);
 	pmd->bl_info.value_type.inc = data_block_inc;
 	pmd->bl_info.value_type.dec = data_block_dec;
diff -ruNp linux-3.13.11/drivers/md/dm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm.c
--- linux-3.13.11/drivers/md/dm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,6 +19,7 @@
 #include <linux/idr.h>
 #include <linux/hdreg.h>
 #include <linux/delay.h>
+#include <linux/vs_base.h>
 
 #include <trace/events/block.h>
 
@@ -139,6 +140,7 @@ struct mapped_device {
 	struct mutex suspend_lock;
 	atomic_t holders;
 	atomic_t open_count;
+	vxid_t xid;
 
 	/*
 	 * The current mapping.
@@ -185,9 +187,9 @@ struct mapped_device {
 	/*
 	 * Event handling.
 	 */
-	atomic_t event_nr;
+	atomic_unchecked_t event_nr;
 	wait_queue_head_t eventq;
-	atomic_t uevent_seq;
+	atomic_unchecked_t uevent_seq;
 	struct list_head uevent_list;
 	spinlock_t uevent_lock; /* Protect access to uevent_list */
 
@@ -384,6 +386,7 @@ int dm_deleting_md(struct mapped_device
 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mapped_device *md;
+	int ret = -ENXIO;
 
 	spin_lock(&_minor_lock);
 
@@ -392,18 +395,19 @@ static int dm_blk_open(struct block_devi
 		goto out;
 
 	if (test_bit(DMF_FREEING, &md->flags) ||
-	    dm_deleting_md(md)) {
-		md = NULL;
+	    dm_deleting_md(md))
+		goto out;
+
+	ret = -EACCES;
+	if (!vx_check(md->xid, VS_IDENT|VS_HOSTID))
 		goto out;
-	}
 
 	dm_get(md);
 	atomic_inc(&md->open_count);
-
+	ret = 0;
 out:
 	spin_unlock(&_minor_lock);
-
-	return md ? 0 : -ENXIO;
+	return ret;
 }
 
 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
@@ -687,6 +691,14 @@ int dm_set_geometry(struct mapped_device
 	return 0;
 }
 
+/*
+ * Get the xid associated with a dm device
+ */
+vxid_t dm_get_xid(struct mapped_device *md)
+{
+	return md->xid;
+}
+
 /*-----------------------------------------------------------------
  * CRUD START:
  *   A more elegant soln is in the works that uses the queue
@@ -2021,11 +2033,12 @@ static struct mapped_device *alloc_dev(i
 	spin_lock_init(&md->deferred_lock);
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
-	atomic_set(&md->event_nr, 0);
-	atomic_set(&md->uevent_seq, 0);
+	atomic_set_unchecked(&md->event_nr, 0);
+	atomic_set_unchecked(&md->uevent_seq, 0);
 	INIT_LIST_HEAD(&md->uevent_list);
 	spin_lock_init(&md->uevent_lock);
 
+	md->xid = vx_current_xid();
 	md->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!md->queue)
 		goto bad_queue;
@@ -2176,7 +2189,7 @@ static void event_callback(void *context
 
 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
-	atomic_inc(&md->event_nr);
+	atomic_inc_unchecked(&md->event_nr);
 	wake_up(&md->eventq);
 }
 
@@ -2869,18 +2882,18 @@ int dm_kobject_uevent(struct mapped_devi
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
-	return atomic_add_return(1, &md->uevent_seq);
+	return atomic_add_return_unchecked(1, &md->uevent_seq);
 }
 
 uint32_t dm_get_event_nr(struct mapped_device *md)
 {
-	return atomic_read(&md->event_nr);
+	return atomic_read_unchecked(&md->event_nr);
 }
 
 int dm_wait_event(struct mapped_device *md, int event_nr)
 {
 	return wait_event_interruptible(md->eventq,
-			(event_nr != atomic_read(&md->event_nr)));
+			(event_nr != atomic_read_unchecked(&md->event_nr)));
 }
 
 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff -ruNp linux-3.13.11/drivers/md/dm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm.h
--- linux-3.13.11/drivers/md/dm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/dm.h	2014-07-09 12:00:15.000000000 +0200
@@ -50,6 +50,8 @@ struct dm_dev_internal {
 struct dm_table;
 struct dm_md_mempools;
 
+vxid_t dm_get_xid(struct mapped_device *md);
+
 /*-----------------------------------------------------------------
  * Internal table functions.
  *---------------------------------------------------------------*/
diff -ruNp linux-3.13.11/drivers/md/md.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/md.c
--- linux-3.13.11/drivers/md/md.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/md.c	2014-07-09 12:00:15.000000000 +0200
@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
  *  start build, activate spare
  */
 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
-static atomic_t md_event_count;
+static atomic_unchecked_t md_event_count;
 void md_new_event(struct mddev *mddev)
 {
-	atomic_inc(&md_event_count);
+	atomic_inc_unchecked(&md_event_count);
 	wake_up(&md_event_waiters);
 }
 EXPORT_SYMBOL_GPL(md_new_event);
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
  */
 static void md_new_event_inintr(struct mddev *mddev)
 {
-	atomic_inc(&md_event_count);
+	atomic_inc_unchecked(&md_event_count);
 	wake_up(&md_event_waiters);
 }
 
@@ -1463,7 +1463,7 @@ static int super_1_load(struct md_rdev *
 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
 	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
 		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
-	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
+	atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
@@ -1710,7 +1710,7 @@ static void super_1_sync(struct mddev *m
 	else
 		sb->resync_offset = cpu_to_le64(0);
 
-	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
+	sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
 
 	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
 	sb->size = cpu_to_le64(mddev->dev_sectors);
@@ -2715,7 +2715,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
 static ssize_t
 errors_show(struct md_rdev *rdev, char *page)
 {
-	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
+	return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
 }
 
 static ssize_t
@@ -2724,7 +2724,7 @@ errors_store(struct md_rdev *rdev, const
 	char *e;
 	unsigned long n = simple_strtoul(buf, &e, 10);
 	if (*buf && (*e == 0 || *e == '\n')) {
-		atomic_set(&rdev->corrected_errors, n);
+		atomic_set_unchecked(&rdev->corrected_errors, n);
 		return len;
 	}
 	return -EINVAL;
@@ -3173,8 +3173,8 @@ int md_rdev_init(struct md_rdev *rdev)
 	rdev->sb_loaded = 0;
 	rdev->bb_page = NULL;
 	atomic_set(&rdev->nr_pending, 0);
-	atomic_set(&rdev->read_errors, 0);
-	atomic_set(&rdev->corrected_errors, 0);
+	atomic_set_unchecked(&rdev->read_errors, 0);
+	atomic_set_unchecked(&rdev->corrected_errors, 0);
 
 	INIT_LIST_HEAD(&rdev->same_set);
 	init_waitqueue_head(&rdev->blocked_wait);
@@ -7038,7 +7038,7 @@ static int md_seq_show(struct seq_file *
 
 		spin_unlock(&pers_lock);
 		seq_printf(seq, "\n");
-		seq->poll_event = atomic_read(&md_event_count);
+		seq->poll_event = atomic_read_unchecked(&md_event_count);
 		return 0;
 	}
 	if (v == (void*)2) {
@@ -7141,7 +7141,7 @@ static int md_seq_open(struct inode *ino
 		return error;
 
 	seq = file->private_data;
-	seq->poll_event = atomic_read(&md_event_count);
+	seq->poll_event = atomic_read_unchecked(&md_event_count);
 	return error;
 }
 
@@ -7155,7 +7155,7 @@ static unsigned int mdstat_poll(struct f
 	/* always allow read */
 	mask = POLLIN | POLLRDNORM;
 
-	if (seq->poll_event != atomic_read(&md_event_count))
+	if (seq->poll_event != atomic_read_unchecked(&md_event_count))
 		mask |= POLLERR | POLLPRI;
 	return mask;
 }
@@ -7199,7 +7199,7 @@ static int is_mddev_idle(struct mddev *m
 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
 		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
 			      (int)part_stat_read(&disk->part0, sectors[1]) -
-			      atomic_read(&disk->sync_io);
+			      atomic_read_unchecked(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
 		 * as sync_io is counted when a request starts, and
 		 * disk_stats is counted when it completes.
diff -ruNp linux-3.13.11/drivers/md/md.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/md.h
--- linux-3.13.11/drivers/md/md.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/md.h	2014-07-09 12:00:15.000000000 +0200
@@ -94,13 +94,13 @@ struct md_rdev {
 					 * only maintained for arrays that
 					 * support hot removal
 					 */
-	atomic_t	read_errors;	/* number of consecutive read errors that
+	atomic_unchecked_t	read_errors;	/* number of consecutive read errors that
 					 * we have tried to ignore.
 					 */
 	struct timespec last_read_error;	/* monotonic time since our
 						 * last read error
 						 */
-	atomic_t	corrected_errors; /* number of corrected read errors,
+	atomic_unchecked_t	corrected_errors; /* number of corrected read errors,
 					   * for reporting to userspace and storing
 					   * in superblock.
 					   */
@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(stru
 
 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
 {
-        atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+	atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
 struct md_personality
diff -ruNp linux-3.13.11/drivers/md/persistent-data/dm-space-map-metadata.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/persistent-data/dm-space-map-metadata.c
--- linux-3.13.11/drivers/md/persistent-data/dm-space-map-metadata.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/persistent-data/dm-space-map-metadata.c	2014-07-09 12:00:15.000000000 +0200
@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_
 	 * Flick into a mode where all blocks get allocated in the new area.
 	 */
 	smm->begin = old_len;
-	memcpy(sm, &bootstrap_ops, sizeof(*sm));
+	memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
 
 	/*
 	 * Extend.
@@ -710,7 +710,7 @@ out:
 	/*
 	 * Switch back to normal behaviour.
 	 */
-	memcpy(sm, &ops, sizeof(*sm));
+	memcpy((void *)sm, &ops, sizeof(*sm));
 	return r;
 }
 
diff -ruNp linux-3.13.11/drivers/md/persistent-data/dm-space-map.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/persistent-data/dm-space-map.h
--- linux-3.13.11/drivers/md/persistent-data/dm-space-map.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/persistent-data/dm-space-map.h	2014-07-09 12:00:15.000000000 +0200
@@ -71,6 +71,7 @@ struct dm_space_map {
 					   dm_sm_threshold_fn fn,
 					   void *context);
 };
+typedef struct dm_space_map __no_const dm_space_map_no_const;
 
 /*----------------------------------------------------------------*/
 
diff -ruNp linux-3.13.11/drivers/md/raid1.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid1.c
--- linux-3.13.11/drivers/md/raid1.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid1.c	2014-07-09 12:00:15.000000000 +0200
@@ -1921,7 +1921,7 @@ static int fix_sync_read_error(struct r1
 			if (r1_sync_page_io(rdev, sect, s,
 					    bio->bi_io_vec[idx].bv_page,
 					    READ) != 0)
-				atomic_add(s, &rdev->corrected_errors);
+				atomic_add_unchecked(s, &rdev->corrected_errors);
 		}
 		sectors -= s;
 		sect += s;
@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf
 			    test_bit(In_sync, &rdev->flags)) {
 				if (r1_sync_page_io(rdev, sect, s,
 						    conf->tmppage, READ)) {
-					atomic_add(s, &rdev->corrected_errors);
+					atomic_add_unchecked(s, &rdev->corrected_errors);
 					printk(KERN_INFO
 					       "md/raid1:%s: read error corrected "
 					       "(%d sectors at %llu on %s)\n",
diff -ruNp linux-3.13.11/drivers/md/raid10.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid10.c
--- linux-3.13.11/drivers/md/raid10.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid10.c	2014-07-09 12:00:15.000000000 +0200
@@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bi
 		/* The write handler will notice the lack of
 		 * R10BIO_Uptodate and record any errors etc
 		 */
-		atomic_add(r10_bio->sectors,
+		atomic_add_unchecked(r10_bio->sectors,
 			   &conf->mirrors[d].rdev->corrected_errors);
 
 	/* for reconstruct, we always reschedule after a read.
@@ -2321,7 +2321,7 @@ static void check_decay_read_errors(stru
 {
 	struct timespec cur_time_mon;
 	unsigned long hours_since_last;
-	unsigned int read_errors = atomic_read(&rdev->read_errors);
+	unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
 
 	ktime_get_ts(&cur_time_mon);
 
@@ -2343,9 +2343,9 @@ static void check_decay_read_errors(stru
 	 * overflowing the shift of read_errors by hours_since_last.
 	 */
 	if (hours_since_last >= 8 * sizeof(read_errors))
-		atomic_set(&rdev->read_errors, 0);
+		atomic_set_unchecked(&rdev->read_errors, 0);
 	else
-		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+		atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
 }
 
 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
@@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10con
 		return;
 
 	check_decay_read_errors(mddev, rdev);
-	atomic_inc(&rdev->read_errors);
-	if (atomic_read(&rdev->read_errors) > max_read_errors) {
+	atomic_inc_unchecked(&rdev->read_errors);
+	if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
 		char b[BDEVNAME_SIZE];
 		bdevname(rdev->bdev, b);
 
@@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10con
 		       "md/raid10:%s: %s: Raid device exceeded "
 		       "read_error threshold [cur %d:max %d]\n",
 		       mdname(mddev), b,
-		       atomic_read(&rdev->read_errors), max_read_errors);
+		       atomic_read_unchecked(&rdev->read_errors), max_read_errors);
 		printk(KERN_NOTICE
 		       "md/raid10:%s: %s: Failing raid device\n",
 		       mdname(mddev), b);
@@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10con
 					       sect +
 					       choose_data_offset(r10_bio, rdev)),
 				       bdevname(rdev->bdev, b));
-				atomic_add(s, &rdev->corrected_errors);
+				atomic_add_unchecked(s, &rdev->corrected_errors);
 			}
 
 			rdev_dec_pending(rdev, mddev);
diff -ruNp linux-3.13.11/drivers/md/raid5.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid5.c
--- linux-3.13.11/drivers/md/raid5.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/md/raid5.c	2014-07-09 12:00:15.000000000 +0200
@@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struc
 				mdname(conf->mddev), STRIPE_SECTORS,
 				(unsigned long long)s,
 				bdevname(rdev->bdev, b));
-			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
+			atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
 			clear_bit(R5_ReadError, &sh->dev[i].flags);
 			clear_bit(R5_ReWrite, &sh->dev[i].flags);
 		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 
-		if (atomic_read(&rdev->read_errors))
-			atomic_set(&rdev->read_errors, 0);
+		if (atomic_read_unchecked(&rdev->read_errors))
+			atomic_set_unchecked(&rdev->read_errors, 0);
 	} else {
 		const char *bdn = bdevname(rdev->bdev, b);
 		int retry = 0;
 		int set_bad = 0;
 
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
-		atomic_inc(&rdev->read_errors);
+		atomic_inc_unchecked(&rdev->read_errors);
 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
 			printk_ratelimited(
 				KERN_WARNING
@@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struc
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
-		} else if (atomic_read(&rdev->read_errors)
+		} else if (atomic_read_unchecked(&rdev->read_errors)
 			 > conf->max_nr_stripes)
 			printk(KERN_WARNING
 			       "md/raid:%s: Too many read errors, failing device %s.\n",
diff -ruNp linux-3.13.11/drivers/media/dvb-core/dvbdev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/dvb-core/dvbdev.c
--- linux-3.13.11/drivers/media/dvb-core/dvbdev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/dvb-core/dvbdev.c	2014-07-09 12:00:15.000000000 +0200
@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapt
 			const struct dvb_device *template, void *priv, int type)
 {
 	struct dvb_device *dvbdev;
-	struct file_operations *dvbdevfops;
+	file_operations_no_const *dvbdevfops;
 	struct device *clsdev;
 	int minor;
 	int id;
diff -ruNp linux-3.13.11/drivers/media/dvb-frontends/dib3000.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/dvb-frontends/dib3000.h
--- linux-3.13.11/drivers/media/dvb-frontends/dib3000.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/dvb-frontends/dib3000.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
 	int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
 	int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
 	int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
-};
+} __no_const;
 
 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
diff -ruNp linux-3.13.11/drivers/media/pci/cx88/cx88-video.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/pci/cx88/cx88-video.c
--- linux-3.13.11/drivers/media/pci/cx88/cx88-video.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/pci/cx88/cx88-video.c	2014-07-09 12:00:15.000000000 +0200
@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
 
 /* ------------------------------------------------------------------ */
 
-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int vbi_nr[]   = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
 
 module_param_array(video_nr, int, NULL, 0444);
 module_param_array(vbi_nr,   int, NULL, 0444);
diff -ruNp linux-3.13.11/drivers/media/pci/ivtv/ivtv-driver.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/pci/ivtv/ivtv-driver.c
--- linux-3.13.11/drivers/media/pci/ivtv/ivtv-driver.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/pci/ivtv/ivtv-driver.c	2014-07-09 12:00:15.000000000 +0200
@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl
 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
 
 /* ivtv instance counter */
-static atomic_t ivtv_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
 
 /* Parameter declarations */
 static int cardtype[IVTV_MAX_CARDS];
diff -ruNp linux-3.13.11/drivers/media/platform/omap/omap_vout.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/omap/omap_vout.c
--- linux-3.13.11/drivers/media/platform/omap/omap_vout.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/omap/omap_vout.c	2014-07-09 12:00:15.000000000 +0200
@@ -63,7 +63,6 @@ enum omap_vout_channels {
 	OMAP_VIDEO2,
 };
 
-static struct videobuf_queue_ops video_vbq_ops;
 /* Variables configurable through module params*/
 static u32 video1_numbuffers = 3;
 static u32 video2_numbuffers = 3;
@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *f
 {
 	struct videobuf_queue *q;
 	struct omap_vout_device *vout = NULL;
+	static struct videobuf_queue_ops video_vbq_ops = {
+		.buf_setup = omap_vout_buffer_setup,
+		.buf_prepare = omap_vout_buffer_prepare,
+		.buf_release = omap_vout_buffer_release,
+		.buf_queue = omap_vout_buffer_queue,
+	};
 
 	vout = video_drvdata(file);
 	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *f
 	vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 
 	q = &vout->vbq;
-	video_vbq_ops.buf_setup = omap_vout_buffer_setup;
-	video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
-	video_vbq_ops.buf_release = omap_vout_buffer_release;
-	video_vbq_ops.buf_queue = omap_vout_buffer_queue;
 	spin_lock_init(&vout->vbq_lock);
 
 	videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
diff -ruNp linux-3.13.11/drivers/media/platform/s5p-tv/mixer.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer.h
--- linux-3.13.11/drivers/media/platform/s5p-tv/mixer.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer.h	2014-07-09 12:00:15.000000000 +0200
@@ -156,7 +156,7 @@ struct mxr_layer {
 	/** layer index (unique identifier) */
 	int idx;
 	/** callbacks for layer methods */
-	struct mxr_layer_ops ops;
+	struct mxr_layer_ops *ops;
 	/** format array */
 	const struct mxr_format **fmt_array;
 	/** size of format array */
diff -ruNp linux-3.13.11/drivers/media/platform/s5p-tv/mixer_grp_layer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_grp_layer.c
--- linux-3.13.11/drivers/media/platform/s5p-tv/mixer_grp_layer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_grp_layer.c	2014-07-09 12:00:15.000000000 +0200
@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create
 {
 	struct mxr_layer *layer;
 	int ret;
-	struct mxr_layer_ops ops = {
+	static struct mxr_layer_ops ops = {
 		.release = mxr_graph_layer_release,
 		.buffer_set = mxr_graph_buffer_set,
 		.stream_set = mxr_graph_stream_set,
diff -ruNp linux-3.13.11/drivers/media/platform/s5p-tv/mixer_reg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_reg.c
--- linux-3.13.11/drivers/media/platform/s5p-tv/mixer_reg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_reg.c	2014-07-09 12:00:15.000000000 +0200
@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct
 		layer->update_buf = next;
 	}
 
-	layer->ops.buffer_set(layer, layer->update_buf);
+	layer->ops->buffer_set(layer, layer->update_buf);
 
 	if (done && done != layer->shadow_buf)
 		vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
diff -ruNp linux-3.13.11/drivers/media/platform/s5p-tv/mixer_video.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_video.c
--- linux-3.13.11/drivers/media/platform/s5p-tv/mixer_video.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_video.c	2014-07-09 12:00:15.000000000 +0200
@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct
 	layer->geo.src.height = layer->geo.src.full_height;
 
 	mxr_geometry_dump(mdev, &layer->geo);
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+	layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
 	mxr_geometry_dump(mdev, &layer->geo);
 }
 
@@ -228,7 +228,7 @@ static void mxr_layer_update_output(stru
 	layer->geo.dst.full_width = mbus_fmt.width;
 	layer->geo.dst.full_height = mbus_fmt.height;
 	layer->geo.dst.field = mbus_fmt.field;
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
+	layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
 
 	mxr_geometry_dump(mdev, &layer->geo);
 }
@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file,
 	/* set source size to highest accepted value */
 	geo->src.full_width = max(geo->dst.full_width, pix->width);
 	geo->src.full_height = max(geo->dst.full_height, pix->height);
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+	layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
 	mxr_geometry_dump(mdev, &layer->geo);
 	/* set cropping to total visible screen */
 	geo->src.width = pix->width;
@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file,
 	geo->src.x_offset = 0;
 	geo->src.y_offset = 0;
 	/* assure consistency of geometry */
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
+	layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
 	mxr_geometry_dump(mdev, &layer->geo);
 	/* set full size to lowest possible value */
 	geo->src.full_width = 0;
 	geo->src.full_height = 0;
-	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
+	layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
 	mxr_geometry_dump(mdev, &layer->geo);
 
 	/* returning results */
@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *
 		target->width = s->r.width;
 		target->height = s->r.height;
 
-		layer->ops.fix_geometry(layer, stage, s->flags);
+		layer->ops->fix_geometry(layer, stage, s->flags);
 
 		/* retrieve update selection rectangle */
 		res.left = target->x_offset;
@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_qu
 	mxr_output_get(mdev);
 
 	mxr_layer_update_output(layer);
-	layer->ops.format_set(layer);
+	layer->ops->format_set(layer);
 	/* enabling layer in hardware */
 	spin_lock_irqsave(&layer->enq_slock, flags);
 	layer->state = MXR_LAYER_STREAMING;
 	spin_unlock_irqrestore(&layer->enq_slock, flags);
 
-	layer->ops.stream_set(layer, MXR_ENABLE);
+	layer->ops->stream_set(layer, MXR_ENABLE);
 	mxr_streamer_get(mdev);
 
 	return 0;
@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_que
 	spin_unlock_irqrestore(&layer->enq_slock, flags);
 
 	/* disabling layer in hardware */
-	layer->ops.stream_set(layer, MXR_DISABLE);
+	layer->ops->stream_set(layer, MXR_DISABLE);
 	/* remove one streamer */
 	mxr_streamer_put(mdev);
 	/* allow changes in output configuration */
@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mx
 
 void mxr_layer_release(struct mxr_layer *layer)
 {
-	if (layer->ops.release)
-		layer->ops.release(layer);
+	if (layer->ops->release)
+		layer->ops->release(layer);
 }
 
 void mxr_base_layer_release(struct mxr_layer *layer)
@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(
 
 	layer->mdev = mdev;
 	layer->idx = idx;
-	layer->ops = *ops;
+	layer->ops = ops;
 
 	spin_lock_init(&layer->enq_slock);
 	INIT_LIST_HEAD(&layer->enq_list);
diff -ruNp linux-3.13.11/drivers/media/platform/s5p-tv/mixer_vp_layer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_vp_layer.c
--- linux-3.13.11/drivers/media/platform/s5p-tv/mixer_vp_layer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/s5p-tv/mixer_vp_layer.c	2014-07-09 12:00:15.000000000 +0200
@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(st
 {
 	struct mxr_layer *layer;
 	int ret;
-	struct mxr_layer_ops ops = {
+	static struct mxr_layer_ops ops = {
 		.release = mxr_vp_layer_release,
 		.buffer_set = mxr_vp_buffer_set,
 		.stream_set = mxr_vp_stream_set,
diff -ruNp linux-3.13.11/drivers/media/platform/vivi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/vivi.c
--- linux-3.13.11/drivers/media/platform/vivi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/platform/vivi.c	2014-07-09 12:00:15.000000000 +0200
@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Te
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(VIVI_VERSION);
 
-static unsigned video_nr = -1;
-module_param(video_nr, uint, 0644);
+static int video_nr = -1;
+module_param(video_nr, int, 0644);
 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
 
 static unsigned n_devs = 1;
diff -ruNp linux-3.13.11/drivers/media/radio/radio-cadet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-cadet.c
--- linux-3.13.11/drivers/media/radio/radio-cadet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-cadet.c	2014-07-09 12:00:15.000000000 +0200
@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *f
 	unsigned char readbuf[RDS_BUFFER];
 	int i = 0;
 
+	if (count > RDS_BUFFER)
+		return -EFAULT;
 	mutex_lock(&dev->lock);
 	if (dev->rdsstat == 0)
 		cadet_start_rds(dev);
@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *f
 	while (i < count && dev->rdsin != dev->rdsout)
 		readbuf[i++] = dev->rdsbuf[dev->rdsout++];
 
-	if (i && copy_to_user(data, readbuf, i))
+	if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
 		i = -EFAULT;
 unlock:
 	mutex_unlock(&dev->lock);
diff -ruNp linux-3.13.11/drivers/media/radio/radio-maxiradio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-maxiradio.c
--- linux-3.13.11/drivers/media/radio/radio-maxiradio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-maxiradio.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device
 /* TEA5757 pin mappings */
 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
 
-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
 
 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
diff -ruNp linux-3.13.11/drivers/media/radio/radio-shark.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-shark.c
--- linux-3.13.11/drivers/media/radio/radio-shark.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-shark.c	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ struct shark_device {
 	u32 last_val;
 };
 
-static atomic_t shark_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
 
 static void shark_write_val(struct snd_tea575x *tea, u32 val)
 {
diff -ruNp linux-3.13.11/drivers/media/radio/radio-shark2.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-shark2.c
--- linux-3.13.11/drivers/media/radio/radio-shark2.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-shark2.c	2014-07-09 12:00:15.000000000 +0200
@@ -74,7 +74,7 @@ struct shark_device {
 	u8 *transfer_buffer;
 };
 
-static atomic_t shark_instance = ATOMIC_INIT(0);
+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
 
 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
 {
diff -ruNp linux-3.13.11/drivers/media/radio/radio-si476x.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-si476x.c
--- linux-3.13.11/drivers/media/radio/radio-si476x.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/radio/radio-si476x.c	2014-07-09 12:00:15.000000000 +0200
@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct pla
 	struct si476x_radio *radio;
 	struct v4l2_ctrl *ctrl;
 
-	static atomic_t instance = ATOMIC_INIT(0);
+	static atomic_unchecked_t instance = ATOMIC_INIT(0);
 
 	radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
 	if (!radio)
diff -ruNp linux-3.13.11/drivers/media/rc/rc-main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/rc/rc-main.c
--- linux-3.13.11/drivers/media/rc/rc-main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/rc/rc-main.c	2014-07-09 12:00:15.000000000 +0200
@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
 int rc_register_device(struct rc_dev *dev)
 {
 	static bool raw_init = false; /* raw decoders loaded? */
-	static atomic_t devno = ATOMIC_INIT(0);
+	static atomic_unchecked_t devno = ATOMIC_INIT(0);
 	struct rc_map *rc_map;
 	const char *path;
 	int rc;
@@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *de
 	 */
 	mutex_lock(&dev->lock);
 
-	dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
+	dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
 	dev_set_name(&dev->dev, "rc%ld", dev->devno);
 	dev_set_drvdata(&dev->dev, dev);
 	rc = device_add(&dev->dev);
diff -ruNp linux-3.13.11/drivers/media/usb/dvb-usb/cxusb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/usb/dvb-usb/cxusb.c
--- linux-3.13.11/drivers/media/usb/dvb-usb/cxusb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/usb/dvb-usb/cxusb.c	2014-07-09 12:00:15.000000000 +0200
@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_di
 
 struct dib0700_adapter_state {
 	int (*set_param_save) (struct dvb_frontend *);
-};
+} __no_const;
 
 static int dib7070_set_param_override(struct dvb_frontend *fe)
 {
diff -ruNp linux-3.13.11/drivers/media/usb/dvb-usb/dw2102.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/usb/dvb-usb/dw2102.c
--- linux-3.13.11/drivers/media/usb/dvb-usb/dw2102.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/usb/dvb-usb/dw2102.c	2014-07-09 12:00:15.000000000 +0200
@@ -121,7 +121,7 @@ struct su3000_state {
 
 struct s6x0_state {
 	int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
-};
+} __no_const;
 
 /* debug */
 static int dvb_usb_dw2102_debug;
diff -ruNp linux-3.13.11/drivers/media/v4l2-core/v4l2-compat-ioctl32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
--- linux-3.13.11/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-compat-ioctl32.c	2014-07-09 12:00:15.000000000 +0200
@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
 	__u32			reserved;
 };
 
-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
 				enum v4l2_memory memory)
 {
 	void __user *up_pln;
@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_
 	return 0;
 }
 
-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
 				enum v4l2_memory memory)
 {
 	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
@@ -425,7 +425,7 @@ static int get_v4l2_buffer32(struct v4l2
 		 * by passing a very big num_planes value */
 		uplane = compat_alloc_user_space(num_planes *
 						sizeof(struct v4l2_plane));
-		kp->m.planes = uplane;
+		kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
 
 		while (--num_planes >= 0) {
 			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
@@ -496,7 +496,7 @@ static int put_v4l2_buffer32(struct v4l2
 		if (num_planes == 0)
 			return 0;
 
-		uplane = kp->m.planes;
+		uplane = (struct v4l2_plane __force_user *)kp->m.planes;
 		if (get_user(p, &up->m.planes))
 			return -EFAULT;
 		uplane32 = compat_ptr(p);
@@ -550,7 +550,7 @@ static int get_v4l2_framebuffer32(struct
 		get_user(kp->capability, &up->capability) ||
 		get_user(kp->flags, &up->flags))
 			return -EFAULT;
-	kp->base = compat_ptr(tmp);
+	kp->base = (void __force_kernel *)compat_ptr(tmp);
 	get_v4l2_pix_format(&kp->fmt, &up->fmt);
 	return 0;
 }
@@ -656,7 +656,7 @@ static int get_v4l2_ext_controls32(struc
 			n * sizeof(struct v4l2_ext_control32)))
 		return -EFAULT;
 	kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
-	kp->controls = kcontrols;
+	kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
 	while (--n >= 0) {
 		if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
 			return -EFAULT;
@@ -678,7 +678,7 @@ static int get_v4l2_ext_controls32(struc
 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
 {
 	struct v4l2_ext_control32 __user *ucontrols;
-	struct v4l2_ext_control __user *kcontrols = kp->controls;
+	struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
 	int n = kp->count;
 	compat_caddr_t p;
 
@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct
 		put_user(kp->start_block, &up->start_block) ||
 		put_user(kp->blocks, &up->blocks) ||
 		put_user(tmp, &up->edid) ||
-		copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+		copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
 			return -EFAULT;
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/media/v4l2-core/v4l2-ctrls.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-ctrls.c
--- linux-3.13.11/drivers/media/v4l2-core/v4l2-ctrls.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-ctrls.c	2014-07-09 12:00:15.000000000 +0200
@@ -1396,8 +1396,8 @@ static int validate_new(const struct v4l
 		return 0;
 
 	case V4L2_CTRL_TYPE_STRING:
-		len = strlen(c->string);
-		if (len < ctrl->minimum)
+		len = strlen_user(c->string);
+		if (!len || len < ctrl->minimum)
 			return -ERANGE;
 		if ((len - ctrl->minimum) % ctrl->step)
 			return -ERANGE;
diff -ruNp linux-3.13.11/drivers/media/v4l2-core/v4l2-device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-device.c
--- linux-3.13.11/drivers/media/v4l2-core/v4l2-device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-device.c	2014-07-09 12:00:15.000000000 +0200
@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *
 EXPORT_SYMBOL_GPL(v4l2_device_put);
 
 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-						atomic_t *instance)
+						atomic_unchecked_t *instance)
 {
-	int num = atomic_inc_return(instance) - 1;
+	int num = atomic_inc_return_unchecked(instance) - 1;
 	int len = strlen(basename);
 
 	if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
diff -ruNp linux-3.13.11/drivers/media/v4l2-core/v4l2-ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-ioctl.c
--- linux-3.13.11/drivers/media/v4l2-core/v4l2-ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/media/v4l2-core/v4l2-ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
 				struct file *file, void *fh, void *p);
 	} u;
 	void (*debug)(const void *arg, bool write_only);
-};
+} __do_const;
+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
 
 /* This control needs a priority check */
 #define INFO_FL_PRIO	(1 << 0)
@@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file
 	struct video_device *vfd = video_devdata(file);
 	const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
 	bool write_only = false;
-	struct v4l2_ioctl_info default_info;
+	v4l2_ioctl_info_no_const default_info;
 	const struct v4l2_ioctl_info *info;
 	void *fh = file->private_data;
 	struct v4l2_fh *vfh = NULL;
@@ -2194,7 +2195,7 @@ done:
 }
 
 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
-			    void * __user *user_ptr, void ***kernel_ptr)
+			    void __user **user_ptr, void ***kernel_ptr)
 {
 	int ret = 0;
 
@@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int
 				ret = -EINVAL;
 				break;
 			}
-			*user_ptr = (void __user *)buf->m.planes;
+			*user_ptr = (void __force_user *)buf->m.planes;
 			*kernel_ptr = (void *)&buf->m.planes;
 			*array_size = sizeof(struct v4l2_plane) * buf->length;
 			ret = 1;
@@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int
 				ret = -EINVAL;
 				break;
 			}
-			*user_ptr = (void __user *)ctrls->controls;
+			*user_ptr = (void __force_user *)ctrls->controls;
 			*kernel_ptr = (void *)&ctrls->controls;
 			*array_size = sizeof(struct v4l2_ext_control)
 				    * ctrls->count;
@@ -2340,7 +2341,7 @@ video_usercopy(struct file *file, unsign
 		err = -ENOTTY;
 
 	if (has_array_args) {
-		*kernel_ptr = user_ptr;
+		*kernel_ptr = (void __force_kernel *)user_ptr;
 		if (copy_to_user(user_ptr, mbuf, array_size))
 			err = -EFAULT;
 		goto out_array_args;
diff -ruNp linux-3.13.11/drivers/message/fusion/mptbase.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptbase.c
--- linux-3.13.11/drivers/message/fusion/mptbase.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptbase.c	2014-07-09 12:00:15.000000000 +0200
@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct
 	seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
 	seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
+#else
 	seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
 					(void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
+#endif
+
 	/*
 	 *  Rounding UP to nearest 4-kB boundary here...
 	 */
@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct
 					ioc->facts.GlobalCredits);
 
 	seq_printf(m, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+					NULL, NULL);
+#else
 					(void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
+#endif
 	sz = (ioc->reply_sz * ioc->reply_depth) + 128;
 	seq_printf(m, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
 					ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
diff -ruNp linux-3.13.11/drivers/message/fusion/mptsas.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptsas.c
--- linux-3.13.11/drivers/message/fusion/mptsas.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptsas.c	2014-07-09 12:00:15.000000000 +0200
@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devin
 		return 0;
 }
 
+static inline void
+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+{
+	if (phy_info->port_details) {
+		phy_info->port_details->rphy = rphy;
+		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+		    ioc->name, rphy));
+	}
+
+	if (rphy) {
+		dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+		    &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+		    ioc->name, rphy, rphy->dev.release));
+	}
+}
+
 /* no mutex */
 static void
 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
 		return NULL;
 }
 
-static inline void
-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
-{
-	if (phy_info->port_details) {
-		phy_info->port_details->rphy = rphy;
-		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
-		    ioc->name, rphy));
-	}
-
-	if (rphy) {
-		dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
-		    &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
-		dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
-		    ioc->name, rphy, rphy->dev.release));
-	}
-}
-
 static inline struct sas_port *
 mptsas_get_port(struct mptsas_phyinfo *phy_info)
 {
diff -ruNp linux-3.13.11/drivers/message/fusion/mptscsih.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptscsih.c
--- linux-3.13.11/drivers/message/fusion/mptscsih.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/fusion/mptscsih.c	2014-07-09 12:00:15.000000000 +0200
@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
 
 	h = shost_priv(SChost);
 
-	if (h) {
-		if (h->info_kbuf == NULL)
-			if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
-				return h->info_kbuf;
-		h->info_kbuf[0] = '\0';
+	if (!h)
+		return NULL;
 
-		mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
-		h->info_kbuf[size-1] = '\0';
-	}
+	if (h->info_kbuf == NULL)
+		if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+			return h->info_kbuf;
+	h->info_kbuf[0] = '\0';
+
+	mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+	h->info_kbuf[size-1] = '\0';
 
 	return h->info_kbuf;
 }
diff -ruNp linux-3.13.11/drivers/message/i2o/i2o_proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/i2o/i2o_proc.c
--- linux-3.13.11/drivers/message/i2o/i2o_proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/i2o/i2o_proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
 	"Array Controller Device"
 };
 
-static char *chtostr(char *tmp, u8 *chars, int n)
-{
-	tmp[0] = 0;
-	return strncat(tmp, (char *)chars, n);
-}
-
 static int i2o_report_query_status(struct seq_file *seq, int block_status,
 				   char *group)
 {
@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct se
 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
 {
 	struct i2o_controller *c = (struct i2o_controller *)seq->private;
-	static u32 work32[5];
-	static u8 *work8 = (u8 *) work32;
-	static u16 *work16 = (u16 *) work32;
+	u32 work32[5];
+	u8 *work8 = (u8 *) work32;
+	u16 *work16 = (u16 *) work32;
 	int token;
 	u32 hwcap;
 
@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct
 	} *result;
 
 	i2o_exec_execute_ddm_table ddm_table;
-	char tmp[28 + 1];
 
 	result = kmalloc(sizeof(*result), GFP_KERNEL);
 	if (!result)
@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct
 
 		seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
 		seq_printf(seq, "%-#8x", ddm_table.module_id);
-		seq_printf(seq, "%-29s",
-			   chtostr(tmp, ddm_table.module_name_version, 28));
+		seq_printf(seq, "%-.28s", ddm_table.module_name_version);
 		seq_printf(seq, "%9d  ", ddm_table.data_size);
 		seq_printf(seq, "%8d", ddm_table.code_size);
 
@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(s
 
 	i2o_driver_result_table *result;
 	i2o_driver_store_table *dst;
-	char tmp[28 + 1];
 
 	result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
 	if (result == NULL)
@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(s
 
 		seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
 		seq_printf(seq, "%-#8x", dst->module_id);
-		seq_printf(seq, "%-29s",
-			   chtostr(tmp, dst->module_name_version, 28));
-		seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
+		seq_printf(seq, "%-.28s", dst->module_name_version);
+		seq_printf(seq, "%-.8s", dst->date);
 		seq_printf(seq, "%8d ", dst->module_size);
 		seq_printf(seq, "%8d ", dst->mpb_size);
 		seq_printf(seq, "0x%04x", dst->module_flags);
@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users
 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
 {
 	struct i2o_device *d = (struct i2o_device *)seq->private;
-	static u32 work32[128];	// allow for "stuff" + up to 256 byte (max) serial number
+	u32 work32[128];	// allow for "stuff" + up to 256 byte (max) serial number
 	// == (allow) 512d bytes (max)
-	static u16 *work16 = (u16 *) work32;
+	u16 *work16 = (u16 *) work32;
 	int token;
-	char tmp[16 + 1];
 
 	token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
 
@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(str
 	seq_printf(seq, "Device Class  : %s\n", i2o_get_class_name(work16[0]));
 	seq_printf(seq, "Owner TID     : %0#5x\n", work16[2]);
 	seq_printf(seq, "Parent TID    : %0#5x\n", work16[3]);
-	seq_printf(seq, "Vendor info   : %s\n",
-		   chtostr(tmp, (u8 *) (work32 + 2), 16));
-	seq_printf(seq, "Product info  : %s\n",
-		   chtostr(tmp, (u8 *) (work32 + 6), 16));
-	seq_printf(seq, "Description   : %s\n",
-		   chtostr(tmp, (u8 *) (work32 + 10), 16));
-	seq_printf(seq, "Product rev.  : %s\n",
-		   chtostr(tmp, (u8 *) (work32 + 14), 8));
+	seq_printf(seq, "Vendor info   : %.16s\n", (u8 *) (work32 + 2));
+	seq_printf(seq, "Product info  : %.16s\n", (u8 *) (work32 + 6));
+	seq_printf(seq, "Description   : %.16s\n", (u8 *) (work32 + 10));
+	seq_printf(seq, "Product rev.  : %.8s\n", (u8 *) (work32 + 14));
 
 	seq_printf(seq, "Serial number : ");
 	print_serial_number(seq, (u8 *) (work32 + 16),
@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(str
 		u8 pad[256];	// allow up to 256 byte (max) serial number
 	} result;
 
-	char tmp[24 + 1];
-
 	token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
 
 	if (token < 0) {
@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(str
 	}
 
 	seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
-	seq_printf(seq, "Module name         : %s\n",
-		   chtostr(tmp, result.module_name, 24));
-	seq_printf(seq, "Module revision     : %s\n",
-		   chtostr(tmp, result.module_rev, 8));
+	seq_printf(seq, "Module name         : %.24s\n", result.module_name);
+	seq_printf(seq, "Module revision     : %.8s\n", result.module_rev);
 
 	seq_printf(seq, "Serial number       : ");
 	print_serial_number(seq, result.serial_number, sizeof(result) - 36);
@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq
 		u8 instance_number[4];
 	} result;
 
-	char tmp[64 + 1];
-
 	token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
 
 	if (token < 0) {
@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq
 		return 0;
 	}
 
-	seq_printf(seq, "Device name     : %s\n",
-		   chtostr(tmp, result.device_name, 64));
-	seq_printf(seq, "Service name    : %s\n",
-		   chtostr(tmp, result.service_name, 64));
-	seq_printf(seq, "Physical name   : %s\n",
-		   chtostr(tmp, result.physical_location, 64));
-	seq_printf(seq, "Instance number : %s\n",
-		   chtostr(tmp, result.instance_number, 4));
+	seq_printf(seq, "Device name     : %.64s\n", result.device_name);
+	seq_printf(seq, "Service name    : %.64s\n", result.service_name);
+	seq_printf(seq, "Physical name   : %.64s\n", result.physical_location);
+	seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
 
 	return 0;
 }
@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq
 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
 {
 	struct i2o_device *d = (struct i2o_device *)seq->private;
-	static u32 work32[12];
-	static u16 *work16 = (u16 *) work32;
-	static u8 *work8 = (u8 *) work32;
+	u32 work32[12];
+	u16 *work16 = (u16 *) work32;
+	u8 *work8 = (u8 *) work32;
 	int token;
 
 	token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
diff -ruNp linux-3.13.11/drivers/message/i2o/iop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/i2o/iop.c
--- linux-3.13.11/drivers/message/i2o/iop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/message/i2o/iop.c	2014-07-09 12:00:15.000000000 +0200
@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
 
 	spin_lock_irqsave(&c->context_list_lock, flags);
 
-	if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
-		atomic_inc(&c->context_list_counter);
+	if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
+		atomic_inc_unchecked(&c->context_list_counter);
 
-	entry->context = atomic_read(&c->context_list_counter);
+	entry->context = atomic_read_unchecked(&c->context_list_counter);
 
 	list_add(&entry->list, &c->context_list);
 
@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
 
 #if BITS_PER_LONG == 64
 	spin_lock_init(&c->context_list_lock);
-	atomic_set(&c->context_list_counter, 0);
+	atomic_set_unchecked(&c->context_list_counter, 0);
 	INIT_LIST_HEAD(&c->context_list);
 #endif
 
diff -ruNp linux-3.13.11/drivers/mfd/ab8500-debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/ab8500-debugfs.c
--- linux-3.13.11/drivers/mfd/ab8500-debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/ab8500-debugfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -100,7 +100,7 @@ static int irq_last;
 static u32 *irq_count;
 static int num_irqs;
 
-static struct device_attribute **dev_attr;
+static device_attribute_no_const **dev_attr;
 static char **event_name;
 
 static u8 avg_sample = SAMPLE_16;
diff -ruNp linux-3.13.11/drivers/mfd/janz-cmodio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/janz-cmodio.c
--- linux-3.13.11/drivers/mfd/janz-cmodio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/janz-cmodio.c	2014-07-09 12:00:15.000000000 +0200
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
diff -ruNp linux-3.13.11/drivers/mfd/max8925-i2c.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/max8925-i2c.c
--- linux-3.13.11/drivers/mfd/max8925-i2c.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/max8925-i2c.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_clie
 				   const struct i2c_device_id *id)
 {
 	struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
-	static struct max8925_chip *chip;
+	struct max8925_chip *chip;
 	struct device_node *node = client->dev.of_node;
 
 	if (node && !pdata) {
diff -ruNp linux-3.13.11/drivers/mfd/tps65910.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/tps65910.c
--- linux-3.13.11/drivers/mfd/tps65910.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/tps65910.c	2014-07-09 12:00:15.000000000 +0200
@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps6
 		    struct tps65910_platform_data *pdata)
 {
 	int ret = 0;
-	static struct regmap_irq_chip *tps6591x_irqs_chip;
+	struct regmap_irq_chip *tps6591x_irqs_chip;
 
 	if (!irq) {
 		dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
diff -ruNp linux-3.13.11/drivers/mfd/twl4030-irq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/twl4030-irq.c
--- linux-3.13.11/drivers/mfd/twl4030-irq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mfd/twl4030-irq.c	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,7 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/i2c/twl.h>
+#include <asm/pgtable.h>
 
 #include "twl-core.h"
 
@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev,
 	 * Install an irq handler for each of the SIH modules;
 	 * clone dummy irq_chip since PIH can't *do* anything
 	 */
-	twl4030_irq_chip = dummy_irq_chip;
-	twl4030_irq_chip.name = "twl4030";
+	pax_open_kernel();
+	memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
+	*(const char **)&twl4030_irq_chip.name = "twl4030";
 
-	twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
+	*(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
+	pax_close_kernel();
 
 	for (i = irq_base; i < irq_end; i++) {
 		irq_set_chip_and_handler(i, &twl4030_irq_chip,
diff -ruNp linux-3.13.11/drivers/misc/c2port/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/c2port/core.c
--- linux-3.13.11/drivers/misc/c2port/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/c2port/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_regi
 		goto error_idr_alloc;
 	c2dev->id = ret;
 
-	bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+	pax_open_kernel();
+	*(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+	pax_close_kernel();
 
 	c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
 				   "c2port%d", c2dev->id);
diff -ruNp linux-3.13.11/drivers/misc/eeprom/sunxi_sid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/eeprom/sunxi_sid.c
--- linux-3.13.11/drivers/misc/eeprom/sunxi_sid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/eeprom/sunxi_sid.c	2014-07-09 12:00:15.000000000 +0200
@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platfo
 
 	platform_set_drvdata(pdev, sid_data);
 
-	sid_bin_attr.size = sid_data->keysize;
+	pax_open_kernel();
+	*(size_t *)&sid_bin_attr.size = sid_data->keysize;
+	pax_close_kernel();
 	if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
 		return -ENODEV;
 
diff -ruNp linux-3.13.11/drivers/misc/kgdbts.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/kgdbts.c
--- linux-3.13.11/drivers/misc/kgdbts.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/kgdbts.c	2014-07-09 12:00:15.000000000 +0200
@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(in
 	char before[BREAK_INSTR_SIZE];
 	char after[BREAK_INSTR_SIZE];
 
-	probe_kernel_read(before, (char *)kgdbts_break_test,
+	probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
 	  BREAK_INSTR_SIZE);
 	init_simple_test();
 	ts.tst = plant_and_detach_test;
@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(in
 	/* Activate test with initial breakpoint */
 	if (!is_early)
 		kgdb_breakpoint();
-	probe_kernel_read(after, (char *)kgdbts_break_test,
+	probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
 	  BREAK_INSTR_SIZE);
 	if (memcmp(before, after, BREAK_INSTR_SIZE)) {
 		printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
diff -ruNp linux-3.13.11/drivers/misc/lis3lv02d/lis3lv02d.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/lis3lv02d/lis3lv02d.c
--- linux-3.13.11/drivers/misc/lis3lv02d/lis3lv02d.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/lis3lv02d/lis3lv02d.c	2014-07-09 12:00:15.000000000 +0200
@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(in
 	 * the lid is closed. This leads to interrupts as soon as a little move
 	 * is done.
 	 */
-	atomic_inc(&lis3->count);
+	atomic_inc_unchecked(&lis3->count);
 
 	wake_up_interruptible(&lis3->misc_wait);
 	kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct in
 	if (lis3->pm_dev)
 		pm_runtime_get_sync(lis3->pm_dev);
 
-	atomic_set(&lis3->count, 0);
+	atomic_set_unchecked(&lis3->count, 0);
 	return 0;
 }
 
@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struc
 	add_wait_queue(&lis3->misc_wait, &wait);
 	while (true) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		data = atomic_xchg(&lis3->count, 0);
+		data = atomic_xchg_unchecked(&lis3->count, 0);
 		if (data)
 			break;
 
@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(
 					      struct lis3lv02d, miscdev);
 
 	poll_wait(file, &lis3->misc_wait, wait);
-	if (atomic_read(&lis3->count))
+	if (atomic_read_unchecked(&lis3->count))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/misc/lis3lv02d/lis3lv02d.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/lis3lv02d/lis3lv02d.h
--- linux-3.13.11/drivers/misc/lis3lv02d/lis3lv02d.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/lis3lv02d/lis3lv02d.h	2014-07-09 12:00:15.000000000 +0200
@@ -297,7 +297,7 @@ struct lis3lv02d {
 	struct input_polled_dev	*idev;     /* input device */
 	struct platform_device	*pdev;     /* platform device */
 	struct regulator_bulk_data regulators[2];
-	atomic_t		count;     /* interrupt count after last read */
+	atomic_unchecked_t	count;     /* interrupt count after last read */
 	union axis_conversion	ac;        /* hw -> logical axis */
 	int			mapped_btns[3];
 
diff -ruNp linux-3.13.11/drivers/misc/sgi-gru/gruhandles.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/gruhandles.c
--- linux-3.13.11/drivers/misc/sgi-gru/gruhandles.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/gruhandles.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
 	unsigned long nsec;
 
 	nsec = CLKS2NSEC(clks);
-	atomic_long_inc(&mcs_op_statistics[op].count);
-	atomic_long_add(nsec, &mcs_op_statistics[op].total);
+	atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
+	atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
 	if (mcs_op_statistics[op].max < nsec)
 		mcs_op_statistics[op].max = nsec;
 }
diff -ruNp linux-3.13.11/drivers/misc/sgi-gru/gruprocfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/gruprocfs.c
--- linux-3.13.11/drivers/misc/sgi-gru/gruprocfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/gruprocfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,9 +32,9 @@
 
 #define printstat(s, f)		printstat_val(s, &gru_stats.f, #f)
 
-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
 {
-	unsigned long val = atomic_long_read(v);
+	unsigned long val = atomic_long_read_unchecked(v);
 
 	seq_printf(s, "%16lu %s\n", val, id);
 }
@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
 
 	seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
 	for (op = 0; op < mcsop_last; op++) {
-		count = atomic_long_read(&mcs_op_statistics[op].count);
-		total = atomic_long_read(&mcs_op_statistics[op].total);
+		count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
+		total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
 		max = mcs_op_statistics[op].max;
 		seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
 			   count ? total / count : 0, max);
diff -ruNp linux-3.13.11/drivers/misc/sgi-gru/grutables.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/grutables.h
--- linux-3.13.11/drivers/misc/sgi-gru/grutables.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-gru/grutables.h	2014-07-09 12:00:15.000000000 +0200
@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
  * GRU statistics.
  */
 struct gru_stats_s {
-	atomic_long_t vdata_alloc;
-	atomic_long_t vdata_free;
-	atomic_long_t gts_alloc;
-	atomic_long_t gts_free;
-	atomic_long_t gms_alloc;
-	atomic_long_t gms_free;
-	atomic_long_t gts_double_allocate;
-	atomic_long_t assign_context;
-	atomic_long_t assign_context_failed;
-	atomic_long_t free_context;
-	atomic_long_t load_user_context;
-	atomic_long_t load_kernel_context;
-	atomic_long_t lock_kernel_context;
-	atomic_long_t unlock_kernel_context;
-	atomic_long_t steal_user_context;
-	atomic_long_t steal_kernel_context;
-	atomic_long_t steal_context_failed;
-	atomic_long_t nopfn;
-	atomic_long_t asid_new;
-	atomic_long_t asid_next;
-	atomic_long_t asid_wrap;
-	atomic_long_t asid_reuse;
-	atomic_long_t intr;
-	atomic_long_t intr_cbr;
-	atomic_long_t intr_tfh;
-	atomic_long_t intr_spurious;
-	atomic_long_t intr_mm_lock_failed;
-	atomic_long_t call_os;
-	atomic_long_t call_os_wait_queue;
-	atomic_long_t user_flush_tlb;
-	atomic_long_t user_unload_context;
-	atomic_long_t user_exception;
-	atomic_long_t set_context_option;
-	atomic_long_t check_context_retarget_intr;
-	atomic_long_t check_context_unload;
-	atomic_long_t tlb_dropin;
-	atomic_long_t tlb_preload_page;
-	atomic_long_t tlb_dropin_fail_no_asid;
-	atomic_long_t tlb_dropin_fail_upm;
-	atomic_long_t tlb_dropin_fail_invalid;
-	atomic_long_t tlb_dropin_fail_range_active;
-	atomic_long_t tlb_dropin_fail_idle;
-	atomic_long_t tlb_dropin_fail_fmm;
-	atomic_long_t tlb_dropin_fail_no_exception;
-	atomic_long_t tfh_stale_on_fault;
-	atomic_long_t mmu_invalidate_range;
-	atomic_long_t mmu_invalidate_page;
-	atomic_long_t flush_tlb;
-	atomic_long_t flush_tlb_gru;
-	atomic_long_t flush_tlb_gru_tgh;
-	atomic_long_t flush_tlb_gru_zero_asid;
-
-	atomic_long_t copy_gpa;
-	atomic_long_t read_gpa;
-
-	atomic_long_t mesq_receive;
-	atomic_long_t mesq_receive_none;
-	atomic_long_t mesq_send;
-	atomic_long_t mesq_send_failed;
-	atomic_long_t mesq_noop;
-	atomic_long_t mesq_send_unexpected_error;
-	atomic_long_t mesq_send_lb_overflow;
-	atomic_long_t mesq_send_qlimit_reached;
-	atomic_long_t mesq_send_amo_nacked;
-	atomic_long_t mesq_send_put_nacked;
-	atomic_long_t mesq_page_overflow;
-	atomic_long_t mesq_qf_locked;
-	atomic_long_t mesq_qf_noop_not_full;
-	atomic_long_t mesq_qf_switch_head_failed;
-	atomic_long_t mesq_qf_unexpected_error;
-	atomic_long_t mesq_noop_unexpected_error;
-	atomic_long_t mesq_noop_lb_overflow;
-	atomic_long_t mesq_noop_qlimit_reached;
-	atomic_long_t mesq_noop_amo_nacked;
-	atomic_long_t mesq_noop_put_nacked;
-	atomic_long_t mesq_noop_page_overflow;
+	atomic_long_unchecked_t vdata_alloc;
+	atomic_long_unchecked_t vdata_free;
+	atomic_long_unchecked_t gts_alloc;
+	atomic_long_unchecked_t gts_free;
+	atomic_long_unchecked_t gms_alloc;
+	atomic_long_unchecked_t gms_free;
+	atomic_long_unchecked_t gts_double_allocate;
+	atomic_long_unchecked_t assign_context;
+	atomic_long_unchecked_t assign_context_failed;
+	atomic_long_unchecked_t free_context;
+	atomic_long_unchecked_t load_user_context;
+	atomic_long_unchecked_t load_kernel_context;
+	atomic_long_unchecked_t lock_kernel_context;
+	atomic_long_unchecked_t unlock_kernel_context;
+	atomic_long_unchecked_t steal_user_context;
+	atomic_long_unchecked_t steal_kernel_context;
+	atomic_long_unchecked_t steal_context_failed;
+	atomic_long_unchecked_t nopfn;
+	atomic_long_unchecked_t asid_new;
+	atomic_long_unchecked_t asid_next;
+	atomic_long_unchecked_t asid_wrap;
+	atomic_long_unchecked_t asid_reuse;
+	atomic_long_unchecked_t intr;
+	atomic_long_unchecked_t intr_cbr;
+	atomic_long_unchecked_t intr_tfh;
+	atomic_long_unchecked_t intr_spurious;
+	atomic_long_unchecked_t intr_mm_lock_failed;
+	atomic_long_unchecked_t call_os;
+	atomic_long_unchecked_t call_os_wait_queue;
+	atomic_long_unchecked_t user_flush_tlb;
+	atomic_long_unchecked_t user_unload_context;
+	atomic_long_unchecked_t user_exception;
+	atomic_long_unchecked_t set_context_option;
+	atomic_long_unchecked_t check_context_retarget_intr;
+	atomic_long_unchecked_t check_context_unload;
+	atomic_long_unchecked_t tlb_dropin;
+	atomic_long_unchecked_t tlb_preload_page;
+	atomic_long_unchecked_t tlb_dropin_fail_no_asid;
+	atomic_long_unchecked_t tlb_dropin_fail_upm;
+	atomic_long_unchecked_t tlb_dropin_fail_invalid;
+	atomic_long_unchecked_t tlb_dropin_fail_range_active;
+	atomic_long_unchecked_t tlb_dropin_fail_idle;
+	atomic_long_unchecked_t tlb_dropin_fail_fmm;
+	atomic_long_unchecked_t tlb_dropin_fail_no_exception;
+	atomic_long_unchecked_t tfh_stale_on_fault;
+	atomic_long_unchecked_t mmu_invalidate_range;
+	atomic_long_unchecked_t mmu_invalidate_page;
+	atomic_long_unchecked_t flush_tlb;
+	atomic_long_unchecked_t flush_tlb_gru;
+	atomic_long_unchecked_t flush_tlb_gru_tgh;
+	atomic_long_unchecked_t flush_tlb_gru_zero_asid;
+
+	atomic_long_unchecked_t copy_gpa;
+	atomic_long_unchecked_t read_gpa;
+
+	atomic_long_unchecked_t mesq_receive;
+	atomic_long_unchecked_t mesq_receive_none;
+	atomic_long_unchecked_t mesq_send;
+	atomic_long_unchecked_t mesq_send_failed;
+	atomic_long_unchecked_t mesq_noop;
+	atomic_long_unchecked_t mesq_send_unexpected_error;
+	atomic_long_unchecked_t mesq_send_lb_overflow;
+	atomic_long_unchecked_t mesq_send_qlimit_reached;
+	atomic_long_unchecked_t mesq_send_amo_nacked;
+	atomic_long_unchecked_t mesq_send_put_nacked;
+	atomic_long_unchecked_t mesq_page_overflow;
+	atomic_long_unchecked_t mesq_qf_locked;
+	atomic_long_unchecked_t mesq_qf_noop_not_full;
+	atomic_long_unchecked_t mesq_qf_switch_head_failed;
+	atomic_long_unchecked_t mesq_qf_unexpected_error;
+	atomic_long_unchecked_t mesq_noop_unexpected_error;
+	atomic_long_unchecked_t mesq_noop_lb_overflow;
+	atomic_long_unchecked_t mesq_noop_qlimit_reached;
+	atomic_long_unchecked_t mesq_noop_amo_nacked;
+	atomic_long_unchecked_t mesq_noop_put_nacked;
+	atomic_long_unchecked_t mesq_noop_page_overflow;
 
 };
 
@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
 	tghop_invalidate, mcsop_last};
 
 struct mcs_op_statistic {
-	atomic_long_t	count;
-	atomic_long_t	total;
+	atomic_long_unchecked_t	count;
+	atomic_long_unchecked_t	total;
 	unsigned long	max;
 };
 
@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
 
 #define STAT(id)	do {						\
 				if (gru_options & OPT_STATS)		\
-					atomic_long_inc(&gru_stats.id);	\
+					atomic_long_inc_unchecked(&gru_stats.id);	\
 			} while (0)
 
 #ifdef CONFIG_SGI_GRU_DEBUG
diff -ruNp linux-3.13.11/drivers/misc/sgi-xp/xp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xp.h
--- linux-3.13.11/drivers/misc/sgi-xp/xp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xp.h	2014-07-09 12:00:15.000000000 +0200
@@ -288,7 +288,7 @@ struct xpc_interface {
 					xpc_notify_func, void *);
 	void (*received) (short, int, void *);
 	enum xp_retval (*partid_to_nasids) (short, void *);
-};
+} __no_const;
 
 extern struct xpc_interface xpc_interface;
 
diff -ruNp linux-3.13.11/drivers/misc/sgi-xp/xp_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xp_main.c
--- linux-3.13.11/drivers/misc/sgi-xp/xp_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xp_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -78,13 +78,13 @@ xpc_notloaded(void)
 }
 
 struct xpc_interface xpc_interface = {
-	(void (*)(int))xpc_notloaded,
-	(void (*)(int))xpc_notloaded,
-	(enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
-	(enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+	.connect = (void (*)(int))xpc_notloaded,
+	.disconnect = (void (*)(int))xpc_notloaded,
+	.send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+	.send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
 			   void *))xpc_notloaded,
-	(void (*)(short, int, void *))xpc_notloaded,
-	(enum xp_retval(*)(short, void *))xpc_notloaded
+	.received = (void (*)(short, int, void *))xpc_notloaded,
+	.partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
 };
 EXPORT_SYMBOL_GPL(xpc_interface);
 
diff -ruNp linux-3.13.11/drivers/misc/sgi-xp/xpc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xpc.h
--- linux-3.13.11/drivers/misc/sgi-xp/xpc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xpc.h	2014-07-09 12:00:15.000000000 +0200
@@ -835,6 +835,7 @@ struct xpc_arch_operations {
 	void (*received_payload) (struct xpc_channel *, void *);
 	void (*notify_senders_of_disconnect) (struct xpc_channel *);
 };
+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
 
 /* struct xpc_partition act_state values (for XPC HB) */
 
@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_regis
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
-extern struct xpc_arch_operations xpc_arch_ops;
+extern xpc_arch_operations_no_const xpc_arch_ops;
 extern int xpc_disengage_timelimit;
 extern int xpc_disengage_timedout;
 extern int xpc_activate_IRQ_rcvd;
diff -ruNp linux-3.13.11/drivers/misc/sgi-xp/xpc_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xpc_main.c
--- linux-3.13.11/drivers/misc/sgi-xp/xpc_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/misc/sgi-xp/xpc_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_not
 	.notifier_call = xpc_system_die,
 };
 
-struct xpc_arch_operations xpc_arch_ops;
+xpc_arch_operations_no_const xpc_arch_ops;
 
 /*
  * Timer function to enforce the timelimit on the partition disengage.
@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb
 
 		if (((die_args->trapnr == X86_TRAP_MF) ||
 		     (die_args->trapnr == X86_TRAP_XF)) &&
-		    !user_mode_vm(die_args->regs))
+		    !user_mode(die_args->regs))
 			xpc_die_deactivate();
 
 		break;
diff -ruNp linux-3.13.11/drivers/mmc/card/block.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/card/block.c
--- linux-3.13.11/drivers/mmc/card/block.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/card/block.c	2014-07-09 12:00:15.000000000 +0200
@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct bloc
 	if (idata->ic.postsleep_min_us)
 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 
-	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+	if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
 		err = -EFAULT;
 		goto cmd_rel_host;
 	}
diff -ruNp linux-3.13.11/drivers/mmc/core/mmc_ops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/core/mmc_ops.c
--- linux-3.13.11/drivers/mmc/core/mmc_ops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/core/mmc_ops.c	2014-07-09 12:00:15.000000000 +0200
@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card,
 	void *data_buf;
 	int is_on_stack;
 
-	is_on_stack = object_is_on_stack(buf);
+	is_on_stack = object_starts_on_stack(buf);
 	if (is_on_stack) {
 		/*
 		 * dma onto stack is unsafe/nonportable, but callers to this
diff -ruNp linux-3.13.11/drivers/mmc/host/dw_mmc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/dw_mmc.h
--- linux-3.13.11/drivers/mmc/host/dw_mmc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/dw_mmc.h	2014-07-09 12:00:15.000000000 +0200
@@ -258,5 +258,5 @@ struct dw_mci_drv_data {
 	int		(*parse_dt)(struct dw_mci *host);
 	int		(*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
 					struct dw_mci_tuning_data *tuning_data);
-};
+} __do_const;
 #endif /* _DW_MMC_H_ */
diff -ruNp linux-3.13.11/drivers/mmc/host/mmci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/mmci.c
--- linux-3.13.11/drivers/mmc/host/mmci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/mmci.c	2014-07-09 12:00:15.000000000 +0200
@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device
 	}
 
 	if (variant->busy_detect) {
-		mmci_ops.card_busy = mmci_card_busy;
+		pax_open_kernel();
+		*(void **)&mmci_ops.card_busy = mmci_card_busy;
+		pax_close_kernel();
 		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
 	}
 
diff -ruNp linux-3.13.11/drivers/mmc/host/sdhci-esdhc-imx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/sdhci-esdhc-imx.c
--- linux-3.13.11/drivers/mmc/host/sdhci-esdhc-imx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/sdhci-esdhc-imx.c	2014-07-09 12:00:15.000000000 +0200
@@ -1009,9 +1009,12 @@ static int sdhci_esdhc_imx_probe(struct
 		host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
 	}
 
-	if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
-		sdhci_esdhc_ops.platform_execute_tuning =
+	if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
+		pax_open_kernel();
+		*(void **)&sdhci_esdhc_ops.platform_execute_tuning =
 					esdhc_executing_tuning;
+		pax_close_kernel();
+	}
 	boarddata = &imx_data->boarddata;
 	if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
 		if (!host->mmc->parent->platform_data) {
diff -ruNp linux-3.13.11/drivers/mmc/host/sdhci-s3c.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/sdhci-s3c.c
--- linux-3.13.11/drivers/mmc/host/sdhci-s3c.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mmc/host/sdhci-s3c.c	2014-07-09 12:00:15.000000000 +0200
@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platfo
 	 * we can use overriding functions instead of default.
 	 */
 	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
-		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
-		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
-		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+		pax_open_kernel();
+		*(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+		*(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+		*(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+		pax_close_kernel();
 	}
 
 	/* It supports additional host capabilities if needed */
diff -ruNp linux-3.13.11/drivers/mtd/chips/cfi_cmdset_0020.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/chips/cfi_cmdset_0020.c
--- linux-3.13.11/drivers/mtd/chips/cfi_cmdset_0020.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/chips/cfi_cmdset_0020.c	2014-07-09 12:00:15.000000000 +0200
@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, co
 	size_t	 totlen = 0, thislen;
 	int	 ret = 0;
 	size_t	 buflen = 0;
-	static char *buffer;
+	char *buffer;
 
 	if (!ECCBUF_SIZE) {
 		/* We should fall back to a general writev implementation.
diff -ruNp linux-3.13.11/drivers/mtd/nand/denali.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/nand/denali.c
--- linux-3.13.11/drivers/mtd/nand/denali.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/nand/denali.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #include "denali.h"
 
diff -ruNp linux-3.13.11/drivers/mtd/nftlmount.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/nftlmount.c
--- linux-3.13.11/drivers/mtd/nftlmount.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/nftlmount.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@
 #include <asm/errno.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nftl.h>
diff -ruNp linux-3.13.11/drivers/mtd/sm_ftl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/sm_ftl.c
--- linux-3.13.11/drivers/mtd/sm_ftl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/mtd/sm_ftl.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct devic
 #define SM_CIS_VENDOR_OFFSET 0x59
 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
 {
-	struct attribute_group *attr_group;
+	attribute_group_no_const *attr_group;
 	struct attribute **attributes;
 	struct sm_sysfs_attribute *vendor_attribute;
 
diff -ruNp linux-3.13.11/drivers/net/bonding/bond_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/bonding/bond_main.c
--- linux-3.13.11/drivers/net/bonding/bond_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/bonding/bond_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -4527,6 +4527,7 @@ static void __exit bonding_exit(void)
 
 	bond_netlink_fini();
 	unregister_pernet_subsys(&bond_net_ops);
+	rtnl_link_unregister(&bond_link_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	/*
diff -ruNp linux-3.13.11/drivers/net/bonding/bond_netlink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/bonding/bond_netlink.c
--- linux-3.13.11/drivers/net/bonding/bond_netlink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/bonding/bond_netlink.c	2014-07-09 12:00:15.000000000 +0200
@@ -102,7 +102,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-struct rtnl_link_ops bond_link_ops __read_mostly = {
+struct rtnl_link_ops bond_link_ops = {
 	.kind			= "bond",
 	.priv_size		= sizeof(struct bonding),
 	.setup			= bond_setup,
diff -ruNp linux-3.13.11/drivers/net/can/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/can/Kconfig
--- linux-3.13.11/drivers/net/can/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/can/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
 
 config CAN_FLEXCAN
 	tristate "Support for Freescale FLEXCAN based chips"
-	depends on ARM || PPC
+	depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
 	---help---
 	  Say Y here if you want to support for Freescale FlexCAN.
 
diff -ruNp linux-3.13.11/drivers/net/ethernet/8390/ax88796.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/8390/ax88796.c
--- linux-3.13.11/drivers/net/ethernet/8390/ax88796.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/8390/ax88796.c	2014-07-09 12:00:15.000000000 +0200
@@ -872,9 +872,11 @@ static int ax_probe(struct platform_devi
 	if (ax->plat->reg_offsets)
 		ei_local->reg_offset = ax->plat->reg_offsets;
 	else {
+		resource_size_t _mem_size = mem_size;
+		do_div(_mem_size, 0x18);
 		ei_local->reg_offset = ax->reg_offsets;
 		for (ret = 0; ret < 0x18; ret++)
-			ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
+			ax->reg_offsets[ret] = _mem_size * ret;
 	}
 
 	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
diff -ruNp linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
--- linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h	2014-07-09 12:00:15.000000000 +0200
@@ -1139,7 +1139,7 @@ static inline u8 bnx2x_get_path_func_num
 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
 {
 	/* RX_MODE controlling object */
-	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+	bnx2x_init_rx_mode_obj(bp);
 
 	/* multicast configuration controlling object */
 	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
diff -ruNp linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
--- linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c	2014-07-09 12:00:15.000000000 +0200
@@ -2591,15 +2591,14 @@ int bnx2x_config_rx_mode(struct bnx2x *b
 	return rc;
 }
 
-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-			    struct bnx2x_rx_mode_obj *o)
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
 {
 	if (CHIP_IS_E1x(bp)) {
-		o->wait_comp      = bnx2x_empty_rx_mode_wait;
-		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+		bp->rx_mode_obj.wait_comp      = bnx2x_empty_rx_mode_wait;
+		bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
 	} else {
-		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
-		o->config_rx_mode = bnx2x_set_rx_mode_e2;
+		bp->rx_mode_obj.wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+		bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
 	}
 }
 
diff -ruNp linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
--- linux-3.13.11/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h	2014-07-09 12:00:15.000000000 +0200
@@ -1332,8 +1332,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp
 
 /********************* RX MODE ****************/
 
-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
-			    struct bnx2x_rx_mode_obj *o);
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
 
 /**
  * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
diff -ruNp linux-3.13.11/drivers/net/ethernet/broadcom/tg3.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/tg3.h
--- linux-3.13.11/drivers/net/ethernet/broadcom/tg3.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/broadcom/tg3.h	2014-07-09 12:00:15.000000000 +0200
@@ -150,6 +150,7 @@
 #define  CHIPREV_ID_5750_A0		 0x4000
 #define  CHIPREV_ID_5750_A1		 0x4001
 #define  CHIPREV_ID_5750_A3		 0x4003
+#define  CHIPREV_ID_5750_C1		 0x4201
 #define  CHIPREV_ID_5750_C2		 0x4202
 #define  CHIPREV_ID_5752_A0_HW		 0x5000
 #define  CHIPREV_ID_5752_A0		 0x6000
diff -ruNp linux-3.13.11/drivers/net/ethernet/brocade/bna/bna_enet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/brocade/bna/bna_enet.c
--- linux-3.13.11/drivers/net/ethernet/brocade/bna/bna_enet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/brocade/bna/bna_enet.c	2014-07-09 12:00:15.000000000 +0200
@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
 }
 
 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
-	bna_cb_ioceth_enable,
-	bna_cb_ioceth_disable,
-	bna_cb_ioceth_hbfail,
-	bna_cb_ioceth_reset
+	.enable_cbfn = bna_cb_ioceth_enable,
+	.disable_cbfn = bna_cb_ioceth_disable,
+	.hbfail_cbfn = bna_cb_ioceth_hbfail,
+	.reset_cbfn = bna_cb_ioceth_reset
 };
 
 static void bna_attr_init(struct bna_ioceth *ioceth)
diff -ruNp linux-3.13.11/drivers/net/ethernet/chelsio/cxgb3/l2t.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/chelsio/cxgb3/l2t.h
--- linux-3.13.11/drivers/net/ethernet/chelsio/cxgb3/l2t.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/chelsio/cxgb3/l2t.h	2014-07-09 12:00:15.000000000 +0200
@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)
  */
 struct l2t_skb_cb {
 	arp_failure_handler_func arp_failure_handler;
-};
+} __no_const;
 
 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
 
diff -ruNp linux-3.13.11/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
--- linux-3.13.11/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *
 
 	int i;
 	struct adapter *ap = netdev2adap(dev);
-	static const unsigned int *reg_ranges;
+	const unsigned int *reg_ranges;
 	int arr_size = 0, buf_size = 0;
 
 	if (is_t4(ap->params.chip)) {
diff -ruNp linux-3.13.11/drivers/net/ethernet/dec/tulip/de4x5.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/dec/tulip/de4x5.c
--- linux-3.13.11/drivers/net/ethernet/dec/tulip/de4x5.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/dec/tulip/de4x5.c	2014-07-09 12:00:15.000000000 +0200
@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, stru
 	for (i=0; i<ETH_ALEN; i++) {
 	    tmp.addr[i] = dev->dev_addr[i];
 	}
-	if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
+	if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
 	break;
 
     case DE4X5_SET_HWADDR:           /* Set the hardware address */
@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, stru
 	spin_lock_irqsave(&lp->lock, flags);
 	memcpy(&statbuf, &lp->pktStats, ioc->len);
 	spin_unlock_irqrestore(&lp->lock, flags);
-	if (copy_to_user(ioc->data, &statbuf, ioc->len))
+	if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
 		return -EFAULT;
 	break;
     }
diff -ruNp linux-3.13.11/drivers/net/ethernet/emulex/benet/be_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/emulex/benet/be_main.c
--- linux-3.13.11/drivers/net/ethernet/emulex/benet/be_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/emulex/benet/be_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *ac
 
 	if (wrapped)
 		newacc += 65536;
-	ACCESS_ONCE(*acc) = newacc;
+	ACCESS_ONCE_RW(*acc) = newacc;
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
diff -ruNp linux-3.13.11/drivers/net/ethernet/faraday/ftgmac100.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/faraday/ftgmac100.c
--- linux-3.13.11/drivers/net/ethernet/faraday/ftgmac100.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/faraday/ftgmac100.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,8 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
 #include <net/ip.h>
 
 #include "ftgmac100.h"
diff -ruNp linux-3.13.11/drivers/net/ethernet/faraday/ftmac100.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/faraday/ftmac100.c
--- linux-3.13.11/drivers/net/ethernet/faraday/ftmac100.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/faraday/ftmac100.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,8 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
 
 #include "ftmac100.h"
 
diff -ruNp linux-3.13.11/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
--- linux-3.13.11/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c	2014-07-09 12:00:15.000000000 +0200
@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct
 	}
 
 	/* update the base incval used to calculate frequency adjustment */
-	ACCESS_ONCE(adapter->base_incval) = incval;
+	ACCESS_ONCE_RW(adapter->base_incval) = incval;
 	smp_mb();
 
 	/* need lock to prevent incorrect read while modifying cyclecounter */
diff -ruNp linux-3.13.11/drivers/net/ethernet/neterion/vxge/vxge-config.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/neterion/vxge/vxge-config.c
--- linux-3.13.11/drivers/net/ethernet/neterion/vxge/vxge-config.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/neterion/vxge/vxge-config.c	2014-07-09 12:00:15.000000000 +0200
@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_v
 	struct __vxge_hw_fifo *fifo;
 	struct vxge_hw_fifo_config *config;
 	u32 txdl_size, txdl_per_memblock;
-	struct vxge_hw_mempool_cbs fifo_mp_callback;
+	static struct vxge_hw_mempool_cbs fifo_mp_callback = {
+		.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
+	};
+
 	struct __vxge_hw_virtualpath *vpath;
 
 	if ((vp == NULL) || (attr == NULL)) {
@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_v
 		goto exit;
 	}
 
-	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
-
 	fifo->mempool =
 		__vxge_hw_mempool_create(vpath->hldev,
 			fifo->config->memblock_size,
diff -ruNp linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
--- linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c	2014-07-09 12:00:15.000000000 +0200
@@ -2086,7 +2086,9 @@ int qlcnic_83xx_configure_opmode(struct
 		adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
 	} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
 		ahw->nic_mode = QLCNIC_DEFAULT_MODE;
-		adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+		pax_open_kernel();
+		*(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+		pax_close_kernel();
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
 		adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
 		adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
diff -ruNp linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
--- linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c	2014-07-09 12:00:15.000000000 +0200
@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struc
 	case QLCNIC_NON_PRIV_FUNC:
 		ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-		nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+		pax_open_kernel();
+		*(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+		pax_close_kernel();
 		break;
 	case QLCNIC_PRIV_FUNC:
 		ahw->op_mode = QLCNIC_PRIV_FUNC;
 		ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
-		nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+		pax_open_kernel();
+		*(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+		pax_close_kernel();
 		break;
 	case QLCNIC_MGMT_FUNC:
 		ahw->op_mode = QLCNIC_MGMT_FUNC;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-		nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+		pax_open_kernel();
+		*(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+		pax_close_kernel();
 		break;
 	default:
 		dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
diff -ruNp linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
--- linux-3.13.11/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c	2014-07-09 12:00:15.000000000 +0200
@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter
 	struct qlcnic_dump_entry *entry;
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
-	static const struct qlcnic_dump_operations *fw_dump_ops;
+	const struct qlcnic_dump_operations *fw_dump_ops;
 	struct device *dev = &adapter->pdev->dev;
 	struct qlcnic_hardware_context *ahw;
 	void *temp_buffer;
diff -ruNp linux-3.13.11/drivers/net/ethernet/realtek/r8169.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/realtek/r8169.c
--- linux-3.13.11/drivers/net/ethernet/realtek/r8169.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/realtek/r8169.c	2014-07-09 12:00:15.000000000 +0200
@@ -759,22 +759,22 @@ struct rtl8169_private {
 	struct mdio_ops {
 		void (*write)(struct rtl8169_private *, int, int);
 		int (*read)(struct rtl8169_private *, int);
-	} mdio_ops;
+	} __no_const mdio_ops;
 
 	struct pll_power_ops {
 		void (*down)(struct rtl8169_private *);
 		void (*up)(struct rtl8169_private *);
-	} pll_power_ops;
+	} __no_const pll_power_ops;
 
 	struct jumbo_ops {
 		void (*enable)(struct rtl8169_private *);
 		void (*disable)(struct rtl8169_private *);
-	} jumbo_ops;
+	} __no_const jumbo_ops;
 
 	struct csi_ops {
 		void (*write)(struct rtl8169_private *, int, int);
 		u32 (*read)(struct rtl8169_private *, int);
-	} csi_ops;
+	} __no_const csi_ops;
 
 	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
 	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
diff -ruNp linux-3.13.11/drivers/net/ethernet/sfc/ptp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/sfc/ptp.c
--- linux-3.13.11/drivers/net/ethernet/sfc/ptp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/sfc/ptp.c	2014-07-09 12:00:15.000000000 +0200
@@ -541,7 +541,7 @@ static int efx_ptp_synchronize(struct ef
 		       ptp->start.dma_addr);
 
 	/* Clear flag that signals MC ready */
-	ACCESS_ONCE(*start) = 0;
+	ACCESS_ONCE_RW(*start) = 0;
 	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
 				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
 	EFX_BUG_ON_PARANOID(rc);
diff -ruNp linux-3.13.11/drivers/net/ethernet/stmicro/stmmac/mmc_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
--- linux-3.13.11/drivers/net/ethernet/stmicro/stmmac/mmc_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ethernet/stmicro/stmmac/mmc_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr
 
 	writel(value, ioaddr + MMC_CNTRL);
 
-	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
-		 MMC_CNTRL, value);
+//	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+//		 MMC_CNTRL, value);
 }
 
 /* To mask all all interrupts.*/
diff -ruNp linux-3.13.11/drivers/net/hyperv/hyperv_net.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/hyperv/hyperv_net.h
--- linux-3.13.11/drivers/net/hyperv/hyperv_net.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/hyperv/hyperv_net.h	2014-07-09 12:00:15.000000000 +0200
@@ -101,7 +101,7 @@ struct rndis_device {
 
 	enum rndis_device_state state;
 	bool link_state;
-	atomic_t new_req_id;
+	atomic_unchecked_t new_req_id;
 
 	spinlock_t request_lock;
 	struct list_head req_list;
diff -ruNp linux-3.13.11/drivers/net/hyperv/rndis_filter.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/hyperv/rndis_filter.c
--- linux-3.13.11/drivers/net/hyperv/rndis_filter.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/hyperv/rndis_filter.c	2014-07-09 12:00:15.000000000 +0200
@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_r
 	 * template
 	 */
 	set = &rndis_msg->msg.set_req;
-	set->req_id = atomic_inc_return(&dev->new_req_id);
+	set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
 
 	/* Add to the request list */
 	spin_lock_irqsave(&dev->request_lock, flags);
@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(str
 
 	/* Setup the rndis set */
 	halt = &request->request_msg.msg.halt_req;
-	halt->req_id = atomic_inc_return(&dev->new_req_id);
+	halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
 
 	/* Ignore return since this msg is optional. */
 	rndis_filter_send_request(dev, request);
diff -ruNp linux-3.13.11/drivers/net/ieee802154/fakehard.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ieee802154/fakehard.c
--- linux-3.13.11/drivers/net/ieee802154/fakehard.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ieee802154/fakehard.c	2014-07-09 12:00:15.000000000 +0200
@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct p
 	phy->transmit_power = 0xbf;
 
 	dev->netdev_ops = &fake_ops;
-	dev->ml_priv = &fake_mlme;
+	dev->ml_priv = (void *)&fake_mlme;
 
 	priv = netdev_priv(dev);
 	priv->phy = phy;
diff -ruNp linux-3.13.11/drivers/net/macvlan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/macvlan.c
--- linux-3.13.11/drivers/net/macvlan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/macvlan.c	2014-07-09 12:00:15.000000000 +0200
@@ -993,13 +993,15 @@ static const struct nla_policy macvlan_p
 int macvlan_link_register(struct rtnl_link_ops *ops)
 {
 	/* common fields */
-	ops->priv_size		= sizeof(struct macvlan_dev);
-	ops->validate		= macvlan_validate;
-	ops->maxtype		= IFLA_MACVLAN_MAX;
-	ops->policy		= macvlan_policy;
-	ops->changelink		= macvlan_changelink;
-	ops->get_size		= macvlan_get_size;
-	ops->fill_info		= macvlan_fill_info;
+	pax_open_kernel();
+	*(size_t *)&ops->priv_size	= sizeof(struct macvlan_dev);
+	*(void **)&ops->validate	= macvlan_validate;
+	*(int *)&ops->maxtype		= IFLA_MACVLAN_MAX;
+	*(const void **)&ops->policy	= macvlan_policy;
+	*(void **)&ops->changelink	= macvlan_changelink;
+	*(void **)&ops->get_size	= macvlan_get_size;
+	*(void **)&ops->fill_info	= macvlan_fill_info;
+	pax_close_kernel();
 
 	return rtnl_link_register(ops);
 };
@@ -1054,7 +1056,7 @@ static int macvlan_device_event(struct n
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block macvlan_notifier_block __read_mostly = {
+static struct notifier_block macvlan_notifier_block = {
 	.notifier_call	= macvlan_device_event,
 };
 
diff -ruNp linux-3.13.11/drivers/net/macvtap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/macvtap.c
--- linux-3.13.11/drivers/net/macvtap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/macvtap.c	2014-07-09 12:00:15.000000000 +0200
@@ -1012,7 +1012,7 @@ static long macvtap_ioctl(struct file *f
 		}
 
 		ret = 0;
-		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+		if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
 		    put_user(q->flags, &ifr->ifr_flags))
 			ret = -EFAULT;
 		macvtap_put_vlan(vlan);
@@ -1182,7 +1182,7 @@ static int macvtap_device_event(struct n
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block macvtap_notifier_block __read_mostly = {
+static struct notifier_block macvtap_notifier_block = {
 	.notifier_call	= macvtap_device_event,
 };
 
diff -ruNp linux-3.13.11/drivers/net/phy/mdio-bitbang.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/phy/mdio-bitbang.c
--- linux-3.13.11/drivers/net/phy/mdio-bitbang.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/phy/mdio-bitbang.c	2014-07-09 12:00:15.000000000 +0200
@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *b
 	struct mdiobb_ctrl *ctrl = bus->priv;
 
 	module_put(ctrl->ops->owner);
+	mdiobus_unregister(bus);
 	mdiobus_free(bus);
 }
 EXPORT_SYMBOL(free_mdio_bitbang);
diff -ruNp linux-3.13.11/drivers/net/ppp/ppp_generic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ppp/ppp_generic.c
--- linux-3.13.11/drivers/net/ppp/ppp_generic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/ppp/ppp_generic.c	2014-07-09 12:00:15.000000000 +0200
@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, st
 	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
 	struct ppp_stats stats;
 	struct ppp_comp_stats cstats;
-	char *vers;
 
 	switch (cmd) {
 	case SIOCGPPPSTATS:
@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, st
 		break;
 
 	case SIOCGPPPVER:
-		vers = PPP_VERSION;
-		if (copy_to_user(addr, vers, strlen(vers) + 1))
+		if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
 			break;
 		err = 0;
 		break;
diff -ruNp linux-3.13.11/drivers/net/slip/slhc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/slip/slhc.c
--- linux-3.13.11/drivers/net/slip/slhc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/slip/slhc.c	2014-07-09 12:00:15.000000000 +0200
@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp,
 	register struct tcphdr *thp;
 	register struct iphdr *ip;
 	register struct cstate *cs;
-	int len, hdrlen;
+	long len, hdrlen;
 	unsigned char *cp = icp;
 
 	/* We've got a compressed packet; read the change byte */
diff -ruNp linux-3.13.11/drivers/net/team/team.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/team/team.c
--- linux-3.13.11/drivers/net/team/team.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/team/team.c	2014-07-09 12:00:15.000000000 +0200
@@ -2865,7 +2865,7 @@ static int team_device_event(struct noti
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block team_notifier_block __read_mostly = {
+static struct notifier_block team_notifier_block = {
 	.notifier_call = team_device_event,
 };
 
diff -ruNp linux-3.13.11/drivers/net/tun.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/tun.c
--- linux-3.13.11/drivers/net/tun.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/tun.c	2014-07-09 12:00:15.000000000 +0200
@@ -65,6 +65,7 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
+#include <linux/vs_network.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -168,6 +169,7 @@ struct tun_struct {
 	unsigned int 		flags;
 	kuid_t			owner;
 	kgid_t			group;
+	vnid_t			nid;
 
 	struct net_device	*dev;
 	netdev_features_t	set_features;
@@ -385,6 +387,7 @@ static inline bool tun_not_capable(struc
 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
+		/* !cap_raised(current_cap(), CAP_NET_ADMIN) */
 }
 
 static void tun_set_real_num_queues(struct tun_struct *tun)
@@ -1382,6 +1385,7 @@ static void tun_setup(struct net_device
 
 	tun->owner = INVALID_UID;
 	tun->group = INVALID_GID;
+	tun->nid = nx_current_nid();
 
 	dev->ethtool_ops = &tun_ethtool_ops;
 	dev->destructor = tun_free_netdev;
@@ -1598,7 +1602,7 @@ static int tun_set_iff(struct net *net,
 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
 			     MAX_TAP_QUEUES : 1;
 
-		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+		if (!nx_ns_capable(net->user_ns, CAP_NET_ADMIN, NXC_TUN_CREATE))
 			return -EPERM;
 		err = security_tun_dev_create();
 		if (err < 0)
@@ -1841,7 +1845,7 @@ unlock:
 }
 
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
-			    unsigned long arg, int ifreq_len)
+			    unsigned long arg, size_t ifreq_len)
 {
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun;
@@ -1854,6 +1858,9 @@ static long __tun_chr_ioctl(struct file
 	unsigned int ifindex;
 	int ret;
 
+	if (ifreq_len > sizeof ifr)
+		return -EFAULT;
+
 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
 		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
@@ -1969,6 +1976,16 @@ static long __tun_chr_ioctl(struct file
 			  from_kgid(&init_user_ns, tun->group));
 		break;
 
+	case TUNSETNID:
+		if (!capable(CAP_CONTEXT))
+			return -EPERM;
+
+		/* Set nid owner of the device */
+		tun->nid = (vnid_t) arg;
+
+		tun_debug(KERN_INFO, tun, "nid owner set to %u\n", tun->nid);
+		break;
+
 	case TUNSETLINK:
 		/* Only allow setting the type when the interface is down */
 		if (tun->dev->flags & IFF_UP) {
diff -ruNp linux-3.13.11/drivers/net/usb/hso.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/usb/hso.c
--- linux-3.13.11/drivers/net/usb/hso.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/usb/hso.c	2014-07-09 12:00:15.000000000 +0200
@@ -71,7 +71,7 @@
 #include <asm/byteorder.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
-
+#include <asm/local.h>
 
 #define MOD_AUTHOR			"Option Wireless"
 #define MOD_DESCRIPTION			"USB High Speed Option driver"
@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_
 	struct urb *urb;
 
 	urb = serial->rx_urb[0];
-	if (serial->port.count > 0) {
+	if (atomic_read(&serial->port.count) > 0) {
 		count = put_rxbuf_data(urb, serial);
 		if (count == -1)
 			return;
@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_cal
 	DUMP1(urb->transfer_buffer, urb->actual_length);
 
 	/* Anyone listening? */
-	if (serial->port.count == 0)
+	if (atomic_read(&serial->port.count) == 0)
 		return;
 
 	if (status == 0) {
@@ -1297,8 +1297,7 @@ static int hso_serial_open(struct tty_st
 	tty_port_tty_set(&serial->port, tty);
 
 	/* check for port already opened, if not set the termios */
-	serial->port.count++;
-	if (serial->port.count == 1) {
+	if (atomic_inc_return(&serial->port.count) == 1) {
 		serial->rx_state = RX_IDLE;
 		/* Force default termio settings */
 		_hso_serial_set_termios(tty, NULL);
@@ -1310,7 +1309,7 @@ static int hso_serial_open(struct tty_st
 		result = hso_start_serial_device(serial->parent, GFP_KERNEL);
 		if (result) {
 			hso_stop_serial_device(serial->parent);
-			serial->port.count--;
+			atomic_dec(&serial->port.count);
 			kref_put(&serial->parent->ref, hso_serial_ref_free);
 		}
 	} else {
@@ -1347,10 +1346,10 @@ static void hso_serial_close(struct tty_
 
 	/* reset the rts and dtr */
 	/* do the actual close */
-	serial->port.count--;
+	atomic_dec(&serial->port.count);
 
-	if (serial->port.count <= 0) {
-		serial->port.count = 0;
+	if (atomic_read(&serial->port.count) <= 0) {
+		atomic_set(&serial->port.count, 0);
 		tty_port_tty_set(&serial->port, NULL);
 		if (!usb_gone)
 			hso_stop_serial_device(serial->parent);
@@ -1426,7 +1425,7 @@ static void hso_serial_set_termios(struc
 
 	/* the actual setup */
 	spin_lock_irqsave(&serial->serial_lock, flags);
-	if (serial->port.count)
+	if (atomic_read(&serial->port.count))
 		_hso_serial_set_termios(tty, old);
 	else
 		tty->termios = *old;
@@ -1895,7 +1894,7 @@ static void intr_callback(struct urb *ur
 				D1("Pending read interrupt on port %d\n", i);
 				spin_lock(&serial->serial_lock);
 				if (serial->rx_state == RX_IDLE &&
-					serial->port.count > 0) {
+					atomic_read(&serial->port.count) > 0) {
 					/* Setup and send a ctrl req read on
 					 * port i */
 					if (!serial->rx_urb_filled[0]) {
@@ -3071,7 +3070,7 @@ static int hso_resume(struct usb_interfa
 	/* Start all serial ports */
 	for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
 		if (serial_table[i] && (serial_table[i]->interface == iface)) {
-			if (dev2ser(serial_table[i])->port.count) {
+			if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
 				result =
 				    hso_start_serial_device(serial_table[i], GFP_NOIO);
 				hso_kick_transmit(dev2ser(serial_table[i]));
diff -ruNp linux-3.13.11/drivers/net/usb/sierra_net.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/usb/sierra_net.c
--- linux-3.13.11/drivers/net/usb/sierra_net.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/usb/sierra_net.c	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ static const char driver_name[] = "sierr
 /* atomic counter partially included in MAC address to make sure 2 devices
  * do not end up with the same MAC - concept breaks in case of > 255 ifaces
  */
-static	atomic_t iface_counter = ATOMIC_INIT(0);
+static	atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
 
 /*
  * SYNC Timer Delay definition used to set the expiry time
@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet
 	dev->net->netdev_ops = &sierra_net_device_ops;
 
 	/* change MAC addr to include, ifacenum, and to be unique */
-	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
 	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
 
 	/* we will have to manufacture ethernet headers, prepare template */
diff -ruNp linux-3.13.11/drivers/net/vxlan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/vxlan.c
--- linux-3.13.11/drivers/net/vxlan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/vxlan.c	2014-07-09 12:00:15.000000000 +0200
@@ -2721,7 +2721,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
+static struct rtnl_link_ops vxlan_link_ops = {
 	.kind		= "vxlan",
 	.maxtype	= IFLA_VXLAN_MAX,
 	.policy		= vxlan_policy,
diff -ruNp linux-3.13.11/drivers/net/wan/lmc/lmc_media.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wan/lmc/lmc_media.c
--- linux-3.13.11/drivers/net/wan/lmc/lmc_media.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wan/lmc/lmc_media.c	2014-07-09 12:00:15.000000000 +0200
@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc
 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
 
 lmc_media_t lmc_ds3_media = {
-  lmc_ds3_init,			/* special media init stuff */
-  lmc_ds3_default,		/* reset to default state */
-  lmc_ds3_set_status,		/* reset status to state provided */
-  lmc_dummy_set_1,		/* set clock source */
-  lmc_dummy_set2_1,		/* set line speed */
-  lmc_ds3_set_100ft,		/* set cable length */
-  lmc_ds3_set_scram,		/* set scrambler */
-  lmc_ds3_get_link_status,	/* get link status */
-  lmc_dummy_set_1,		/* set link status */
-  lmc_ds3_set_crc_length,	/* set CRC length */
-  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  lmc_ds3_watchdog
+  .init = lmc_ds3_init,				/* special media init stuff */
+  .defaults = lmc_ds3_default,			/* reset to default state */
+  .set_status = lmc_ds3_set_status,		/* reset status to state provided */
+  .set_clock_source = lmc_dummy_set_1,		/* set clock source */
+  .set_speed = lmc_dummy_set2_1,		/* set line speed */
+  .set_cable_length = lmc_ds3_set_100ft,	/* set cable length */
+  .set_scrambler = lmc_ds3_set_scram,		/* set scrambler */
+  .get_link_status = lmc_ds3_get_link_status,	/* get link status */
+  .set_link_status = lmc_dummy_set_1,		/* set link status */
+  .set_crc_length = lmc_ds3_set_crc_length,	/* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  .watchdog = lmc_ds3_watchdog
 };
 
 lmc_media_t lmc_hssi_media = {
-  lmc_hssi_init,		/* special media init stuff */
-  lmc_hssi_default,		/* reset to default state */
-  lmc_hssi_set_status,		/* reset status to state provided */
-  lmc_hssi_set_clock,		/* set clock source */
-  lmc_dummy_set2_1,		/* set line speed */
-  lmc_dummy_set_1,		/* set cable length */
-  lmc_dummy_set_1,		/* set scrambler */
-  lmc_hssi_get_link_status,	/* get link status */
-  lmc_hssi_set_link_status,	/* set link status */
-  lmc_hssi_set_crc_length,	/* set CRC length */
-  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  lmc_hssi_watchdog
+  .init = lmc_hssi_init,			/* special media init stuff */
+  .defaults = lmc_hssi_default,			/* reset to default state */
+  .set_status = lmc_hssi_set_status,		/* reset status to state provided */
+  .set_clock_source = lmc_hssi_set_clock,	/* set clock source */
+  .set_speed = lmc_dummy_set2_1,		/* set line speed */
+  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
+  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
+  .get_link_status = lmc_hssi_get_link_status,	/* get link status */
+  .set_link_status = lmc_hssi_set_link_status,	/* set link status */
+  .set_crc_length = lmc_hssi_set_crc_length,	/* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  .watchdog = lmc_hssi_watchdog
 };
 
-lmc_media_t lmc_ssi_media = { lmc_ssi_init,	/* special media init stuff */
-  lmc_ssi_default,		/* reset to default state */
-  lmc_ssi_set_status,		/* reset status to state provided */
-  lmc_ssi_set_clock,		/* set clock source */
-  lmc_ssi_set_speed,		/* set line speed */
-  lmc_dummy_set_1,		/* set cable length */
-  lmc_dummy_set_1,		/* set scrambler */
-  lmc_ssi_get_link_status,	/* get link status */
-  lmc_ssi_set_link_status,	/* set link status */
-  lmc_ssi_set_crc_length,	/* set CRC length */
-  lmc_dummy_set_1,		/* set T1 or E1 circuit type */
-  lmc_ssi_watchdog
+lmc_media_t lmc_ssi_media = {
+  .init = lmc_ssi_init,				/* special media init stuff */
+  .defaults = lmc_ssi_default,			/* reset to default state */
+  .set_status = lmc_ssi_set_status,		/* reset status to state provided */
+  .set_clock_source = lmc_ssi_set_clock,	/* set clock source */
+  .set_speed = lmc_ssi_set_speed,		/* set line speed */
+  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
+  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
+  .get_link_status = lmc_ssi_get_link_status,	/* get link status */
+  .set_link_status = lmc_ssi_set_link_status,	/* set link status */
+  .set_crc_length = lmc_ssi_set_crc_length,	/* set CRC length */
+  .set_circuit_type = lmc_dummy_set_1,		/* set T1 or E1 circuit type */
+  .watchdog = lmc_ssi_watchdog
 };
 
 lmc_media_t lmc_t1_media = {
-  lmc_t1_init,			/* special media init stuff */
-  lmc_t1_default,		/* reset to default state */
-  lmc_t1_set_status,		/* reset status to state provided */
-  lmc_t1_set_clock,		/* set clock source */
-  lmc_dummy_set2_1,		/* set line speed */
-  lmc_dummy_set_1,		/* set cable length */
-  lmc_dummy_set_1,		/* set scrambler */
-  lmc_t1_get_link_status,	/* get link status */
-  lmc_dummy_set_1,		/* set link status */
-  lmc_t1_set_crc_length,	/* set CRC length */
-  lmc_t1_set_circuit_type,	/* set T1 or E1 circuit type */
-  lmc_t1_watchdog
+  .init = lmc_t1_init,				/* special media init stuff */
+  .defaults = lmc_t1_default,			/* reset to default state */
+  .set_status = lmc_t1_set_status,		/* reset status to state provided */
+  .set_clock_source = lmc_t1_set_clock,		/* set clock source */
+  .set_speed = lmc_dummy_set2_1,		/* set line speed */
+  .set_cable_length = lmc_dummy_set_1,		/* set cable length */
+  .set_scrambler = lmc_dummy_set_1,		/* set scrambler */
+  .get_link_status = lmc_t1_get_link_status,	/* get link status */
+  .set_link_status = lmc_dummy_set_1,		/* set link status */
+  .set_crc_length = lmc_t1_set_crc_length,	/* set CRC length */
+  .set_circuit_type = lmc_t1_set_circuit_type,	/* set T1 or E1 circuit type */
+  .watchdog = lmc_t1_watchdog
 };
 
 static void
diff -ruNp linux-3.13.11/drivers/net/wan/z85230.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wan/z85230.c
--- linux-3.13.11/drivers/net/wan/z85230.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wan/z85230.c	2014-07-09 12:00:15.000000000 +0200
@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_ch
 
 struct z8530_irqhandler z8530_sync =
 {
-	z8530_rx,
-	z8530_tx,
-	z8530_status
+	.rx = z8530_rx,
+	.tx = z8530_tx,
+	.status = z8530_status
 };
 
 EXPORT_SYMBOL(z8530_sync);
@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z853
 }
 
 static struct z8530_irqhandler z8530_dma_sync = {
-	z8530_dma_rx,
-	z8530_dma_tx,
-	z8530_dma_status
+	.rx = z8530_dma_rx,
+	.tx = z8530_dma_tx,
+	.status = z8530_dma_status
 };
 
 static struct z8530_irqhandler z8530_txdma_sync = {
-	z8530_rx,
-	z8530_dma_tx,
-	z8530_dma_status
+	.rx = z8530_rx,
+	.tx = z8530_dma_tx,
+	.status = z8530_dma_status
 };
 
 /**
@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8
 
 struct z8530_irqhandler z8530_nop=
 {
-	z8530_rx_clear,
-	z8530_tx_clear,
-	z8530_status_clear
+	.rx = z8530_rx_clear,
+	.tx = z8530_tx_clear,
+	.status = z8530_status_clear
 };
 
 
diff -ruNp linux-3.13.11/drivers/net/wimax/i2400m/rx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wimax/i2400m/rx.c
--- linux-3.13.11/drivers/net/wimax/i2400m/rx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wimax/i2400m/rx.c	2014-07-09 12:00:15.000000000 +0200
@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400
 		if (i2400m->rx_roq == NULL)
 			goto error_roq_alloc;
 
-		rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
+		rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
 			     GFP_KERNEL);
 		if (rd == NULL) {
 			result = -ENOMEM;
diff -ruNp linux-3.13.11/drivers/net/wireless/airo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/airo.c
--- linux-3.13.11/drivers/net/wireless/airo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/airo.c	2014-07-09 12:00:15.000000000 +0200
@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *
 	struct airo_info *ai = dev->ml_priv;
 	int  ridcode;
         int  enabled;
-	static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
+	int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
 	unsigned char *iobuf;
 
 	/* Only super-user can write RIDs */
diff -ruNp linux-3.13.11/drivers/net/wireless/at76c50x-usb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/at76c50x-usb.c
--- linux-3.13.11/drivers/net/wireless/at76c50x-usb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/at76c50x-usb.c	2014-07-09 12:00:15.000000000 +0200
@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb
 }
 
 /* Convert timeout from the DFU status to jiffies */
-static inline unsigned long at76_get_timeout(struct dfu_status *s)
+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
 {
 	return msecs_to_jiffies((s->poll_timeout[2] << 16)
 				| (s->poll_timeout[1] << 8)
diff -ruNp linux-3.13.11/drivers/net/wireless/ath/ath10k/htc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath10k/htc.c
--- linux-3.13.11/drivers/net/wireless/ath/ath10k/htc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath10k/htc.c	2014-07-09 12:00:15.000000000 +0200
@@ -842,7 +842,10 @@ void ath10k_htc_stop(struct ath10k_htc *
 /* registered target arrival callback from the HIF layer */
 int ath10k_htc_init(struct ath10k *ar)
 {
-	struct ath10k_hif_cb htc_callbacks;
+	static struct ath10k_hif_cb htc_callbacks = {
+		.rx_completion = ath10k_htc_rx_completion_handler,
+		.tx_completion = ath10k_htc_tx_completion_handler,
+	};
 	struct ath10k_htc_ep *ep = NULL;
 	struct ath10k_htc *htc = &ar->htc;
 
@@ -852,8 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
 	ath10k_htc_reset_endpoint_states(htc);
 
 	/* setup HIF layer callbacks */
-	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
-	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
 	htc->ar = ar;
 
 	/* Get HIF default pipe for HTC message exchange */
diff -ruNp linux-3.13.11/drivers/net/wireless/ath/ath10k/htc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath10k/htc.h
--- linux-3.13.11/drivers/net/wireless/ath/ath10k/htc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath10k/htc.h	2014-07-09 12:00:15.000000000 +0200
@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
 
 struct ath10k_htc_ops {
 	void (*target_send_suspend_complete)(struct ath10k *ar);
-};
+} __no_const;
 
 struct ath10k_htc_ep_ops {
 	void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
 	void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
 	void (*ep_tx_credits)(struct ath10k *);
-};
+} __no_const;
 
 /* service connection information */
 struct ath10k_htc_svc_conn_req {
diff -ruNp linux-3.13.11/drivers/net/wireless/ath/ath9k/ar9002_mac.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/ar9002_mac.c
--- linux-3.13.11/drivers/net/wireless/ath/ath9k/ar9002_mac.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/ar9002_mac.c	2014-07-09 12:00:15.000000000 +0200
@@ -218,8 +218,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
 	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
 	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
 
-	ACCESS_ONCE(ads->ds_link) = i->link;
-	ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+	ACCESS_ONCE_RW(ads->ds_link) = i->link;
+	ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
 
 	ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
 	ctl6 = SM(i->keytype, AR_EncrType);
@@ -233,26 +233,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
 
 	if ((i->is_first || i->is_last) &&
 	    i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
-		ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+		ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
 			| set11nTries(i->rates, 1)
 			| set11nTries(i->rates, 2)
 			| set11nTries(i->rates, 3)
 			| (i->dur_update ? AR_DurUpdateEna : 0)
 			| SM(0, AR_BurstDur);
 
-		ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+		ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
 			| set11nRate(i->rates, 1)
 			| set11nRate(i->rates, 2)
 			| set11nRate(i->rates, 3);
 	} else {
-		ACCESS_ONCE(ads->ds_ctl2) = 0;
-		ACCESS_ONCE(ads->ds_ctl3) = 0;
+		ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
+		ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
 	}
 
 	if (!i->is_first) {
-		ACCESS_ONCE(ads->ds_ctl0) = 0;
-		ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-		ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+		ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
+		ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
+		ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
 		return;
 	}
 
@@ -277,7 +277,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
 		break;
 	}
 
-	ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+	ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
 		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
 		| SM(i->txpower, AR_XmitPower)
 		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -287,19 +287,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
 		| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
 		   (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
 
-	ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-	ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+	ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
+	ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
 
 	if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
 		return;
 
-	ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+	ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
 		| set11nPktDurRTSCTS(i->rates, 1);
 
-	ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+	ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
 		| set11nPktDurRTSCTS(i->rates, 3);
 
-	ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+	ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
 		| set11nRateFlags(i->rates, 1)
 		| set11nRateFlags(i->rates, 2)
 		| set11nRateFlags(i->rates, 3)
diff -ruNp linux-3.13.11/drivers/net/wireless/ath/ath9k/ar9003_mac.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/ar9003_mac.c
--- linux-3.13.11/drivers/net/wireless/ath/ath9k/ar9003_mac.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/ar9003_mac.c	2014-07-09 12:00:15.000000000 +0200
@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
 	      (i->qcu << AR_TxQcuNum_S) | desc_len;
 
 	checksum += val;
-	ACCESS_ONCE(ads->info) = val;
+	ACCESS_ONCE_RW(ads->info) = val;
 
 	checksum += i->link;
-	ACCESS_ONCE(ads->link) = i->link;
+	ACCESS_ONCE_RW(ads->link) = i->link;
 
 	checksum += i->buf_addr[0];
-	ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+	ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
 	checksum += i->buf_addr[1];
-	ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+	ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
 	checksum += i->buf_addr[2];
-	ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+	ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
 	checksum += i->buf_addr[3];
-	ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+	ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
 
 	checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
-	ACCESS_ONCE(ads->ctl3) = val;
+	ACCESS_ONCE_RW(ads->ctl3) = val;
 	checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
-	ACCESS_ONCE(ads->ctl5) = val;
+	ACCESS_ONCE_RW(ads->ctl5) = val;
 	checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
-	ACCESS_ONCE(ads->ctl7) = val;
+	ACCESS_ONCE_RW(ads->ctl7) = val;
 	checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
-	ACCESS_ONCE(ads->ctl9) = val;
+	ACCESS_ONCE_RW(ads->ctl9) = val;
 
 	checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
-	ACCESS_ONCE(ads->ctl10) = checksum;
+	ACCESS_ONCE_RW(ads->ctl10) = checksum;
 
 	if (i->is_first || i->is_last) {
-		ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+		ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
 			| set11nTries(i->rates, 1)
 			| set11nTries(i->rates, 2)
 			| set11nTries(i->rates, 3)
 			| (i->dur_update ? AR_DurUpdateEna : 0)
 			| SM(0, AR_BurstDur);
 
-		ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+		ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
 			| set11nRate(i->rates, 1)
 			| set11nRate(i->rates, 2)
 			| set11nRate(i->rates, 3);
 	} else {
-		ACCESS_ONCE(ads->ctl13) = 0;
-		ACCESS_ONCE(ads->ctl14) = 0;
+		ACCESS_ONCE_RW(ads->ctl13) = 0;
+		ACCESS_ONCE_RW(ads->ctl14) = 0;
 	}
 
 	ads->ctl20 = 0;
@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
 
 	ctl17 = SM(i->keytype, AR_EncrType);
 	if (!i->is_first) {
-		ACCESS_ONCE(ads->ctl11) = 0;
-		ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
-		ACCESS_ONCE(ads->ctl15) = 0;
-		ACCESS_ONCE(ads->ctl16) = 0;
-		ACCESS_ONCE(ads->ctl17) = ctl17;
-		ACCESS_ONCE(ads->ctl18) = 0;
-		ACCESS_ONCE(ads->ctl19) = 0;
+		ACCESS_ONCE_RW(ads->ctl11) = 0;
+		ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+		ACCESS_ONCE_RW(ads->ctl15) = 0;
+		ACCESS_ONCE_RW(ads->ctl16) = 0;
+		ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+		ACCESS_ONCE_RW(ads->ctl18) = 0;
+		ACCESS_ONCE_RW(ads->ctl19) = 0;
 		return;
 	}
 
-	ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+	ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
 		| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
 		| SM(i->txpower, AR_XmitPower)
 		| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
 	val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
 	ctl12 |= SM(val, AR_PAPRDChainMask);
 
-	ACCESS_ONCE(ads->ctl12) = ctl12;
-	ACCESS_ONCE(ads->ctl17) = ctl17;
+	ACCESS_ONCE_RW(ads->ctl12) = ctl12;
+	ACCESS_ONCE_RW(ads->ctl17) = ctl17;
 
-	ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+	ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
 		| set11nPktDurRTSCTS(i->rates, 1);
 
-	ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+	ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
 		| set11nPktDurRTSCTS(i->rates, 3);
 
-	ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+	ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
 		| set11nRateFlags(i->rates, 1)
 		| set11nRateFlags(i->rates, 2)
 		| set11nRateFlags(i->rates, 3)
 		| SM(i->rtscts_rate, AR_RTSCTSRate);
 
-	ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+	ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
 }
 
 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
diff -ruNp linux-3.13.11/drivers/net/wireless/ath/ath9k/hw.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/hw.h
--- linux-3.13.11/drivers/net/wireless/ath/ath9k/hw.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ath/ath9k/hw.h	2014-07-09 12:00:15.000000000 +0200
@@ -635,7 +635,7 @@ struct ath_hw_private_ops {
 
 	/* ANI */
 	void (*ani_cache_ini_regs)(struct ath_hw *ah);
-};
+} __no_const;
 
 /**
  * struct ath_spec_scan - parameters for Atheros spectral scan
@@ -711,7 +711,7 @@ struct ath_hw_ops {
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
 #endif
-};
+} __no_const;
 
 struct ath_nf_limits {
 	s16 max;
diff -ruNp linux-3.13.11/drivers/net/wireless/b43/phy_lp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/b43/phy_lp.c
--- linux-3.13.11/drivers/net/wireless/b43/phy_lp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/b43/phy_lp.c	2014-07-09 12:00:15.000000000 +0200
@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_w
 {
 	struct ssb_bus *bus = dev->dev->sdev->bus;
 
-	static const struct b206x_channel *chandata = NULL;
+	const struct b206x_channel *chandata = NULL;
 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
 	u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
 	u16 old_comm15, scale;
diff -ruNp linux-3.13.11/drivers/net/wireless/iwlegacy/3945-mac.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlegacy/3945-mac.c
--- linux-3.13.11/drivers/net/wireless/iwlegacy/3945-mac.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlegacy/3945-mac.c	2014-07-09 12:00:15.000000000 +0200
@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, c
 	 */
 	if (il3945_mod_params.disable_hw_scan) {
 		D_INFO("Disabling hw_scan\n");
-		il3945_mac_ops.hw_scan = NULL;
+		pax_open_kernel();
+		*(void **)&il3945_mac_ops.hw_scan = NULL;
+		pax_close_kernel();
 	}
 
 	D_INFO("*** LOAD DRIVER ***\n");
diff -ruNp linux-3.13.11/drivers/net/wireless/iwlwifi/dvm/debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/dvm/debugfs.c
--- linux-3.13.11/drivers/net/wireless/iwlwifi/dvm/debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/dvm/debugfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(stru
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[64];
-	int buf_size;
+	size_t buf_size;
 	u32 offset, len;
 
 	memset(buf, 0, sizeof(buf));
@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_wri
 	struct iwl_priv *priv = file->private_data;
 
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	u32 reset_flag;
 
 	memset(buf, 0, sizeof(buf));
@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_wr
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int ht40;
 
 	memset(buf, 0, sizeof(buf));
@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_ove
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int value;
 
 	memset(buf, 0, sizeof(buf));
@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
 DEBUGFS_READ_FILE_OPS(current_sleep_command);
 
-static const char *fmt_value = "  %-30s %10u\n";
-static const char *fmt_hex   = "  %-30s       0x%02X\n";
-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
-static const char *fmt_header =
+static const char fmt_value[] = "  %-30s %10u\n";
+static const char fmt_hex[]   = "  %-30s       0x%02X\n";
+static const char fmt_table[] = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char fmt_header[] =
 	"%-32s    current  cumulative       delta         max\n";
 
 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_sta
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int clear;
 
 	memset(buf, 0, sizeof(buf));
@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_w
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int trace;
 
 	memset(buf, 0, sizeof(buf));
@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_w
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int missed;
 
 	memset(buf, 0, sizeof(buf));
@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_writ
 
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int plcp;
 
 	memset(buf, 0, sizeof(buf));
@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_wr
 
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int flush;
 
 	memset(buf, 0, sizeof(buf));
@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode
 
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int rts;
 
 	if (!priv->cfg->ht_params)
@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write
 {
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 
 	memset(buf, 0, sizeof(buf));
 	buf_size = min(count, sizeof(buf) -  1);
@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write
 	struct iwl_priv *priv = file->private_data;
 	u32 event_log_flag;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 
 	/* check that the interface is up */
 	if (!iwl_is_ready(priv))
@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_
 	struct iwl_priv *priv = file->private_data;
 	char buf[8];
 	u32 calib_disabled;
-	int buf_size;
+	size_t buf_size;
 
 	memset(buf, 0, sizeof(buf));
 	buf_size = min(count, sizeof(buf) - 1);
diff -ruNp linux-3.13.11/drivers/net/wireless/iwlwifi/dvm/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/dvm/main.c
--- linux-3.13.11/drivers/net/wireless/iwlwifi/dvm/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/dvm/main.c	2014-07-09 12:00:15.000000000 +0200
@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl
 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 {
 	struct iwl_nvm_data *data = priv->nvm_data;
-	char *debug_msg;
+	static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
 
 	if (data->sku_cap_11n_enable &&
 	    !priv->cfg->ht_params) {
@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(str
 		return -EINVAL;
 	}
 
-	debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
 	IWL_DEBUG_INFO(priv, debug_msg,
 		       data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
 		       data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
diff -ruNp linux-3.13.11/drivers/net/wireless/iwlwifi/pcie/trans.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/pcie/trans.c
--- linux-3.13.11/drivers/net/wireless/iwlwifi/pcie/trans.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/iwlwifi/pcie/trans.c	2014-07-09 12:00:15.000000000 +0200
@@ -1390,7 +1390,7 @@ static ssize_t iwl_dbgfs_interrupt_write
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	u32 reset_flag;
 
 	memset(buf, 0, sizeof(buf));
@@ -1411,7 +1411,7 @@ static ssize_t iwl_dbgfs_csr_write(struc
 {
 	struct iwl_trans *trans = file->private_data;
 	char buf[8];
-	int buf_size;
+	size_t buf_size;
 	int csr;
 
 	memset(buf, 0, sizeof(buf));
diff -ruNp linux-3.13.11/drivers/net/wireless/mac80211_hwsim.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/mac80211_hwsim.c
--- linux-3.13.11/drivers/net/wireless/mac80211_hwsim.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/mac80211_hwsim.c	2014-07-09 12:00:15.000000000 +0200
@@ -2224,25 +2224,19 @@ static int __init init_mac80211_hwsim(vo
 
 	if (channels > 1) {
 		hwsim_if_comb.num_different_channels = channels;
-		mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
-		mac80211_hwsim_ops.cancel_hw_scan =
-			mac80211_hwsim_cancel_hw_scan;
-		mac80211_hwsim_ops.sw_scan_start = NULL;
-		mac80211_hwsim_ops.sw_scan_complete = NULL;
-		mac80211_hwsim_ops.remain_on_channel =
-			mac80211_hwsim_roc;
-		mac80211_hwsim_ops.cancel_remain_on_channel =
-			mac80211_hwsim_croc;
-		mac80211_hwsim_ops.add_chanctx =
-			mac80211_hwsim_add_chanctx;
-		mac80211_hwsim_ops.remove_chanctx =
-			mac80211_hwsim_remove_chanctx;
-		mac80211_hwsim_ops.change_chanctx =
-			mac80211_hwsim_change_chanctx;
-		mac80211_hwsim_ops.assign_vif_chanctx =
-			mac80211_hwsim_assign_vif_chanctx;
-		mac80211_hwsim_ops.unassign_vif_chanctx =
-			mac80211_hwsim_unassign_vif_chanctx;
+		pax_open_kernel();
+		*(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+		*(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
+		*(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
+		*(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
+		*(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
+		*(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
+		*(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
+		*(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
+		*(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
+		*(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
+		*(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
+		pax_close_kernel();
 	}
 
 	spin_lock_init(&hwsim_radio_lock);
diff -ruNp linux-3.13.11/drivers/net/wireless/rndis_wlan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rndis_wlan.c
--- linux-3.13.11/drivers/net/wireless/rndis_wlan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rndis_wlan.c	2014-07-09 12:00:15.000000000 +0200
@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbn
 
 	netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
 
-	if (rts_threshold < 0 || rts_threshold > 2347)
+	if (rts_threshold > 2347)
 		rts_threshold = 2347;
 
 	tmp = cpu_to_le32(rts_threshold);
diff -ruNp linux-3.13.11/drivers/net/wireless/rt2x00/rt2x00.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rt2x00/rt2x00.h
--- linux-3.13.11/drivers/net/wireless/rt2x00/rt2x00.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rt2x00/rt2x00.h	2014-07-09 12:00:15.000000000 +0200
@@ -377,7 +377,7 @@ struct rt2x00_intf {
 	 * for hardware which doesn't support hardware
 	 * sequence counting.
 	 */
-	atomic_t seqno;
+	atomic_unchecked_t seqno;
 };
 
 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff -ruNp linux-3.13.11/drivers/net/wireless/rt2x00/rt2x00queue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rt2x00/rt2x00queue.c
--- linux-3.13.11/drivers/net/wireless/rt2x00/rt2x00queue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/rt2x00/rt2x00queue.c	2014-07-09 12:00:15.000000000 +0200
@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descri
 	 * sequence counter given by mac80211.
 	 */
 	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
-		seqno = atomic_add_return(0x10, &intf->seqno);
+		seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
 	else
-		seqno = atomic_read(&intf->seqno);
+		seqno = atomic_read_unchecked(&intf->seqno);
 
 	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 	hdr->seq_ctrl |= cpu_to_le16(seqno);
diff -ruNp linux-3.13.11/drivers/net/wireless/ti/wl1251/sdio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl1251/sdio.c
--- linux-3.13.11/drivers/net/wireless/ti/wl1251/sdio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl1251/sdio.c	2014-07-09 12:00:15.000000000 +0200
@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio
 
 		irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
-		wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
-		wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+		pax_open_kernel();
+		*(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+		*(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+		pax_close_kernel();
 
 		wl1251_info("using dedicated interrupt line");
 	} else {
-		wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
-		wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+		pax_open_kernel();
+		*(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
+		*(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
+		pax_close_kernel();
 
 		wl1251_info("using SDIO interrupt");
 	}
diff -ruNp linux-3.13.11/drivers/net/wireless/ti/wl12xx/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl12xx/main.c
--- linux-3.13.11/drivers/net/wireless/ti/wl12xx/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl12xx/main.c	2014-07-09 12:00:15.000000000 +0200
@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct w
 		       sizeof(wl->conf.mem));
 
 		/* read data preparation is only needed by wl127x */
-		wl->ops->prepare_read = wl127x_prepare_read;
+		pax_open_kernel();
+		*(void **)&wl->ops->prepare_read = wl127x_prepare_read;
+		pax_close_kernel();
 
 		wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
 			      WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct w
 		       sizeof(wl->conf.mem));
 
 		/* read data preparation is only needed by wl127x */
-		wl->ops->prepare_read = wl127x_prepare_read;
+		pax_open_kernel();
+		*(void **)&wl->ops->prepare_read = wl127x_prepare_read;
+		pax_close_kernel();
 
 		wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
 			      WL127X_IFTYPE_SR_VER,  WL127X_MAJOR_SR_VER,
diff -ruNp linux-3.13.11/drivers/net/wireless/ti/wl18xx/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl18xx/main.c
--- linux-3.13.11/drivers/net/wireless/ti/wl18xx/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/ti/wl18xx/main.c	2014-07-09 12:00:15.000000000 +0200
@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *w
 	}
 
 	if (!checksum_param) {
-		wl18xx_ops.set_rx_csum = NULL;
-		wl18xx_ops.init_vif = NULL;
+		pax_open_kernel();
+		*(void **)&wl18xx_ops.set_rx_csum = NULL;
+		*(void **)&wl18xx_ops.init_vif = NULL;
+		pax_close_kernel();
 	}
 
 	/* Enable 11a Band only if we have 5G antennas */
diff -ruNp linux-3.13.11/drivers/net/wireless/zd1211rw/zd_usb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/zd1211rw/zd_usb.c
--- linux-3.13.11/drivers/net/wireless/zd1211rw/zd_usb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/net/wireless/zd1211rw/zd_usb.c	2014-07-09 12:00:15.000000000 +0200
@@ -386,7 +386,7 @@ static inline void handle_regs_int(struc
 {
 	struct zd_usb *usb = urb->context;
 	struct zd_usb_interrupt *intr = &usb->intr;
-	int len;
+	unsigned int len;
 	u16 int_num;
 
 	ZD_ASSERT(in_interrupt());
diff -ruNp linux-3.13.11/drivers/nfc/nfcwilink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/nfc/nfcwilink.c
--- linux-3.13.11/drivers/nfc/nfcwilink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/nfc/nfcwilink.c	2014-07-09 12:00:15.000000000 +0200
@@ -498,7 +498,7 @@ static struct nci_ops nfcwilink_ops = {
 
 static int nfcwilink_probe(struct platform_device *pdev)
 {
-	static struct nfcwilink *drv;
+	struct nfcwilink *drv;
 	int rc;
 	__u32 protocols;
 
diff -ruNp linux-3.13.11/drivers/oprofile/buffer_sync.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/buffer_sync.c
--- linux-3.13.11/drivers/oprofile/buffer_sync.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/buffer_sync.c	2014-07-09 12:00:15.000000000 +0200
@@ -332,7 +332,7 @@ static void add_data(struct op_entry *en
 		if (cookie == NO_COOKIE)
 			offset = pc;
 		if (cookie == INVALID_COOKIE) {
-			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+			atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
 			offset = pc;
 		}
 		if (cookie != last_cookie) {
@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct
 	/* add userspace sample */
 
 	if (!mm) {
-		atomic_inc(&oprofile_stats.sample_lost_no_mm);
+		atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
 		return 0;
 	}
 
 	cookie = lookup_dcookie(mm, s->eip, &offset);
 
 	if (cookie == INVALID_COOKIE) {
-		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+		atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
 		return 0;
 	}
 
@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
 		/* ignore backtraces if failed to add a sample */
 		if (state == sb_bt_start) {
 			state = sb_bt_ignore;
-			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
+			atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
 		}
 	}
 	release_mm(mm);
diff -ruNp linux-3.13.11/drivers/oprofile/event_buffer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/event_buffer.c
--- linux-3.13.11/drivers/oprofile/event_buffer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/event_buffer.c	2014-07-09 12:00:15.000000000 +0200
@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
 	}
 
 	if (buffer_pos == buffer_size) {
-		atomic_inc(&oprofile_stats.event_lost_overflow);
+		atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
 		return;
 	}
 
diff -ruNp linux-3.13.11/drivers/oprofile/oprof.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprof.c
--- linux-3.13.11/drivers/oprofile/oprof.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprof.c	2014-07-09 12:00:15.000000000 +0200
@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
 	if (oprofile_ops.switch_events())
 		return;
 
-	atomic_inc(&oprofile_stats.multiplex_counter);
+	atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
 	start_switch_worker();
 }
 
diff -ruNp linux-3.13.11/drivers/oprofile/oprofile_files.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_files.c
--- linux-3.13.11/drivers/oprofile/oprofile_files.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_files.c	2014-07-09 12:00:15.000000000 +0200
@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
-static ssize_t timeout_read(struct file *file, char __user *buf,
+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
 		size_t count, loff_t *offset)
 {
 	return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
diff -ruNp linux-3.13.11/drivers/oprofile/oprofile_stats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_stats.c
--- linux-3.13.11/drivers/oprofile/oprofile_stats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_stats.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
 		cpu_buf->sample_invalid_eip = 0;
 	}
 
-	atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
-	atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
-	atomic_set(&oprofile_stats.event_lost_overflow, 0);
-	atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
-	atomic_set(&oprofile_stats.multiplex_counter, 0);
+	atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
+	atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
+	atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
+	atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
+	atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
 }
 
 
diff -ruNp linux-3.13.11/drivers/oprofile/oprofile_stats.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_stats.h
--- linux-3.13.11/drivers/oprofile/oprofile_stats.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofile_stats.h	2014-07-09 12:00:15.000000000 +0200
@@ -13,11 +13,11 @@
 #include <linux/atomic.h>
 
 struct oprofile_stat_struct {
-	atomic_t sample_lost_no_mm;
-	atomic_t sample_lost_no_mapping;
-	atomic_t bt_lost_no_mapping;
-	atomic_t event_lost_overflow;
-	atomic_t multiplex_counter;
+	atomic_unchecked_t sample_lost_no_mm;
+	atomic_unchecked_t sample_lost_no_mapping;
+	atomic_unchecked_t bt_lost_no_mapping;
+	atomic_unchecked_t event_lost_overflow;
+	atomic_unchecked_t multiplex_counter;
 };
 
 extern struct oprofile_stat_struct oprofile_stats;
diff -ruNp linux-3.13.11/drivers/oprofile/oprofilefs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofilefs.c
--- linux-3.13.11/drivers/oprofile/oprofilefs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/oprofilefs.c	2014-07-09 12:00:15.000000000 +0200
@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct de
 
 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
-	atomic_t *val = file->private_data;
-	return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
+	atomic_unchecked_t *val = file->private_data;
+	return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
 }
 
 
@@ -189,7 +189,7 @@ static const struct file_operations atom
 
 
 int oprofilefs_create_ro_atomic(struct dentry *root,
-	char const *name, atomic_t *val)
+	char const *name, atomic_unchecked_t *val)
 {
 	return __oprofilefs_create_file(root, name,
 					&atomic_ro_fops, 0444, val);
diff -ruNp linux-3.13.11/drivers/oprofile/timer_int.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/timer_int.c
--- linux-3.13.11/drivers/oprofile/timer_int.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/oprofile/timer_int.c	2014-07-09 12:00:15.000000000 +0200
@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct no
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata oprofile_cpu_notifier = {
+static struct notifier_block oprofile_cpu_notifier = {
 	.notifier_call = oprofile_cpu_notify,
 };
 
diff -ruNp linux-3.13.11/drivers/parport/procfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/parport/procfs.c
--- linux-3.13.11/drivers/parport/procfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/parport/procfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
 
 	*ppos += len;
 
-	return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+	return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
 }
 
 #ifdef CONFIG_PARPORT_1284
@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
 
 	*ppos += len;
 
-	return copy_to_user (result, buffer, len) ? -EFAULT : 0;
+	return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
 }
 #endif /* IEEE1284.3 support. */
 
diff -ruNp linux-3.13.11/drivers/pci/hotplug/acpiphp_ibm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/acpiphp_ibm.c
--- linux-3.13.11/drivers/pci/hotplug/acpiphp_ibm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/acpiphp_ibm.c	2014-07-09 12:00:15.000000000 +0200
@@ -453,7 +453,9 @@ static int __init ibm_acpiphp_init(void)
 		goto init_cleanup;
 	}
 
-	ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
+	pax_open_kernel();
+	*(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
+	pax_close_kernel();
 	retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
 
 	return retval;
diff -ruNp linux-3.13.11/drivers/pci/hotplug/cpcihp_generic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpcihp_generic.c
--- linux-3.13.11/drivers/pci/hotplug/cpcihp_generic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpcihp_generic.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,7 +73,6 @@ static u16 port;
 static unsigned int enum_bit;
 static u8 enum_mask;
 
-static struct cpci_hp_controller_ops generic_hpc_ops;
 static struct cpci_hp_controller generic_hpc;
 
 static int __init validate_parameters(void)
@@ -139,6 +138,10 @@ static int query_enum(void)
 	return ((value & enum_mask) == enum_mask);
 }
 
+static struct cpci_hp_controller_ops generic_hpc_ops = {
+	.query_enum = query_enum,
+};
+
 static int __init cpcihp_generic_init(void)
 {
 	int status;
@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(vo
 	pci_dev_put(dev);
 
 	memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
-	generic_hpc_ops.query_enum = query_enum;
 	generic_hpc.ops = &generic_hpc_ops;
 
 	status = cpci_hp_register_controller(&generic_hpc);
diff -ruNp linux-3.13.11/drivers/pci/hotplug/cpcihp_zt5550.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpcihp_zt5550.c
--- linux-3.13.11/drivers/pci/hotplug/cpcihp_zt5550.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpcihp_zt5550.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,7 +59,6 @@
 /* local variables */
 static bool debug;
 static bool poll;
-static struct cpci_hp_controller_ops zt5550_hpc_ops;
 static struct cpci_hp_controller zt5550_hpc;
 
 /* Primary cPCI bus bridge device */
@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
 	return 0;
 }
 
+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
+	.query_enum = zt5550_hc_query_enum,
+};
+
 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int status;
@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pc
 	dbg("returned from zt5550_hc_config");
 
 	memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
-	zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
 	zt5550_hpc.ops = &zt5550_hpc_ops;
 	if(!poll) {
 		zt5550_hpc.irq = hc_dev->irq;
 		zt5550_hpc.irq_flags = IRQF_SHARED;
 		zt5550_hpc.dev_id = hc_dev;
 
-		zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
-		zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
-		zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
+		pax_open_kernel();
+		*(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+		*(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+		*(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
+		pax_close_kernel();
 	} else {
 		info("using ENUM# polling mode");
 	}
diff -ruNp linux-3.13.11/drivers/pci/hotplug/cpqphp_nvram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpqphp_nvram.c
--- linux-3.13.11/drivers/pci/hotplug/cpqphp_nvram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/cpqphp_nvram.c	2014-07-09 12:00:15.000000000 +0200
@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
 
 void compaq_nvram_init (void __iomem *rom_start)
 {
+
+#ifndef CONFIG_PAX_KERNEXEC
 	if (rom_start) {
 		compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
 	}
+#endif
+
 	dbg("int15 entry  = %p\n", compaq_int15_entry_point);
 
 	/* initialize our int15 lock */
diff -ruNp linux-3.13.11/drivers/pci/hotplug/pci_hotplug_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/pci_hotplug_core.c
--- linux-3.13.11/drivers/pci/hotplug/pci_hotplug_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/pci_hotplug_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slo
 		return -EINVAL;
 	}
 
-	slot->ops->owner = owner;
-	slot->ops->mod_name = mod_name;
+	pax_open_kernel();
+	*(struct module **)&slot->ops->owner = owner;
+	*(const char **)&slot->ops->mod_name = mod_name;
+	pax_close_kernel();
 
 	mutex_lock(&pci_hp_mutex);
 	/*
diff -ruNp linux-3.13.11/drivers/pci/hotplug/pciehp_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/pciehp_core.c
--- linux-3.13.11/drivers/pci/hotplug/pciehp_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/hotplug/pciehp_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -92,7 +92,7 @@ static int init_slot(struct controller *
 	struct slot *slot = ctrl->slot;
 	struct hotplug_slot *hotplug = NULL;
 	struct hotplug_slot_info *info = NULL;
-	struct hotplug_slot_ops *ops = NULL;
+	hotplug_slot_ops_no_const *ops = NULL;
 	char name[SLOT_NAME_SIZE];
 	int retval = -ENOMEM;
 
diff -ruNp linux-3.13.11/drivers/pci/pci-sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pci-sysfs.c
--- linux-3.13.11/drivers/pci/pci-sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pci-sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -1117,7 +1117,7 @@ static int pci_create_attr(struct pci_de
 {
 	/* allocate attribute structure, piggyback attribute name */
 	int name_len = write_combine ? 13 : 10;
-	struct bin_attribute *res_attr;
+	bin_attribute_no_const *res_attr;
 	int retval;
 
 	res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
@@ -1302,7 +1302,7 @@ static struct device_attribute reset_att
 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
 {
 	int retval;
-	struct bin_attribute *attr;
+	bin_attribute_no_const *attr;
 
 	/* If the device has VPD, try to expose it in sysfs. */
 	if (dev->vpd) {
@@ -1349,7 +1349,7 @@ int __must_check pci_create_sysfs_dev_fi
 {
 	int retval;
 	int rom_size = 0;
-	struct bin_attribute *attr;
+	bin_attribute_no_const *attr;
 
 	if (!sysfs_initialized)
 		return -EACCES;
diff -ruNp linux-3.13.11/drivers/pci/pci.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pci.h
--- linux-3.13.11/drivers/pci/pci.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pci.h	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ struct pci_vpd_ops {
 struct pci_vpd {
 	unsigned int len;
 	const struct pci_vpd_ops *ops;
-	struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
+	bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
 };
 
 int pci_vpd_pci22_init(struct pci_dev *dev);
diff -ruNp linux-3.13.11/drivers/pci/pcie/aspm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pcie/aspm.c
--- linux-3.13.11/drivers/pci/pcie/aspm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/pcie/aspm.c	2014-07-09 12:00:15.000000000 +0200
@@ -27,9 +27,9 @@
 #define MODULE_PARAM_PREFIX "pcie_aspm."
 
 /* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
-#define ASPM_STATE_L1		(4)	/* L1 state */
+#define ASPM_STATE_L0S_UP	(1U)	/* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW	(2U)	/* Downstream direction L0s state */
+#define ASPM_STATE_L1		(4U)	/* L1 state */
 #define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
 #define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1)
 
diff -ruNp linux-3.13.11/drivers/pci/probe.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/probe.c
--- linux-3.13.11/drivers/pci/probe.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/probe.c	2014-07-09 12:00:15.000000000 +0200
@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev,
 	struct pci_bus_region region, inverted_region;
 	bool bar_too_big = false, bar_disabled = false;
 
-	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+	mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
 
 	/* No printks while decoding is disabled! */
 	if (!dev->mmio_always_on) {
diff -ruNp linux-3.13.11/drivers/pci/proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/proc.c
--- linux-3.13.11/drivers/pci/proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pci/proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -434,7 +434,16 @@ static const struct file_operations proc
 static int __init pci_proc_init(void)
 {
 	struct pci_dev *dev = NULL;
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
+#endif
+#else
 	proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
+#endif
 	proc_create("devices", 0, proc_bus_pci_dir,
 		    &proc_bus_pci_dev_operations);
 	proc_initialized = 1;
diff -ruNp linux-3.13.11/drivers/platform/chrome/chromeos_laptop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/chrome/chromeos_laptop.c
--- linux-3.13.11/drivers/platform/chrome/chromeos_laptop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/chrome/chromeos_laptop.c	2014-07-09 12:00:15.000000000 +0200
@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(cons
 	return 0;
 }
 
-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
 	{
 		.ident = "Samsung Series 5 550 - Touchpad",
 		.matches = {
diff -ruNp linux-3.13.11/drivers/platform/x86/asus-wmi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/asus-wmi.c
--- linux-3.13.11/drivers/platform/x86/asus-wmi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/asus-wmi.c	2014-07-09 12:00:15.000000000 +0200
@@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m,
 	int err;
 	u32 retval = -1;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	return -EPERM;
+#endif
+
 	err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
 
 	if (err < 0)
@@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m,
 	int err;
 	u32 retval = -1;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	return -EPERM;
+#endif
+
 	err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
 				    &retval);
 
@@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m,
 	union acpi_object *obj;
 	acpi_status status;
 
+#ifdef CONFIG_GRKERNSEC_KMEM
+	return -EPERM;
+#endif
+
 	status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
 				     1, asus->debug.method_id,
 				     &input, &output);
diff -ruNp linux-3.13.11/drivers/platform/x86/msi-laptop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/msi-laptop.c
--- linux-3.13.11/drivers/platform/x86/msi-laptop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/msi-laptop.c	2014-07-09 12:00:15.000000000 +0200
@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(st
 
 	if (!quirks->ec_read_only) {
 		/* allow userland write sysfs file  */
-		dev_attr_bluetooth.store = store_bluetooth;
-		dev_attr_wlan.store = store_wlan;
-		dev_attr_threeg.store = store_threeg;
-		dev_attr_bluetooth.attr.mode |= S_IWUSR;
-		dev_attr_wlan.attr.mode |= S_IWUSR;
-		dev_attr_threeg.attr.mode |= S_IWUSR;
+		pax_open_kernel();
+		*(void **)&dev_attr_bluetooth.store = store_bluetooth;
+		*(void **)&dev_attr_wlan.store = store_wlan;
+		*(void **)&dev_attr_threeg.store = store_threeg;
+		*(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
+		*(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
+		*(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
+		pax_close_kernel();
 	}
 
 	/* disable hardware control by fn key */
diff -ruNp linux-3.13.11/drivers/platform/x86/msi-wmi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/msi-wmi.c
--- linux-3.13.11/drivers/platform/x86/msi-wmi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/msi-wmi.c	2014-07-09 12:00:15.000000000 +0200
@@ -183,7 +183,7 @@ static const struct backlight_ops msi_ba
 static void msi_wmi_notify(u32 value, void *context)
 {
 	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
-	static struct key_entry *key;
+	struct key_entry *key;
 	union acpi_object *obj;
 	acpi_status status;
 
diff -ruNp linux-3.13.11/drivers/platform/x86/sony-laptop.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/sony-laptop.c
--- linux-3.13.11/drivers/platform/x86/sony-laptop.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/sony-laptop.c	2014-07-09 12:00:15.000000000 +0200
@@ -2453,7 +2453,7 @@ static void sony_nc_gfx_switch_cleanup(s
 }
 
 /* High speed charging function */
-static struct device_attribute *hsc_handle;
+static device_attribute_no_const *hsc_handle;
 
 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
 		struct device_attribute *attr,
diff -ruNp linux-3.13.11/drivers/platform/x86/thinkpad_acpi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/thinkpad_acpi.c
--- linux-3.13.11/drivers/platform/x86/thinkpad_acpi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/platform/x86/thinkpad_acpi.c	2014-07-09 12:00:15.000000000 +0200
@@ -2100,7 +2100,7 @@ static int hotkey_mask_get(void)
 	return 0;
 }
 
-void static hotkey_mask_warn_incomplete_mask(void)
+static void hotkey_mask_warn_incomplete_mask(void)
 {
 	/* log only what the user can fix... */
 	const u32 wantedmask = hotkey_driver_mask &
@@ -2327,11 +2327,6 @@ static void hotkey_read_nvram(struct tp_
 	}
 }
 
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
-					   struct tp_nvram_state *newn,
-					   const u32 event_mask)
-{
-
 #define TPACPI_COMPARE_KEY(__scancode, __member) \
 	do { \
 		if ((event_mask & (1 << __scancode)) && \
@@ -2345,36 +2340,42 @@ static void hotkey_compare_and_issue_eve
 			tpacpi_hotkey_send_key(__scancode); \
 	} while (0)
 
-	void issue_volchange(const unsigned int oldvol,
-			     const unsigned int newvol)
-	{
-		unsigned int i = oldvol;
-
-		while (i > newvol) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
-			i--;
-		}
-		while (i < newvol) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
-			i++;
-		}
+static void issue_volchange(const unsigned int oldvol,
+			    const unsigned int newvol,
+			    const u32 event_mask)
+{
+	unsigned int i = oldvol;
+
+	while (i > newvol) {
+		TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+		i--;
 	}
+	while (i < newvol) {
+		TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+		i++;
+	}
+}
 
-	void issue_brightnesschange(const unsigned int oldbrt,
-				    const unsigned int newbrt)
-	{
-		unsigned int i = oldbrt;
+static void issue_brightnesschange(const unsigned int oldbrt,
+				   const unsigned int newbrt,
+				   const u32 event_mask)
+{
+	unsigned int i = oldbrt;
 
-		while (i > newbrt) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
-			i--;
-		}
-		while (i < newbrt) {
-			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
-			i++;
-		}
+	while (i > newbrt) {
+		TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+		i--;
+	}
+	while (i < newbrt) {
+		TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+		i++;
 	}
+}
 
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+					   struct tp_nvram_state *newn,
+					   const u32 event_mask)
+{
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
 	TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2408,7 +2409,7 @@ static void hotkey_compare_and_issue_eve
 		    oldn->volume_level != newn->volume_level) {
 			/* recently muted, or repeated mute keypress, or
 			 * multiple presses ending in mute */
-			issue_volchange(oldn->volume_level, newn->volume_level);
+			issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
 			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
 		}
 	} else {
@@ -2418,7 +2419,7 @@ static void hotkey_compare_and_issue_eve
 			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
 		}
 		if (oldn->volume_level != newn->volume_level) {
-			issue_volchange(oldn->volume_level, newn->volume_level);
+			issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
 		} else if (oldn->volume_toggle != newn->volume_toggle) {
 			/* repeated vol up/down keypress at end of scale ? */
 			if (newn->volume_level == 0)
@@ -2431,7 +2432,8 @@ static void hotkey_compare_and_issue_eve
 	/* handle brightness */
 	if (oldn->brightness_level != newn->brightness_level) {
 		issue_brightnesschange(oldn->brightness_level,
-				       newn->brightness_level);
+				       newn->brightness_level,
+				       event_mask);
 	} else if (oldn->brightness_toggle != newn->brightness_toggle) {
 		/* repeated key presses that didn't change state */
 		if (newn->brightness_level == 0)
@@ -2440,10 +2442,10 @@ static void hotkey_compare_and_issue_eve
 				&& !tp_features.bright_unkfw)
 			TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
 	}
+}
 
 #undef TPACPI_COMPARE_KEY
 #undef TPACPI_MAY_SEND_KEY
-}
 
 /*
  * Polling driver
diff -ruNp linux-3.13.11/drivers/pnp/pnpbios/bioscalls.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pnp/pnpbios/bioscalls.c
--- linux-3.13.11/drivers/pnp/pnpbios/bioscalls.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pnp/pnpbios/bioscalls.c	2014-07-09 12:00:15.000000000 +0200
@@ -58,7 +58,7 @@ do { \
 	set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
 } while(0)
 
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
 			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
 
 /*
@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func
 
 	cpu = get_cpu();
 	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
+
+	pax_open_kernel();
 	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
+	pax_close_kernel();
 
 	/* On some boxes IRQ's during PnP BIOS calls are deadly.  */
 	spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func
 			     :"memory");
 	spin_unlock_irqrestore(&pnp_bios_lock, flags);
 
+	pax_open_kernel();
 	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
+	pax_close_kernel();
+
 	put_cpu();
 
 	/* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 n
 	return status;
 }
 
-void pnpbios_calls_init(union pnp_bios_install_struct *header)
+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
 {
 	int i;
 
@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_i
 	pnp_bios_callpoint.offset = header->fields.pm16offset;
 	pnp_bios_callpoint.segment = PNP_CS16;
 
+	pax_open_kernel();
+
 	for_each_possible_cpu(i) {
 		struct desc_struct *gdt = get_cpu_gdt_table(i);
 		if (!gdt)
@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_i
 		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
 			 (unsigned long)__va(header->fields.pm16dseg));
 	}
+
+	pax_close_kernel();
 }
diff -ruNp linux-3.13.11/drivers/pnp/resource.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pnp/resource.c
--- linux-3.13.11/drivers/pnp/resource.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/pnp/resource.c	2014-07-09 12:00:15.000000000 +0200
@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
 		return 1;
 
 	/* check if the resource is valid */
-	if (*irq < 0 || *irq > 15)
+	if (*irq > 15)
 		return 0;
 
 	/* check if the resource is reserved */
@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
 		return 1;
 
 	/* check if the resource is valid */
-	if (*dma < 0 || *dma == 4 || *dma > 7)
+	if (*dma == 4 || *dma > 7)
 		return 0;
 
 	/* check if the resource is reserved */
diff -ruNp linux-3.13.11/drivers/power/pda_power.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/pda_power.c
--- linux-3.13.11/drivers/power/pda_power.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/pda_power.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,11 @@ static int polling;
 
 #if IS_ENABLED(CONFIG_USB_PHY)
 static struct usb_phy *transceiver;
-static struct notifier_block otg_nb;
+static int otg_handle_notification(struct notifier_block *nb,
+		unsigned long event, void *unused);
+static struct notifier_block otg_nb = {
+	.notifier_call = otg_handle_notification
+};
 #endif
 
 static struct regulator *ac_draw;
@@ -369,7 +373,6 @@ static int pda_power_probe(struct platfo
 
 #if IS_ENABLED(CONFIG_USB_PHY)
 	if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
-		otg_nb.notifier_call = otg_handle_notification;
 		ret = usb_register_notifier(transceiver, &otg_nb);
 		if (ret) {
 			dev_err(dev, "failure to register otg notifier\n");
diff -ruNp linux-3.13.11/drivers/power/power_supply.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply.h
--- linux-3.13.11/drivers/power/power_supply.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,12 +16,12 @@ struct power_supply;
 
 #ifdef CONFIG_SYSFS
 
-extern void power_supply_init_attrs(struct device_type *dev_type);
+extern void power_supply_init_attrs(void);
 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
 
 #else
 
-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
+static inline void power_supply_init_attrs(void) {}
 #define power_supply_uevent NULL
 
 #endif /* CONFIG_SYSFS */
diff -ruNp linux-3.13.11/drivers/power/power_supply_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply_core.c
--- linux-3.13.11/drivers/power/power_supply_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,10 @@
 struct class *power_supply_class;
 EXPORT_SYMBOL_GPL(power_supply_class);
 
-static struct device_type power_supply_dev_type;
+extern const struct attribute_group *power_supply_attr_groups[];
+static struct device_type power_supply_dev_type = {
+	.groups = power_supply_attr_groups,
+};
 
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
 					 struct power_supply *supply)
@@ -584,7 +587,7 @@ static int __init power_supply_class_ini
 		return PTR_ERR(power_supply_class);
 
 	power_supply_class->dev_uevent = power_supply_uevent;
-	power_supply_init_attrs(&power_supply_dev_type);
+	power_supply_init_attrs();
 
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/power/power_supply_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply_sysfs.c
--- linux-3.13.11/drivers/power/power_supply_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/power/power_supply_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -230,17 +230,15 @@ static struct attribute_group power_supp
 	.is_visible = power_supply_attr_is_visible,
 };
 
-static const struct attribute_group *power_supply_attr_groups[] = {
+const struct attribute_group *power_supply_attr_groups[] = {
 	&power_supply_attr_group,
 	NULL,
 };
 
-void power_supply_init_attrs(struct device_type *dev_type)
+void power_supply_init_attrs(void)
 {
 	int i;
 
-	dev_type->groups = power_supply_attr_groups;
-
 	for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
 		__power_supply_attrs[i] = &power_supply_attrs[i].attr;
 }
diff -ruNp linux-3.13.11/drivers/powercap/powercap_sys.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/powercap/powercap_sys.c
--- linux-3.13.11/drivers/powercap/powercap_sys.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/powercap/powercap_sys.c	2014-07-09 12:00:15.000000000 +0200
@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
 	struct device_attribute name_attr;
 };
 
+static ssize_t show_constraint_name(struct device *dev,
+				struct device_attribute *dev_attr,
+				char *buf);
+
 static struct powercap_constraint_attr
-				constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+				constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
+	[0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
+		.power_limit_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IWUSR | S_IRUGO
+			},
+			.show	= show_constraint_power_limit_uw,
+			.store	= store_constraint_power_limit_uw
+		},
+
+		.time_window_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IWUSR | S_IRUGO
+			},
+			.show	= show_constraint_time_window_us,
+			.store	= store_constraint_time_window_us
+		},
+
+		.max_power_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IRUGO
+			},
+			.show	= show_constraint_max_power_uw,
+			.store	= NULL
+		},
+
+		.min_power_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IRUGO
+			},
+			.show	= show_constraint_min_power_uw,
+			.store	= NULL
+		},
+
+		.max_time_window_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IRUGO
+			},
+			.show	= show_constraint_max_time_window_us,
+			.store	= NULL
+		},
+
+		.min_time_window_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IRUGO
+			},
+			.show	= show_constraint_min_time_window_us,
+			.store	= NULL
+		},
+
+		.name_attr = {
+			.attr = {
+				.name	= NULL,
+				.mode	= S_IRUGO
+			},
+			.show	= show_constraint_name,
+			.store	= NULL
+		}
+	}
+};
 
 /* A list of powercap control_types */
 static LIST_HEAD(powercap_cntrl_list);
@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(stru
 }
 
 static int create_constraint_attribute(int id, const char *name,
-				int mode,
-				struct device_attribute *dev_attr,
-				ssize_t (*show)(struct device *,
-					struct device_attribute *, char *),
-				ssize_t (*store)(struct device *,
-					struct device_attribute *,
-				const char *, size_t)
-				)
+				struct device_attribute *dev_attr)
 {
+	name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
 
-	dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
-								id, name);
-	if (!dev_attr->attr.name)
+	if (!name)
 		return -ENOMEM;
-	dev_attr->attr.mode = mode;
-	dev_attr->show = show;
-	dev_attr->store = store;
+
+	pax_open_kernel();
+	*(const char **)&dev_attr->attr.name = name;
+	pax_close_kernel();
 
 	return 0;
 }
@@ -236,49 +298,31 @@ static int seed_constraint_attributes(vo
 
 	for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
 		ret = create_constraint_attribute(i, "power_limit_uw",
-					S_IWUSR | S_IRUGO,
-					&constraint_attrs[i].power_limit_attr,
-					show_constraint_power_limit_uw,
-					store_constraint_power_limit_uw);
+					&constraint_attrs[i].power_limit_attr);
 		if (ret)
 			goto err_alloc;
 		ret = create_constraint_attribute(i, "time_window_us",
-					S_IWUSR | S_IRUGO,
-					&constraint_attrs[i].time_window_attr,
-					show_constraint_time_window_us,
-					store_constraint_time_window_us);
+					&constraint_attrs[i].time_window_attr);
 		if (ret)
 			goto err_alloc;
-		ret = create_constraint_attribute(i, "name", S_IRUGO,
-				&constraint_attrs[i].name_attr,
-				show_constraint_name,
-				NULL);
+		ret = create_constraint_attribute(i, "name",
+				&constraint_attrs[i].name_attr);
 		if (ret)
 			goto err_alloc;
-		ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
-				&constraint_attrs[i].max_power_attr,
-				show_constraint_max_power_uw,
-				NULL);
+		ret = create_constraint_attribute(i, "max_power_uw",
+				&constraint_attrs[i].max_power_attr);
 		if (ret)
 			goto err_alloc;
-		ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
-				&constraint_attrs[i].min_power_attr,
-				show_constraint_min_power_uw,
-				NULL);
+		ret = create_constraint_attribute(i, "min_power_uw",
+				&constraint_attrs[i].min_power_attr);
 		if (ret)
 			goto err_alloc;
 		ret = create_constraint_attribute(i, "max_time_window_us",
-				S_IRUGO,
-				&constraint_attrs[i].max_time_window_attr,
-				show_constraint_max_time_window_us,
-				NULL);
+				&constraint_attrs[i].max_time_window_attr);
 		if (ret)
 			goto err_alloc;
 		ret = create_constraint_attribute(i, "min_time_window_us",
-				S_IRUGO,
-				&constraint_attrs[i].min_time_window_attr,
-				show_constraint_min_time_window_us,
-				NULL);
+				&constraint_attrs[i].min_time_window_attr);
 		if (ret)
 			goto err_alloc;
 
@@ -378,10 +422,12 @@ static void create_power_zone_common_att
 		power_zone->zone_dev_attrs[count++] =
 					&dev_attr_max_energy_range_uj.attr;
 	if (power_zone->ops->get_energy_uj) {
+		pax_open_kernel();
 		if (power_zone->ops->reset_energy_uj)
-			dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+			*(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
 		else
-			dev_attr_energy_uj.attr.mode = S_IRUGO;
+			*(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
+		pax_close_kernel();
 		power_zone->zone_dev_attrs[count++] =
 					&dev_attr_energy_uj.attr;
 	}
diff -ruNp linux-3.13.11/drivers/regulator/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/core.c
--- linux-3.13.11/drivers/regulator/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -3366,7 +3366,7 @@ regulator_register(const struct regulato
 {
 	const struct regulation_constraints *constraints = NULL;
 	const struct regulator_init_data *init_data;
-	static atomic_t regulator_no = ATOMIC_INIT(0);
+	static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
 	struct regulator_dev *rdev;
 	struct device *dev;
 	int ret, i;
@@ -3436,7 +3436,7 @@ regulator_register(const struct regulato
 	rdev->dev.of_node = config->of_node;
 	rdev->dev.parent = dev;
 	dev_set_name(&rdev->dev, "regulator.%d",
-		     atomic_inc_return(&regulator_no) - 1);
+		     atomic_inc_return_unchecked(&regulator_no) - 1);
 	ret = device_register(&rdev->dev);
 	if (ret != 0) {
 		put_device(&rdev->dev);
diff -ruNp linux-3.13.11/drivers/regulator/max8660.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/max8660.c
--- linux-3.13.11/drivers/regulator/max8660.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/max8660.c	2014-07-09 12:00:15.000000000 +0200
@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_clie
 		max8660->shadow_regs[MAX8660_OVER1] = 5;
 	} else {
 		/* Otherwise devices can be toggled via software */
-		max8660_dcdc_ops.enable = max8660_dcdc_enable;
-		max8660_dcdc_ops.disable = max8660_dcdc_disable;
+		pax_open_kernel();
+		*(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
+		*(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
+		pax_close_kernel();
 	}
 
 	/*
diff -ruNp linux-3.13.11/drivers/regulator/max8973-regulator.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/max8973-regulator.c
--- linux-3.13.11/drivers/regulator/max8973-regulator.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/max8973-regulator.c	2014-07-09 12:00:15.000000000 +0200
@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_clie
 	if (!pdata || !pdata->enable_ext_control) {
 		max->desc.enable_reg = MAX8973_VOUT;
 		max->desc.enable_mask = MAX8973_VOUT_ENABLE;
-		max->ops.enable = regulator_enable_regmap;
-		max->ops.disable = regulator_disable_regmap;
-		max->ops.is_enabled = regulator_is_enabled_regmap;
+		pax_open_kernel();
+		*(void **)&max->ops.enable = regulator_enable_regmap;
+		*(void **)&max->ops.disable = regulator_disable_regmap;
+		*(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
+		pax_close_kernel();
 	}
 
 	if (pdata) {
diff -ruNp linux-3.13.11/drivers/regulator/mc13892-regulator.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/mc13892-regulator.c
--- linux-3.13.11/drivers/regulator/mc13892-regulator.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/regulator/mc13892-regulator.c	2014-07-09 12:00:15.000000000 +0200
@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struc
 	}
 	mc13xxx_unlock(mc13892);
 
-	mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+	pax_open_kernel();
+	*(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
 		= mc13892_vcam_set_mode;
-	mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+	*(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
 		= mc13892_vcam_get_mode;
+	pax_close_kernel();
 
 	mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
 					ARRAY_SIZE(mc13892_regulators));
diff -ruNp linux-3.13.11/drivers/rtc/rtc-cmos.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-cmos.c
--- linux-3.13.11/drivers/rtc/rtc-cmos.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-cmos.c	2014-07-09 12:00:15.000000000 +0200
@@ -779,7 +779,9 @@ cmos_do_probe(struct device *dev, struct
 	hpet_rtc_timer_init();
 
 	/* export at least the first block of NVRAM */
-	nvram.size = address_space - NVRAM_OFFSET;
+	pax_open_kernel();
+	*(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
+	pax_close_kernel();
 	retval = sysfs_create_bin_file(&dev->kobj, &nvram);
 	if (retval < 0) {
 		dev_dbg(dev, "can't create nvram file? %d\n", retval);
diff -ruNp linux-3.13.11/drivers/rtc/rtc-dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-dev.c
--- linux-3.13.11/drivers/rtc/rtc-dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/grsecurity.h>
 #include "rtc-core.h"
 
 static dev_t rtc_devt;
@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *f
 		if (copy_from_user(&tm, uarg, sizeof(tm)))
 			return -EFAULT;
 
+		gr_log_timechange();
+
 		return rtc_set_time(rtc, &tm);
 
 	case RTC_PIE_ON:
diff -ruNp linux-3.13.11/drivers/rtc/rtc-ds1307.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-ds1307.c
--- linux-3.13.11/drivers/rtc/rtc-ds1307.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-ds1307.c	2014-07-09 12:00:15.000000000 +0200
@@ -107,7 +107,7 @@ struct ds1307 {
 	u8			offset; /* register's offset */
 	u8			regs[11];
 	u16			nvram_offset;
-	struct bin_attribute	*nvram;
+	bin_attribute_no_const	*nvram;
 	enum ds_type		type;
 	unsigned long		flags;
 #define HAS_NVRAM	0		/* bit 0 == sysfs file active */
diff -ruNp linux-3.13.11/drivers/rtc/rtc-m48t59.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-m48t59.c
--- linux-3.13.11/drivers/rtc/rtc-m48t59.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/rtc/rtc-m48t59.c	2014-07-09 12:00:15.000000000 +0200
@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platf
 	if (IS_ERR(m48t59->rtc))
 		return PTR_ERR(m48t59->rtc);
 
-	m48t59_nvram_attr.size = pdata->offset;
+	pax_open_kernel();
+	*(size_t *)&m48t59_nvram_attr.size = pdata->offset;
+	pax_close_kernel();
 
 	ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
 	if (ret)
diff -ruNp linux-3.13.11/drivers/scsi/aic7xxx/aic79xx_pci.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/aic7xxx/aic79xx_pci.c
--- linux-3.13.11/drivers/scsi/aic7xxx/aic79xx_pci.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/aic7xxx/aic79xx_pci.c	2014-07-09 12:00:15.000000000 +0200
@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
 		for (bit = 0; bit < 8; bit++) {
 
 			if ((pci_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
+				const char *s;
 
 				s = pci_status_strings[bit];
 				if (i == 7/*TARG*/ && bit == 3)
@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd
 
 		for (bit = 0; bit < 8; bit++) {
 
-			if ((split_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
-
-				s = split_status_strings[bit];
-				printk(s, ahd_name(ahd),
+			if ((split_status[i] & (0x1 << bit)) != 0)
+				printk(split_status_strings[bit], ahd_name(ahd),
 				       split_status_source[i]);
-			}
 
 			if (i > 1)
 				continue;
 
-			if ((sg_split_status[i] & (0x1 << bit)) != 0) {
-				static const char *s;
-
-				s = split_status_strings[bit];
-				printk(s, ahd_name(ahd), "SG");
-			}
+			if ((sg_split_status[i] & (0x1 << bit)) != 0)
+				printk(split_status_strings[bit], ahd_name(ahd), "SG");
 		}
 	}
 	/*
diff -ruNp linux-3.13.11/drivers/scsi/bfa/bfa_fcpim.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcpim.h
--- linux-3.13.11/drivers/scsi/bfa/bfa_fcpim.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcpim.h	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ struct bfa_iotag_s {
 
 struct bfa_itn_s {
 	bfa_isr_func_t isr;
-};
+} __no_const;
 
 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
 		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
diff -ruNp linux-3.13.11/drivers/scsi/bfa/bfa_fcs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcs.c
--- linux-3.13.11/drivers/scsi/bfa/bfa_fcs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcs.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
 
 static struct bfa_fcs_mod_s fcs_modules[] = {
-	{ bfa_fcs_port_attach, NULL, NULL },
-	{ bfa_fcs_uf_attach, NULL, NULL },
-	{ bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-	  bfa_fcs_fabric_modexit },
+	{
+		.attach = bfa_fcs_port_attach,
+		.modinit = NULL,
+		.modexit = NULL
+	},
+	{
+		.attach = bfa_fcs_uf_attach,
+		.modinit = NULL,
+		.modexit = NULL
+	},
+	{
+		.attach = bfa_fcs_fabric_attach,
+		.modinit = bfa_fcs_fabric_modinit,
+		.modexit = bfa_fcs_fabric_modexit
+	},
 };
 
 /*
diff -ruNp linux-3.13.11/drivers/scsi/bfa/bfa_fcs_lport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcs_lport.c
--- linux-3.13.11/drivers/scsi/bfa/bfa_fcs_lport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_fcs_lport.c	2014-07-09 12:00:15.000000000 +0200
@@ -89,15 +89,26 @@ static struct {
 	void		(*offline) (struct bfa_fcs_lport_s *port);
 } __port_action[] = {
 	{
-	bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
-			bfa_fcs_lport_unknown_offline}, {
-	bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
-			bfa_fcs_lport_fab_offline}, {
-	bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
-			bfa_fcs_lport_n2n_offline}, {
-	bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
-			bfa_fcs_lport_loop_offline},
-	};
+		.init = bfa_fcs_lport_unknown_init,
+		.online = bfa_fcs_lport_unknown_online,
+		.offline = bfa_fcs_lport_unknown_offline
+	},
+	{
+		.init = bfa_fcs_lport_fab_init,
+		.online = bfa_fcs_lport_fab_online,
+		.offline = bfa_fcs_lport_fab_offline
+	},
+	{
+		.init = bfa_fcs_lport_n2n_init,
+		.online = bfa_fcs_lport_n2n_online,
+		.offline = bfa_fcs_lport_n2n_offline
+	},
+	{
+		.init = bfa_fcs_lport_loop_init,
+		.online = bfa_fcs_lport_loop_online,
+		.offline = bfa_fcs_lport_loop_offline
+	},
+};
 
 /*
  *  fcs_port_sm FCS logical port state machine
diff -ruNp linux-3.13.11/drivers/scsi/bfa/bfa_ioc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_ioc.h
--- linux-3.13.11/drivers/scsi/bfa/bfa_ioc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_ioc.h	2014-07-09 12:00:15.000000000 +0200
@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
 	bfa_ioc_disable_cbfn_t	disable_cbfn;
 	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
 	bfa_ioc_reset_cbfn_t	reset_cbfn;
-};
+} __no_const;
 
 /*
  * IOC event notification mechanism.
@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc_s *ioc,
 					enum bfi_ioc_state fwstate);
 	enum bfi_ioc_state	(*ioc_get_alt_fwstate)	(struct bfa_ioc_s *ioc);
-};
+} __no_const;
 
 /*
  * Queue element to wait for room in request queue. FIFO order is
diff -ruNp linux-3.13.11/drivers/scsi/bfa/bfa_modules.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_modules.h
--- linux-3.13.11/drivers/scsi/bfa/bfa_modules.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/bfa/bfa_modules.h	2014-07-09 12:00:15.000000000 +0200
@@ -78,12 +78,12 @@ enum {
 									\
 	extern struct bfa_module_s hal_mod_ ## __mod;			\
 	struct bfa_module_s hal_mod_ ## __mod = {			\
-		bfa_ ## __mod ## _meminfo,				\
-		bfa_ ## __mod ## _attach,				\
-		bfa_ ## __mod ## _detach,				\
-		bfa_ ## __mod ## _start,				\
-		bfa_ ## __mod ## _stop,					\
-		bfa_ ## __mod ## _iocdisable,				\
+		.meminfo = bfa_ ## __mod ## _meminfo,			\
+		.attach = bfa_ ## __mod ## _attach,			\
+		.detach = bfa_ ## __mod ## _detach,			\
+		.start = bfa_ ## __mod ## _start,			\
+		.stop = bfa_ ## __mod ## _stop,				\
+		.iocdisable = bfa_ ## __mod ## _iocdisable,		\
 	}
 
 #define BFA_CACHELINE_SZ	(256)
diff -ruNp linux-3.13.11/drivers/scsi/fcoe/fcoe_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/fcoe/fcoe_sysfs.c
--- linux-3.13.11/drivers/scsi/fcoe/fcoe_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/fcoe/fcoe_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,8 +33,8 @@
  */
 #include "libfcoe.h"
 
-static atomic_t ctlr_num;
-static atomic_t fcf_num;
+static atomic_unchecked_t ctlr_num;
+static atomic_unchecked_t fcf_num;
 
 /*
  * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_devic
 	if (!ctlr)
 		goto out;
 
-	ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+	ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
 	ctlr->f = f;
 	ctlr->mode = FIP_CONN_TYPE_FABRIC;
 	INIT_LIST_HEAD(&ctlr->fcfs);
@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_
 	fcf->dev.parent = &ctlr->dev;
 	fcf->dev.bus = &fcoe_bus_type;
 	fcf->dev.type = &fcoe_fcf_device_type;
-	fcf->id = atomic_inc_return(&fcf_num) - 1;
+	fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
 	fcf->state = FCOE_FCF_STATE_UNKNOWN;
 
 	fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
 {
 	int error;
 
-	atomic_set(&ctlr_num, 0);
-	atomic_set(&fcf_num, 0);
+	atomic_set_unchecked(&ctlr_num, 0);
+	atomic_set_unchecked(&fcf_num, 0);
 
 	error = bus_register(&fcoe_bus_type);
 	if (error)
diff -ruNp linux-3.13.11/drivers/scsi/hosts.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hosts.c
--- linux-3.13.11/drivers/scsi/hosts.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hosts.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,7 +42,7 @@
 #include "scsi_logging.h"
 
 
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0);	/* host_no for next new host */
+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0);	/* host_no for next new host */
 
 
 static void scsi_host_cls_release(struct device *dev)
@@ -367,7 +367,7 @@ struct Scsi_Host *scsi_host_alloc(struct
 	 * subtract one because we increment first then return, but we need to
 	 * know what the next host number was before increment
 	 */
-	shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+	shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
 	shost->dma_channel = 0xff;
 
 	/* These three are default values which can be overridden */
diff -ruNp linux-3.13.11/drivers/scsi/hpsa.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hpsa.c
--- linux-3.13.11/drivers/scsi/hpsa.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hpsa.c	2014-07-09 12:00:15.000000000 +0200
@@ -578,7 +578,7 @@ static inline u32 next_command(struct ct
 	unsigned long flags;
 
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-		return h->access.command_completed(h, q);
+		return h->access->command_completed(h, q);
 
 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
 		a = rq->head[rq->current_entry];
@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
-		if ((h->access.fifo_full(h))) {
+		if ((h->access->fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
 			break;
 		}
@@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h
 
 		/* Tell the controller execute command */
 		spin_unlock_irqrestore(&h->lock, flags);
-		h->access.submit_command(h, c);
+		h->access->submit_command(h, c);
 		spin_lock_irqsave(&h->lock, flags);
 	}
 	spin_unlock_irqrestore(&h->lock, flags);
@@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h
 
 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 {
-	return h->access.command_completed(h, q);
+	return h->access->command_completed(h, q);
 }
 
 static inline bool interrupt_pending(struct ctlr_info *h)
 {
-	return h->access.intr_pending(h);
+	return h->access->intr_pending(h);
 }
 
 static inline long interrupt_not_for_us(struct ctlr_info *h)
 {
-	return (h->access.intr_pending(h) == 0) ||
+	return (h->access->intr_pending(h) == 0) ||
 		(h->interrupts_enabled == 0);
 }
 
@@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_inf
 	if (prod_index < 0)
 		return -ENODEV;
 	h->product_name = products[prod_index].product_name;
-	h->access = *(products[prod_index].access);
+	h->access = products[prod_index].access;
 
 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
@@ -4668,7 +4668,7 @@ static void controller_lockup_detected(s
 
 	assert_spin_locked(&lockup_detector_lock);
 	remove_ctlr_from_lockup_detector_list(h);
-	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	h->access->set_intr_mask(h, HPSA_INTR_OFF);
 	spin_lock_irqsave(&h->lock, flags);
 	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 	spin_unlock_irqrestore(&h->lock, flags);
@@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
 	}
 
 	/* make sure the board interrupts are off */
-	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	h->access->set_intr_mask(h, HPSA_INTR_OFF);
 
 	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
 		goto clean2;
@@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
 		 * fake ones to scoop up any residual completions.
 		 */
 		spin_lock_irqsave(&h->lock, flags);
-		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+		h->access->set_intr_mask(h, HPSA_INTR_OFF);
 		spin_unlock_irqrestore(&h->lock, flags);
 		free_irqs(h);
 		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
@@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
 		dev_info(&h->pdev->dev, "Board READY.\n");
 		dev_info(&h->pdev->dev,
 			"Waiting for stale completions to drain.\n");
-		h->access.set_intr_mask(h, HPSA_INTR_ON);
+		h->access->set_intr_mask(h, HPSA_INTR_ON);
 		msleep(10000);
-		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+		h->access->set_intr_mask(h, HPSA_INTR_OFF);
 
 		rc = controller_reset_failed(h->cfgtable);
 		if (rc)
@@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
 	}
 
 	/* Turn the interrupts on so we can service requests */
-	h->access.set_intr_mask(h, HPSA_INTR_ON);
+	h->access->set_intr_mask(h, HPSA_INTR_ON);
 
 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
@@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev
 	 * To write all data in the battery backed cache to disks
 	 */
 	hpsa_flush_cache(h);
-	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	h->access->set_intr_mask(h, HPSA_INTR_OFF);
 	hpsa_free_irqs_and_disable_msix(h);
 }
 
@@ -5143,7 +5143,7 @@ static void hpsa_enter_performant_mode(s
 		return;
 	}
 	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
+	h->access = &SA5_performant_access;
 	h->transMethod = CFGTBL_Trans_Performant;
 }
 
diff -ruNp linux-3.13.11/drivers/scsi/hpsa.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hpsa.h
--- linux-3.13.11/drivers/scsi/hpsa.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/hpsa.h	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ struct ctlr_info {
 	unsigned int msix_vector;
 	unsigned int msi_vector;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
-	struct access_method access;
+	struct access_method *access;
 
 	/* queue and queue Info */
 	struct list_head reqQ;
@@ -381,19 +381,19 @@ static bool SA5_performant_intr_pending(
 }
 
 static struct access_method SA5_access = {
-	SA5_submit_command,
-	SA5_intr_mask,
-	SA5_fifo_full,
-	SA5_intr_pending,
-	SA5_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_intr_pending,
+	.command_completed = SA5_completed,
 };
 
 static struct access_method SA5_performant_access = {
-	SA5_submit_command,
-	SA5_performant_intr_mask,
-	SA5_fifo_full,
-	SA5_performant_intr_pending,
-	SA5_performant_completed,
+	.submit_command = SA5_submit_command,
+	.set_intr_mask = SA5_performant_intr_mask,
+	.fifo_full = SA5_fifo_full,
+	.intr_pending = SA5_performant_intr_pending,
+	.command_completed = SA5_performant_completed,
 };
 
 struct board_type {
diff -ruNp linux-3.13.11/drivers/scsi/libfc/fc_exch.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/libfc/fc_exch.c
--- linux-3.13.11/drivers/scsi/libfc/fc_exch.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/libfc/fc_exch.c	2014-07-09 12:00:15.000000000 +0200
@@ -101,12 +101,12 @@ struct fc_exch_mgr {
 	u16		pool_max_index;
 
 	struct {
-		atomic_t no_free_exch;
-		atomic_t no_free_exch_xid;
-		atomic_t xid_not_found;
-		atomic_t xid_busy;
-		atomic_t seq_not_found;
-		atomic_t non_bls_resp;
+		atomic_unchecked_t no_free_exch;
+		atomic_unchecked_t no_free_exch_xid;
+		atomic_unchecked_t xid_not_found;
+		atomic_unchecked_t xid_busy;
+		atomic_unchecked_t seq_not_found;
+		atomic_unchecked_t non_bls_resp;
 	} stats;
 };
 
@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(
 	/* allocate memory for exchange */
 	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
 	if (!ep) {
-		atomic_inc(&mp->stats.no_free_exch);
+		atomic_inc_unchecked(&mp->stats.no_free_exch);
 		goto out;
 	}
 	memset(ep, 0, sizeof(*ep));
@@ -874,7 +874,7 @@ out:
 	return ep;
 err:
 	spin_unlock_bh(&pool->lock);
-	atomic_inc(&mp->stats.no_free_exch_xid);
+	atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
 	mempool_free(ep, mp->ep_pool);
 	return NULL;
 }
@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_look
 		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
 		ep = fc_exch_find(mp, xid);
 		if (!ep) {
-			atomic_inc(&mp->stats.xid_not_found);
+			atomic_inc_unchecked(&mp->stats.xid_not_found);
 			reject = FC_RJT_OX_ID;
 			goto out;
 		}
@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_look
 		ep = fc_exch_find(mp, xid);
 		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
 			if (ep) {
-				atomic_inc(&mp->stats.xid_busy);
+				atomic_inc_unchecked(&mp->stats.xid_busy);
 				reject = FC_RJT_RX_ID;
 				goto rel;
 			}
@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_look
 			}
 			xid = ep->xid;	/* get our XID */
 		} else if (!ep) {
-			atomic_inc(&mp->stats.xid_not_found);
+			atomic_inc_unchecked(&mp->stats.xid_not_found);
 			reject = FC_RJT_RX_ID;	/* XID not found */
 			goto out;
 		}
@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_look
 	} else {
 		sp = &ep->seq;
 		if (sp->id != fh->fh_seq_id) {
-			atomic_inc(&mp->stats.seq_not_found);
+			atomic_inc_unchecked(&mp->stats.seq_not_found);
 			if (f_ctl & FC_FC_END_SEQ) {
 				/*
 				 * Update sequence_id based on incoming last
@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct
 
 	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
 	if (!ep) {
-		atomic_inc(&mp->stats.xid_not_found);
+		atomic_inc_unchecked(&mp->stats.xid_not_found);
 		goto out;
 	}
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
-		atomic_inc(&mp->stats.xid_not_found);
+		atomic_inc_unchecked(&mp->stats.xid_not_found);
 		goto rel;
 	}
 	if (ep->rxid == FC_XID_UNKNOWN)
 		ep->rxid = ntohs(fh->fh_rx_id);
 	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
-		atomic_inc(&mp->stats.xid_not_found);
+		atomic_inc_unchecked(&mp->stats.xid_not_found);
 		goto rel;
 	}
 	if (ep->did != ntoh24(fh->fh_s_id) &&
 	    ep->did != FC_FID_FLOGI) {
-		atomic_inc(&mp->stats.xid_not_found);
+		atomic_inc_unchecked(&mp->stats.xid_not_found);
 		goto rel;
 	}
 	sof = fr_sof(fp);
@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct
 		sp->ssb_stat |= SSB_ST_RESP;
 		sp->id = fh->fh_seq_id;
 	} else if (sp->id != fh->fh_seq_id) {
-		atomic_inc(&mp->stats.seq_not_found);
+		atomic_inc_unchecked(&mp->stats.seq_not_found);
 		goto rel;
 	}
 
@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_
 	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
 
 	if (!sp)
-		atomic_inc(&mp->stats.xid_not_found);
+		atomic_inc_unchecked(&mp->stats.xid_not_found);
 	else
-		atomic_inc(&mp->stats.non_bls_resp);
+		atomic_inc_unchecked(&mp->stats.non_bls_resp);
 
 	fc_frame_free(fp);
 }
@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lpor
 
 	list_for_each_entry(ema, &lport->ema_list, ema_list) {
 		mp = ema->mp;
-		st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+		st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
 		st->fc_no_free_exch_xid +=
-				atomic_read(&mp->stats.no_free_exch_xid);
-		st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
-		st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
-		st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
-		st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+				atomic_read_unchecked(&mp->stats.no_free_exch_xid);
+		st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
+		st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
+		st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
+		st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
 	}
 }
 EXPORT_SYMBOL(fc_exch_update_stats);
diff -ruNp linux-3.13.11/drivers/scsi/libsas/sas_ata.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/libsas/sas_ata.c
--- linux-3.13.11/drivers/scsi/libsas/sas_ata.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/libsas/sas_ata.c	2014-07-09 12:00:15.000000000 +0200
@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sa
 	.postreset		= ata_std_postreset,
 	.error_handler		= ata_std_error_handler,
 	.post_internal_cmd	= sas_ata_post_internal,
-	.qc_defer               = ata_std_qc_defer,
+	.qc_defer		= ata_std_qc_defer,
 	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= sas_ata_qc_issue,
 	.qc_fill_rtf		= sas_ata_qc_fill_rtf,
diff -ruNp linux-3.13.11/drivers/scsi/lpfc/lpfc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc.h
--- linux-3.13.11/drivers/scsi/lpfc/lpfc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc.h	2014-07-09 12:00:15.000000000 +0200
@@ -432,7 +432,7 @@ struct lpfc_vport {
 	struct dentry *debug_nodelist;
 	struct dentry *vport_debugfs_root;
 	struct lpfc_debugfs_trc *disc_trc;
-	atomic_t disc_trc_cnt;
+	atomic_unchecked_t disc_trc_cnt;
 #endif
 	uint8_t stat_data_enabled;
 	uint8_t stat_data_blocked;
@@ -865,8 +865,8 @@ struct lpfc_hba {
 	struct timer_list fabric_block_timer;
 	unsigned long bit_flags;
 #define	FABRIC_COMANDS_BLOCKED	0
-	atomic_t num_rsrc_err;
-	atomic_t num_cmd_success;
+	atomic_unchecked_t num_rsrc_err;
+	atomic_unchecked_t num_cmd_success;
 	unsigned long last_rsrc_error_time;
 	unsigned long last_ramp_down_time;
 	unsigned long last_ramp_up_time;
@@ -902,7 +902,7 @@ struct lpfc_hba {
 
 	struct dentry *debug_slow_ring_trc;
 	struct lpfc_debugfs_trc *slow_ring_trc;
-	atomic_t slow_ring_trc_cnt;
+	atomic_unchecked_t slow_ring_trc_cnt;
 	/* iDiag debugfs sub-directory */
 	struct dentry *idiag_root;
 	struct dentry *idiag_pci_cfg;
diff -ruNp linux-3.13.11/drivers/scsi/lpfc/lpfc_debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_debugfs.c
--- linux-3.13.11/drivers/scsi/lpfc/lpfc_debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_debugfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
 
 #include <linux/debugfs.h>
 
-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
 static unsigned long lpfc_debugfs_start_time = 0L;
 
 /* iDiag */
@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
 	lpfc_debugfs_enable = 0;
 
 	len = 0;
-	index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+	index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
 		(lpfc_debugfs_max_disc_trc - 1);
 	for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
 		dtp = vport->disc_trc + i;
@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
 	lpfc_debugfs_enable = 0;
 
 	len = 0;
-	index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
+	index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
 		(lpfc_debugfs_max_slow_ring_trc - 1);
 	for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
 		dtp = phba->slow_ring_trc + i;
@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
 		!vport || !vport->disc_trc)
 		return;
 
-	index = atomic_inc_return(&vport->disc_trc_cnt) &
+	index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
 		(lpfc_debugfs_max_disc_trc - 1);
 	dtp = vport->disc_trc + index;
 	dtp->fmt = fmt;
 	dtp->data1 = data1;
 	dtp->data2 = data2;
 	dtp->data3 = data3;
-	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
 	dtp->jif = jiffies;
 #endif
 	return;
@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
 		!phba || !phba->slow_ring_trc)
 		return;
 
-	index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
+	index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
 		(lpfc_debugfs_max_slow_ring_trc - 1);
 	dtp = phba->slow_ring_trc + index;
 	dtp->fmt = fmt;
 	dtp->data1 = data1;
 	dtp->data2 = data2;
 	dtp->data3 = data3;
-	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
 	dtp->jif = jiffies;
 #endif
 	return;
@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
 						 "slow_ring buffer\n");
 				goto debug_failed;
 			}
-			atomic_set(&phba->slow_ring_trc_cnt, 0);
+			atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
 			memset(phba->slow_ring_trc, 0,
 				(sizeof(struct lpfc_debugfs_trc) *
 				lpfc_debugfs_max_slow_ring_trc));
@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
 				 "buffer\n");
 		goto debug_failed;
 	}
-	atomic_set(&vport->disc_trc_cnt, 0);
+	atomic_set_unchecked(&vport->disc_trc_cnt, 0);
 
 	snprintf(name, sizeof(name), "discovery_trace");
 	vport->debug_disc_trc =
diff -ruNp linux-3.13.11/drivers/scsi/lpfc/lpfc_init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_init.c
--- linux-3.13.11/drivers/scsi/lpfc/lpfc_init.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_init.c	2014-07-09 12:00:15.000000000 +0200
@@ -10949,8 +10949,10 @@ lpfc_init(void)
 			"misc_register returned with status %d", error);
 
 	if (lpfc_enable_npiv) {
-		lpfc_transport_functions.vport_create = lpfc_vport_create;
-		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+		pax_open_kernel();
+		*(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
+		*(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+		pax_close_kernel();
 	}
 	lpfc_transport_template =
 				fc_attach_transport(&lpfc_transport_functions);
diff -ruNp linux-3.13.11/drivers/scsi/lpfc/lpfc_scsi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_scsi.c
--- linux-3.13.11/drivers/scsi/lpfc/lpfc_scsi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/lpfc/lpfc_scsi.c	2014-07-09 12:00:15.000000000 +0200
@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
 	uint32_t evt_posted;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
-	atomic_inc(&phba->num_rsrc_err);
+	atomic_inc_unchecked(&phba->num_rsrc_err);
 	phba->last_rsrc_error_time = jiffies;
 
 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t evt_posted;
-	atomic_inc(&phba->num_cmd_success);
+	atomic_inc_unchecked(&phba->num_cmd_success);
 
 	if (vport->cfg_lun_queue_depth <= queue_depth)
 		return;
@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
 	unsigned long num_rsrc_err, num_cmd_success;
 	int i;
 
-	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
-	num_cmd_success = atomic_read(&phba->num_cmd_success);
+	num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
+	num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
 
 	/*
 	 * The error and success command counters are global per
@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
-	atomic_set(&phba->num_rsrc_err, 0);
-	atomic_set(&phba->num_cmd_success, 0);
+	atomic_set_unchecked(&phba->num_rsrc_err, 0);
+	atomic_set_unchecked(&phba->num_cmd_success, 0);
 }
 
 /**
@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
 			}
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
-	atomic_set(&phba->num_rsrc_err, 0);
-	atomic_set(&phba->num_cmd_success, 0);
+	atomic_set_unchecked(&phba->num_rsrc_err, 0);
+	atomic_set_unchecked(&phba->num_cmd_success, 0);
 }
 
 /**
diff -ruNp linux-3.13.11/drivers/scsi/mpt2sas/mpt2sas_scsih.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/mpt2sas/mpt2sas_scsih.c
--- linux-3.13.11/drivers/scsi/mpt2sas/mpt2sas_scsih.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/mpt2sas/mpt2sas_scsih.c	2014-07-09 12:00:15.000000000 +0200
@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-	static struct _raid_device *raid_device;
+	struct _raid_device *raid_device;
 	unsigned long flags;
 	Mpi2RaidVolPage0_t vol_pg0;
 	Mpi2ConfigReply_t mpi_reply;
@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
-	static struct _raid_device *raid_device;
+	struct _raid_device *raid_device;
 	unsigned long flags;
 	Mpi2RaidVolPage0_t vol_pg0;
 	Mpi2ConfigReply_t mpi_reply;
@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(str
     struct fw_event_work *fw_event)
 {
 	Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
-	static struct _raid_device *raid_device;
+	struct _raid_device *raid_device;
 	unsigned long flags;
 	u16 handle;
 
@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(stru
 	u64 sas_address;
 	struct _sas_device *sas_device;
 	struct _sas_node *expander_device;
-	static struct _raid_device *raid_device;
+	struct _raid_device *raid_device;
 	u8 retry_count;
 	unsigned long flags;
 
diff -ruNp linux-3.13.11/drivers/scsi/pmcraid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/pmcraid.c
--- linux-3.13.11/drivers/scsi/pmcraid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/pmcraid.c	2014-07-09 12:00:15.000000000 +0200
@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct sc
 		res->scsi_dev = scsi_dev;
 		scsi_dev->hostdata = res;
 		res->change_detected = 0;
-		atomic_set(&res->read_failures, 0);
-		atomic_set(&res->write_failures, 0);
+		atomic_set_unchecked(&res->read_failures, 0);
+		atomic_set_unchecked(&res->write_failures, 0);
 		rc = 0;
 	}
 	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct
 
 	/* If this was a SCSI read/write command keep count of errors */
 	if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
-		atomic_inc(&res->read_failures);
+		atomic_inc_unchecked(&res->read_failures);
 	else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
-		atomic_inc(&res->write_failures);
+		atomic_inc_unchecked(&res->write_failures);
 
 	if (!RES_IS_GSCSI(res->cfg_entry) &&
 		masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
 	 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
 	 * hrrq_id assigned here in queuecommand
 	 */
-	ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+	ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
 			  pinstance->num_hrrq;
 	cmd->cmd_done = pmcraid_io_done;
 
@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
 	 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
 	 * hrrq_id assigned here in queuecommand
 	 */
-	ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
+	ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
 			  pinstance->num_hrrq;
 
 	if (request_size) {
@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
 
 	pinstance = container_of(workp, struct pmcraid_instance, worker_q);
 	/* add resources only after host is added into system */
-	if (!atomic_read(&pinstance->expose_resources))
+	if (!atomic_read_unchecked(&pinstance->expose_resources))
 		return;
 
 	fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct
 	init_waitqueue_head(&pinstance->reset_wait_q);
 
 	atomic_set(&pinstance->outstanding_cmds, 0);
-	atomic_set(&pinstance->last_message_id, 0);
-	atomic_set(&pinstance->expose_resources, 0);
+	atomic_set_unchecked(&pinstance->last_message_id, 0);
+	atomic_set_unchecked(&pinstance->expose_resources, 0);
 
 	INIT_LIST_HEAD(&pinstance->free_res_q);
 	INIT_LIST_HEAD(&pinstance->used_res_q);
@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev
 	/* Schedule worker thread to handle CCN and take care of adding and
 	 * removing devices to OS
 	 */
-	atomic_set(&pinstance->expose_resources, 1);
+	atomic_set_unchecked(&pinstance->expose_resources, 1);
 	schedule_work(&pinstance->worker_q);
 	return rc;
 
diff -ruNp linux-3.13.11/drivers/scsi/pmcraid.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/pmcraid.h
--- linux-3.13.11/drivers/scsi/pmcraid.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/pmcraid.h	2014-07-09 12:00:15.000000000 +0200
@@ -748,7 +748,7 @@ struct pmcraid_instance {
 	struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
 
 	/* Message id as filled in last fired IOARCB, used to identify HRRQ */
-	atomic_t last_message_id;
+	atomic_unchecked_t last_message_id;
 
 	/* configuration table */
 	struct pmcraid_config_table *cfg_table;
@@ -777,7 +777,7 @@ struct pmcraid_instance {
 	atomic_t outstanding_cmds;
 
 	/* should add/delete resources to mid-layer now ?*/
-	atomic_t expose_resources;
+	atomic_unchecked_t expose_resources;
 
 
 
@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
 		struct pmcraid_config_table_entry_ext cfg_entry_ext;
 	};
 	struct scsi_device *scsi_dev;	/* Link scsi_device structure */
-	atomic_t read_failures;		/* count of failed READ commands */
-	atomic_t write_failures;	/* count of failed WRITE commands */
+	atomic_unchecked_t read_failures;	/* count of failed READ commands */
+	atomic_unchecked_t write_failures;	/* count of failed WRITE commands */
 
 	/* To indicate add/delete/modify during CCN */
 	u8 change_detected;
diff -ruNp linux-3.13.11/drivers/scsi/qla2xxx/qla_attr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_attr.c
--- linux-3.13.11/drivers/scsi/qla2xxx/qla_attr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_attr.c	2014-07-09 12:00:15.000000000 +0200
@@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *f
 	return 0;
 }
 
-struct fc_function_template qla2xxx_transport_functions = {
+fc_function_template_no_const qla2xxx_transport_functions = {
 
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
@@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_tran
 	.bsg_timeout = qla24xx_bsg_timeout,
 };
 
-struct fc_function_template qla2xxx_transport_vport_functions = {
+fc_function_template_no_const qla2xxx_transport_vport_functions = {
 
 	.show_host_node_name = 1,
 	.show_host_port_name = 1,
diff -ruNp linux-3.13.11/drivers/scsi/qla2xxx/qla_gbl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_gbl.h
--- linux-3.13.11/drivers/scsi/qla2xxx/qla_gbl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_gbl.h	2014-07-09 12:00:15.000000000 +0200
@@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(sc
 struct device_attribute;
 extern struct device_attribute *qla2x00_host_attrs[];
 struct fc_function_template;
-extern struct fc_function_template qla2xxx_transport_functions;
-extern struct fc_function_template qla2xxx_transport_vport_functions;
+extern fc_function_template_no_const qla2xxx_transport_functions;
+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff -ruNp linux-3.13.11/drivers/scsi/qla2xxx/qla_os.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_os.c
--- linux-3.13.11/drivers/scsi/qla2xxx/qla_os.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla2xxx/qla_os.c	2014-07-09 12:00:15.000000000 +0200
@@ -1568,8 +1568,10 @@ qla2x00_config_dma_addressing(struct qla
 		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
 			/* Ok, a 64bit DMA mask is applicable. */
 			ha->flags.enable_64bit_addressing = 1;
-			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
-			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+			pax_open_kernel();
+			*(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+			*(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+			pax_close_kernel();
 			return;
 		}
 	}
diff -ruNp linux-3.13.11/drivers/scsi/qla4xxx/ql4_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla4xxx/ql4_def.h
--- linux-3.13.11/drivers/scsi/qla4xxx/ql4_def.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla4xxx/ql4_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -296,7 +296,7 @@ struct ddb_entry {
 					   * (4000 only) */
 	atomic_t relogin_timer;		  /* Max Time to wait for
 					   * relogin to complete */
-	atomic_t relogin_retry_count;	  /* Num of times relogin has been
+	atomic_unchecked_t relogin_retry_count;	  /* Num of times relogin has been
 					   * retried */
 	uint32_t default_time2wait;	  /* Default Min time between
 					   * relogins (+aens) */
diff -ruNp linux-3.13.11/drivers/scsi/qla4xxx/ql4_os.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla4xxx/ql4_os.c
--- linux-3.13.11/drivers/scsi/qla4xxx/ql4_os.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/qla4xxx/ql4_os.c	2014-07-09 12:00:15.000000000 +0200
@@ -3311,12 +3311,12 @@ static void qla4xxx_check_relogin_flash_
 		 */
 		if (!iscsi_is_session_online(cls_sess)) {
 			/* Reset retry relogin timer */
-			atomic_inc(&ddb_entry->relogin_retry_count);
+			atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
 			DEBUG2(ql4_printk(KERN_INFO, ha,
 				"%s: index[%d] relogin timed out-retrying"
 				" relogin (%d), retry (%d)\n", __func__,
 				ddb_entry->fw_ddb_index,
-				atomic_read(&ddb_entry->relogin_retry_count),
+				atomic_read_unchecked(&ddb_entry->relogin_retry_count),
 				ddb_entry->default_time2wait + 4));
 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
 			atomic_set(&ddb_entry->retry_relogin_timer,
@@ -5458,7 +5458,7 @@ static void qla4xxx_setup_flash_ddb_entr
 
 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
 	atomic_set(&ddb_entry->relogin_timer, 0);
-	atomic_set(&ddb_entry->relogin_retry_count, 0);
+	atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
 	ddb_entry->default_relogin_timeout =
 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
diff -ruNp linux-3.13.11/drivers/scsi/scsi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi.c
--- linux-3.13.11/drivers/scsi/scsi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi.c	2014-07-09 12:00:15.000000000 +0200
@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 	struct Scsi_Host *host = cmd->device->host;
 	int rtn = 0;
 
-	atomic_inc(&cmd->device->iorequest_cnt);
+	atomic_inc_unchecked(&cmd->device->iorequest_cnt);
 
 	/* check if the device is still usable */
 	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
diff -ruNp linux-3.13.11/drivers/scsi/scsi_lib.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_lib.c
--- linux-3.13.11/drivers/scsi/scsi_lib.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_lib.c	2014-07-09 12:00:15.000000000 +0200
@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct req
 	shost = sdev->host;
 	scsi_init_cmd_errh(cmd);
 	cmd->result = DID_NO_CONNECT << 16;
-	atomic_inc(&cmd->device->iorequest_cnt);
+	atomic_inc_unchecked(&cmd->device->iorequest_cnt);
 
 	/*
 	 * SCSI request completion path will do scsi_device_unbusy(),
@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct req
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
-	atomic_inc(&cmd->device->iodone_cnt);
+	atomic_inc_unchecked(&cmd->device->iodone_cnt);
 	if (cmd->result)
-		atomic_inc(&cmd->device->ioerr_cnt);
+		atomic_inc_unchecked(&cmd->device->ioerr_cnt);
 
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
diff -ruNp linux-3.13.11/drivers/scsi/scsi_sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_sysfs.c
--- linux-3.13.11/drivers/scsi/scsi_sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -725,7 +725,7 @@ show_iostat_##field(struct device *dev,
 		    char *buf)						\
 {									\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	unsigned long long count = atomic_read(&sdev->field);		\
+	unsigned long long count = atomic_read_unchecked(&sdev->field);	\
 	return snprintf(buf, 20, "0x%llx\n", count);			\
 }									\
 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
diff -ruNp linux-3.13.11/drivers/scsi/scsi_tgt_lib.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_tgt_lib.c
--- linux-3.13.11/drivers/scsi/scsi_tgt_lib.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_tgt_lib.c	2014-07-09 12:00:15.000000000 +0200
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct sc
 	int err;
 
 	dprintk("%lx %u\n", uaddr, len);
-	err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
+	err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
 	if (err) {
 		/*
 		 * TODO: need to fixup sg_tablesize, max_segment_size,
diff -ruNp linux-3.13.11/drivers/scsi/scsi_transport_fc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_fc.c
--- linux-3.13.11/drivers/scsi/scsi_transport_fc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_fc.c	2014-07-09 12:00:15.000000000 +0200
@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
  * Netlink Infrastructure
  */
 
-static atomic_t fc_event_seq;
+static atomic_unchecked_t fc_event_seq;
 
 /**
  * fc_get_event_number - Obtain the next sequential FC event number
@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
 u32
 fc_get_event_number(void)
 {
-	return atomic_add_return(1, &fc_event_seq);
+	return atomic_add_return_unchecked(1, &fc_event_seq);
 }
 EXPORT_SYMBOL(fc_get_event_number);
 
@@ -654,7 +654,7 @@ static __init int fc_transport_init(void
 {
 	int error;
 
-	atomic_set(&fc_event_seq, 0);
+	atomic_set_unchecked(&fc_event_seq, 0);
 
 	error = transport_class_register(&fc_host_class);
 	if (error)
@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char
 	char *cp;
 
 	*val = simple_strtoul(buf, &cp, 0);
-	if ((*cp && (*cp != '\n')) || (*val < 0))
+	if (*cp && (*cp != '\n'))
 		return -EINVAL;
 	/*
 	 * Check for overflow; dev_loss_tmo is u32
diff -ruNp linux-3.13.11/drivers/scsi/scsi_transport_iscsi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_iscsi.c
--- linux-3.13.11/drivers/scsi/scsi_transport_iscsi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_iscsi.c	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ struct iscsi_internal {
 	struct transport_container session_cont;
 };
 
-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
 static struct workqueue_struct *iscsi_eh_timer_workq;
 
 static DEFINE_IDA(iscsi_sess_ida);
@@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_s
 	int err;
 
 	ihost = shost->shost_data;
-	session->sid = atomic_add_return(1, &iscsi_session_nr);
+	session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
 
 	if (target_id == ISCSI_MAX_TARGET) {
 		id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
@@ -4103,7 +4103,7 @@ static __init int iscsi_transport_init(v
 	printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
 		ISCSI_TRANSPORT_VERSION);
 
-	atomic_set(&iscsi_session_nr, 0);
+	atomic_set_unchecked(&iscsi_session_nr, 0);
 
 	err = class_register(&iscsi_transport_class);
 	if (err)
diff -ruNp linux-3.13.11/drivers/scsi/scsi_transport_srp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_srp.c
--- linux-3.13.11/drivers/scsi/scsi_transport_srp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/scsi_transport_srp.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@
 #include "scsi_transport_srp_internal.h"
 
 struct srp_host_attrs {
-	atomic_t next_port_id;
+	atomic_unchecked_t next_port_id;
 };
 #define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)
 
@@ -94,7 +94,7 @@ static int srp_host_setup(struct transpo
 	struct Scsi_Host *shost = dev_to_shost(dev);
 	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
 
-	atomic_set(&srp_host->next_port_id, 0);
+	atomic_set_unchecked(&srp_host->next_port_id, 0);
 	return 0;
 }
 
@@ -730,7 +730,7 @@ struct srp_rport *srp_rport_add(struct S
 			  rport_fast_io_fail_timedout);
 	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
 
-	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
+	id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
 	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
 
 	transport_setup_device(&rport->dev);
diff -ruNp linux-3.13.11/drivers/scsi/sd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/sd.c
--- linux-3.13.11/drivers/scsi/sd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/sd.c	2014-07-09 12:00:15.000000000 +0200
@@ -2964,7 +2964,7 @@ static int sd_probe(struct device *dev)
 	sdkp->disk = gd;
 	sdkp->index = index;
 	atomic_set(&sdkp->openers, 0);
-	atomic_set(&sdkp->device->ioerr_cnt, 0);
+	atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
 
 	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
diff -ruNp linux-3.13.11/drivers/scsi/sg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/sg.c
--- linux-3.13.11/drivers/scsi/sg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/scsi/sg.c	2014-07-09 12:00:15.000000000 +0200
@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int
 				       sdp->disk->disk_name,
 				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
 				       NULL,
-				       (char *)arg);
+				       (char __user *)arg);
 	case BLKTRACESTART:
 		return blk_trace_startstop(sdp->device->request_queue, 1);
 	case BLKTRACESTOP:
diff -ruNp linux-3.13.11/drivers/spi/spi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/spi/spi.c
--- linux-3.13.11/drivers/spi/spi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/spi/spi.c	2014-07-09 12:00:15.000000000 +0200
@@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *ma
 EXPORT_SYMBOL_GPL(spi_bus_unlock);
 
 /* portable code must never pass more than 32 bytes */
-#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
+#define	SPI_BUFSIZ	max(32UL, SMP_CACHE_BYTES)
 
 static u8	*buf;
 
diff -ruNp linux-3.13.11/drivers/staging/android/timed_output.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/android/timed_output.c
--- linux-3.13.11/drivers/staging/android/timed_output.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/android/timed_output.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,7 @@
 #include "timed_output.h"
 
 static struct class *timed_output_class;
-static atomic_t device_count;
+static atomic_unchecked_t device_count;
 
 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
@@ -63,7 +63,7 @@ static int create_timed_output_class(voi
 		timed_output_class = class_create(THIS_MODULE, "timed_output");
 		if (IS_ERR(timed_output_class))
 			return PTR_ERR(timed_output_class);
-		atomic_set(&device_count, 0);
+		atomic_set_unchecked(&device_count, 0);
 		timed_output_class->dev_groups = timed_output_groups;
 	}
 
@@ -81,7 +81,7 @@ int timed_output_dev_register(struct tim
 	if (ret < 0)
 		return ret;
 
-	tdev->index = atomic_inc_return(&device_count);
+	tdev->index = atomic_inc_return_unchecked(&device_count);
 	tdev->dev = device_create(timed_output_class, NULL,
 		MKDEV(0, tdev->index), NULL, "%s", tdev->name);
 	if (IS_ERR(tdev->dev))
diff -ruNp linux-3.13.11/drivers/staging/gdm724x/gdm_tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/gdm724x/gdm_tty.c
--- linux-3.13.11/drivers/staging/gdm724x/gdm_tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/gdm724x/gdm_tty.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,7 @@
 #define gdm_tty_send_control(n, r, v, d, l) (\
 	n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
 
-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
 
 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
diff -ruNp linux-3.13.11/drivers/staging/imx-drm/imx-drm-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/imx-drm/imx-drm-core.c
--- linux-3.13.11/drivers/staging/imx-drm/imx-drm-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/imx-drm/imx-drm-core.c	2014-07-09 12:00:15.000000000 +0200
@@ -510,7 +510,7 @@ int imx_drm_add_crtc(struct drm_crtc *cr
 		goto err_busy;
 	}
 
-	if (imxdrm->drm->open_count) {
+	if (local_read(&imxdrm->drm->open_count)) {
 		ret = -EBUSY;
 		goto err_busy;
 	}
@@ -590,7 +590,7 @@ int imx_drm_add_encoder(struct drm_encod
 
 	mutex_lock(&imxdrm->mutex);
 
-	if (imxdrm->drm->open_count) {
+	if (local_read(&imxdrm->drm->open_count)) {
 		ret = -EBUSY;
 		goto err_busy;
 	}
@@ -729,7 +729,7 @@ int imx_drm_add_connector(struct drm_con
 
 	mutex_lock(&imxdrm->mutex);
 
-	if (imxdrm->drm->open_count) {
+	if (local_read(&imxdrm->drm->open_count)) {
 		ret = -EBUSY;
 		goto err_busy;
 	}
diff -ruNp linux-3.13.11/drivers/staging/lustre/lnet/selftest/brw_test.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/brw_test.c
--- linux-3.13.11/drivers/staging/lustre/lnet/selftest/brw_test.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/brw_test.c	2014-07-09 12:00:15.000000000 +0200
@@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc
 	return 0;
 }
 
-sfw_test_client_ops_t brw_test_client;
-void brw_init_test_client(void)
-{
-	brw_test_client.tso_init       = brw_client_init;
-	brw_test_client.tso_fini       = brw_client_fini;
-	brw_test_client.tso_prep_rpc   = brw_client_prep_rpc;
-	brw_test_client.tso_done_rpc   = brw_client_done_rpc;
+sfw_test_client_ops_t brw_test_client = {
+	.tso_init       = brw_client_init,
+	.tso_fini       = brw_client_fini,
+	.tso_prep_rpc   = brw_client_prep_rpc,
+	.tso_done_rpc   = brw_client_done_rpc,
 };
 
 srpc_service_t brw_test_service;
diff -ruNp linux-3.13.11/drivers/staging/lustre/lnet/selftest/framework.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/framework.c
--- linux-3.13.11/drivers/staging/lustre/lnet/selftest/framework.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/framework.c	2014-07-09 12:00:15.000000000 +0200
@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
 
 extern sfw_test_client_ops_t ping_test_client;
 extern srpc_service_t	ping_test_service;
-extern void ping_init_test_client(void);
 extern void ping_init_test_service(void);
 
 extern sfw_test_client_ops_t brw_test_client;
 extern srpc_service_t	brw_test_service;
-extern void brw_init_test_client(void);
 extern void brw_init_test_service(void);
 
 
@@ -1684,12 +1682,10 @@ sfw_startup (void)
 	INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
 	INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
 
-	brw_init_test_client();
 	brw_init_test_service();
 	rc = sfw_register_test(&brw_test_service, &brw_test_client);
 	LASSERT (rc == 0);
 
-	ping_init_test_client();
 	ping_init_test_service();
 	rc = sfw_register_test(&ping_test_service, &ping_test_client);
 	LASSERT (rc == 0);
diff -ruNp linux-3.13.11/drivers/staging/lustre/lnet/selftest/ping_test.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/ping_test.c
--- linux-3.13.11/drivers/staging/lustre/lnet/selftest/ping_test.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lnet/selftest/ping_test.c	2014-07-09 12:00:15.000000000 +0200
@@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rp
 	return 0;
 }
 
-sfw_test_client_ops_t ping_test_client;
-void ping_init_test_client(void)
-{
-	ping_test_client.tso_init     = ping_client_init;
-	ping_test_client.tso_fini     = ping_client_fini;
-	ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
-	ping_test_client.tso_done_rpc = ping_client_done_rpc;
-}
+sfw_test_client_ops_t ping_test_client = {
+	.tso_init     = ping_client_init,
+	.tso_fini     = ping_client_fini,
+	.tso_prep_rpc = ping_client_prep_rpc,
+	.tso_done_rpc = ping_client_done_rpc,
+};
 
 srpc_service_t ping_test_service;
 void ping_init_test_service(void)
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/include/lustre_dlm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/include/lustre_dlm.h
--- linux-3.13.11/drivers/staging/lustre/lustre/include/lustre_dlm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/include/lustre_dlm.h	2014-07-09 12:00:15.000000000 +0200
@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
 	ldlm_completion_callback lcs_completion;
 	ldlm_blocking_callback   lcs_blocking;
 	ldlm_glimpse_callback    lcs_glimpse;
-};
+} __no_const;
 
 /* ldlm_lockd.c */
 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/include/obd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/include/obd.h
--- linux-3.13.11/drivers/staging/lustre/lustre/include/obd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/include/obd.h	2014-07-09 12:00:15.000000000 +0200
@@ -1417,7 +1417,7 @@ struct md_ops {
 	 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
 	 * wrapper function in include/linux/obd_class.h.
 	 */
-};
+} __no_const;
 
 struct lsm_operations {
 	void (*lsm_free)(struct lov_stripe_md *);
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
--- linux-3.13.11/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c	2014-07-09 12:00:15.000000000 +0200
@@ -249,7 +249,7 @@ ldlm_process_flock_lock(struct ldlm_lock
 	int added = (mode == LCK_NL);
 	int overlaps = 0;
 	int splitted = 0;
-	const struct ldlm_callback_suite null_cbs = { NULL };
+	const struct ldlm_callback_suite null_cbs = { };
 	int rc;
 
 	CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
--- linux-3.13.11/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
 int LL_PROC_PROTO(proc_console_max_delay_cs)
 {
 	int rc, max_delay_cs;
-	ctl_table_t dummy = *table;
+	ctl_table_no_const dummy = *table;
 	cfs_duration_t d;
 
 	dummy.data = &max_delay_cs;
@@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay
 int LL_PROC_PROTO(proc_console_min_delay_cs)
 {
 	int rc, min_delay_cs;
-	ctl_table_t dummy = *table;
+	ctl_table_no_const dummy = *table;
 	cfs_duration_t d;
 
 	dummy.data = &min_delay_cs;
@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay
 int LL_PROC_PROTO(proc_console_backoff)
 {
 	int rc, backoff;
-	ctl_table_t dummy = *table;
+	ctl_table_no_const dummy = *table;
 
 	dummy.data = &backoff;
 	dummy.proc_handler = &proc_dointvec;
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/libcfs/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/libcfs/module.c
--- linux-3.13.11/drivers/staging/lustre/lustre/libcfs/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/libcfs/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -348,11 +348,11 @@ out:
 
 
 struct cfs_psdev_ops libcfs_psdev_ops = {
-	libcfs_psdev_open,
-	libcfs_psdev_release,
-	NULL,
-	NULL,
-	libcfs_ioctl
+	.p_open = libcfs_psdev_open,
+	.p_close = libcfs_psdev_release,
+	.p_read = NULL,
+	.p_write = NULL,
+	.p_ioctl = libcfs_ioctl
 };
 
 extern int insert_proc(void);
diff -ruNp linux-3.13.11/drivers/staging/lustre/lustre/llite/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/llite/dir.c
--- linux-3.13.11/drivers/staging/lustre/lustre/llite/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/lustre/lustre/llite/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *di
 	int mode;
 	int err;
 
-	mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+	mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
 	op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
 				     strlen(filename), mode, LUSTRE_OPC_MKDIR,
 				     lump);
diff -ruNp linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-core.c
--- linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-core.c	2014-07-09 12:00:15.000000000 +0200
@@ -434,7 +434,7 @@ static void solo_device_release(struct d
 
 static int solo_sysfs_init(struct solo_dev *solo_dev)
 {
-	struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
+	bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
 	struct device *dev = &solo_dev->dev;
 	const char *driver;
 	int i;
diff -ruNp linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-g723.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-g723.c
--- linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-g723.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-g723.c	2014-07-09 12:00:15.000000000 +0200
@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo
 
 int solo_g723_init(struct solo_dev *solo_dev)
 {
-	static struct snd_device_ops ops = { NULL };
+	static struct snd_device_ops ops = { };
 	struct snd_card *card;
 	struct snd_kcontrol_new kctl;
 	char name[32];
diff -ruNp linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-p2m.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-p2m.c
--- linux-3.13.11/drivers/staging/media/solo6x10/solo6x10-p2m.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10-p2m.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *s
 
 	/* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
 	if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
-		p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
+		p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
 		if (p2m_id < 0)
 			p2m_id = -p2m_id;
 	}
diff -ruNp linux-3.13.11/drivers/staging/media/solo6x10/solo6x10.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10.h
--- linux-3.13.11/drivers/staging/media/solo6x10/solo6x10.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/media/solo6x10/solo6x10.h	2014-07-09 12:00:15.000000000 +0200
@@ -237,7 +237,7 @@ struct solo_dev {
 
 	/* P2M DMA Engine */
 	struct solo_p2m_dev	p2m_dev[SOLO_NR_P2M];
-	atomic_t		p2m_count;
+	atomic_unchecked_t	p2m_count;
 	int			p2m_jiffies;
 	unsigned int		p2m_timeouts;
 
diff -ruNp linux-3.13.11/drivers/staging/octeon/ethernet-rx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/octeon/ethernet-rx.c
--- linux-3.13.11/drivers/staging/octeon/ethernet-rx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/octeon/ethernet-rx.c	2014-07-09 12:00:15.000000000 +0200
@@ -418,11 +418,11 @@ static int cvm_oct_napi_poll(struct napi
 				/* Increment RX stats for virtual ports */
 				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
 #ifdef CONFIG_64BIT
-					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
-					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
+					atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
+					atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
 #else
-					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
-					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
+					atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
+					atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
 #endif
 				}
 				netif_receive_skb(skb);
@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
 					   dev->name);
 				*/
 #ifdef CONFIG_64BIT
-				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
+				atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
 #else
-				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
+				atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
 #endif
 				dev_kfree_skb_irq(skb);
 			}
diff -ruNp linux-3.13.11/drivers/staging/octeon/ethernet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/octeon/ethernet.c
--- linux-3.13.11/drivers/staging/octeon/ethernet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/octeon/ethernet.c	2014-07-09 12:00:15.000000000 +0200
@@ -254,11 +254,11 @@ static struct net_device_stats *cvm_oct_
 		 * since the RX tasklet also increments it.
 		 */
 #ifdef CONFIG_64BIT
-		atomic64_add(rx_status.dropped_packets,
-			     (atomic64_t *)&priv->stats.rx_dropped);
+		atomic64_add_unchecked(rx_status.dropped_packets,
+			     (atomic64_unchecked_t *)&priv->stats.rx_dropped);
 #else
-		atomic_add(rx_status.dropped_packets,
-			     (atomic_t *)&priv->stats.rx_dropped);
+		atomic_add_unchecked(rx_status.dropped_packets,
+			     (atomic_unchecked_t *)&priv->stats.rx_dropped);
 #endif
 	}
 
diff -ruNp linux-3.13.11/drivers/staging/rtl8188eu/include/hal_intf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8188eu/include/hal_intf.h
--- linux-3.13.11/drivers/staging/rtl8188eu/include/hal_intf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8188eu/include/hal_intf.h	2014-07-09 12:00:15.000000000 +0200
@@ -271,7 +271,7 @@ struct hal_ops {
 	s32 (*c2h_handler)(struct adapter *padapter,
 			   struct c2h_evt_hdr *c2h_evt);
 	c2h_id_filter c2h_id_filter_ccx;
-};
+} __no_const;
 
 enum rt_eeprom_type {
 	EEPROM_93C46,
diff -ruNp linux-3.13.11/drivers/staging/rtl8188eu/include/rtw_io.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8188eu/include/rtw_io.h
--- linux-3.13.11/drivers/staging/rtl8188eu/include/rtw_io.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8188eu/include/rtw_io.h	2014-07-09 12:00:15.000000000 +0200
@@ -126,7 +126,7 @@ struct _io_ops {
 	u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
 	void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
 	void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
-};
+} __no_const;
 
 struct io_req {
 	struct list_head list;
diff -ruNp linux-3.13.11/drivers/staging/rtl8712/rtl871x_io.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8712/rtl871x_io.h
--- linux-3.13.11/drivers/staging/rtl8712/rtl871x_io.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/rtl8712/rtl871x_io.h	2014-07-09 12:00:15.000000000 +0200
@@ -108,7 +108,7 @@ struct	_io_ops {
 			  u8 *pmem);
 	u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
 			   u8 *pmem);
-};
+} __no_const;
 
 struct io_req {
 	struct list_head list;
diff -ruNp linux-3.13.11/drivers/staging/sbe-2t3e3/netdev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/sbe-2t3e3/netdev.c
--- linux-3.13.11/drivers/staging/sbe-2t3e3/netdev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/sbe-2t3e3/netdev.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device
 	t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
 
 	if (rlen)
-		if (copy_to_user(data, &resp, rlen))
+		if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
 			return -EFAULT;
 
 	return 0;
diff -ruNp linux-3.13.11/drivers/staging/usbip/vhci.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci.h
--- linux-3.13.11/drivers/staging/usbip/vhci.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci.h	2014-07-09 12:00:15.000000000 +0200
@@ -83,7 +83,7 @@ struct vhci_hcd {
 	unsigned resuming:1;
 	unsigned long re_timeout;
 
-	atomic_t seqnum;
+	atomic_unchecked_t seqnum;
 
 	/*
 	 * NOTE:
diff -ruNp linux-3.13.11/drivers/staging/usbip/vhci_hcd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci_hcd.c
--- linux-3.13.11/drivers/staging/usbip/vhci_hcd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci_hcd.c	2014-07-09 12:00:15.000000000 +0200
@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
 
 	spin_lock(&vdev->priv_lock);
 
-	priv->seqnum = atomic_inc_return(&the_controller->seqnum);
+	priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
 	if (priv->seqnum == 0xffff)
 		dev_info(&urb->dev->dev, "seqnum max\n");
 
@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_h
 			return -ENOMEM;
 		}
 
-		unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
+		unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
 		if (unlink->seqnum == 0xffff)
 			pr_info("seqnum max\n");
 
@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hc
 		vdev->rhport = rhport;
 	}
 
-	atomic_set(&vhci->seqnum, 0);
+	atomic_set_unchecked(&vhci->seqnum, 0);
 	spin_lock_init(&vhci->lock);
 
 	hcd->power_budget = 0; /* no limit */
diff -ruNp linux-3.13.11/drivers/staging/usbip/vhci_rx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci_rx.c
--- linux-3.13.11/drivers/staging/usbip/vhci_rx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/usbip/vhci_rx.c	2014-07-09 12:00:15.000000000 +0200
@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct
 	if (!urb) {
 		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
 		pr_info("max seqnum %d\n",
-			atomic_read(&the_controller->seqnum));
+			atomic_read_unchecked(&the_controller->seqnum));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
 	}
diff -ruNp linux-3.13.11/drivers/staging/vt6655/hostap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/vt6655/hostap.c
--- linux-3.13.11/drivers/staging/vt6655/hostap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/vt6655/hostap.c	2014-07-09 12:00:15.000000000 +0200
@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
  *
  */
 
+static net_device_ops_no_const apdev_netdev_ops;
+
 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
 {
 	PSDevice apdev_priv;
 	struct net_device *dev = pDevice->dev;
 	int ret;
-	const struct net_device_ops apdev_netdev_ops = {
-		.ndo_start_xmit         = pDevice->tx_80211,
-	};
 
 	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
 
@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevic
 	*apdev_priv = *pDevice;
 	eth_hw_addr_inherit(pDevice->apdev, dev);
 
+	/* only half broken now */
+	apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
 	pDevice->apdev->netdev_ops = &apdev_netdev_ops;
 
 	pDevice->apdev->type = ARPHRD_IEEE80211;
diff -ruNp linux-3.13.11/drivers/staging/vt6656/hostap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/vt6656/hostap.c
--- linux-3.13.11/drivers/staging/vt6656/hostap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/staging/vt6656/hostap.c	2014-07-09 12:00:15.000000000 +0200
@@ -60,14 +60,13 @@ static int          msglevel
  *
  */
 
+static net_device_ops_no_const apdev_netdev_ops;
+
 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
 {
 	struct vnt_private *apdev_priv;
 	struct net_device *dev = pDevice->dev;
 	int ret;
-	const struct net_device_ops apdev_netdev_ops = {
-		.ndo_start_xmit = pDevice->tx_80211,
-	};
 
     DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
 
@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct
     *apdev_priv = *pDevice;
 	memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
 
+	/* only half broken now */
+	apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
 	pDevice->apdev->netdev_ops = &apdev_netdev_ops;
 
 	pDevice->apdev->type = ARPHRD_IEEE80211;
diff -ruNp linux-3.13.11/drivers/target/sbp/sbp_target.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/sbp/sbp_target.c
--- linux-3.13.11/drivers/target/sbp/sbp_target.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/sbp/sbp_target.c	2014-07-09 12:00:15.000000000 +0200
@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_temp
 
 #define SESSION_MAINTENANCE_INTERVAL HZ
 
-static atomic_t login_id = ATOMIC_INIT(0);
+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
 
 static void session_maintenance_work(struct work_struct *);
 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
@@ -444,7 +444,7 @@ static void sbp_management_request_login
 	login->lun = se_lun;
 	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
 	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
-	login->login_id = atomic_inc_return(&login_id);
+	login->login_id = atomic_inc_return_unchecked(&login_id);
 
 	login->tgt_agt = sbp_target_agent_register(login);
 	if (IS_ERR(login->tgt_agt)) {
diff -ruNp linux-3.13.11/drivers/target/target_core_device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/target_core_device.c
--- linux-3.13.11/drivers/target/target_core_device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/target_core_device.c	2014-07-09 12:00:15.000000000 +0200
@@ -1435,7 +1435,7 @@ struct se_device *target_alloc_device(st
 	spin_lock_init(&dev->se_tmr_lock);
 	spin_lock_init(&dev->qf_cmd_lock);
 	sema_init(&dev->caw_sem, 1);
-	atomic_set(&dev->dev_ordered_id, 0);
+	atomic_set_unchecked(&dev->dev_ordered_id, 0);
 	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
 	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
 	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff -ruNp linux-3.13.11/drivers/target/target_core_transport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/target_core_transport.c
--- linux-3.13.11/drivers/target/target_core_transport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/target/target_core_transport.c	2014-07-09 12:00:15.000000000 +0200
@@ -1113,7 +1113,7 @@ transport_check_alloc_task_attr(struct s
 	 * Used to determine when ORDERED commands should go from
 	 * Dormant to Active status.
 	 */
-	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
+	cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
 	smp_mb__after_atomic_inc();
 	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 			cmd->se_ordered_id, cmd->sam_task_attr,
diff -ruNp linux-3.13.11/drivers/tty/cyclades.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/cyclades.c
--- linux-3.13.11/drivers/tty/cyclades.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/cyclades.c	2014-07-09 12:00:15.000000000 +0200
@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tt
 	printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
 			info->port.count);
 #endif
-	info->port.count++;
+	atomic_inc(&info->port.count);
 #ifdef CY_DEBUG_COUNT
 	printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
-		current->pid, info->port.count);
+		current->pid, atomic_read(&info->port.count));
 #endif
 
 	/*
@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq
 		for (j = 0; j < cy_card[i].nports; j++) {
 			info = &cy_card[i].ports[j];
 
-			if (info->port.count) {
+			if (atomic_read(&info->port.count)) {
 				/* XXX is the ldisc num worth this? */
 				struct tty_struct *tty;
 				struct tty_ldisc *ld;
diff -ruNp linux-3.13.11/drivers/tty/hvc/hvc_console.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvc_console.c
--- linux-3.13.11/drivers/tty/hvc/hvc_console.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvc_console.c	2014-07-09 12:00:15.000000000 +0200
@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *t
 
 	spin_lock_irqsave(&hp->port.lock, flags);
 	/* Check and then increment for fast path open. */
-	if (hp->port.count++ > 0) {
+	if (atomic_inc_return(&hp->port.count) > 1) {
 		spin_unlock_irqrestore(&hp->port.lock, flags);
 		hvc_kick();
 		return 0;
@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct
 
 	spin_lock_irqsave(&hp->port.lock, flags);
 
-	if (--hp->port.count == 0) {
+	if (atomic_dec_return(&hp->port.count) == 0) {
 		spin_unlock_irqrestore(&hp->port.lock, flags);
 		/* We are done with the tty pointer now. */
 		tty_port_tty_set(&hp->port, NULL);
@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct
 		 */
 		tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
 	} else {
-		if (hp->port.count < 0)
+		if (atomic_read(&hp->port.count) < 0)
 			printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
-				hp->vtermno, hp->port.count);
+				hp->vtermno, atomic_read(&hp->port.count));
 		spin_unlock_irqrestore(&hp->port.lock, flags);
 	}
 }
@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct
 	 * open->hangup case this can be called after the final close so prevent
 	 * that from happening for now.
 	 */
-	if (hp->port.count <= 0) {
+	if (atomic_read(&hp->port.count) <= 0) {
 		spin_unlock_irqrestore(&hp->port.lock, flags);
 		return;
 	}
 
-	hp->port.count = 0;
+	atomic_set(&hp->port.count, 0);
 	spin_unlock_irqrestore(&hp->port.lock, flags);
 	tty_port_tty_set(&hp->port, NULL);
 
@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *
 		return -EPIPE;
 
 	/* FIXME what's this (unprotected) check for? */
-	if (hp->port.count <= 0)
+	if (atomic_read(&hp->port.count) <= 0)
 		return -EIO;
 
 	spin_lock_irqsave(&hp->lock, flags);
diff -ruNp linux-3.13.11/drivers/tty/hvc/hvcs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvcs.c
--- linux-3.13.11/drivers/tty/hvc/hvcs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvcs.c	2014-07-09 12:00:15.000000000 +0200
@@ -83,6 +83,7 @@
 #include <asm/hvcserver.h>
 #include <asm/uaccess.h>
 #include <asm/vio.h>
+#include <asm/local.h>
 
 /*
  * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(st
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
 
-	if (hvcsd->port.count > 0) {
+	if (atomic_read(&hvcsd->port.count) > 0) {
 		spin_unlock_irqrestore(&hvcsd->lock, flags);
 		printk(KERN_INFO "HVCS: vterm state unchanged.  "
 				"The hvcs device node is still in use.\n");
@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_drive
 		}
 	}
 
-	hvcsd->port.count = 0;
+	atomic_set(&hvcsd->port.count, 0);
 	hvcsd->port.tty = tty;
 	tty->driver_data = hvcsd;
 
@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *
 	unsigned long flags;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
-	hvcsd->port.count++;
+	atomic_inc(&hvcsd->port.count);
 	hvcsd->todo_mask |= HVCS_SCHED_READ;
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
 
@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct
 	hvcsd = tty->driver_data;
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
-	if (--hvcsd->port.count == 0) {
+	if (atomic_dec_and_test(&hvcsd->port.count)) {
 
 		vio_disable_interrupts(hvcsd->vdev);
 
@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct
 
 		free_irq(irq, hvcsd);
 		return;
-	} else if (hvcsd->port.count < 0) {
+	} else if (atomic_read(&hvcsd->port.count) < 0) {
 		printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
 				" is missmanaged.\n",
-		hvcsd->vdev->unit_address, hvcsd->port.count);
+		hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
 	}
 
 	spin_unlock_irqrestore(&hvcsd->lock, flags);
@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struc
 
 	spin_lock_irqsave(&hvcsd->lock, flags);
 	/* Preserve this so that we know how many kref refs to put */
-	temp_open_count = hvcsd->port.count;
+	temp_open_count = atomic_read(&hvcsd->port.count);
 
 	/*
 	 * Don't kref put inside the spinlock because the destruction
@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struc
 	tty->driver_data = NULL;
 	hvcsd->port.tty = NULL;
 
-	hvcsd->port.count = 0;
+	atomic_set(&hvcsd->port.count, 0);
 
 	/* This will drop any buffered data on the floor which is OK in a hangup
 	 * scenario. */
@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct
 	 * the middle of a write operation?  This is a crummy place to do this
 	 * but we want to keep it all in the spinlock.
 	 */
-	if (hvcsd->port.count <= 0) {
+	if (atomic_read(&hvcsd->port.count) <= 0) {
 		spin_unlock_irqrestore(&hvcsd->lock, flags);
 		return -ENODEV;
 	}
@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_st
 {
 	struct hvcs_struct *hvcsd = tty->driver_data;
 
-	if (!hvcsd || hvcsd->port.count <= 0)
+	if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
 		return 0;
 
 	return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
diff -ruNp linux-3.13.11/drivers/tty/hvc/hvsi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvsi.c
--- linux-3.13.11/drivers/tty/hvc/hvsi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvsi.c	2014-07-09 12:00:15.000000000 +0200
@@ -85,7 +85,7 @@ struct hvsi_struct {
 	int n_outbuf;
 	uint32_t vtermno;
 	uint32_t virq;
-	atomic_t seqno; /* HVSI packet sequence number */
+	atomic_unchecked_t seqno; /* HVSI packet sequence number */
 	uint16_t mctrl;
 	uint8_t state;  /* HVSI protocol state */
 	uint8_t flags;
@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct h
 
 	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query_response);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
 	packet.verb = VSV_SEND_VERSION_NUMBER;
 	packet.u.version = HVSI_VERSION;
 	packet.query_seqno = query_seqno+1;
@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct
 
 	packet.hdr.type = VS_QUERY_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
 	packet.verb = verb;
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_st
 	int wrote;
 
 	packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
 	packet.hdr.len = sizeof(struct hvsi_control);
 	packet.verb = VSV_SET_MODEM_CTL;
 	packet.mask = HVSI_TSDTR;
@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_st
 	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
 
 	packet.hdr.type = VS_DATA_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
 	packet.hdr.len = count + sizeof(struct hvsi_header);
 	memcpy(&packet.data, buf, count);
 
@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct h
 	struct hvsi_control packet __ALIGNED__;
 
 	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
 	packet.hdr.len = 6;
 	packet.verb = VSV_CLOSE_PROTOCOL;
 
@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *
 
 	tty_port_tty_set(&hp->port, tty);
 	spin_lock_irqsave(&hp->lock, flags);
-	hp->port.count++;
+	atomic_inc(&hp->port.count);
 	atomic_set(&hp->seqno, 0);
 	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
 	spin_unlock_irqrestore(&hp->lock, flags);
@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct
 
 	spin_lock_irqsave(&hp->lock, flags);
 
-	if (--hp->port.count == 0) {
+	if (atomic_dec_return(&hp->port.count) == 0) {
 		tty_port_tty_set(&hp->port, NULL);
 		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
 
@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct
 
 			spin_lock_irqsave(&hp->lock, flags);
 		}
-	} else if (hp->port.count < 0)
+	} else if (atomic_read(&hp->port.count) < 0)
 		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
-		       hp - hvsi_ports, hp->port.count);
+		       hp - hvsi_ports, atomic_read(&hp->port.count));
 
 	spin_unlock_irqrestore(&hp->lock, flags);
 }
@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struc
 	tty_port_tty_set(&hp->port, NULL);
 
 	spin_lock_irqsave(&hp->lock, flags);
-	hp->port.count = 0;
+	atomic_set(&hp->port.count, 0);
 	hp->n_outbuf = 0;
 	spin_unlock_irqrestore(&hp->lock, flags);
 }
diff -ruNp linux-3.13.11/drivers/tty/hvc/hvsi_lib.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvsi_lib.c
--- linux-3.13.11/drivers/tty/hvc/hvsi_lib.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/hvc/hvsi_lib.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,7 +9,7 @@
 
 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
 {
-	packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
+	packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
 
 	/* Assumes that always succeeds, works in practice */
 	return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct
 
 	/* Reset state */
 	pv->established = 0;
-	atomic_set(&pv->seqno, 0);
+	atomic_set_unchecked(&pv->seqno, 0);
 
 	pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
 
diff -ruNp linux-3.13.11/drivers/tty/ipwireless/tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/ipwireless/tty.c
--- linux-3.13.11/drivers/tty/ipwireless/tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/ipwireless/tty.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,7 @@
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
 #include <linux/uaccess.h>
+#include <asm/local.h>
 
 #include "tty.h"
 #include "network.h"
@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *l
 		mutex_unlock(&tty->ipw_tty_mutex);
 		return -ENODEV;
 	}
-	if (tty->port.count == 0)
+	if (atomic_read(&tty->port.count) == 0)
 		tty->tx_bytes_queued = 0;
 
-	tty->port.count++;
+	atomic_inc(&tty->port.count);
 
 	tty->port.tty = linux_tty;
 	linux_tty->driver_data = tty;
@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *l
 
 static void do_ipw_close(struct ipw_tty *tty)
 {
-	tty->port.count--;
-
-	if (tty->port.count == 0) {
+	if (atomic_dec_return(&tty->port.count) == 0) {
 		struct tty_struct *linux_tty = tty->port.tty;
 
 		if (linux_tty != NULL) {
@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct
 		return;
 
 	mutex_lock(&tty->ipw_tty_mutex);
-	if (tty->port.count == 0) {
+	if (atomic_read(&tty->port.count) == 0) {
 		mutex_unlock(&tty->ipw_tty_mutex);
 		return;
 	}
@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_
 
 	mutex_lock(&tty->ipw_tty_mutex);
 
-	if (!tty->port.count) {
+	if (!atomic_read(&tty->port.count)) {
 		mutex_unlock(&tty->ipw_tty_mutex);
 		return;
 	}
@@ -203,7 +202,7 @@ static int ipw_write(struct tty_struct *
 		return -ENODEV;
 
 	mutex_lock(&tty->ipw_tty_mutex);
-	if (!tty->port.count) {
+	if (!atomic_read(&tty->port.count)) {
 		mutex_unlock(&tty->ipw_tty_mutex);
 		return -EINVAL;
 	}
@@ -243,7 +242,7 @@ static int ipw_write_room(struct tty_str
 	if (!tty)
 		return -ENODEV;
 
-	if (!tty->port.count)
+	if (!atomic_read(&tty->port.count))
 		return -EINVAL;
 
 	room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
@@ -285,7 +284,7 @@ static int ipw_chars_in_buffer(struct tt
 	if (!tty)
 		return 0;
 
-	if (!tty->port.count)
+	if (!atomic_read(&tty->port.count))
 		return 0;
 
 	return tty->tx_bytes_queued;
@@ -366,7 +365,7 @@ static int ipw_tiocmget(struct tty_struc
 	if (!tty)
 		return -ENODEV;
 
-	if (!tty->port.count)
+	if (!atomic_read(&tty->port.count))
 		return -EINVAL;
 
 	return get_control_lines(tty);
@@ -382,7 +381,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
 	if (!tty)
 		return -ENODEV;
 
-	if (!tty->port.count)
+	if (!atomic_read(&tty->port.count))
 		return -EINVAL;
 
 	return set_control_lines(tty, set, clear);
@@ -396,7 +395,7 @@ static int ipw_ioctl(struct tty_struct *
 	if (!tty)
 		return -ENODEV;
 
-	if (!tty->port.count)
+	if (!atomic_read(&tty->port.count))
 		return -EINVAL;
 
 	/* FIXME: Exactly how is the tty object locked here .. */
@@ -552,7 +551,7 @@ void ipwireless_tty_free(struct ipw_tty
 				 * are gone */
 				mutex_lock(&ttyj->ipw_tty_mutex);
 			}
-			while (ttyj->port.count)
+			while (atomic_read(&ttyj->port.count))
 				do_ipw_close(ttyj);
 			ipwireless_disassociate_network_ttys(network,
 							     ttyj->channel_idx);
diff -ruNp linux-3.13.11/drivers/tty/moxa.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/moxa.c
--- linux-3.13.11/drivers/tty/moxa.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/moxa.c	2014-07-09 12:00:15.000000000 +0200
@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *
 	}
 
 	ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
-	ch->port.count++;
+	atomic_inc(&ch->port.count);
 	tty->driver_data = ch;
 	tty_port_tty_set(&ch->port, tty);
 	mutex_lock(&ch->port.mutex);
diff -ruNp linux-3.13.11/drivers/tty/n_gsm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/n_gsm.c
--- linux-3.13.11/drivers/tty/n_gsm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/n_gsm.c	2014-07-09 12:00:15.000000000 +0200
@@ -1643,7 +1643,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
 	spin_lock_init(&dlci->lock);
 	mutex_init(&dlci->mutex);
 	dlci->fifo = &dlci->_fifo;
-	if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
+	if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
 		kfree(dlci);
 		return NULL;
 	}
@@ -2946,7 +2946,7 @@ static int gsmtty_open(struct tty_struct
 	struct gsm_dlci *dlci = tty->driver_data;
 	struct tty_port *port = &dlci->port;
 
-	port->count++;
+	atomic_inc(&port->count);
 	dlci_get(dlci);
 	dlci_get(dlci->gsm->dlci[0]);
 	mux_get(dlci->gsm);
diff -ruNp linux-3.13.11/drivers/tty/n_tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/n_tty.c
--- linux-3.13.11/drivers/tty/n_tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/n_tty.c	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,7 @@ struct n_tty_data {
 	int minimum_to_wake;
 
 	/* consumer-published */
-	size_t read_tail;
+	size_t read_tail __intentional_overflow(-1);
 	size_t line_start;
 
 	/* protected by output lock */
@@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
 {
 	*ops = tty_ldisc_N_TTY;
 	ops->owner = NULL;
-	ops->refcount = ops->flags = 0;
+	atomic_set(&ops->refcount, 0);
+	ops->flags = 0;
 }
 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
diff -ruNp linux-3.13.11/drivers/tty/pty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/pty.c
--- linux-3.13.11/drivers/tty/pty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/pty.c	2014-07-09 12:00:15.000000000 +0200
@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
 		panic("Couldn't register Unix98 pts driver");
 
 	/* Now create the /dev/ptmx special device */
+	pax_open_kernel();
 	tty_default_fops(&ptmx_fops);
-	ptmx_fops.open = ptmx_open;
+	*(void **)&ptmx_fops.open = ptmx_open;
+	pax_close_kernel();
 
 	cdev_init(&ptmx_cdev, &ptmx_fops);
 	if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
diff -ruNp linux-3.13.11/drivers/tty/rocket.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/rocket.c
--- linux-3.13.11/drivers/tty/rocket.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/rocket.c	2014-07-09 12:00:15.000000000 +0200
@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tt
 	tty->driver_data = info;
 	tty_port_tty_set(port, tty);
 
-	if (port->count++ == 0) {
+	if (atomic_inc_return(&port->count) == 1) {
 		atomic_inc(&rp_num_ports_open);
 
 #ifdef ROCKET_DEBUG_OPEN
@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tt
 #endif
 	}
 #ifdef ROCKET_DEBUG_OPEN
-	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
 #endif
 
 	/*
@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct
 		spin_unlock_irqrestore(&info->port.lock, flags);
 		return;
 	}
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		atomic_dec(&rp_num_ports_open);
 	clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
 	spin_unlock_irqrestore(&info->port.lock, flags);
diff -ruNp linux-3.13.11/drivers/tty/serial/ioc4_serial.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/ioc4_serial.c
--- linux-3.13.11/drivers/tty/serial/ioc4_serial.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/ioc4_serial.c	2014-07-09 12:00:15.000000000 +0200
@@ -437,7 +437,7 @@ struct ioc4_soft {
 		} is_intr_info[MAX_IOC4_INTR_ENTS];
 
 		/* Number of entries active in the above array */
-		atomic_t is_num_intrs;
+		atomic_unchecked_t is_num_intrs;
 	} is_intr_type[IOC4_NUM_INTR_TYPES];
 
 	/* is_ir_lock must be held while
@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int
 	BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
 	       || (type == IOC4_OTHER_INTR_TYPE)));
 
-	i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
+	i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
 	BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
 
 	/* Save off the lower level interrupt handler */
@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, vo
 
 	soft = arg;
 	for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
-		num_intrs = (int)atomic_read(
+		num_intrs = (int)atomic_read_unchecked(
 				&soft->is_intr_type[intr_type].is_num_intrs);
 
 		this_mir = this_ir = pending_intrs(soft, intr_type);
diff -ruNp linux-3.13.11/drivers/tty/serial/kgdboc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/kgdboc.c
--- linux-3.13.11/drivers/tty/serial/kgdboc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/kgdboc.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,8 +24,9 @@
 #define MAX_CONFIG_LEN		40
 
 static struct kgdb_io		kgdboc_io_ops;
+static struct kgdb_io		kgdboc_io_ops_console;
 
-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
 static int configured		= -1;
 
 static char config[MAX_CONFIG_LEN];
@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
 	kgdboc_unregister_kbd();
 	if (configured == 1)
 		kgdb_unregister_io_module(&kgdboc_io_ops);
+	else if (configured == 2)
+		kgdb_unregister_io_module(&kgdboc_io_ops_console);
 }
 
 static int configure_kgdboc(void)
@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
 	int err;
 	char *cptr = config;
 	struct console *cons;
+	int is_console = 0;
 
 	err = kgdboc_option_setup(config);
 	if (err || !strlen(config) || isspace(config[0]))
 		goto noconfig;
 
 	err = -ENODEV;
-	kgdboc_io_ops.is_console = 0;
 	kgdb_tty_driver = NULL;
 
 	kgdboc_use_kms = 0;
@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
 		int idx;
 		if (cons->device && cons->device(cons, &idx) == p &&
 		    idx == tty_line) {
-			kgdboc_io_ops.is_console = 1;
+			is_console = 1;
 			break;
 		}
 		cons = cons->next;
@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
 	kgdb_tty_line = tty_line;
 
 do_register:
-	err = kgdb_register_io_module(&kgdboc_io_ops);
+	if (is_console) {
+		err = kgdb_register_io_module(&kgdboc_io_ops_console);
+		configured = 2;
+	} else {
+		err = kgdb_register_io_module(&kgdboc_io_ops);
+		configured = 1;
+	}
 	if (err)
 		goto noconfig;
 
@@ -205,8 +214,6 @@ do_register:
 	if (err)
 		goto nmi_con_failed;
 
-	configured = 1;
-
 	return 0;
 
 nmi_con_failed:
@@ -223,7 +230,7 @@ noconfig:
 static int __init init_kgdboc(void)
 {
 	/* Already configured? */
-	if (configured == 1)
+	if (configured >= 1)
 		return 0;
 
 	return configure_kgdboc();
@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const ch
 	if (config[len - 1] == '\n')
 		config[len - 1] = '\0';
 
-	if (configured == 1)
+	if (configured >= 1)
 		cleanup_kgdboc();
 
 	/* Go and configure with the new params. */
@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
 	.post_exception		= kgdboc_post_exp_handler,
 };
 
+static struct kgdb_io kgdboc_io_ops_console = {
+	.name			= "kgdboc",
+	.read_char		= kgdboc_get_char,
+	.write_char		= kgdboc_put_char,
+	.pre_exception		= kgdboc_pre_exp_handler,
+	.post_exception		= kgdboc_post_exp_handler,
+	.is_console		= 1
+};
+
 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
 /* This is only available if kgdboc is a built in for early debugging */
 static int __init kgdboc_early_init(char *opt)
diff -ruNp linux-3.13.11/drivers/tty/serial/msm_serial.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/msm_serial.c
--- linux-3.13.11/drivers/tty/serial/msm_serial.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/msm_serial.c	2014-07-09 12:00:15.000000000 +0200
@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_drive
 	.cons = MSM_CONSOLE,
 };
 
-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
 
 static const struct of_device_id msm_uartdm_table[] = {
 	{ .compatible = "qcom,msm-uartdm" },
@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struc
 	int irq;
 
 	if (pdev->id == -1)
-		pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
+		pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
 
 	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
 		return -ENXIO;
diff -ruNp linux-3.13.11/drivers/tty/serial/samsung.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/samsung.c
--- linux-3.13.11/drivers/tty/serial/samsung.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/samsung.c	2014-07-09 12:00:15.000000000 +0200
@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(stru
 	}
 }
 
+static int s3c64xx_serial_startup(struct uart_port *port);
 static int s3c24xx_serial_startup(struct uart_port *port)
 {
 	struct s3c24xx_uart_port *ourport = to_ourport(port);
 	int ret;
 
+	/* Startup sequence is different for s3c64xx and higher SoC's */
+	if (s3c24xx_serial_has_interrupt_mask(port))
+		return s3c64xx_serial_startup(port);
+
 	dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
 	    port->mapbase, port->membase);
 
@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(stru
 	/* setup info for port */
 	port->dev	= &platdev->dev;
 
-	/* Startup sequence is different for s3c64xx and higher SoC's */
-	if (s3c24xx_serial_has_interrupt_mask(port))
-		s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
-
 	port->uartclk = 1;
 
 	if (cfg->uart_flags & UPF_CONS_FLOW) {
diff -ruNp linux-3.13.11/drivers/tty/serial/serial_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/serial_core.c
--- linux-3.13.11/drivers/tty/serial/serial_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/serial/serial_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struc
 		uart_flush_buffer(tty);
 		uart_shutdown(tty, state);
 		spin_lock_irqsave(&port->lock, flags);
-		port->count = 0;
+		atomic_set(&port->count, 0);
 		clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
 		spin_unlock_irqrestore(&port->lock, flags);
 		tty_port_tty_set(port, NULL);
@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *
 		goto end;
 	}
 
-	port->count++;
+	atomic_inc(&port->count);
 	if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
 		retval = -ENXIO;
 		goto err_dec_count;
@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *
 	/*
 	 * Make sure the device is in D0 state.
 	 */
-	if (port->count == 1)
+	if (atomic_read(&port->count) == 1)
 		uart_change_pm(state, UART_PM_STATE_ON);
 
 	/*
@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *
 end:
 	return retval;
 err_dec_count:
-	port->count--;
+	atomic_dec(&port->count);
 	mutex_unlock(&port->mutex);
 	goto end;
 }
diff -ruNp linux-3.13.11/drivers/tty/synclink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclink.c
--- linux-3.13.11/drivers/tty/synclink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclink.c	2014-07-09 12:00:15.000000000 +0200
@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct
 	
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
-			 __FILE__,__LINE__, info->device_name, info->port.count);
+			 __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
 
 	if (tty_port_close_start(&info->port, tty, filp) == 0)
 		goto cleanup;
@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct
 cleanup:			
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
-			tty->driver->name, info->port.count);
+			tty->driver->name, atomic_read(&info->port.count));
 			
 }	/* end of mgsl_close() */
 
@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struc
 
 	mgsl_flush_buffer(tty);
 	shutdown(info);
-	
-	info->port.count = 0;	
+
+	atomic_set(&info->port.count, 0);
 	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
 	info->port.tty = NULL;
 
@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_st
 	
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):block_til_ready before block on %s count=%d\n",
-			 __FILE__,__LINE__, tty->driver->name, port->count );
+			 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
 	spin_lock_irqsave(&info->irq_spinlock, flags);
 	if (!tty_hung_up_p(filp)) {
 		extra_count = true;
-		port->count--;
+		atomic_dec(&port->count);
 	}
 	spin_unlock_irqrestore(&info->irq_spinlock, flags);
 	port->blocked_open++;
@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_st
 		
 		if (debug_level >= DEBUG_LEVEL_INFO)
 			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
-				 __FILE__,__LINE__, tty->driver->name, port->count );
+				 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 				 
 		tty_unlock(tty);
 		schedule();
@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_st
 	
 	/* FIXME: Racy on hangup during close wait */
 	if (extra_count)
-		port->count++;
+		atomic_inc(&port->count);
 	port->blocked_open--;
 	
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
-			 __FILE__,__LINE__, tty->driver->name, port->count );
+			 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 			 
 	if (!retval)
 		port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *
 		
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
-			 __FILE__,__LINE__,tty->driver->name, info->port.count);
+			 __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
 
 	/* If port is closing, signal caller to try again */
 	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *
 		spin_unlock_irqrestore(&info->netlock, flags);
 		goto cleanup;
 	}
-	info->port.count++;
+	atomic_inc(&info->port.count);
 	spin_unlock_irqrestore(&info->netlock, flags);
 
-	if (info->port.count == 1) {
+	if (atomic_read(&info->port.count) == 1) {
 		/* 1st open on this device, init hardware */
 		retval = startup(info);
 		if (retval < 0)
@@ -3446,8 +3446,8 @@ cleanup:
 	if (retval) {
 		if (tty->count == 1)
 			info->port.tty = NULL; /* tty layer will release tty struct */
-		if(info->port.count)
-			info->port.count--;
+		if (atomic_read(&info->port.count))
+			atomic_dec(&info->port.count);
 	}
 	
 	return retval;
@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_dev
 	unsigned short new_crctype;
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	switch (encoding)
@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_devic
 
 	/* arbitrate between network and tty opens */
 	spin_lock_irqsave(&info->netlock, flags);
-	if (info->port.count != 0 || info->netcount != 0) {
+	if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
 		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
 		spin_unlock_irqrestore(&info->netlock, flags);
 		return -EBUSY;
@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_devi
 		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	if (cmd != SIOCWANDEV)
diff -ruNp linux-3.13.11/drivers/tty/synclink_gt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclink_gt.c
--- linux-3.13.11/drivers/tty/synclink_gt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclink_gt.c	2014-07-09 12:00:15.000000000 +0200
@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty,
 	tty->driver_data = info;
 	info->port.tty = tty;
 
-	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
+	DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
 
 	/* If port is closing, signal caller to try again */
 	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty,
 		mutex_unlock(&info->port.mutex);
 		goto cleanup;
 	}
-	info->port.count++;
+	atomic_inc(&info->port.count);
 	spin_unlock_irqrestore(&info->netlock, flags);
 
-	if (info->port.count == 1) {
+	if (atomic_read(&info->port.count) == 1) {
 		/* 1st open on this device, init hardware */
 		retval = startup(info);
 		if (retval < 0) {
@@ -715,8 +715,8 @@ cleanup:
 	if (retval) {
 		if (tty->count == 1)
 			info->port.tty = NULL; /* tty layer will release tty struct */
-		if(info->port.count)
-			info->port.count--;
+		if(atomic_read(&info->port.count))
+			atomic_dec(&info->port.count);
 	}
 
 	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty
 
 	if (sanity_check(info, tty->name, "close"))
 		return;
-	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
+	DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
 
 	if (tty_port_close_start(&info->port, tty, filp) == 0)
 		goto cleanup;
@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty
 	tty_port_close_end(&info->port, tty);
 	info->port.tty = NULL;
 cleanup:
-	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
+	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
 }
 
 static void hangup(struct tty_struct *tty)
@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tt
 	shutdown(info);
 
 	spin_lock_irqsave(&info->port.lock, flags);
-	info->port.count = 0;
+	atomic_set(&info->port.count, 0);
 	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
 	info->port.tty = NULL;
 	spin_unlock_irqrestore(&info->port.lock, flags);
@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_dev
 	unsigned short new_crctype;
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	DBGINFO(("%s hdlcdev_attach\n", info->device_name));
@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_devic
 
 	/* arbitrate between network and tty opens */
 	spin_lock_irqsave(&info->netlock, flags);
-	if (info->port.count != 0 || info->netcount != 0) {
+	if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
 		DBGINFO(("%s hdlc_open busy\n", dev->name));
 		spin_unlock_irqrestore(&info->netlock, flags);
 		return -EBUSY;
@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_devi
 	DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	if (cmd != SIOCWANDEV)
@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int du
 		if (port == NULL)
 			continue;
 		spin_lock(&port->lock);
-		if ((port->port.count || port->netcount) &&
+		if ((atomic_read(&port->port.count) || port->netcount) &&
 		    port->pending_bh && !port->bh_running &&
 		    !port->bh_requested) {
 			DBGISR(("%s bh queued\n", port->device_name));
@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_st
 	spin_lock_irqsave(&info->lock, flags);
 	if (!tty_hung_up_p(filp)) {
 		extra_count = true;
-		port->count--;
+		atomic_dec(&port->count);
 	}
 	spin_unlock_irqrestore(&info->lock, flags);
 	port->blocked_open++;
@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_st
 	remove_wait_queue(&port->open_wait, &wait);
 
 	if (extra_count)
-		port->count++;
+		atomic_inc(&port->count);
 	port->blocked_open--;
 
 	if (!retval)
diff -ruNp linux-3.13.11/drivers/tty/synclinkmp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclinkmp.c
--- linux-3.13.11/drivers/tty/synclinkmp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/synclinkmp.c	2014-07-09 12:00:15.000000000 +0200
@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty,
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s open(), old ref count = %d\n",
-			 __FILE__,__LINE__,tty->driver->name, info->port.count);
+			 __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
 
 	/* If port is closing, signal caller to try again */
 	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty,
 		spin_unlock_irqrestore(&info->netlock, flags);
 		goto cleanup;
 	}
-	info->port.count++;
+	atomic_inc(&info->port.count);
 	spin_unlock_irqrestore(&info->netlock, flags);
 
-	if (info->port.count == 1) {
+	if (atomic_read(&info->port.count) == 1) {
 		/* 1st open on this device, init hardware */
 		retval = startup(info);
 		if (retval < 0)
@@ -796,8 +796,8 @@ cleanup:
 	if (retval) {
 		if (tty->count == 1)
 			info->port.tty = NULL; /* tty layer will release tty struct */
-		if(info->port.count)
-			info->port.count--;
+		if(atomic_read(&info->port.count))
+			atomic_dec(&info->port.count);
 	}
 
 	return retval;
@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s close() entry, count=%d\n",
-			 __FILE__,__LINE__, info->device_name, info->port.count);
+			 __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
 
 	if (tty_port_close_start(&info->port, tty, filp) == 0)
 		goto cleanup;
@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty
 cleanup:
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
-			tty->driver->name, info->port.count);
+			tty->driver->name, atomic_read(&info->port.count));
 }
 
 /* Called by tty_hangup() when a hangup is signaled.
@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tt
 	shutdown(info);
 
 	spin_lock_irqsave(&info->port.lock, flags);
-	info->port.count = 0;
+	atomic_set(&info->port.count, 0);
 	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
 	info->port.tty = NULL;
 	spin_unlock_irqrestore(&info->port.lock, flags);
@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_dev
 	unsigned short new_crctype;
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	switch (encoding)
@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_devic
 
 	/* arbitrate between network and tty opens */
 	spin_lock_irqsave(&info->netlock, flags);
-	if (info->port.count != 0 || info->netcount != 0) {
+	if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
 		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
 		spin_unlock_irqrestore(&info->netlock, flags);
 		return -EBUSY;
@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_devi
 		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
 
 	/* return error if TTY interface open */
-	if (info->port.count)
+	if (atomic_read(&info->port.count))
 		return -EBUSY;
 
 	if (cmd != SIOCWANDEV)
@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(
 		 * do not request bottom half processing if the
 		 * device is not open in a normal mode.
 		 */
-		if ( port && (port->port.count || port->netcount) &&
+		if ( port && (atomic_read(&port->port.count) || port->netcount) &&
 		     port->pending_bh && !port->bh_running &&
 		     !port->bh_requested ) {
 			if ( debug_level >= DEBUG_LEVEL_ISR )
@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_st
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s block_til_ready() before block, count=%d\n",
-			 __FILE__,__LINE__, tty->driver->name, port->count );
+			 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
 	spin_lock_irqsave(&info->lock, flags);
 	if (!tty_hung_up_p(filp)) {
 		extra_count = true;
-		port->count--;
+		atomic_dec(&port->count);
 	}
 	spin_unlock_irqrestore(&info->lock, flags);
 	port->blocked_open++;
@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_st
 
 		if (debug_level >= DEBUG_LEVEL_INFO)
 			printk("%s(%d):%s block_til_ready() count=%d\n",
-				 __FILE__,__LINE__, tty->driver->name, port->count );
+				 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
 		tty_unlock(tty);
 		schedule();
@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_st
 	remove_wait_queue(&port->open_wait, &wait);
 
 	if (extra_count)
-		port->count++;
+		atomic_inc(&port->count);
 	port->blocked_open--;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s block_til_ready() after, count=%d\n",
-			 __FILE__,__LINE__, tty->driver->name, port->count );
+			 __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
 
 	if (!retval)
 		port->flags |= ASYNC_NORMAL_ACTIVE;
diff -ruNp linux-3.13.11/drivers/tty/sysrq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/sysrq.c
--- linux-3.13.11/drivers/tty/sysrq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/sysrq.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,6 +46,7 @@
 #include <linux/jiffies.h>
 #include <linux/syscalls.h>
 #include <linux/of.h>
+#include <linux/vserver/debug.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
@@ -407,6 +408,21 @@ static struct sysrq_key_op sysrq_unrt_op
 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
 };
 
+
+#ifdef CONFIG_VSERVER_DEBUG
+static void sysrq_handle_vxinfo(int key)
+{
+	dump_vx_info_inactive((key == 'x') ? 0 : 1);
+}
+
+static struct sysrq_key_op sysrq_showvxinfo_op = {
+	.handler	= sysrq_handle_vxinfo,
+	.help_msg	= "conteXt",
+	.action_msg	= "Show Context Info",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+#endif
+
 /* Key Operations table and lock */
 static DEFINE_SPINLOCK(sysrq_key_table_lock);
 
@@ -462,7 +478,11 @@ static struct sysrq_key_op *sysrq_key_ta
 	&sysrq_showstate_blocked_op,	/* w */
 	/* x: May be registered on ppc/powerpc for xmon */
 	/* x: May be registered on sparc64 for global PMU dump */
+#ifdef CONFIG_VSERVER_DEBUG
+	&sysrq_showvxinfo_op,		/* x */
+#else
 	NULL,				/* x */
+#endif
 	/* y: May be registered on sparc64 for global register dump */
 	NULL,				/* y */
 	&sysrq_ftrace_dump_op,		/* z */
@@ -477,6 +497,8 @@ static int sysrq_key_table_key2index(int
 		retval = key - '0';
 	else if ((key >= 'a') && (key <= 'z'))
 		retval = key + 10 - 'a';
+	else if ((key >= 'A') && (key <= 'Z'))
+		retval = key + 10 - 'A';
 	else
 		retval = -1;
 	return retval;
@@ -1075,7 +1097,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 				   size_t count, loff_t *ppos)
 {
-	if (count) {
+	if (count && capable(CAP_SYS_ADMIN)) {
 		char c;
 
 		if (get_user(c, buf))
diff -ruNp linux-3.13.11/drivers/tty/tty_io.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_io.c
--- linux-3.13.11/drivers/tty/tty_io.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_io.c	2014-07-09 12:00:15.000000000 +0200
@@ -104,6 +104,7 @@
 
 #include <linux/kmod.h>
 #include <linux/nsproxy.h>
+#include <linux/vs_pid.h>
 
 #undef TTY_DEBUG_HANGUP
 
@@ -2219,7 +2220,8 @@ static int tiocsti(struct tty_struct *tt
 	char ch, mbz = 0;
 	struct tty_ldisc *ld;
 
-	if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+	if (((current->signal->tty != tty) &&
+		!vx_capable(CAP_SYS_ADMIN, VXC_TIOCSTI)))
 		return -EPERM;
 	if (get_user(ch, p))
 		return -EFAULT;
@@ -2507,6 +2509,7 @@ static int tiocspgrp(struct tty_struct *
 		return -ENOTTY;
 	if (get_user(pgrp_nr, p))
 		return -EFAULT;
+	pgrp_nr = vx_rmap_pid(pgrp_nr);
 	if (pgrp_nr < 0)
 		return -EINVAL;
 	rcu_read_lock();
@@ -3475,7 +3478,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
 
 void tty_default_fops(struct file_operations *fops)
 {
-	*fops = tty_fops;
+	memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
 }
 
 /*
diff -ruNp linux-3.13.11/drivers/tty/tty_ldisc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_ldisc.c
--- linux-3.13.11/drivers/tty/tty_ldisc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_ldisc.c	2014-07-09 12:00:15.000000000 +0200
@@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct
 	raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
 	tty_ldiscs[disc] = new_ldisc;
 	new_ldisc->num = disc;
-	new_ldisc->refcount = 0;
+	atomic_set(&new_ldisc->refcount, 0);
 	raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
 
 	return ret;
@@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
-	if (tty_ldiscs[disc]->refcount)
+	if (atomic_read(&tty_ldiscs[disc]->refcount))
 		ret = -EBUSY;
 	else
 		tty_ldiscs[disc] = NULL;
@@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(i
 	if (ldops) {
 		ret = ERR_PTR(-EAGAIN);
 		if (try_module_get(ldops->owner)) {
-			ldops->refcount++;
+			atomic_inc(&ldops->refcount);
 			ret = ldops;
 		}
 	}
@@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_o
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
-	ldops->refcount--;
+	atomic_dec(&ldops->refcount);
 	module_put(ldops->owner);
 	raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
 }
diff -ruNp linux-3.13.11/drivers/tty/tty_port.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_port.c
--- linux-3.13.11/drivers/tty/tty_port.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/tty_port.c	2014-07-09 12:00:15.000000000 +0200
@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *po
 	unsigned long flags;
 
 	spin_lock_irqsave(&port->lock, flags);
-	port->count = 0;
+	atomic_set(&port->count, 0);
 	port->flags &= ~ASYNC_NORMAL_ACTIVE;
 	tty = port->tty;
 	if (tty)
@@ -394,7 +394,7 @@ int tty_port_block_til_ready(struct tty_
 	/* The port lock protects the port counts */
 	spin_lock_irqsave(&port->lock, flags);
 	if (!tty_hung_up_p(filp))
-		port->count--;
+		atomic_dec(&port->count);
 	port->blocked_open++;
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -436,7 +436,7 @@ int tty_port_block_til_ready(struct tty_
 	   we must not mess that up further */
 	spin_lock_irqsave(&port->lock, flags);
 	if (!tty_hung_up_p(filp))
-		port->count++;
+		atomic_inc(&port->count);
 	port->blocked_open--;
 	if (retval == 0)
 		port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -470,19 +470,19 @@ int tty_port_close_start(struct tty_port
 		return 0;
 	}
 
-	if (tty->count == 1 && port->count != 1) {
+	if (tty->count == 1 && atomic_read(&port->count) != 1) {
 		printk(KERN_WARNING
 		    "tty_port_close_start: tty->count = 1 port count = %d.\n",
-								port->count);
-		port->count = 1;
+								atomic_read(&port->count));
+		atomic_set(&port->count, 1);
 	}
-	if (--port->count < 0) {
+	if (atomic_dec_return(&port->count) < 0) {
 		printk(KERN_WARNING "tty_port_close_start: count = %d\n",
-								port->count);
-		port->count = 0;
+								atomic_read(&port->count));
+		atomic_set(&port->count, 0);
 	}
 
-	if (port->count) {
+	if (atomic_read(&port->count)) {
 		spin_unlock_irqrestore(&port->lock, flags);
 		return 0;
 	}
@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port,
 {
 	spin_lock_irq(&port->lock);
 	if (!tty_hung_up_p(filp))
-		++port->count;
+		atomic_inc(&port->count);
 	spin_unlock_irq(&port->lock);
 	tty_port_tty_set(port, tty);
 
diff -ruNp linux-3.13.11/drivers/tty/vt/keyboard.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/vt/keyboard.c
--- linux-3.13.11/drivers/tty/vt/keyboard.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/tty/vt/keyboard.c	2014-07-09 12:00:15.000000000 +0200
@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, u
 	     kbd->kbdmode == VC_OFF) &&
 	     value != KVAL(K_SAK))
 		return;		/* SAK is allowed even in raw mode */
+
+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
+	{
+		void *func = fn_handler[value];
+		if (func == fn_show_state || func == fn_show_ptregs ||
+		    func == fn_show_mem)
+			return;
+	}
+#endif
+
 	fn_handler[value](vc);
 }
 
@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbe
 	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
 		return -EFAULT;
 
-	if (!capable(CAP_SYS_TTY_CONFIG))
-		perm = 0;
-
 	switch (cmd) {
 	case KDGKBENT:
 		/* Ensure another thread doesn't free it under us */
@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbe
 		spin_unlock_irqrestore(&kbd_event_lock, flags);
 		return put_user(val, &user_kbe->kb_value);
 	case KDSKBENT:
+		if (!capable(CAP_SYS_TTY_CONFIG))
+			perm = 0;
+
 		if (!perm)
 			return -EPERM;
 		if (!i && v == K_NOSUCHMAP) {
@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kb
 	int i, j, k;
 	int ret;
 
-	if (!capable(CAP_SYS_TTY_CONFIG))
-		perm = 0;
-
 	kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
 	if (!kbs) {
 		ret = -ENOMEM;
@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kb
 		kfree(kbs);
 		return ((p && *p) ? -EOVERFLOW : 0);
 	case KDSKBSENT:
+		if (!capable(CAP_SYS_TTY_CONFIG))
+			perm = 0;
+
 		if (!perm) {
 			ret = -EPERM;
 			goto reterr;
diff -ruNp linux-3.13.11/drivers/uio/uio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/uio/uio.c
--- linux-3.13.11/drivers/uio/uio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/uio/uio.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@
 #include <linux/kobject.h>
 #include <linux/cdev.h>
 #include <linux/uio_driver.h>
+#include <asm/local.h>
 
 #define UIO_MAX_DEVICES		(1U << MINORBITS)
 
@@ -32,7 +33,7 @@ struct uio_device {
 	struct module		*owner;
 	struct device		*dev;
 	int			minor;
-	atomic_t		event;
+	atomic_unchecked_t	event;
 	struct fasync_struct	*async_queue;
 	wait_queue_head_t	wait;
 	struct uio_info		*info;
@@ -243,7 +244,7 @@ static ssize_t event_show(struct device
 			  struct device_attribute *attr, char *buf)
 {
 	struct uio_device *idev = dev_get_drvdata(dev);
-	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
+	return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
 }
 static DEVICE_ATTR_RO(event);
 
@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *i
 {
 	struct uio_device *idev = info->uio_dev;
 
-	atomic_inc(&idev->event);
+	atomic_inc_unchecked(&idev->event);
 	wake_up_interruptible(&idev->wait);
 	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
 }
@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode,
 	}
 
 	listener->dev = idev;
-	listener->event_count = atomic_read(&idev->event);
+	listener->event_count = atomic_read_unchecked(&idev->event);
 	filep->private_data = listener;
 
 	if (idev->info->open) {
@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file
 		return -EIO;
 
 	poll_wait(filep, &idev->wait, wait);
-	if (listener->event_count != atomic_read(&idev->event))
+	if (listener->event_count != atomic_read_unchecked(&idev->event))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *fil
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		event_count = atomic_read(&idev->event);
+		event_count = atomic_read_unchecked(&idev->event);
 		if (event_count != listener->event_count) {
 			if (copy_to_user(buf, &event_count, count))
 				retval = -EFAULT;
@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *fi
 static int uio_find_mem_index(struct vm_area_struct *vma)
 {
 	struct uio_device *idev = vma->vm_private_data;
+	unsigned long size;
 
 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
-		if (idev->info->mem[vma->vm_pgoff].size == 0)
+		size = idev->info->mem[vma->vm_pgoff].size;
+		if (size == 0)
+			return -1;
+		if (vma->vm_end - vma->vm_start > size)
 			return -1;
 		return (int)vma->vm_pgoff;
 	}
@@ -825,7 +830,7 @@ int __uio_register_device(struct module
 	idev->owner = owner;
 	idev->info = info;
 	init_waitqueue_head(&idev->wait);
-	atomic_set(&idev->event, 0);
+	atomic_set_unchecked(&idev->event, 0);
 
 	ret = uio_get_minor(idev);
 	if (ret)
diff -ruNp linux-3.13.11/drivers/usb/atm/cxacru.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/atm/cxacru.c
--- linux-3.13.11/drivers/usb/atm/cxacru.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/atm/cxacru.c	2014-07-09 12:00:15.000000000 +0200
@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
 		ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
 		if (ret < 2)
 			return -EINVAL;
-		if (index < 0 || index > 0x7f)
+		if (index > 0x7f)
 			return -EINVAL;
 		pos += tmp;
 
diff -ruNp linux-3.13.11/drivers/usb/atm/usbatm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/atm/usbatm.c
--- linux-3.13.11/drivers/usb/atm/usbatm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/atm/usbatm.c	2014-07-09 12:00:15.000000000 +0200
@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(stru
 		if (printk_ratelimit())
 			atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
 				__func__, vpi, vci);
-		atomic_inc(&vcc->stats->rx_err);
+		atomic_inc_unchecked(&vcc->stats->rx_err);
 		return;
 	}
 
@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(stru
 		if (length > ATM_MAX_AAL5_PDU) {
 			atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
 				  __func__, length, vcc);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			goto out;
 		}
 
@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(stru
 		if (sarb->len < pdu_length) {
 			atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
 				  __func__, pdu_length, sarb->len, vcc);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			goto out;
 		}
 
 		if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
 			atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
 				  __func__, vcc);
-			atomic_inc(&vcc->stats->rx_err);
+			atomic_inc_unchecked(&vcc->stats->rx_err);
 			goto out;
 		}
 
@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
 			if (printk_ratelimit())
 				atm_err(instance, "%s: no memory for skb (length: %u)!\n",
 					__func__, length);
-			atomic_inc(&vcc->stats->rx_drop);
+			atomic_inc_unchecked(&vcc->stats->rx_drop);
 			goto out;
 		}
 
@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(stru
 
 		vcc->push(vcc, skb);
 
-		atomic_inc(&vcc->stats->rx);
+		atomic_inc_unchecked(&vcc->stats->rx);
 	out:
 		skb_trim(sarb, 0);
 	}
@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned l
 			struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
 
 			usbatm_pop(vcc, skb);
-			atomic_inc(&vcc->stats->tx);
+			atomic_inc_unchecked(&vcc->stats->tx);
 
 			skb = skb_dequeue(&instance->sndqueue);
 		}
@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct a
 	if (!left--)
 		return sprintf(page,
 			       "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
-			       atomic_read(&atm_dev->stats.aal5.tx),
-			       atomic_read(&atm_dev->stats.aal5.tx_err),
-			       atomic_read(&atm_dev->stats.aal5.rx),
-			       atomic_read(&atm_dev->stats.aal5.rx_err),
-			       atomic_read(&atm_dev->stats.aal5.rx_drop));
+			       atomic_read_unchecked(&atm_dev->stats.aal5.tx),
+			       atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
+			       atomic_read_unchecked(&atm_dev->stats.aal5.rx),
+			       atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
+			       atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
 
 	if (!left--) {
 		if (instance->disconnected)
diff -ruNp linux-3.13.11/drivers/usb/core/devices.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/devices.c
--- linux-3.13.11/drivers/usb/core/devices.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/devices.c	2014-07-09 12:00:15.000000000 +0200
@@ -126,7 +126,7 @@ static const char format_endpt[] =
  * time it gets called.
  */
 static struct device_connect_event {
-	atomic_t count;
+	atomic_unchecked_t count;
 	wait_queue_head_t wait;
 } device_event = {
 	.count = ATOMIC_INIT(1),
@@ -164,7 +164,7 @@ static const struct class_info clas_info
 
 void usbfs_conn_disc_event(void)
 {
-	atomic_add(2, &device_event.count);
+	atomic_add_unchecked(2, &device_event.count);
 	wake_up(&device_event.wait);
 }
 
@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(stru
 
 	poll_wait(file, &device_event.wait, wait);
 
-	event_count = atomic_read(&device_event.count);
+	event_count = atomic_read_unchecked(&device_event.count);
 	if (file->f_version != event_count) {
 		file->f_version = event_count;
 		return POLLIN | POLLRDNORM;
diff -ruNp linux-3.13.11/drivers/usb/core/devio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/devio.c
--- linux-3.13.11/drivers/usb/core/devio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/devio.c	2014-07-09 12:00:15.000000000 +0200
@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *
 	struct dev_state *ps = file->private_data;
 	struct usb_device *dev = ps->dev;
 	ssize_t ret = 0;
-	unsigned len;
+	size_t len;
 	loff_t pos;
 	int i;
 
@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *
 	for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
 		struct usb_config_descriptor *config =
 			(struct usb_config_descriptor *)dev->rawdescriptors[i];
-		unsigned int length = le16_to_cpu(config->wTotalLength);
+		size_t length = le16_to_cpu(config->wTotalLength);
 
 		if (*ppos < pos + length) {
 
 			/* The descriptor may claim to be longer than it
 			 * really is.  Here is the actual allocated length. */
-			unsigned alloclen =
+			size_t alloclen =
 				le16_to_cpu(dev->config[i].desc.wTotalLength);
 
-			len = length - (*ppos - pos);
+			len = length + pos - *ppos;
 			if (len > nbytes)
 				len = nbytes;
 
 			/* Simply don't write (skip over) unallocated parts */
 			if (alloclen > (*ppos - pos)) {
-				alloclen -= (*ppos - pos);
+				alloclen = alloclen + pos - *ppos;
 				if (copy_to_user(buf,
 				    dev->rawdescriptors[i] + (*ppos - pos),
 				    min(len, alloclen))) {
diff -ruNp linux-3.13.11/drivers/usb/core/hcd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/hcd.c
--- linux-3.13.11/drivers/usb/core/hcd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/hcd.c	2014-07-09 12:00:15.000000000 +0200
@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb,
 	 */
 	usb_get_urb(urb);
 	atomic_inc(&urb->use_count);
-	atomic_inc(&urb->dev->urbnum);
+	atomic_inc_unchecked(&urb->dev->urbnum);
 	usbmon_urb_submit(&hcd->self, urb);
 
 	/* NOTE requirements on root-hub callers (usbfs and the hub
@@ -1576,7 +1576,7 @@ int usb_hcd_submit_urb (struct urb *urb,
 		urb->hcpriv = NULL;
 		INIT_LIST_HEAD(&urb->urb_list);
 		atomic_dec(&urb->use_count);
-		atomic_dec(&urb->dev->urbnum);
+		atomic_dec_unchecked(&urb->dev->urbnum);
 		if (atomic_read(&urb->reject))
 			wake_up(&usb_kill_urb_queue);
 		usb_put_urb(urb);
diff -ruNp linux-3.13.11/drivers/usb/core/hub.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/hub.c
--- linux-3.13.11/drivers/usb/core/hub.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/hub.c	2014-07-09 12:00:15.000000000 +0200
@@ -27,6 +27,7 @@
 #include <linux/freezer.h>
 #include <linux/random.h>
 #include <linux/pm_qos.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -4437,6 +4438,10 @@ static void hub_port_connect_change(stru
 			goto done;
 		return;
 	}
+
+	if (gr_handle_new_usb())
+		goto done;
+
 	if (hub_is_superspeed(hub->hdev))
 		unit_load = 150;
 	else
diff -ruNp linux-3.13.11/drivers/usb/core/message.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/message.c
--- linux-3.13.11/drivers/usb/core/message.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/message.c	2014-07-09 12:00:15.000000000 +0200
@@ -129,7 +129,7 @@ static int usb_internal_control_msg(stru
  * Return: If successful, the number of bytes transferred. Otherwise, a negative
  * error number.
  */
-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
 		    __u8 requesttype, __u16 value, __u16 index, void *data,
 		    __u16 size, int timeout)
 {
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
  * If successful, 0. Otherwise a negative error number. The number of actual
  * bytes transferred will be stored in the @actual_length paramater.
  */
-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
 		      void *data, int len, int *actual_length, int timeout)
 {
 	return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
  * bytes transferred will be stored in the @actual_length paramater.
  *
  */
-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
 		 void *data, int len, int *actual_length, int timeout)
 {
 	struct urb *urb;
diff -ruNp linux-3.13.11/drivers/usb/core/sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/sysfs.c
--- linux-3.13.11/drivers/usb/core/sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device
 	struct usb_device *udev;
 
 	udev = to_usb_device(dev);
-	return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
+	return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
 }
 static DEVICE_ATTR_RO(urbnum);
 
diff -ruNp linux-3.13.11/drivers/usb/core/usb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/usb.c
--- linux-3.13.11/drivers/usb/core/usb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/core/usb.c	2014-07-09 12:00:15.000000000 +0200
@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct
 	set_dev_node(&dev->dev, dev_to_node(bus->controller));
 	dev->state = USB_STATE_ATTACHED;
 	dev->lpm_disable_count = 1;
-	atomic_set(&dev->urbnum, 0);
+	atomic_set_unchecked(&dev->urbnum, 0);
 
 	INIT_LIST_HEAD(&dev->ep0.urb_list);
 	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
diff -ruNp linux-3.13.11/drivers/usb/dwc3/gadget.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/dwc3/gadget.c
--- linux-3.13.11/drivers/usb/dwc3/gadget.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/dwc3/gadget.c	2014-07-09 12:00:15.000000000 +0200
@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struc
 		if (!usb_endpoint_xfer_isoc(desc))
 			return 0;
 
-		memset(&trb_link, 0, sizeof(trb_link));
-
 		/* Link TRB for ISOC. The HWO bit is never reset */
 		trb_st_hw = &dep->trb_pool[0];
 
diff -ruNp linux-3.13.11/drivers/usb/early/ehci-dbgp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/early/ehci-dbgp.c
--- linux-3.13.11/drivers/usb/early/ehci-dbgp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/early/ehci-dbgp.c	2014-07-09 12:00:15.000000000 +0200
@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x,
 
 #ifdef CONFIG_KGDB
 static struct kgdb_io kgdbdbgp_io_ops;
-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
+static struct kgdb_io kgdbdbgp_io_ops_console;
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
 #else
 #define dbgp_kgdb_mode (0)
 #endif
@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
 	.write_char = kgdbdbgp_write_char,
 };
 
+static struct kgdb_io kgdbdbgp_io_ops_console = {
+	.name = "kgdbdbgp",
+	.read_char = kgdbdbgp_read_char,
+	.write_char = kgdbdbgp_write_char,
+	.is_console = 1
+};
+
 static int kgdbdbgp_wait_time;
 
 static int __init kgdbdbgp_parse_config(char *str)
@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(
 		ptr++;
 		kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
 	}
-	kgdb_register_io_module(&kgdbdbgp_io_ops);
-	kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+	if (early_dbgp_console.index != -1)
+		kgdb_register_io_module(&kgdbdbgp_io_ops_console);
+	else
+		kgdb_register_io_module(&kgdbdbgp_io_ops);
 
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/usb/gadget/u_serial.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/gadget/u_serial.c
--- linux-3.13.11/drivers/usb/gadget/u_serial.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/gadget/u_serial.c	2014-07-09 12:00:15.000000000 +0200
@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tt
 			spin_lock_irq(&port->port_lock);
 
 			/* already open?  Great. */
-			if (port->port.count) {
+			if (atomic_read(&port->port.count)) {
 				status = 0;
-				port->port.count++;
+				atomic_inc(&port->port.count);
 
 			/* currently opening/closing? wait ... */
 			} else if (port->openclose) {
@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tt
 	tty->driver_data = port;
 	port->port.tty = tty;
 
-	port->port.count = 1;
+	atomic_set(&port->port.count, 1);
 	port->openclose = false;
 
 	/* if connected, start the I/O stream */
@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *
 
 	spin_lock_irq(&port->port_lock);
 
-	if (port->port.count != 1) {
-		if (port->port.count == 0)
+	if (atomic_read(&port->port.count) != 1) {
+		if (atomic_read(&port->port.count) == 0)
 			WARN_ON(1);
 		else
-			--port->port.count;
+			atomic_dec(&port->port.count);
 		goto exit;
 	}
 
@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *
 	 * and sleep if necessary
 	 */
 	port->openclose = true;
-	port->port.count = 0;
+	atomic_set(&port->port.count, 0);
 
 	gser = port->port_usb;
 	if (gser && gser->disconnect)
@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *por
 	int cond;
 
 	spin_lock_irq(&port->port_lock);
-	cond = (port->port.count == 0) && !port->openclose;
+	cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
 	spin_unlock_irq(&port->port_lock);
 	return cond;
 }
@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser
 	/* if it's already open, start I/O ... and notify the serial
 	 * protocol about open/close status (connect/disconnect).
 	 */
-	if (port->port.count) {
+	if (atomic_read(&port->port.count)) {
 		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
 		gs_start_io(port);
 		if (gser->connect)
@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *
 
 	port->port_usb = NULL;
 	gser->ioport = NULL;
-	if (port->port.count > 0 || port->openclose) {
+	if (atomic_read(&port->port.count) > 0 || port->openclose) {
 		wake_up_interruptible(&port->drain_wait);
 		if (port->port.tty)
 			tty_hangup(port->port.tty);
@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *
 
 	/* finally, free any unused/unusable I/O buffers */
 	spin_lock_irqsave(&port->port_lock, flags);
-	if (port->port.count == 0 && !port->openclose)
+	if (atomic_read(&port->port.count) == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
 	gs_free_requests(gser->out, &port->read_pool, NULL);
 	gs_free_requests(gser->out, &port->read_queue, NULL);
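
Throughout u_serial.c the tty_port open count moves from a plain int to atomic_t, matching the treatment of struct tty_port elsewhere in the patch; the accesses above stay serialized by port_lock, the atomic type mainly keeps the field consistent with the other atomic conversions. Reduced to its essentials, and using a hypothetical stand-in structure rather than the driver's real types, the counting pattern is:

/* sketch with an assumed stand-in structure, not the driver's real types */
struct port_like {
	spinlock_t	lock;
	atomic_t	count;			/* was: int count */
};

static void port_like_open(struct port_like *p)
{
	spin_lock_irq(&p->lock);
	if (atomic_read(&p->count))
		atomic_inc(&p->count);		/* already open: bump the count */
	else
		atomic_set(&p->count, 1);	/* first opener */
	spin_unlock_irq(&p->lock);
}
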
diff -ruNp linux-3.13.11/drivers/usb/host/ehci-hub.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/host/ehci-hub.c
--- linux-3.13.11/drivers/usb/host/ehci-hub.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/host/ehci-hub.c	2014-07-09 12:00:15.000000000 +0200
@@ -780,7 +780,7 @@ static struct urb *request_single_step_s
 	urb->transfer_flags = URB_DIR_IN;
 	usb_get_urb(urb);
 	atomic_inc(&urb->use_count);
-	atomic_inc(&urb->dev->urbnum);
+	atomic_inc_unchecked(&urb->dev->urbnum);
 	urb->setup_dma = dma_map_single(
 			hcd->self.controller,
 			urb->setup_packet,
@@ -847,7 +847,7 @@ static int ehset_single_step_set_feature
 	urb->status = -EINPROGRESS;
 	usb_get_urb(urb);
 	atomic_inc(&urb->use_count);
-	atomic_inc(&urb->dev->urbnum);
+	atomic_inc_unchecked(&urb->dev->urbnum);
 	retval = submit_single_step_set_feature(hcd, urb, 0);
 	if (!retval && !wait_for_completion_timeout(&done,
 						msecs_to_jiffies(2000))) {
diff -ruNp linux-3.13.11/drivers/usb/misc/appledisplay.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/misc/appledisplay.c
--- linux-3.13.11/drivers/usb/misc/appledisplay.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/misc/appledisplay.c	2014-07-09 12:00:15.000000000 +0200
@@ -83,7 +83,7 @@ struct appledisplay {
 	spinlock_t lock;
 };
 
-static atomic_t count_displays = ATOMIC_INIT(0);
+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
 static struct workqueue_struct *wq;
 
 static void appledisplay_complete(struct urb *urb)
@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb
 
 	/* Register backlight device */
 	snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
-		atomic_inc_return(&count_displays) - 1);
+		atomic_inc_return_unchecked(&count_displays) - 1);
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 0xff;
diff -ruNp linux-3.13.11/drivers/usb/serial/console.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/serial/console.c
--- linux-3.13.11/drivers/usb/serial/console.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/serial/console.c	2014-07-09 12:00:15.000000000 +0200
@@ -124,7 +124,7 @@ static int usb_console_setup(struct cons
 
 	info->port = port;
 
-	++port->port.count;
+	atomic_inc(&port->port.count);
 	if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
 		if (serial->type->set_termios) {
 			/*
@@ -170,7 +170,7 @@ static int usb_console_setup(struct cons
 	}
 	/* Now that any required fake tty operations are completed restore
 	 * the tty port count */
-	--port->port.count;
+	atomic_dec(&port->port.count);
 	/* The console is special in terms of closing the device so
 	 * indicate this port is now acting as a system console. */
 	port->port.console = 1;
@@ -183,7 +183,7 @@ static int usb_console_setup(struct cons
  free_tty:
 	kfree(tty);
  reset_open_count:
-	port->port.count = 0;
+	atomic_set(&port->port.count, 0);
 	usb_autopm_put_interface(serial->interface);
  error_get_interface:
 	usb_serial_put(serial);
@@ -194,7 +194,7 @@ static int usb_console_setup(struct cons
 static void usb_console_write(struct console *co,
 					const char *buf, unsigned count)
 {
-	static struct usbcons_info *info = &usbcons_info;
+	struct usbcons_info *info = &usbcons_info;
 	struct usb_serial_port *port = info->port;
 	struct usb_serial *serial;
 	int retval = -ENODEV;
diff -ruNp linux-3.13.11/drivers/usb/storage/usb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/storage/usb.h
--- linux-3.13.11/drivers/usb/storage/usb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/storage/usb.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,7 +63,7 @@ struct us_unusual_dev {
 	__u8  useProtocol;
 	__u8  useTransport;
 	int (*initFunction)(struct us_data *);
-};
+} __do_const;
 
 
 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
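
__do_const is an annotation for the PaX constify gcc plugin: a structure like us_unusual_dev that only carries device matching data and an init-function pointer gets forced into read-only placement even though the declaration does not spell out const. Outside a constify-enabled build the marker must disappear, in the same way as the other annotations this patch introduces; a minimal sketch:

/* sketch: marker for the constify plugin, empty when the plugin is absent */
#ifndef __do_const
# define __do_const
#endif
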
diff -ruNp linux-3.13.11/drivers/usb/wusbcore/wa-hc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/wusbcore/wa-hc.h
--- linux-3.13.11/drivers/usb/wusbcore/wa-hc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/wusbcore/wa-hc.h	2014-07-09 12:00:15.000000000 +0200
@@ -225,7 +225,7 @@ struct wahc {
 	spinlock_t xfer_list_lock;
 	struct work_struct xfer_enqueue_work;
 	struct work_struct xfer_error_work;
-	atomic_t xfer_id_count;
+	atomic_unchecked_t xfer_id_count;
 
 	kernel_ulong_t	quirks;
 };
@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *
 	INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
 	INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
 	wa->dto_in_use = 0;
-	atomic_set(&wa->xfer_id_count, 1);
+	atomic_set_unchecked(&wa->xfer_id_count, 1);
 }
 
 /**
diff -ruNp linux-3.13.11/drivers/usb/wusbcore/wa-xfer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/wusbcore/wa-xfer.c
--- linux-3.13.11/drivers/usb/wusbcore/wa-xfer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/usb/wusbcore/wa-xfer.c	2014-07-09 12:00:15.000000000 +0200
@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa
  */
 static void wa_xfer_id_init(struct wa_xfer *xfer)
 {
-	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+	xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
 }
 
 /* Return the xfer's ID. */
diff -ruNp linux-3.13.11/drivers/vfio/vfio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/vfio/vfio.c
--- linux-3.13.11/drivers/vfio/vfio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/vfio/vfio.c	2014-07-09 12:00:15.000000000 +0200
@@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct
 		return 0;
 
 	/* TODO Prevent device auto probing */
-	WARN("Device %s added to live group %d!\n", dev_name(dev),
+	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
 	     iommu_group_id(group->iommu_group));
 
 	return 0;
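
The vfio change is a plain bug fix rather than a hardening tweak: WARN() takes a condition first and the format string second, so the original call passed the message text as the condition and dev_name(dev) as the format string. Supplying 1 makes the warning fire unconditionally with the intended message. For reference, the usual shape of the macro in use:

/* WARN(cond, fmt, ...) prints fmt plus a backtrace when cond is non-zero
 * and evaluates to the truth value of cond */
static int check_span(int count, int limit)
{
	if (WARN(count > limit, "count %d exceeds limit %d\n", count, limit))
		return -EINVAL;
	return 0;
}
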
diff -ruNp linux-3.13.11/drivers/vhost/vringh.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/vhost/vringh.c
--- linux-3.13.11/drivers/vhost/vringh.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/vhost/vringh.c	2014-07-09 12:00:15.000000000 +0200
@@ -530,17 +530,17 @@ static inline void __vringh_notify_disab
 /* Userspace access helpers: in this case, addresses are really userspace. */
 static inline int getu16_user(u16 *val, const u16 *p)
 {
-	return get_user(*val, (__force u16 __user *)p);
+	return get_user(*val, (u16 __force_user *)p);
 }
 
 static inline int putu16_user(u16 *p, u16 val)
 {
-	return put_user(val, (__force u16 __user *)p);
+	return put_user(val, (u16 __force_user *)p);
 }
 
 static inline int copydesc_user(void *dst, const void *src, size_t len)
 {
-	return copy_from_user(dst, (__force void __user *)src, len) ?
+	return copy_from_user(dst, (void __force_user *)src, len) ?
 		-EFAULT : 0;
 }
 
@@ -548,19 +548,19 @@ static inline int putused_user(struct vr
 			       const struct vring_used_elem *src,
 			       unsigned int num)
 {
-	return copy_to_user((__force void __user *)dst, src,
+	return copy_to_user((void __force_user *)dst, src,
 			    sizeof(*dst) * num) ? -EFAULT : 0;
 }
 
 static inline int xfer_from_user(void *src, void *dst, size_t len)
 {
-	return copy_from_user(dst, (__force void __user *)src, len) ?
+	return copy_from_user(dst, (void __force_user *)src, len) ?
 		-EFAULT : 0;
 }
 
 static inline int xfer_to_user(void *dst, void *src, size_t len)
 {
-	return copy_to_user((__force void __user *)dst, src, len) ?
+	return copy_to_user((void __force_user *)dst, src, len) ?
 		-EFAULT : 0;
 }
 
@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh,
 	vrh->last_used_idx = 0;
 	vrh->vring.num = num;
 	/* vring expects kernel addresses, but only used via accessors. */
-	vrh->vring.desc = (__force struct vring_desc *)desc;
-	vrh->vring.avail = (__force struct vring_avail *)avail;
-	vrh->vring.used = (__force struct vring_used *)used;
+	vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
+	vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
+	vrh->vring.used = (__force_kernel struct vring_used *)used;
 	return 0;
 }
 EXPORT_SYMBOL(vringh_init_user);
@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val,
 
 static inline int putu16_kern(u16 *p, u16 val)
 {
-	ACCESS_ONCE(*p) = val;
+	ACCESS_ONCE_RW(*p) = val;
 	return 0;
 }
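
The vringh accessors deliberately keep userspace addresses in kernel-typed vring fields, so under the PaX/grsec usercopy and UDEREF checking the casts need the dedicated __force_user/__force_kernel markers instead of a bare __force, and the store in putu16_kern() needs ACCESS_ONCE_RW() because plain ACCESS_ONCE() is made const-qualified by this patch. On a tree without those features the markers would reduce to the stock annotations; a hedged sketch of such fallbacks:

/* sketch of non-PaX fallbacks for the annotations used above */
#ifndef __force_user
# define __force_user		__force __user
#endif
#ifndef __force_kernel
# define __force_kernel		__force
#endif
#ifndef ACCESS_ONCE_RW
# define ACCESS_ONCE_RW(x)	ACCESS_ONCE(x)
#endif
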
 
diff -ruNp linux-3.13.11/drivers/video/arcfb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/arcfb.c
--- linux-3.13.11/drivers/video/arcfb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/arcfb.c	2014-07-09 12:00:15.000000000 +0200
@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_inf
 		return -ENOSPC;
 
 	err = 0;
-	if ((count + p) > fbmemlength) {
+	if (count > (fbmemlength - p)) {
 		count = fbmemlength - p;
 		err = -ENOSPC;
 	}
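
The arcfb_write() change is the overflow-safe form of a bounds check: with count and p both unsigned, count + p can wrap around and compare as small, letting an oversized write slip past the test; comparing count against the remaining space fbmemlength - p cannot wrap, since p has already been checked against fbmemlength a few lines up (the -ENOSPC return in the context). A small standalone illustration of the difference:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long fbmemlength = 4096, p = 16;
	unsigned long count = ULONG_MAX - 8;	/* absurdly large request */

	/* wrapping form: count + p overflows and looks "small" */
	printf("old check rejects: %d\n", (count + p) > fbmemlength);	/* 0 */

	/* overflow-safe form used by the patch */
	printf("new check rejects: %d\n", count > (fbmemlength - p));	/* 1 */
	return 0;
}
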
diff -ruNp linux-3.13.11/drivers/video/aty/aty128fb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/aty128fb.c
--- linux-3.13.11/drivers/video/aty/aty128fb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/aty128fb.c	2014-07-09 12:00:15.000000000 +0200
@@ -149,7 +149,7 @@ enum {
 };
 
 /* Must match above enum */
-static char * const r128_family[] = {
+static const char * const r128_family[] = {
 	"AGP",
 	"PCI",
 	"PRO AGP",
diff -ruNp linux-3.13.11/drivers/video/aty/atyfb_base.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/atyfb_base.c
--- linux-3.13.11/drivers/video/aty/atyfb_base.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/atyfb_base.c	2014-07-09 12:00:15.000000000 +0200
@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info
 	par->accel_flags = var->accel_flags; /* hack */
 
 	if (var->accel_flags) {
-		info->fbops->fb_sync = atyfb_sync;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_sync = atyfb_sync;
+		pax_close_kernel();
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
 	} else {
-		info->fbops->fb_sync = NULL;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_sync = NULL;
+		pax_close_kernel();
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 	}
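
Because struct fb_ops is constified elsewhere in this patch, the few spots that legitimately need to swap a method at run time (fb_sync here, the cursor and deferred-io hooks in the following files) open a temporary kernel write window with pax_open_kernel()/pax_close_kernel() and store through a void ** cast so the compiler accepts the write to a now-const member. Where PaX KERNEXEC is not enabled the pair is expected to be a no-op; a hedged sketch of that fallback:

/* sketch: without KERNEXEC the "write window" helpers do nothing */
#ifndef CONFIG_PAX_KERNEXEC
static inline unsigned long pax_open_kernel(void)  { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }
#endif
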
 
diff -ruNp linux-3.13.11/drivers/video/aty/mach64_cursor.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/mach64_cursor.c
--- linux-3.13.11/drivers/video/aty/mach64_cursor.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/aty/mach64_cursor.c	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
 #include <linux/string.h>
 
 #include <asm/io.h>
+#include <asm/pgtable.h>
 
 #ifdef __sparc__
 #include <asm/fbio.h>
@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info
 	info->sprite.buf_align = 16; 	/* and 64 lines tall. */
 	info->sprite.flags = FB_PIXMAP_IO;
 
-	info->fbops->fb_cursor = atyfb_cursor;
+	pax_open_kernel();
+	*(void **)&info->fbops->fb_cursor = atyfb_cursor;
+	pax_close_kernel();
 
 	return 0;
 }
diff -ruNp linux-3.13.11/drivers/video/backlight/kb3886_bl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/backlight/kb3886_bl.c
--- linux-3.13.11/drivers/video/backlight/kb3886_bl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/backlight/kb3886_bl.c	2014-07-09 12:00:15.000000000 +0200
@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_mach
 static unsigned long kb3886bl_flags;
 #define KB3886BL_SUSPENDED     0x01
 
-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
 	{
 		.ident = "Sahara Touch-iT",
 		.matches = {
diff -ruNp linux-3.13.11/drivers/video/fb_defio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/fb_defio.c
--- linux-3.13.11/drivers/video/fb_defio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/fb_defio.c	2014-07-09 12:00:15.000000000 +0200
@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info
 
 	BUG_ON(!fbdefio);
 	mutex_init(&fbdefio->lock);
-	info->fbops->fb_mmap = fb_deferred_io_mmap;
+	pax_open_kernel();
+	*(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
+	pax_close_kernel();
 	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
 	INIT_LIST_HEAD(&fbdefio->pagelist);
 	if (fbdefio->delay == 0) /* set a default of 1 s */
@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_in
 		page->mapping = NULL;
 	}
 
-	info->fbops->fb_mmap = NULL;
+	*(void **)&info->fbops->fb_mmap = NULL;
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff -ruNp linux-3.13.11/drivers/video/fbmem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/fbmem.c
--- linux-3.13.11/drivers/video/fbmem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/fbmem.c	2014-07-09 12:00:15.000000000 +0200
@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_in
 			image->dx += image->width + 8;
 		}
 	} else if (rotate == FB_ROTATE_UD) {
-		for (x = 0; x < num && image->dx >= 0; x++) {
+		for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
 			info->fbops->fb_imageblit(info, image);
 			image->dx -= image->width + 8;
 		}
@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_in
 			image->dy += image->height + 8;
 		}
 	} else if (rotate == FB_ROTATE_CCW) {
-		for (x = 0; x < num && image->dy >= 0; x++) {
+		for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
 			info->fbops->fb_imageblit(info, image);
 			image->dy -= image->height + 8;
 		}
@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *
 			return -EFAULT;
 		if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
 			return -EINVAL;
-		if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
+		if (con2fb.framebuffer >= FB_MAX)
 			return -EINVAL;
 		if (!registered_fb[con2fb.framebuffer])
 			request_module("fb%d", con2fb.framebuffer);
@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct
 	__u32 data;
 	int err;
 
-	err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
+	err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
 
 	data = (__u32) (unsigned long) fix->smem_start;
 	err |= put_user(data, &fix32->smem_start);
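
All three fbmem.c tweaks revolve around unsigned types: image->dx and image->dy are __u32, so the original loop conditions "image->dx >= 0" were always true and the logo placement could walk off the left/top edge; the cast to __s32 restores the intended stop condition. Likewise con2fb.framebuffer is unsigned, which makes the "< 0" half of its range check dead code, and the copy_to_user() call now passes the id array directly rather than its address (the same pointer value, just the more conventional type for a __user destination). A tiny illustration of the comparison pitfall:

#include <stdio.h>

int main(void)
{
	unsigned int dx = 4;

	dx -= 8;					/* wraps to a huge value */
	printf("dx >= 0      -> %d\n", dx >= 0);	/* 1: always true */
	printf("(int)dx >= 0 -> %d\n", (int)dx >= 0);	/* 0: the loop would stop */
	return 0;
}
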
diff -ruNp linux-3.13.11/drivers/video/hyperv_fb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/hyperv_fb.c
--- linux-3.13.11/drivers/video/hyperv_fb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/hyperv_fb.c	2014-07-09 12:00:15.000000000 +0200
@@ -233,7 +233,7 @@ static uint screen_fb_size;
 static inline int synthvid_send(struct hv_device *hdev,
 				struct synthvid_msg *msg)
 {
-	static atomic64_t request_id = ATOMIC64_INIT(0);
+	static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
 	int ret;
 
 	msg->pipe_hdr.type = PIPE_MSG_DATA;
@@ -241,7 +241,7 @@ static inline int synthvid_send(struct h
 
 	ret = vmbus_sendpacket(hdev->channel, msg,
 			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
-			       atomic64_inc_return(&request_id),
+			       atomic64_inc_return_unchecked(&request_id),
 			       VM_PKT_DATA_INBAND, 0);
 
 	if (ret)
diff -ruNp linux-3.13.11/drivers/video/i810/i810_accel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/i810/i810_accel.c
--- linux-3.13.11/drivers/video/i810/i810_accel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/i810/i810_accel.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
 		}
 	}
 	printk("ringbuffer lockup!!!\n");
+	printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
 	i810_report_error(mmio); 
 	par->dev_flags |= LOCKUP;
 	info->pixmap.scan_align = 1;
diff -ruNp linux-3.13.11/drivers/video/logo/logo_linux_clut224.ppm linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/logo/logo_linux_clut224.ppm
--- linux-3.13.11/drivers/video/logo/logo_linux_clut224.ppm	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/logo/logo_linux_clut224.ppm	2014-07-09 12:00:15.000000000 +0200
@@ -2,1603 +2,1123 @@ P3
 # Standard 224-color Linux logo
 80 80
 255
 [ raw PPM pixel data omitted: this hunk removes the 1603 data lines of the standard 80x80 224-color Linux boot logo and adds 1123 lines for a replacement image ]
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  74  74  74  50  50  50   2   2   6
- 26  26  26  26  26  26   2   2   6 106 106 106
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 246 246 246 218 218 218 202 202 202
-210 210 210  14  14  14   2   2   6   2   2   6
- 30  30  30  22  22  22   2   2   6   2   2   6
-  2   2   6   2   2   6  18  18  18  86  86  86
- 42  42  42  14  14  14   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 42  42  42  90  90  90  22  22  22   2   2   6
- 42  42  42   2   2   6  18  18  18 218 218 218
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 250 250 250 221 221 221
-218 218 218 101 101 101   2   2   6  14  14  14
- 18  18  18  38  38  38  10  10  10   2   2   6
-  2   2   6   2   2   6   2   2   6  78  78  78
- 58  58  58  22  22  22   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 54  54  54  82  82  82   2   2   6  26  26  26
- 22  22  22   2   2   6 123 123 123 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-238 238 238 198 198 198   6   6   6  38  38  38
- 58  58  58  26  26  26  38  38  38   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
- 78  78  78  30  30  30  10  10  10   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0  10  10  10  30  30  30
- 74  74  74  58  58  58   2   2   6  42  42  42
-  2   2   6  22  22  22 231 231 231 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 246 246 246  46  46  46  38  38  38
- 42  42  42  14  14  14  38  38  38  14  14  14
-  2   2   6   2   2   6   2   2   6   6   6   6
- 86  86  86  46  46  46  14  14  14   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  42  42  42
- 90  90  90  18  18  18  18  18  18  26  26  26
-  2   2   6 116 116 116 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 250 250 250 238 238 238
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253  94  94  94   6   6   6
-  2   2   6   2   2   6  10  10  10  34  34  34
-  2   2   6   2   2   6   2   2   6   2   2   6
- 74  74  74  58  58  58  22  22  22   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0  10  10  10  26  26  26  66  66  66
- 82  82  82   2   2   6  38  38  38   6   6   6
- 14  14  14 210 210 210 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 246 246 246 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 144 144 144   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
-  2   2   6   2   2   6   2   2   6   2   2   6
- 42  42  42  74  74  74  30  30  30  10  10  10
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  42  42  42  90  90  90
- 26  26  26   6   6   6  42  42  42   2   2   6
- 74  74  74 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 242 242 242 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 182 182 182   2   2   6
-  2   2   6   2   2   6   2   2   6  46  46  46
-  2   2   6   2   2   6   2   2   6   2   2   6
- 10  10  10  86  86  86  38  38  38  10  10  10
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 10  10  10  26  26  26  66  66  66  82  82  82
-  2   2   6  22  22  22  18  18  18   2   2   6
-149 149 149 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 206 206 206   2   2   6
-  2   2   6   2   2   6   2   2   6  38  38  38
-  2   2   6   2   2   6   2   2   6   2   2   6
-  6   6   6  86  86  86  46  46  46  14  14  14
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 18  18  18  46  46  46  86  86  86  18  18  18
-  2   2   6  34  34  34  10  10  10   6   6   6
-210 210 210 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 221 221 221   6   6   6
-  2   2   6   2   2   6   6   6   6  30  30  30
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  82  82  82  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 26  26  26  66  66  66  62  62  62   2   2   6
-  2   2   6  38  38  38  10  10  10  26  26  26
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 238 238 238
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231   6   6   6
-  2   2   6   2   2   6  10  10  10  30  30  30
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  58  58  58  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 38  38  38  78  78  78   6   6   6   2   2   6
-  2   2   6  46  46  46  14  14  14  42  42  42
-246 246 246 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234  10  10  10
-  2   2   6   2   2   6  22  22  22  14  14  14
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  62  62  62  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50  74  74  74   2   2   6   2   2   6
- 14  14  14  70  70  70  34  34  34  62  62  62
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234  14  14  14
-  2   2   6   2   2   6  30  30  30   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  62  62  62  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 54  54  54  62  62  62   2   2   6   2   2   6
-  2   2   6  30  30  30  46  46  46  70  70  70
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 226 226 226  10  10  10
-  2   2   6   6   6   6  30  30  30   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6  66  66  66  58  58  58  22  22  22
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 58  58  58  62  62  62   2   2   6   2   2   6
-  2   2   6   2   2   6  30  30  30  78  78  78
-250 250 250 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 206 206 206   2   2   6
- 22  22  22  34  34  34  18  14   6  22  22  22
- 26  26  26  18  18  18   6   6   6   2   2   6
-  2   2   6  82  82  82  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  26  26  26
- 62  62  62 106 106 106  74  54  14 185 133  11
-210 162  10 121  92   8   6   6   6  62  62  62
-238 238 238 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 246 246 246
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 158 158 158  18  18  18
- 14  14  14   2   2   6   2   2   6   2   2   6
-  6   6   6  18  18  18  66  66  66  38  38  38
-  6   6   6  94  94  94  50  50  50  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 10  10  10  10  10  10  18  18  18  38  38  38
- 78  78  78 142 134 106 216 158  10 242 186  14
-246 190  14 246 190  14 156 118  10  10  10  10
- 90  90  90 238 238 238 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 246 230 190
-238 204  91 238 204  91 181 142  44  37  26   9
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  38  38  38  46  46  46
- 26  26  26 106 106 106  54  54  54  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  22  22  22
- 30  30  30  38  38  38  50  50  50  70  70  70
-106 106 106 190 142  34 226 170  11 242 186  14
-246 190  14 246 190  14 246 190  14 154 114  10
-  6   6   6  74  74  74 226 226 226 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 231 231 231 250 250 250
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 228 184  62
-241 196  14 241 208  19 232 195  16  38  30  10
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  30  30  30  26  26  26
-203 166  17 154 142  90  66  66  66  26  26  26
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  38  38  38  58  58  58
- 78  78  78  86  86  86 101 101 101 123 123 123
-175 146  61 210 150  10 234 174  13 246 186  14
-246 190  14 246 190  14 246 190  14 238 190  10
-102  78  10   2   2   6  46  46  46 198 198 198
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 234 234 234 242 242 242
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 224 178  62
-242 186  14 241 196  14 210 166  10  22  18   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   6   6   6 121  92   8
-238 202  15 232 195  16  82  82  82  34  34  34
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
- 14  14  14  38  38  38  70  70  70 154 122  46
-190 142  34 200 144  11 197 138  11 197 138  11
-213 154  11 226 170  11 242 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-225 175  15  46  32   6   2   2   6  22  22  22
-158 158 158 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 242 242 242 224 178  62
-239 182  13 236 186  11 213 154  11  46  32   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  61  42   6 225 175  15
-238 190  10 236 186  11 112 100  78  42  42  42
- 14  14  14   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 22  22  22  54  54  54 154 122  46 213 154  11
-226 170  11 230 174  11 226 170  11 226 170  11
-236 178  12 242 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-241 196  14 184 144  12  10  10  10   2   2   6
-  6   6   6 116 116 116 242 242 242 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 231 231 231 198 198 198 214 170  54
-236 178  12 236 178  12 210 150  10 137  92   6
- 18  14   6   2   2   6   2   2   6   2   2   6
-  6   6   6  70  47   6 200 144  11 236 178  12
-239 182  13 239 182  13 124 112  88  58  58  58
- 22  22  22   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  70  70  70 180 133  36 226 170  11
-239 182  13 242 186  14 242 186  14 246 186  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 232 195  16  98  70   6   2   2   6
-  2   2   6   2   2   6  66  66  66 221 221 221
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 206 206 206 198 198 198 214 166  58
-230 174  11 230 174  11 216 158  10 192 133   9
-163 110   8 116  81   8 102  78  10 116  81   8
-167 114   7 197 138  11 226 170  11 239 182  13
-242 186  14 242 186  14 162 146  94  78  78  78
- 34  34  34  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 30  30  30  78  78  78 190 142  34 226 170  11
-239 182  13 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 241 196  14 203 166  17  22  18   6
-  2   2   6   2   2   6   2   2   6  38  38  38
-218 218 218 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 206 206 206 198 198 198 202 162  69
-226 170  11 236 178  12 224 166  10 210 150  10
-200 144  11 197 138  11 192 133   9 197 138  11
-210 150  10 226 170  11 242 186  14 246 190  14
-246 190  14 246 186  14 225 175  15 124 112  88
- 62  62  62  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78 174 135  50 224 166  10
-239 182  13 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 241 196  14 139 102  15
-  2   2   6   2   2   6   2   2   6   2   2   6
- 78  78  78 250 250 250 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-250 250 250 214 214 214 198 198 198 190 150  46
-219 162  10 236 178  12 234 174  13 224 166  10
-216 158  10 213 154  11 213 154  11 216 158  10
-226 170  11 239 182  13 246 190  14 246 190  14
-246 190  14 246 190  14 242 186  14 206 162  42
-101 101 101  58  58  58  30  30  30  14  14  14
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  74  74  74 174 135  50 216 158  10
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 241 196  14 226 184  13
- 61  42   6   2   2   6   2   2   6   2   2   6
- 22  22  22 238 238 238 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 226 226 226 187 187 187 180 133  36
-216 158  10 236 178  12 239 182  13 236 178  12
-230 174  11 226 170  11 226 170  11 230 174  11
-236 178  12 242 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 186  14 239 182  13
-206 162  42 106 106 106  66  66  66  34  34  34
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 213 154  11
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 241 196  14
-190 146  13  18  14   6   2   2   6   2   2   6
- 46  46  46 246 246 246 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 221 221 221  86  86  86 156 107  11
-216 158  10 236 178  12 242 186  14 246 186  14
-242 186  14 239 182  13 239 182  13 242 186  14
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 225 175  15 142 122  72  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 26  26  26  70  70  70 163 133  67 210 150  10
-236 178  12 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-232 195  16 121  92   8  34  34  34 106 106 106
-221 221 221 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-242 242 242  82  82  82  18  14   6 163 110   8
-216 158  10 236 178  12 242 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 242 186  14 163 133  67
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  10  10  10
- 30  30  30  78  78  78 163 133  67 210 150  10
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-241 196  14 215 174  15 190 178 144 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 218 218 218
- 58  58  58   2   2   6  22  18   6 167 114   7
-216 158  10 236 178  12 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 186  14 242 186  14 190 150  46
- 54  54  54  22  22  22   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 38  38  38  86  86  86 180 133  36 213 154  11
-236 178  12 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 232 195  16 190 146  13 214 214 214
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 250 250 250 170 170 170  26  26  26
-  2   2   6   2   2   6  37  26   9 163 110   8
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 224 166  10 142 122  72
- 46  46  46  18  18  18   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 109 106  95 192 133   9 224 166  10
-242 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-242 186  14 226 184  13 210 162  10 142 110  46
-226 226 226 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-253 253 253 253 253 253 253 253 253 253 253 253
-198 198 198  66  66  66   2   2   6   2   2   6
-  2   2   6   2   2   6  50  34   6 156 107  11
-219 162  10 239 182  13 246 186  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 242 186  14
-234 174  13 213 154  11 154 122  46  66  66  66
- 30  30  30  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 58  58  58 154 121  60 206 145  10 234 174  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 236 178  12 210 162  10 163 110   8
- 61  42   6 138 138 138 218 218 218 250 250 250
-253 253 253 253 253 253 253 253 253 250 250 250
-242 242 242 210 210 210 144 144 144  66  66  66
-  6   6   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6  61  42   6 163 110   8
-216 158  10 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 239 182  13 230 174  11 216 158  10
-190 142  34 124 112  88  70  70  70  38  38  38
- 18  18  18   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  22  22  22
- 62  62  62 168 124  44 206 145  10 224 166  10
-236 178  12 239 182  13 242 186  14 242 186  14
-246 186  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 236 178  12 216 158  10 175 118   6
- 80  54   7   2   2   6   6   6   6  30  30  30
- 54  54  54  62  62  62  50  50  50  38  38  38
- 14  14  14   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 167 114   7
-213 154  11 236 178  12 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 190  14 242 186  14 239 182  13 239 182  13
-230 174  11 210 150  10 174 135  50 124 112  88
- 82  82  82  54  54  54  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  18  18  18
- 50  50  50 158 118  36 192 133   9 200 144  11
-216 158  10 219 162  10 224 166  10 226 170  11
-230 174  11 236 178  12 239 182  13 239 182  13
-242 186  14 246 186  14 246 190  14 246 190  14
-246 190  14 246 190  14 246 190  14 246 190  14
-246 186  14 230 174  11 210 150  10 163 110   8
-104  69   6  10  10  10   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  91  60   6 167 114   7
-206 145  10 230 174  11 242 186  14 246 190  14
-246 190  14 246 190  14 246 186  14 242 186  14
-239 182  13 230 174  11 224 166  10 213 154  11
-180 133  36 124 112  88  86  86  86  58  58  58
- 38  38  38  22  22  22  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0  14  14  14
- 34  34  34  70  70  70 138 110  50 158 118  36
-167 114   7 180 123   7 192 133   9 197 138  11
-200 144  11 206 145  10 213 154  11 219 162  10
-224 166  10 230 174  11 239 182  13 242 186  14
-246 186  14 246 186  14 246 186  14 246 186  14
-239 182  13 216 158  10 185 133  11 152  99   6
-104  69   6  18  14   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   2   2   6   2   2   6   2   2   6
-  2   2   6   6   6   6  80  54   7 152  99   6
-192 133   9 219 162  10 236 178  12 239 182  13
-246 186  14 242 186  14 239 182  13 236 178  12
-224 166  10 206 145  10 192 133   9 154 121  60
- 94  94  94  62  62  62  42  42  42  22  22  22
- 14  14  14   6   6   6   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 18  18  18  34  34  34  58  58  58  78  78  78
-101  98  89 124 112  88 142 110  46 156 107  11
-163 110   8 167 114   7 175 118   6 180 123   7
-185 133  11 197 138  11 210 150  10 219 162  10
-226 170  11 236 178  12 236 178  12 234 174  13
-219 162  10 197 138  11 163 110   8 130  83   6
- 91  60   6  10  10  10   2   2   6   2   2   6
- 18  18  18  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  38  38  38  38  38  38
- 38  38  38  38  38  38  26  26  26   2   2   6
-  2   2   6   6   6   6  70  47   6 137  92   6
-175 118   6 200 144  11 219 162  10 230 174  11
-234 174  13 230 174  11 219 162  10 210 150  10
-192 133   9 163 110   8 124 112  88  82  82  82
- 50  50  50  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  14  14  14  22  22  22  34  34  34
- 42  42  42  58  58  58  74  74  74  86  86  86
-101  98  89 122 102  70 130  98  46 121  87  25
-137  92   6 152  99   6 163 110   8 180 123   7
-185 133  11 197 138  11 206 145  10 200 144  11
-180 123   7 156 107  11 130  83   6 104  69   6
- 50  34   6  54  54  54 110 110 110 101  98  89
- 86  86  86  82  82  82  78  78  78  78  78  78
- 78  78  78  78  78  78  78  78  78  78  78  78
- 78  78  78  82  82  82  86  86  86  94  94  94
-106 106 106 101 101 101  86  66  34 124  80   6
-156 107  11 180 123   7 192 133   9 200 144  11
-206 145  10 200 144  11 192 133   9 175 118   6
-139 102  15 109 106  95  70  70  70  42  42  42
- 22  22  22  10  10  10   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   6   6   6  10  10  10
- 14  14  14  22  22  22  30  30  30  38  38  38
- 50  50  50  62  62  62  74  74  74  90  90  90
-101  98  89 112 100  78 121  87  25 124  80   6
-137  92   6 152  99   6 152  99   6 152  99   6
-138  86   6 124  80   6  98  70   6  86  66  30
-101  98  89  82  82  82  58  58  58  46  46  46
- 38  38  38  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  34  34  34  34  34  34
- 34  34  34  34  34  34  38  38  38  42  42  42
- 54  54  54  82  82  82  94  86  76  91  60   6
-134  86   6 156 107  11 167 114   7 175 118   6
-175 118   6 167 114   7 152  99   6 121  87  25
-101  98  89  62  62  62  34  34  34  18  18  18
-  6   6   6   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6   6   6   6  10  10  10
- 18  18  18  22  22  22  30  30  30  42  42  42
- 50  50  50  66  66  66  86  86  86 101  98  89
-106  86  58  98  70   6 104  69   6 104  69   6
-104  69   6  91  60   6  82  62  34  90  90  90
- 62  62  62  38  38  38  22  22  22  14  14  14
- 10  10  10  10  10  10  10  10  10  10  10  10
- 10  10  10  10  10  10   6   6   6  10  10  10
- 10  10  10  10  10  10  10  10  10  14  14  14
- 22  22  22  42  42  42  70  70  70  89  81  66
- 80  54   7 104  69   6 124  80   6 137  92   6
-134  86   6 116  81   8 100  82  52  86  86  86
- 58  58  58  30  30  30  14  14  14   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  10  10  10  14  14  14
- 18  18  18  26  26  26  38  38  38  54  54  54
- 70  70  70  86  86  86  94  86  76  89  81  66
- 89  81  66  86  86  86  74  74  74  50  50  50
- 30  30  30  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6  18  18  18  34  34  34  58  58  58
- 82  82  82  89  81  66  89  81  66  89  81  66
- 94  86  66  94  86  76  74  74  74  50  50  50
- 26  26  26  14  14  14   6   6   6   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  6   6   6   6   6   6  14  14  14  18  18  18
- 30  30  30  38  38  38  46  46  46  54  54  54
- 50  50  50  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   6   6   6  14  14  14  26  26  26
- 38  38  38  50  50  50  58  58  58  58  58  58
- 54  54  54  42  42  42  30  30  30  18  18  18
- 10  10  10   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
-  6   6   6  10  10  10  14  14  14  18  18  18
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   6   6   6
- 14  14  14  18  18  18  22  22  22  22  22  22
- 18  18  18  14  14  14  10  10  10   6   6   6
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
-  0   0   0   0   0   0   0   0   0   0   0   0
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  3 3 3  0 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  3 3 3  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  1 1 1  0 0 0
+0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  2 1 0  2 1 0  3 2 2
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  2 2 2  0 0 0  3 4 3  26 28 28
+37 38 37  37 38 37  14 17 19  2 2 2  0 0 0  2 2 2
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  3 3 3  0 0 0  1 1 1  6 6 6
+2 2 2  0 0 0  3 3 3  4 4 4  4 4 4  4 4 4
+4 4 5  3 3 3  1 0 0  0 0 0  1 0 0  0 0 0
+1 1 1  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+2 2 2  0 0 0  0 0 0  14 17 19  60 74 84  137 136 137
+153 152 153  137 136 137  125 124 125  60 73 81  6 6 6  3 1 0
+0 0 0  3 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  0 0 0  4 4 4  41 54 63  125 124 125
+60 73 81  6 6 6  4 0 0  3 3 3  4 4 4  4 4 4
+4 4 4  0 0 0  6 9 11  41 54 63  41 65 82  22 30 35
+2 2 2  2 1 0  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  2 2 2  0 0 0
+4 0 0  6 6 6  41 54 63  137 136 137  174 174 174  167 166 167
+165 164 165  165 164 165  163 162 163  163 162 163  125 124 125  41 54 63
+1 1 1  0 0 0  0 0 0  3 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
+3 3 3  2 0 0  4 0 0  60 73 81  156 155 156  167 166 167
+163 162 163  85 115 134  5 7 8  0 0 0  4 4 4  5 5 5
+0 0 0  2 5 5  55 98 126  90 154 193  90 154 193  72 125 159
+37 51 59  2 0 0  1 1 1  4 5 5  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 4 4  1 1 1  0 0 0  3 3 3
+37 38 37  125 124 125  163 162 163  174 174 174  158 157 158  158 157 158
+156 155 156  156 155 156  158 157 158  165 164 165  174 174 174  166 165 166
+125 124 125  16 19 21  1 0 0  0 0 0  0 0 0  4 4 4
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  1 1 1
+0 0 0  0 0 0  37 38 37  153 152 153  174 174 174  158 157 158
+174 174 174  163 162 163  37 38 37  4 3 3  4 0 0  1 1 1
+0 0 0  22 40 52  101 161 196  101 161 196  90 154 193  101 161 196
+64 123 161  14 17 19  0 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+5 5 5  2 2 2  0 0 0  4 0 0  24 26 27  85 115 134
+156 155 156  174 174 174  167 166 167  156 155 156  154 153 154  157 156 157
+156 155 156  156 155 156  155 154 155  153 152 153  158 157 158  167 166 167
+174 174 174  156 155 156  60 74 84  16 19 21  0 0 0  0 0 0
+1 1 1  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  6 6 6  3 3 3  0 0 0  4 0 0
+13 16 17  60 73 81  137 136 137  165 164 165  156 155 156  153 152 153
+174 174 174  177 184 187  60 73 81  3 1 0  0 0 0  1 1 2
+22 30 35  64 123 161  136 185 209  90 154 193  90 154 193  90 154 193
+90 154 193  21 29 34  0 0 0  3 2 2  4 4 5  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  3 3 3
+0 0 0  0 0 0  10 13 16  60 74 84  157 156 157  174 174 174
+174 174 174  158 157 158  153 152 153  154 153 154  156 155 156  155 154 155
+156 155 156  155 154 155  154 153 154  157 156 157  154 153 154  153 152 153
+163 162 163  174 174 174  177 184 187  137 136 137  60 73 81  13 16 17
+4 0 0  0 0 0  3 3 3  5 5 5  4 4 4  4 4 4
+5 5 5  4 4 4  1 1 1  0 0 0  3 3 3  41 54 63
+131 129 131  174 174 174  174 174 174  174 174 174  167 166 167  174 174 174
+190 197 201  137 136 137  24 26 27  4 0 0  16 21 25  50 82 103
+90 154 193  136 185 209  90 154 193  101 161 196  101 161 196  101 161 196
+31 91 132  3 6 7  0 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  2 2 2  0 0 0  4 0 0
+4 0 0  43 57 68  137 136 137  177 184 187  174 174 174  163 162 163
+155 154 155  155 154 155  156 155 156  155 154 155  158 157 158  165 164 165
+167 166 167  166 165 166  163 162 163  157 156 157  155 154 155  155 154 155
+153 152 153  156 155 156  167 166 167  174 174 174  174 174 174  131 129 131
+41 54 63  5 5 5  0 0 0  0 0 0  3 3 3  4 4 4
+1 1 1  0 0 0  1 0 0  26 28 28  125 124 125  174 174 174
+177 184 187  174 174 174  174 174 174  156 155 156  131 129 131  137 136 137
+125 124 125  24 26 27  4 0 0  41 65 82  90 154 193  136 185 209
+136 185 209  101 161 196  53 118 160  37 112 160  90 154 193  34 86 122
+7 12 15  0 0 0  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  3 3 3  0 0 0  0 0 0  5 5 5  37 38 37
+125 124 125  167 166 167  174 174 174  167 166 167  158 157 158  155 154 155
+156 155 156  156 155 156  156 155 156  163 162 163  167 166 167  155 154 155
+137 136 137  153 152 153  156 155 156  165 164 165  163 162 163  156 155 156
+156 155 156  156 155 156  155 154 155  158 157 158  166 165 166  174 174 174
+167 166 167  125 124 125  37 38 37  1 0 0  0 0 0  0 0 0
+0 0 0  24 26 27  60 74 84  158 157 158  174 174 174  174 174 174
+166 165 166  158 157 158  125 124 125  41 54 63  13 16 17  6 6 6
+6 6 6  37 38 37  80 127 157  136 185 209  101 161 196  101 161 196
+90 154 193  28 67 93  6 10 14  13 20 25  13 20 25  6 10 14
+1 1 2  4 3 3  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+1 1 1  1 0 0  4 3 3  37 38 37  60 74 84  153 152 153
+167 166 167  167 166 167  158 157 158  154 153 154  155 154 155  156 155 156
+157 156 157  158 157 158  167 166 167  167 166 167  131 129 131  43 57 68
+26 28 28  37 38 37  60 73 81  131 129 131  165 164 165  166 165 166
+158 157 158  155 154 155  156 155 156  156 155 156  156 155 156  158 157 158
+165 164 165  174 174 174  163 162 163  60 74 84  16 19 21  13 16 17
+60 73 81  131 129 131  174 174 174  174 174 174  167 166 167  165 164 165
+137 136 137  60 73 81  24 26 27  4 0 0  4 0 0  16 19 21
+52 104 138  101 161 196  136 185 209  136 185 209  90 154 193  27 99 146
+13 20 25  4 5 7  2 5 5  4 5 7  1 1 2  0 0 0
+4 4 4  4 4 4  3 3 3  2 2 2  2 2 2  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  3 3 3  0 0 0
+0 0 0  13 16 17  60 73 81  137 136 137  174 174 174  166 165 166
+158 157 158  156 155 156  157 156 157  156 155 156  155 154 155  158 157 158
+167 166 167  174 174 174  153 152 153  60 73 81  16 19 21  4 0 0
+4 0 0  4 0 0  6 6 6  26 28 28  60 74 84  158 157 158
+174 174 174  166 165 166  157 156 157  155 154 155  156 155 156  156 155 156
+155 154 155  158 157 158  167 166 167  167 166 167  131 129 131  125 124 125
+137 136 137  167 166 167  167 166 167  174 174 174  158 157 158  125 124 125
+16 19 21  4 0 0  4 0 0  10 13 16  49 76 92  107 159 188
+136 185 209  136 185 209  90 154 193  26 108 161  22 40 52  6 10 14
+2 3 3  1 1 2  1 1 2  4 4 5  4 4 5  4 4 5
+4 4 5  2 2 1  0 0 0  0 0 0  0 0 0  2 2 2
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  3 3 3  0 0 0  1 0 0  4 0 0
+37 51 59  131 129 131  167 166 167  167 166 167  163 162 163  157 156 157
+157 156 157  155 154 155  153 152 153  157 156 157  167 166 167  174 174 174
+153 152 153  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
+4 3 3  4 3 3  4 0 0  6 6 6  4 0 0  37 38 37
+125 124 125  174 174 174  174 174 174  165 164 165  156 155 156  154 153 154
+156 155 156  156 155 156  155 154 155  163 162 163  158 157 158  163 162 163
+174 174 174  174 174 174  174 174 174  125 124 125  37 38 37  0 0 0
+4 0 0  6 9 11  41 54 63  90 154 193  136 185 209  146 190 211
+136 185 209  37 112 160  22 40 52  6 10 14  3 6 7  1 1 2
+1 1 2  3 3 3  1 1 2  3 3 3  4 4 4  4 4 4
+2 2 2  2 0 0  16 19 21  37 38 37  24 26 27  0 0 0
+0 0 0  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  5 5 5
+4 4 4  0 0 0  0 0 0  0 0 0  26 28 28  120 125 127
+158 157 158  174 174 174  165 164 165  157 156 157  155 154 155  156 155 156
+153 152 153  153 152 153  167 166 167  174 174 174  174 174 174  125 124 125
+37 38 37  4 0 0  0 0 0  4 0 0  4 3 3  4 4 4
+4 4 4  4 4 4  5 5 5  4 0 0  4 0 0  4 0 0
+4 3 3  43 57 68  137 136 137  174 174 174  174 174 174  165 164 165
+154 153 154  153 152 153  153 152 153  153 152 153  163 162 163  174 174 174
+174 174 174  153 152 153  60 73 81  6 6 6  4 0 0  4 3 3
+32 43 50  80 127 157  136 185 209  146 190 211  146 190 211  90 154 193
+28 67 93  28 67 93  40 71 93  3 6 7  1 1 2  2 5 5
+50 82 103  79 117 143  26 37 45  0 0 0  3 3 3  1 1 1
+0 0 0  41 54 63  137 136 137  174 174 174  153 152 153  60 73 81
+2 0 0  0 0 0
+4 4 4  4 4 4  4 4 4  4 4 4  6 6 6  2 2 2
+0 0 0  2 0 0  24 26 27  60 74 84  153 152 153  174 174 174
+174 174 174  157 156 157  154 153 154  156 155 156  154 153 154  153 152 153
+165 164 165  174 174 174  177 184 187  137 136 137  43 57 68  6 6 6
+4 0 0  2 0 0  3 3 3  5 5 5  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  6 6 6  4 3 3
+4 0 0  4 0 0  24 26 27  60 73 81  153 152 153  174 174 174
+174 174 174  158 157 158  158 157 158  174 174 174  174 174 174  158 157 158
+60 74 84  24 26 27  4 0 0  4 0 0  17 23 27  59 113 148
+136 185 209  191 222 234  146 190 211  136 185 209  31 91 132  7 11 13
+22 40 52  101 161 196  90 154 193  6 9 11  3 4 4  43 95 132
+136 185 209  172 205 220  55 98 126  0 0 0  0 0 0  2 0 0
+26 28 28  153 152 153  177 184 187  167 166 167  177 184 187  165 164 165
+37 38 37  0 0 0
+4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
+13 16 17  60 73 81  137 136 137  174 174 174  174 174 174  165 164 165
+153 152 153  153 152 153  155 154 155  154 153 154  158 157 158  174 174 174
+177 184 187  163 162 163  60 73 81  16 19 21  4 0 0  4 0 0
+4 3 3  4 4 4  5 5 5  5 5 5  4 4 4  5 5 5
+5 5 5  5 5 5  5 5 5  4 4 4  4 4 4  5 5 5
+6 6 6  4 0 0  4 0 0  4 0 0  24 26 27  60 74 84
+166 165 166  174 174 174  177 184 187  165 164 165  125 124 125  24 26 27
+4 0 0  4 0 0  5 5 5  50 82 103  136 185 209  172 205 220
+146 190 211  136 185 209  26 108 161  22 40 52  7 12 15  44 81 103
+71 116 144  28 67 93  37 51 59  41 65 82  100 139 164  101 161 196
+90 154 193  90 154 193  28 67 93  0 0 0  0 0 0  26 28 28
+125 124 125  167 166 167  163 162 163  153 152 153  163 162 163  174 174 174
+85 115 134  4 0 0
+4 4 4  5 5 5  4 4 4  1 0 0  4 0 0  34 47 55
+125 124 125  174 174 174  174 174 174  167 166 167  157 156 157  153 152 153
+155 154 155  155 154 155  158 157 158  166 165 166  167 166 167  154 153 154
+125 124 125  26 28 28  4 0 0  4 0 0  4 0 0  5 5 5
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  1 1 1
+0 0 0  0 0 0  1 1 1  4 4 4  4 4 4  4 4 4
+5 5 5  5 5 5  4 3 3  4 0 0  4 0 0  6 6 6
+37 38 37  131 129 131  137 136 137  37 38 37  0 0 0  4 0 0
+4 5 5  43 61 72  90 154 193  172 205 220  146 190 211  136 185 209
+90 154 193  28 67 93  13 20 25  43 61 72  71 116 144  44 81 103
+2 5 5  7 11 13  59 113 148  101 161 196  90 154 193  28 67 93
+13 20 25  6 10 14  0 0 0  13 16 17  60 73 81  137 136 137
+166 165 166  158 157 158  156 155 156  154 153 154  167 166 167  174 174 174
+60 73 81  4 0 0
+4 4 4  4 4 4  0 0 0  3 3 3  60 74 84  174 174 174
+174 174 174  167 166 167  163 162 163  155 154 155  157 156 157  155 154 155
+156 155 156  163 162 163  167 166 167  158 157 158  125 124 125  37 38 37
+4 3 3  4 0 0  4 0 0  6 6 6  6 6 6  5 5 5
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  2 3 3
+10 13 16  7 11 13  1 0 0  0 0 0  2 2 1  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  4 0 0
+4 0 0  7 11 13  13 16 17  4 0 0  3 3 3  34 47 55
+80 127 157  146 190 211  172 205 220  136 185 209  136 185 209  136 185 209
+28 67 93  22 40 52  55 98 126  55 98 126  21 29 34  7 11 13
+50 82 103  101 161 196  101 161 196  35 83 115  13 20 25  2 2 1
+1 1 2  1 1 2  37 51 59  131 129 131  174 174 174  174 174 174
+167 166 167  163 162 163  163 162 163  167 166 167  174 174 174  125 124 125
+16 19 21  4 0 0
+4 4 4  4 0 0  4 0 0  60 74 84  174 174 174  174 174 174
+158 157 158  155 154 155  155 154 155  156 155 156  155 154 155  158 157 158
+167 166 167  165 164 165  131 129 131  60 73 81  13 16 17  4 0 0
+4 0 0  4 3 3  6 6 6  4 3 3  5 5 5  4 4 4
+4 4 4  3 2 2  0 0 0  0 0 0  7 11 13  45 69 86
+80 127 157  71 116 144  43 61 72  7 11 13  0 0 0  1 1 1
+4 3 3  4 4 4  4 4 4  4 4 4  6 6 6  5 5 5
+3 2 2  4 0 0  1 0 0  21 29 34  59 113 148  136 185 209
+146 190 211  136 185 209  136 185 209  136 185 209  136 185 209  136 185 209
+68 124 159  44 81 103  22 40 52  13 16 17  43 61 72  90 154 193
+136 185 209  59 113 148  21 29 34  3 4 3  1 1 1  0 0 0
+24 26 27  125 124 125  163 162 163  174 174 174  166 165 166  165 164 165
+163 162 163  125 124 125  125 124 125  125 124 125  125 124 125  26 28 28
+4 0 0  4 3 3
+3 3 3  0 0 0  24 26 27  153 152 153  177 184 187  158 157 158
+156 155 156  156 155 156  155 154 155  155 154 155  165 164 165  174 174 174
+155 154 155  60 74 84  26 28 28  4 0 0  4 0 0  3 1 0
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 3 3
+2 0 0  0 0 0  0 0 0  32 43 50  72 125 159  101 161 196
+136 185 209  101 161 196  101 161 196  79 117 143  32 43 50  0 0 0
+0 0 0  2 2 2  4 4 4  4 4 4  3 3 3  1 0 0
+0 0 0  4 5 5  49 76 92  101 161 196  146 190 211  146 190 211
+136 185 209  136 185 209  136 185 209  136 185 209  136 185 209  90 154 193
+28 67 93  13 16 17  37 51 59  80 127 157  136 185 209  90 154 193
+22 40 52  6 9 11  3 4 3  2 2 1  16 19 21  60 73 81
+137 136 137  163 162 163  158 157 158  166 165 166  167 166 167  153 152 153
+60 74 84  37 38 37  6 6 6  13 16 17  4 0 0  1 0 0
+3 2 2  4 4 4
+3 2 2  4 0 0  37 38 37  137 136 137  167 166 167  158 157 158
+157 156 157  154 153 154  157 156 157  167 166 167  174 174 174  125 124 125
+37 38 37  4 0 0  4 0 0  4 0 0  4 3 3  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  1 1 1  0 0 0
+0 0 0  16 21 25  55 98 126  90 154 193  136 185 209  101 161 196
+101 161 196  101 161 196  136 185 209  136 185 209  101 161 196  55 98 126
+14 17 19  0 0 0  0 0 0  0 0 0  0 0 0  0 0 0
+22 40 52  90 154 193  146 190 211  146 190 211  136 185 209  136 185 209
+136 185 209  136 185 209  136 185 209  101 161 196  35 83 115  7 11 13
+17 23 27  59 113 148  136 185 209  101 161 196  34 86 122  7 12 15
+2 5 5  3 4 3  6 6 6  60 73 81  131 129 131  163 162 163
+166 165 166  174 174 174  174 174 174  163 162 163  125 124 125  41 54 63
+13 16 17  4 0 0  4 0 0  4 0 0  1 0 0  2 2 2
+4 4 4  4 4 4
+1 1 1  2 1 0  43 57 68  137 136 137  153 152 153  153 152 153
+163 162 163  156 155 156  165 164 165  167 166 167  60 74 84  6 6 6
+4 0 0  4 0 0  5 5 5  4 4 4  4 4 4  4 4 4
+4 5 5  6 6 6  4 3 3  0 0 0  0 0 0  11 15 18
+40 71 93  100 139 164  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  136 185 209
+101 161 196  45 69 86  6 6 6  0 0 0  17 23 27  55 98 126
+136 185 209  146 190 211  136 185 209  136 185 209  136 185 209  136 185 209
+136 185 209  136 185 209  90 154 193  22 40 52  7 11 13  50 82 103
+136 185 209  136 185 209  53 118 160  22 40 52  7 11 13  2 5 5
+3 4 3  37 38 37  125 124 125  157 156 157  166 165 166  167 166 167
+174 174 174  174 174 174  137 136 137  60 73 81  4 0 0  4 0 0
+4 0 0  4 0 0  5 5 5  3 3 3  3 3 3  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  137 136 137  125 124 125  131 129 131
+155 154 155  167 166 167  174 174 174  60 74 84  6 6 6  4 0 0
+4 3 3  6 6 6  4 4 4  4 4 4  4 4 4  5 5 5
+4 4 4  1 1 1  0 0 0  3 6 7  41 65 82  72 125 159
+101 161 196  101 161 196  101 161 196  90 154 193  90 154 193  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
+136 185 209  136 185 209  80 127 157  55 98 126  101 161 196  146 190 211
+136 185 209  136 185 209  136 185 209  101 161 196  136 185 209  101 161 196
+136 185 209  101 161 196  35 83 115  22 30 35  101 161 196  172 205 220
+90 154 193  28 67 93  7 11 13  2 5 5  3 4 3  13 16 17
+85 115 134  167 166 167  174 174 174  174 174 174  174 174 174  174 174 174
+167 166 167  60 74 84  13 16 17  4 0 0  4 0 0  4 3 3
+6 6 6  5 5 5  4 4 4  5 5 5  4 4 4  5 5 5
+5 5 5  5 5 5
+1 1 1  4 0 0  41 54 63  137 136 137  137 136 137  125 124 125
+131 129 131  167 166 167  157 156 157  37 38 37  6 6 6  4 0 0
+6 6 6  5 5 5  4 4 4  4 4 4  4 5 5  2 2 1
+0 0 0  0 0 0  26 37 45  58 111 146  101 161 196  101 161 196
+101 161 196  90 154 193  90 154 193  90 154 193  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  136 185 209  136 185 209  136 185 209  146 190 211  136 185 209
+136 185 209  101 161 196  136 185 209  136 185 209  101 161 196  136 185 209
+101 161 196  136 185 209  136 185 209  136 185 209  136 185 209  16 89 141
+7 11 13  2 5 5  2 5 5  13 16 17  60 73 81  154 154 154
+174 174 174  174 174 174  174 174 174  174 174 174  163 162 163  125 124 125
+24 26 27  4 0 0  4 0 0  4 0 0  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
+5 5 5  4 4 4
+4 0 0  6 6 6  37 38 37  137 136 137  137 136 137  131 129 131
+131 129 131  153 152 153  131 129 131  26 28 28  4 0 0  4 3 3
+6 6 6  4 4 4  4 4 4  4 4 4  0 0 0  0 0 0
+13 20 25  51 88 114  90 154 193  101 161 196  101 161 196  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  136 185 209  101 161 196
+101 161 196  136 185 209  101 161 196  136 185 209  136 185 209  101 161 196
+136 185 209  101 161 196  136 185 209  101 161 196  101 161 196  101 161 196
+136 185 209  136 185 209  136 185 209  37 112 160  21 29 34  5 7 8
+2 5 5  13 16 17  43 57 68  131 129 131  174 174 174  174 174 174
+174 174 174  167 166 167  157 156 157  125 124 125  37 38 37  4 0 0
+4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  41 54 63  153 152 153  137 136 137  137 136 137
+137 136 137  153 152 153  125 124 125  24 26 27  4 0 0  3 2 2
+4 4 4  4 4 4  4 3 3  4 0 0  3 6 7  43 61 72
+64 123 161  101 161 196  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  101 161 196  90 154 193
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+136 185 209  101 161 196  101 161 196  136 185 209  136 185 209  101 161 196
+101 161 196  90 154 193  28 67 93  13 16 17  7 11 13  3 6 7
+37 51 59  125 124 125  163 162 163  174 174 174  167 166 167  166 165 166
+167 166 167  131 129 131  60 73 81  4 0 0  4 0 0  4 0 0
+3 3 3  5 5 5  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  137 136 137  153 152 153  137 136 137
+153 152 153  157 156 157  125 124 125  24 26 27  0 0 0  2 2 2
+4 4 4  4 4 4  2 0 0  0 0 0  28 67 93  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  64 123 161  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+90 154 193  101 161 196  101 161 196  101 161 196  90 154 193  136 185 209
+101 161 196  101 161 196  136 185 209  101 161 196  136 185 209  101 161 196
+101 161 196  101 161 196  136 185 209  101 161 196  101 161 196  90 154 193
+35 83 115  13 16 17  3 6 7  2 5 5  13 16 17  60 74 84
+154 154 154  166 165 166  165 164 165  158 157 158  163 162 163  157 156 157
+60 74 84  13 16 17  4 0 0  4 0 0  3 2 2  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  41 54 63  157 156 157  155 154 155  137 136 137
+153 152 153  158 157 158  137 136 137  26 28 28  2 0 0  2 2 2
+4 4 4  4 4 4  1 0 0  6 10 14  34 86 122  90 154 193
+64 123 161  90 154 193  64 123 161  90 154 193  90 154 193  90 154 193
+64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  101 161 196
+136 185 209  101 161 196  136 185 209  90 154 193  26 108 161  22 40 52
+13 16 17  5 7 8  2 5 5  2 5 5  37 38 37  165 164 165
+174 174 174  163 162 163  154 154 154  165 164 165  167 166 167  60 73 81
+6 6 6  4 0 0  4 0 0  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  41 54 63  156 155 156  158 157 158  153 152 153
+156 155 156  165 164 165  137 136 137  26 28 28  0 0 0  2 2 2
+4 4 5  4 4 4  2 0 0  7 12 15  31 96 139  64 123 161
+90 154 193  64 123 161  90 154 193  90 154 193  64 123 161  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  101 161 196  101 161 196  136 185 209
+101 161 196  136 185 209  26 108 161  22 40 52  7 11 13  5 7 8
+2 5 5  2 5 5  2 5 5  2 2 1  37 38 37  158 157 158
+174 174 174  154 154 154  156 155 156  167 166 167  165 164 165  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  157 156 157  163 162 163  153 152 153
+158 157 158  167 166 167  137 136 137  26 28 28  2 0 0  2 2 2
+4 5 5  4 4 4  4 0 0  7 12 15  24 86 132  26 108 161
+37 112 160  64 123 161  90 154 193  64 123 161  90 154 193  90 154 193
+90 154 193  90 154 193  90 154 193  90 154 193  90 154 193  90 154 193
+90 154 193  101 161 196  90 154 193  101 161 196  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  136 185 209  101 161 196  136 185 209
+90 154 193  35 83 115  13 16 17  13 16 17  7 11 13  3 6 7
+5 7 8  6 6 6  3 4 3  2 2 1  30 32 34  154 154 154
+167 166 167  154 154 154  154 154 154  174 174 174  165 164 165  37 38 37
+6 6 6  4 0 0  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  41 54 63  163 162 163  166 165 166  154 154 154
+163 162 163  174 174 174  137 136 137  26 28 28  0 0 0  2 2 2
+4 5 5  4 4 5  1 1 2  6 10 14  28 67 93  18 97 151
+18 97 151  18 97 151  26 108 161  37 112 160  37 112 160  90 154 193
+64 123 161  90 154 193  90 154 193  90 154 193  90 154 193  101 161 196
+90 154 193  101 161 196  101 161 196  90 154 193  101 161 196  101 161 196
+101 161 196  101 161 196  101 161 196  136 185 209  90 154 193  16 89 141
+13 20 25  7 11 13  5 7 8  5 7 8  2 5 5  4 5 5
+3 4 3  4 5 5  3 4 3  0 0 0  37 38 37  158 157 158
+174 174 174  158 157 158  158 157 158  167 166 167  174 174 174  41 54 63
+4 0 0  3 2 2  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  165 164 165  174 174 174  158 157 158
+167 166 167  174 174 174  153 152 153  26 28 28  2 0 0  2 2 2
+4 5 5  4 4 4  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
+26 108 161  37 112 160  53 118 160  90 154 193  90 154 193  90 154 193
+90 154 193  90 154 193  101 161 196  101 161 196  101 161 196  101 161 196
+101 161 196  136 185 209  90 154 193  26 108 161  22 40 52  13 16 17
+7 11 13  3 6 7  5 7 8  5 7 8  2 5 5  4 5 5
+4 5 5  6 6 6  3 4 3  0 0 0  30 32 34  158 157 158
+174 174 174  156 155 156  155 154 155  165 164 165  154 153 154  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  167 166 167  174 174 174  163 162 163
+174 174 174  174 174 174  153 152 153  26 28 28  0 0 0  3 3 3
+5 5 5  4 4 4  1 1 2  7 12 15  28 67 93  18 97 151
+18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+90 154 193  26 108 161  90 154 193  90 154 193  90 154 193  101 161 196
+101 161 196  26 108 161  22 40 52  13 16 17  7 11 13  2 5 5
+2 5 5  6 6 6  2 5 5  4 5 5  4 5 5  4 5 5
+3 4 3  5 5 5  3 4 3  2 0 0  30 32 34  137 136 137
+153 152 153  137 136 137  131 129 131  137 136 137  131 129 131  37 38 37
+4 0 0  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  167 166 167  174 174 174  166 165 166
+174 174 174  177 184 187  153 152 153  30 32 34  1 0 0  3 3 3
+5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  18 97 151  18 97 151  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  90 154 193  90 154 193  26 108 161
+35 83 115  13 16 17  7 11 13  5 7 8  3 6 7  5 7 8
+2 5 5  6 6 6  4 5 5  4 5 5  3 4 3  4 5 5
+3 4 3  6 6 6  3 4 3  0 0 0  26 28 28  125 124 125
+131 129 131  125 124 125  125 124 125  131 129 131  131 129 131  37 38 37
+4 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  174 174 174  177 184 187  167 166 167
+174 174 174  177 184 187  153 152 153  30 32 34  0 0 0  3 3 3
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
+18 97 151  18 97 151  18 97 151  18 97 151  18 97 151  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  90 154 193  26 108 161  26 108 161  24 86 132  13 20 25
+7 11 13  13 20 25  22 40 52  5 7 8  3 4 3  3 4 3
+4 5 5  3 4 3  4 5 5  3 4 3  4 5 5  3 4 3
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
+174 174 174  190 197 201  157 156 157  30 32 34  1 0 0  3 3 3
+5 5 5  4 3 3  4 0 0  7 12 15  10 87 144  10 87 144
+18 97 151  19 95 150  19 95 150  18 97 151  18 97 151  26 108 161
+18 97 151  26 108 161  26 108 161  26 108 161  26 108 161  90 154 193
+26 108 161  26 108 161  26 108 161  22 40 52  2 5 5  3 4 3
+28 67 93  37 112 160  34 86 122  2 5 5  3 4 3  3 4 3
+3 4 3  3 4 3  3 4 3  2 2 1  3 4 3  4 4 4
+4 5 5  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  174 174 174  177 184 187  174 174 174
+174 174 174  190 197 201  158 157 158  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  18 97 151
+10 87 144  19 95 150  19 95 150  18 97 151  18 97 151  18 97 151
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+18 97 151  22 40 52  2 5 5  2 2 1  22 40 52  26 108 161
+90 154 193  37 112 160  22 40 52  3 4 3  13 20 25  22 30 35
+3 6 7  1 1 1  2 2 2  6 9 11  5 5 5  4 3 3
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+1 1 1  4 0 0  60 73 81  177 184 187  193 200 203  174 174 174
+177 184 187  193 200 203  163 162 163  30 32 34  4 0 0  2 2 2
+5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
+10 87 144  10 87 144  19 95 150  19 95 150  19 95 150  18 97 151
+26 108 161  26 108 161  26 108 161  90 154 193  26 108 161  28 67 93
+6 10 14  2 5 5  13 20 25  24 86 132  37 112 160  90 154 193
+10 87 144  7 12 15  2 5 5  28 67 93  37 112 160  28 67 93
+2 2 1  7 12 15  35 83 115  28 67 93  3 6 7  1 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  174 174 174  190 197 201  174 174 174
+177 184 187  193 200 203  163 162 163  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  16 89 141  19 95 150  10 87 144  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  28 67 93  6 10 14  1 1 2
+7 12 15  28 67 93  26 108 161  16 89 141  24 86 132  21 29 34
+3 4 3  21 29 34  37 112 160  37 112 160  27 99 146  21 29 34
+21 29 34  26 108 161  90 154 193  35 83 115  1 1 2  2 0 0
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  125 124 125
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+3 1 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
+190 197 201  193 200 203  165 164 165  37 38 37  4 0 0  2 2 2
+5 5 5  4 3 3  4 0 0  6 10 14  24 86 132  10 87 144
+10 87 144  10 87 144  16 89 141  18 97 151  18 97 151  10 87 144
+24 86 132  24 86 132  13 20 25  4 5 7  4 5 7  22 40 52
+18 97 151  37 112 160  26 108 161  7 12 15  1 1 1  0 0 0
+28 67 93  37 112 160  26 108 161  28 67 93  22 40 52  28 67 93
+26 108 161  90 154 193  26 108 161  10 87 144  0 0 0  2 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  193 200 203  174 174 174
+190 197 201  193 200 203  165 164 165  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  10 87 144  18 97 151  28 67 93  6 10 14
+0 0 0  1 1 2  4 5 7  13 20 25  16 89 141  26 108 161
+26 108 161  26 108 161  24 86 132  6 9 11  2 3 3  22 40 52
+37 112 160  16 89 141  22 40 52  28 67 93  26 108 161  26 108 161
+90 154 193  26 108 161  26 108 161  28 67 93  1 1 1  4 0 0
+4 4 4  5 5 5  3 3 3  4 0 0  26 28 28  124 126 130
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  193 200 203  193 200 203  174 174 174
+193 200 203  193 200 203  167 166 167  37 38 37  4 0 0  2 2 2
+5 5 5  4 4 4  4 0 0  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  13 20 25  4 5 7
+1 1 2  1 1 1  22 40 52  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  24 86 132  22 40 52  22 40 52
+22 40 52  22 40 52  10 87 144  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  90 154 193  10 87 144  0 0 0  4 0 0
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  205 212 215  167 166 167  30 32 34  0 0 0  2 2 2
+5 5 5  4 4 4  1 1 2  6 10 14  28 67 93  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  22 40 52  1 1 2
+2 0 0  1 1 2  24 86 132  26 108 161  26 108 161  26 108 161
+26 108 161  19 95 150  16 89 141  10 87 144  22 40 52  22 40 52
+10 87 144  26 108 161  37 112 160  26 108 161  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  28 67 93  2 0 0  3 1 0
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
+193 200 203  193 200 203  174 174 174  37 38 37  4 0 0  2 2 2
+5 5 5  4 4 4  3 2 2  1 1 2  13 20 25  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  13 20 25
+13 20 25  22 40 52  10 87 144  18 97 151  18 97 151  26 108 161
+10 87 144  13 20 25  6 10 14  21 29 34  24 86 132  18 97 151
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  90 154 193  18 97 151  13 20 25  0 0 0  4 3 3
+4 4 4  5 5 5  3 3 3  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  220 221 221  167 166 167  30 32 34  1 0 0  2 2 2
+5 5 5  4 4 4  4 4 5  2 5 5  4 5 7  13 20 25
+28 67 93  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  18 97 151  18 97 151
+28 67 93  2 3 3  0 0 0  28 67 93  26 108 161  26 108 161
+26 108 161  26 108 161  26 108 161  26 108 161  26 108 161  26 108 161
+26 108 161  10 87 144  13 20 25  1 1 2  3 2 2  4 4 4
+4 4 4  5 5 5  3 3 3  2 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  190 197 201  174 174 174
+193 200 203  193 200 203  174 174 174  26 28 28  4 0 0  4 3 3
+5 5 5  4 4 4  4 4 4  4 4 5  1 1 2  2 5 5
+4 5 7  22 40 52  10 87 144  10 87 144  18 97 151  10 87 144
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  18 97 151
+10 87 144  28 67 93  22 40 52  10 87 144  26 108 161  18 97 151
+18 97 151  18 97 151  26 108 161  26 108 161  26 108 161  26 108 161
+22 40 52  1 1 2  0 0 0  2 3 3  4 4 4  4 4 4
+4 4 4  5 5 5  4 4 4  0 0 0  26 28 28  131 129 131
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+190 197 201  220 221 221  190 197 201  41 54 63  4 0 0  2 2 2
+6 6 6  4 4 4  4 4 4  4 4 5  4 4 5  3 3 3
+1 1 2  1 1 2  6 10 14  22 40 52  10 87 144  18 97 151
+18 97 151  10 87 144  10 87 144  10 87 144  18 97 151  10 87 144
+10 87 144  18 97 151  26 108 161  18 97 151  18 97 151  10 87 144
+26 108 161  26 108 161  26 108 161  10 87 144  28 67 93  6 10 14
+1 1 2  1 1 2  4 3 3  4 4 5  4 4 4  4 4 4
+5 5 5  5 5 5  1 1 1  4 0 0  37 51 59  137 136 137
+137 136 137  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  4 0 0  60 73 81  220 221 221  193 200 203  174 174 174
+193 200 203  193 200 203  220 221 221  137 136 137  13 16 17  4 0 0
+2 2 2  4 4 4  4 4 4  4 4 4  4 4 4  4 4 5
+4 4 5  4 3 3  1 1 2  4 5 7  13 20 25  28 67 93
+10 87 144  10 87 144  10 87 144  10 87 144  10 87 144  10 87 144
+10 87 144  18 97 151  18 97 151  10 87 144  18 97 151  26 108 161
+26 108 161  18 97 151  28 67 93  6 10 14  0 0 0  0 0 0
+2 3 3  4 5 5  4 4 5  4 4 4  4 4 4  5 5 5
+3 3 3  1 1 1  0 0 0  16 19 21  125 124 125  137 136 137
+131 129 131  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  60 73 81  174 174 174  220 221 221  174 174 174
+193 200 203  190 197 201  220 221 221  220 221 221  153 152 153  30 32 34
+0 0 0  0 0 0  2 2 2  4 4 4  4 4 4  4 4 4
+4 4 4  4 5 5  4 5 7  1 1 2  1 1 2  4 5 7
+13 20 25  28 67 93  10 87 144  18 97 151  10 87 144  10 87 144
+10 87 144  10 87 144  10 87 144  18 97 151  26 108 161  18 97 151
+28 67 93  7 12 15  0 0 0  0 0 0  2 2 1  4 4 4
+4 5 5  4 5 5  4 4 4  4 4 4  3 3 3  0 0 0
+0 0 0  0 0 0  37 38 37  125 124 125  158 157 158  131 129 131
+125 124 125  125 124 125  125 124 125  137 136 137  131 129 131  37 38 37
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  4 0 0  41 54 63  193 200 203  220 221 221  174 174 174
+193 200 203  193 200 203  193 200 203  220 221 221  244 246 246  193 200 203
+120 125 127  5 5 5  1 0 0  0 0 0  1 1 1  4 4 4
+4 4 4  4 4 4  4 5 5  4 5 5  4 4 5  1 1 2
+4 5 7  4 5 7  22 40 52  10 87 144  10 87 144  10 87 144
+10 87 144  10 87 144  18 97 151  10 87 144  10 87 144  13 20 25
+4 5 7  2 3 3  1 1 2  4 4 4  4 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 1 2
+24 26 27  60 74 84  153 152 153  163 162 163  137 136 137  125 124 125
+125 124 125  125 124 125  125 124 125  137 136 137  125 124 125  26 28 28
+0 0 0  3 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 0 0  6 6 6  26 28 28  156 155 156  220 221 221  220 221 221
+174 174 174  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
+220 221 221  167 166 167  60 73 81  7 11 13  0 0 0  0 0 0
+3 3 3  4 4 4  4 4 4  4 4 4  4 4 5  4 4 5
+4 4 5  1 1 2  1 1 2  4 5 7  22 40 52  10 87 144
+10 87 144  10 87 144  10 87 144  22 40 52  4 5 7  1 1 2
+1 1 2  4 4 5  4 4 4  4 4 4  4 4 4  4 4 4
+5 5 5  2 2 2  0 0 0  4 0 0  16 19 21  60 73 81
+137 136 137  167 166 167  158 157 158  137 136 137  131 129 131  131 129 131
+125 124 125  125 124 125  131 129 131  155 154 155  60 74 84  5 7 8
+0 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 0 0  4 0 0  60 73 81  193 200 203  220 221 221
+193 200 203  193 200 203  193 200 203  193 200 203  205 212 215  220 221 221
+220 221 221  220 221 221  220 221 221  137 136 137  43 57 68  6 6 6
+4 0 0  1 1 1  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 5  4 4 5  3 2 2  1 1 2  2 5 5  13 20 25
+22 40 52  22 40 52  13 20 25  2 3 3  1 1 2  3 3 3
+4 5 7  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+1 1 1  0 0 0  2 3 3  41 54 63  131 129 131  166 165 166
+166 165 166  155 154 155  153 152 153  137 136 137  137 136 137  125 124 125
+125 124 125  137 136 137  137 136 137  125 124 125  37 38 37  4 3 3
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  6 6 6  6 6 6  13 16 17  60 73 81  167 166 167
+220 221 221  220 221 221  220 221 221  193 200 203  193 200 203  193 200 203
+205 212 215  220 221 221  220 221 221  244 246 246  205 212 215  125 124 125
+24 26 27  0 0 0  0 0 0  2 2 2  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 5  1 1 2  4 5 7
+4 5 7  4 5 7  1 1 2  3 2 2  4 4 5  4 4 4
+4 4 4  4 4 4  5 5 5  4 4 4  0 0 0  0 0 0
+2 0 0  26 28 28  125 124 125  174 174 174  174 174 174  166 165 166
+156 155 156  153 152 153  137 136 137  137 136 137  131 129 131  137 136 137
+137 136 137  137 136 137  60 74 84  30 32 34  4 0 0  4 0 0
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  6 6 6  4 0 0  4 0 0  6 6 6  26 28 28
+125 124 125  174 174 174  220 221 221  220 221 221  220 221 221  193 200 203
+205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
+193 200 203  60 74 84  13 16 17  4 0 0  0 0 0  3 3 3
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 5  3 3 3
+1 1 2  3 3 3  4 4 5  4 4 5  4 4 4  4 4 4
+5 5 5  5 5 5  2 2 2  0 0 0  0 0 0  13 16 17
+60 74 84  174 174 174  193 200 203  174 174 174  167 166 167  163 162 163
+153 152 153  153 152 153  137 136 137  137 136 137  153 152 153  137 136 137
+125 124 125  41 54 63  24 26 27  4 0 0  4 0 0  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 3 3  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+6 6 6  37 38 37  131 129 131  220 221 221  220 221 221  220 221 221
+193 200 203  193 200 203  220 221 221  205 212 215  220 221 221  244 246 246
+244 246 246  244 246 246  174 174 174  41 54 63  0 0 0  0 0 0
+0 0 0  4 4 4  5 5 5  5 5 5  4 4 4  4 4 5
+4 4 5  4 4 5  4 4 4  4 4 4  6 6 6  6 6 6
+3 3 3  0 0 0  2 0 0  13 16 17  60 73 81  156 155 156
+220 221 221  193 200 203  174 174 174  165 164 165  163 162 163  154 153 154
+153 152 153  153 152 153  158 157 158  163 162 163  137 136 137  60 73 81
+13 16 17  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  4 3 3  6 6 6  6 6 6  6 6 6
+6 6 6  6 6 6  6 6 6  37 38 37  167 166 167  244 246 246
+244 246 246  220 221 221  205 212 215  205 212 215  220 221 221  193 200 203
+220 221 221  244 246 246  244 246 246  244 246 246  137 136 137  37 38 37
+3 2 2  0 0 0  1 1 1  5 5 5  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 4 4  1 1 1
+0 0 0  5 5 5  43 57 68  153 152 153  193 200 203  220 221 221
+177 184 187  174 174 174  167 166 167  166 165 166  158 157 158  157 156 157
+158 157 158  166 165 166  156 155 156  85 115 134  13 16 17  4 0 0
+4 0 0  4 0 0  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  6 6 6  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  6 6 6  6 6 6  13 16 17  60 73 81
+177 184 187  220 221 221  220 221 221  220 221 221  205 212 215  220 221 221
+220 221 221  205 212 215  220 221 221  244 246 246  244 246 246  205 212 215
+125 124 125  30 32 34  0 0 0  0 0 0  2 2 2  5 5 5
+4 4 4  4 4 4  4 4 4  1 1 1  0 0 0  1 0 0
+37 38 37  131 129 131  205 212 215  220 221 221  193 200 203  174 174 174
+174 174 174  174 174 174  167 166 167  165 164 165  166 165 166  167 166 167
+158 157 158  125 124 125  37 38 37  4 0 0  4 0 0  4 0 0
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  4 3 3  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+26 28 28  125 124 125  205 212 215  220 221 221  220 221 221  220 221 221
+205 212 215  220 221 221  205 212 215  220 221 221  220 221 221  244 246 246
+244 246 246  190 197 201  60 74 84  16 19 21  4 0 0  0 0 0
+0 0 0  0 0 0  0 0 0  0 0 0  16 19 21  120 125 127
+177 184 187  220 221 221  205 212 215  177 184 187  174 174 174  177 184 187
+174 174 174  174 174 174  167 166 167  174 174 174  166 165 166  137 136 137
+60 73 81  13 16 17  4 0 0  4 0 0  4 3 3  6 6 6
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+5 5 5  4 3 3  5 5 5  4 3 3  6 6 6  4 0 0
+6 6 6  6 6 6  4 0 0  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  37 38 37  137 136 137  193 200 203  220 221 221
+220 221 221  205 212 215  220 221 221  205 212 215  205 212 215  220 221 221
+220 221 221  220 221 221  244 246 246  166 165 166  43 57 68  2 2 2
+0 0 0  4 0 0  16 19 21  60 73 81  157 156 157  202 210 214
+220 221 221  193 200 203  177 184 187  177 184 187  177 184 187  174 174 174
+174 174 174  174 174 174  174 174 174  157 156 157  60 74 84  24 26 27
+4 0 0  4 0 0  4 0 0  6 6 6  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  6 6 6  4 0 0
+4 0 0  4 0 0  6 6 6  24 26 27  60 73 81  167 166 167
+220 221 221  220 221 221  220 221 221  205 212 215  205 212 215  205 212 215
+205 212 215  220 221 221  220 221 221  220 221 221  205 212 215  137 136 137
+60 74 84  125 124 125  137 136 137  190 197 201  220 221 221  193 200 203
+177 184 187  177 184 187  177 184 187  174 174 174  174 174 174  177 184 187
+190 197 201  174 174 174  125 124 125  37 38 37  6 6 6  4 0 0
+4 0 0  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  5 5 5  4 3 3  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
+125 124 125  193 200 203  244 246 246  220 221 221  205 212 215  205 212 215
+205 212 215  193 200 203  205 212 215  205 212 215  220 221 221  220 221 221
+193 200 203  193 200 203  205 212 215  193 200 203  193 200 203  177 184 187
+190 197 201  190 197 201  174 174 174  190 197 201  193 200 203  190 197 201
+153 152 153  60 73 81  4 0 0  4 0 0  4 0 0  3 2 2
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
+6 6 6  4 3 3  4 3 3  4 3 3  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  4 0 0  4 0 0
+4 0 0  26 28 28  131 129 131  220 221 221  244 246 246  220 221 221
+205 212 215  193 200 203  205 212 215  193 200 203  193 200 203  205 212 215
+220 221 221  193 200 203  193 200 203  193 200 203  190 197 201  174 174 174
+174 174 174  190 197 201  193 200 203  193 200 203  167 166 167  125 124 125
+6 6 6  4 0 0  4 0 0  4 3 3  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+5 5 5  4 3 3  5 5 5  6 6 6  4 3 3  5 5 5
+6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+4 0 0  4 0 0  6 6 6  41 54 63  158 157 158  220 221 221
+220 221 221  220 221 221  193 200 203  193 200 203  193 200 203  190 197 201
+190 197 201  190 197 201  190 197 201  190 197 201  174 174 174  193 200 203
+193 200 203  220 221 221  174 174 174  125 124 125  37 38 37  4 0 0
+4 0 0  4 3 3  6 6 6  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  4 3 3  4 3 3  5 5 5
+4 3 3  6 6 6  5 5 5  4 3 3  6 6 6  6 6 6
+6 6 6  6 6 6  4 0 0  4 0 0  13 16 17  60 73 81
+174 174 174  220 221 221  220 221 221  205 212 215  190 197 201  174 174 174
+193 200 203  174 174 174  190 197 201  174 174 174  193 200 203  220 221 221
+193 200 203  131 129 131  37 38 37  6 6 6  4 0 0  4 0 0
+6 6 6  6 6 6  4 3 3  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  5 5 5  5 5 5
+5 5 5  4 3 3  4 3 3  5 5 5  4 3 3  4 3 3
+5 5 5  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
+6 6 6  125 124 125  174 174 174  220 221 221  220 221 221  193 200 203
+193 200 203  193 200 203  193 200 203  193 200 203  220 221 221  158 157 158
+60 73 81  6 6 6  4 0 0  4 0 0  5 5 5  6 6 6
+5 5 5  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
+5 5 5  5 5 5  6 6 6  6 6 6  4 0 0  4 0 0
+4 0 0  4 0 0  26 28 28  125 124 125  174 174 174  193 200 203
+193 200 203  174 174 174  193 200 203  167 166 167  125 124 125  6 6 6
+6 6 6  6 6 6  4 0 0  6 6 6  6 6 6  5 5 5
+4 3 3  5 5 5  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+4 3 3  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+6 6 6  4 0 0  4 0 0  6 6 6  37 38 37  125 124 125
+153 152 153  131 129 131  125 124 125  37 38 37  6 6 6  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  4 3 3  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  5 5 5  4 3 3  5 5 5  4 3 3
+6 6 6  6 6 6  4 0 0  4 0 0  6 6 6  6 6 6
+24 26 27  24 26 27  6 6 6  6 6 6  6 6 6  4 0 0
+6 6 6  6 6 6  4 0 0  6 6 6  5 5 5  4 3 3
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  5 5 5  4 3 3  5 5 5  6 6 6
+4 0 0  6 6 6  6 6 6  6 6 6  6 6 6  6 6 6
+6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  6 6 6
+4 0 0  6 6 6  6 6 6  4 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  5 5 5
+5 5 5  5 5 5  4 0 0  6 6 6  4 0 0  6 6 6
+6 6 6  6 6 6  6 6 6  4 0 0  6 6 6  4 0 0
+6 6 6  4 3 3  5 5 5  4 3 3  5 5 5  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  5 5 5
+4 3 3  6 6 6  4 3 3  6 6 6  6 6 6  6 6 6
+4 0 0  6 6 6  4 0 0  6 6 6  6 6 6  6 6 6
+6 6 6  4 3 3  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  5 5 5  4 0 0  6 6 6
+6 6 6  4 0 0  6 6 6  6 6 6  4 0 0  6 6 6
+4 3 3  5 5 5  5 5 5  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  5 5 5  4 3 3  5 5 5  6 6 6  4 3 3
+4 3 3  6 6 6  6 6 6  4 3 3  6 6 6  4 3 3
+5 5 5  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  5 5 5  4 3 3  6 6 6
+5 5 5  4 3 3  4 3 3  4 3 3  5 5 5  5 5 5
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  5 5 5  4 3 3
+5 5 5  4 3 3  5 5 5  5 5 5  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4  4 4 4  4 4 4  4 4 4  4 4 4
+4 4 4  4 4 4
diff -ruNp linux-3.13.11/drivers/video/matrox/matroxfb_DAC1064.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/matrox/matroxfb_DAC1064.c
--- linux-3.13.11/drivers/video/matrox/matroxfb_DAC1064.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/matrox/matroxfb_DAC1064.c	2014-07-09 12:00:15.000000000 +0200
@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matro
 
 #ifdef CONFIG_FB_MATROX_MYSTIQUE
 struct matrox_switch matrox_mystique = {
-	MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
+	.preinit = MGA1064_preinit,
+	.reset = MGA1064_reset,
+	.init = MGA1064_init,
+	.restore = MGA1064_restore,
 };
 EXPORT_SYMBOL(matrox_mystique);
 #endif
 
 #ifdef CONFIG_FB_MATROX_G
 struct matrox_switch matrox_G100 = {
-	MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
+	.preinit = MGAG100_preinit,
+	.reset = MGAG100_reset,
+	.init = MGAG100_init,
+	.restore = MGAG100_restore,
 };
 EXPORT_SYMBOL(matrox_G100);
 #endif
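
The matrox hunks above (and the Ti3026 one below) convert positional initializers of struct matrox_switch into designated initializers. These structures contain nothing but function pointers, so grsecurity's structure-layout randomization is likely to reorder their fields; designated initializers keep each assignment attached to the right member regardless of field order. A minimal standalone sketch of the difference (plain userspace C, hypothetical struct and function names, not part of the patch):

	#include <stdio.h>

	struct ops {				/* stand-in for matrox_switch */
		int  (*init)(void);
		void (*reset)(void);
	};

	static int  my_init(void)  { printf("init\n");  return 0; }
	static void my_reset(void) { printf("reset\n"); }

	/* positional form depends on the declaration order of the fields:
	 *	static struct ops o = { my_init, my_reset };
	 * the designated form below stays correct even if the fields of
	 * struct ops are reordered. */
	static struct ops o = {
		.init  = my_init,
		.reset = my_reset,
	};

	int main(void)
	{
		o.init();
		o.reset();
		return 0;
	}
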
diff -ruNp linux-3.13.11/drivers/video/matrox/matroxfb_Ti3026.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/matrox/matroxfb_Ti3026.c
--- linux-3.13.11/drivers/video/matrox/matroxfb_Ti3026.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/matrox/matroxfb_Ti3026.c	2014-07-09 12:00:15.000000000 +0200
@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_
 }
 
 struct matrox_switch matrox_millennium = {
-	Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
+	.preinit = Ti3026_preinit,
+	.reset = Ti3026_reset,
+	.init = Ti3026_init,
+	.restore = Ti3026_restore
 };
 EXPORT_SYMBOL(matrox_millennium);
 #endif
diff -ruNp linux-3.13.11/drivers/video/mb862xx/mb862xxfb_accel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/mb862xx/mb862xxfb_accel.c
--- linux-3.13.11/drivers/video/mb862xx/mb862xxfb_accel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/mb862xx/mb862xxfb_accel.c	2014-07-09 12:00:15.000000000 +0200
@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info
 	struct mb862xxfb_par *par = info->par;
 
 	if (info->var.bits_per_pixel == 32) {
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_imageblit = cfb_imageblit;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_fillrect = cfb_fillrect;
+		*(void **)&info->fbops->fb_copyarea = cfb_copyarea;
+		*(void **)&info->fbops->fb_imageblit = cfb_imageblit;
+		pax_close_kernel();
 	} else {
 		outreg(disp, GC_L0EM, 3);
-		info->fbops->fb_fillrect = mb86290fb_fillrect;
-		info->fbops->fb_copyarea = mb86290fb_copyarea;
-		info->fbops->fb_imageblit = mb86290fb_imageblit;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
+		*(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
+		*(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
+		pax_close_kernel();
 	}
 	outreg(draw, GDC_REG_DRAW_BASE, 0);
 	outreg(draw, GDC_REG_MODE_MISC, 0x8000);
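
Most of the framebuffer hunks in this patch follow the same pattern: with the PaX constification/KERNEXEC features, tables of function pointers such as fb_ops end up in read-only memory, so a plain store to one of their members is replaced by a write through a (void **) cast inside a pax_open_kernel()/pax_close_kernel() window. A hedged sketch of a helper macro capturing that pattern (hypothetical macro name; pax_open_kernel() and pax_close_kernel() are the PaX primitives used in the hunks themselves, and are not defined here):

	/* Temporarily lift kernel write protection, store one pointer,
	 * and re-enable protection.  Sketch only -- not part of this patch. */
	#define pax_assign_ptr(lhs, rhs)			\
	do {							\
		pax_open_kernel();				\
		*(void **)&(lhs) = (rhs);			\
		pax_close_kernel();				\
	} while (0)

	/* e.g. the mb862xx assignment above could then be spelled:
	 *	pax_assign_ptr(info->fbops->fb_fillrect, cfb_fillrect);
	 */
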
diff -ruNp linux-3.13.11/drivers/video/nvidia/nvidia.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/nvidia/nvidia.c
--- linux-3.13.11/drivers/video/nvidia/nvidia.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/nvidia/nvidia.c	2014-07-09 12:00:15.000000000 +0200
@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_in
 	info->fix.line_length = (info->var.xres_virtual *
 				 info->var.bits_per_pixel) >> 3;
 	if (info->var.accel_flags) {
-		info->fbops->fb_imageblit = nvidiafb_imageblit;
-		info->fbops->fb_fillrect = nvidiafb_fillrect;
-		info->fbops->fb_copyarea = nvidiafb_copyarea;
-		info->fbops->fb_sync = nvidiafb_sync;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
+		*(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
+		*(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
+		*(void **)&info->fbops->fb_sync = nvidiafb_sync;
+		pax_close_kernel();
 		info->pixmap.scan_align = 4;
 		info->flags &= ~FBINFO_HWACCEL_DISABLED;
 		info->flags |= FBINFO_READS_FAST;
 		NVResetGraphics(info);
 	} else {
-		info->fbops->fb_imageblit = cfb_imageblit;
-		info->fbops->fb_fillrect = cfb_fillrect;
-		info->fbops->fb_copyarea = cfb_copyarea;
-		info->fbops->fb_sync = NULL;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_imageblit = cfb_imageblit;
+		*(void **)&info->fbops->fb_fillrect = cfb_fillrect;
+		*(void **)&info->fbops->fb_copyarea = cfb_copyarea;
+		*(void **)&info->fbops->fb_sync = NULL;
+		pax_close_kernel();
 		info->pixmap.scan_align = 1;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
 		info->flags &= ~FBINFO_READS_FAST;
@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_i
 	info->pixmap.size = 8 * 1024;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-	if (!hwcur)
-	    info->fbops->fb_cursor = NULL;
+	if (!hwcur) {
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_cursor = NULL;
+		pax_close_kernel();
+	}
 
 	info->var.accel_flags = (!noaccel);
 
diff -ruNp linux-3.13.11/drivers/video/omap2/dss/display.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/omap2/dss/display.c
--- linux-3.13.11/drivers/video/omap2/dss/display.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/omap2/dss/display.c	2014-07-09 12:00:15.000000000 +0200
@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap
 	snprintf(dssdev->alias, sizeof(dssdev->alias),
 			"display%d", disp_num_counter++);
 
+	pax_open_kernel();
 	if (drv && drv->get_resolution == NULL)
-		drv->get_resolution = omapdss_default_get_resolution;
+		*(void **)&drv->get_resolution = omapdss_default_get_resolution;
 	if (drv && drv->get_recommended_bpp == NULL)
-		drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
+		*(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
 	if (drv && drv->get_timings == NULL)
-		drv->get_timings = omapdss_default_get_timings;
+		*(void **)&drv->get_timings = omapdss_default_get_timings;
+	pax_close_kernel();
 
 	mutex_lock(&panel_list_mutex);
 	list_add_tail(&dssdev->panel_list, &panel_list);
diff -ruNp linux-3.13.11/drivers/video/s1d13xxxfb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/s1d13xxxfb.c
--- linux-3.13.11/drivers/video/s1d13xxxfb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/s1d13xxxfb.c	2014-07-09 12:00:15.000000000 +0200
@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platf
 
 	switch(prod_id) {
 	case S1D13506_PROD_ID:	/* activate acceleration */
-		s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
-		s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+		pax_open_kernel();
+		*(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+		*(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+		pax_close_kernel();
 		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
 			FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
 		break;
diff -ruNp linux-3.13.11/drivers/video/smscufx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/smscufx.c
--- linux-3.13.11/drivers/video/smscufx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/smscufx.c	2014-07-09 12:00:15.000000000 +0200
@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_inf
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = ufx_ops_mmap;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
+		pax_close_kernel();
 	}
 
 	pr_debug("released /dev/fb%d user=%d count=%d",
diff -ruNp linux-3.13.11/drivers/video/udlfb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/udlfb.c
--- linux-3.13.11/drivers/video/udlfb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/udlfb.c	2014-07-09 12:00:15.000000000 +0200
@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlf
 		dlfb_urb_completion(urb);
 
 error:
-	atomic_add(bytes_sent, &dev->bytes_sent);
-	atomic_add(bytes_identical, &dev->bytes_identical);
-	atomic_add(width*height*2, &dev->bytes_rendered);
+	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+	atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
 	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 		    >> 10)), /* Kcycles */
 		   &dev->cpu_kcycles_used);
 
@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct
 		dlfb_urb_completion(urb);
 
 error:
-	atomic_add(bytes_sent, &dev->bytes_sent);
-	atomic_add(bytes_identical, &dev->bytes_identical);
-	atomic_add(bytes_rendered, &dev->bytes_rendered);
+	atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+	atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+	atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
 	end_cycles = get_cycles();
-	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+	atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 		    >> 10)), /* Kcycles */
 		   &dev->cpu_kcycles_used);
 }
@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_in
 		fb_deferred_io_cleanup(info);
 		kfree(info->fbdefio);
 		info->fbdefio = NULL;
-		info->fbops->fb_mmap = dlfb_ops_mmap;
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
+		pax_close_kernel();
 	}
 
 	pr_warn("released /dev/fb%d user=%d count=%d\n",
@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_sh
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_rendered));
+			atomic_read_unchecked(&dev->bytes_rendered));
 }
 
 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_s
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_identical));
+			atomic_read_unchecked(&dev->bytes_identical));
 }
 
 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(s
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->bytes_sent));
+			atomic_read_unchecked(&dev->bytes_sent));
 }
 
 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 	return snprintf(buf, PAGE_SIZE, "%u\n",
-			atomic_read(&dev->cpu_kcycles_used));
+			atomic_read_unchecked(&dev->cpu_kcycles_used));
 }
 
 static ssize_t edid_show(
@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struc
 	struct fb_info *fb_info = dev_get_drvdata(fbdev);
 	struct dlfb_data *dev = fb_info->par;
 
-	atomic_set(&dev->bytes_rendered, 0);
-	atomic_set(&dev->bytes_identical, 0);
-	atomic_set(&dev->bytes_sent, 0);
-	atomic_set(&dev->cpu_kcycles_used, 0);
+	atomic_set_unchecked(&dev->bytes_rendered, 0);
+	atomic_set_unchecked(&dev->bytes_identical, 0);
+	atomic_set_unchecked(&dev->bytes_sent, 0);
+	atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
 
 	return count;
 }
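
The udlfb counters touched above (bytes_sent, bytes_identical, bytes_rendered, cpu_kcycles_used) are pure statistics rather than reference counts, so the patch switches them to PaX's *_unchecked atomic helpers, which are exempt from the overflow checks that the PaX REFCOUNT feature adds to the ordinary atomic_t operations. For building such code on a tree without PaX at all, a hypothetical compatibility shim could map the helpers straight back to the stock atomics (sketch only; under PaX these names are provided by the patch itself):

	#include <linux/atomic.h>

	#ifndef CONFIG_PAX_REFCOUNT
	#define atomic_unchecked_t		atomic_t
	#define atomic_read_unchecked(v)	atomic_read(v)
	#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
	#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
	#endif
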
diff -ruNp linux-3.13.11/drivers/video/uvesafb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/uvesafb.c
--- linux-3.13.11/drivers/video/uvesafb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/uvesafb.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/moduleloader.h>
 #include <video/edid.h>
 #include <video/uvesafb.h>
 #ifdef CONFIG_X86
@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uve
 	if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
 		par->pmi_setpal = par->ypan = 0;
 	} else {
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_MODULES
+		par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
+#endif
+		if (!par->pmi_code) {
+			par->pmi_setpal = par->ypan = 0;
+			return 0;
+		}
+#endif
+
 		par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
 						+ task->t.regs.edi);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+		pax_open_kernel();
+		memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
+		pax_close_kernel();
+
+		par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
+		par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
+#else
 		par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
 		par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
+#endif
+
 		printk(KERN_INFO "uvesafb: protected mode interface info at "
 				 "%04x:%04x\n",
 				 (u16)task->t.regs.es, (u16)task->t.regs.edi);
@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_in
 	par->ypan = ypan;
 
 	if (par->pmi_setpal || par->ypan) {
+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
 		if (__supported_pte_mask & _PAGE_NX) {
 			par->pmi_setpal = par->ypan = 0;
 			printk(KERN_WARNING "uvesafb: NX protection is active, "
 					    "better not use the PMI.\n");
-		} else {
+		} else
+#endif
 			uvesafb_vbe_getpmi(task, par);
-		}
 	}
 #else
 	/* The protected mode interface is not available on non-x86. */
@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_
 	info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
 
 	/* Disable blanking if the user requested so. */
-	if (!blank)
-		info->fbops->fb_blank = NULL;
+	if (!blank) {
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_blank = NULL;
+		pax_close_kernel();
+	}
 
 	/*
 	 * Find out how much IO memory is required for the mode with
@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_
 	info->flags = FBINFO_FLAG_DEFAULT |
 			(par->ypan ? FBINFO_HWACCEL_YPAN : 0);
 
-	if (!par->ypan)
-		info->fbops->fb_pan_display = NULL;
+	if (!par->ypan) {
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_pan_display = NULL;
+		pax_close_kernel();
+	}
 }
 
 static void uvesafb_init_mtrr(struct fb_info *info)
@@ -1792,6 +1822,11 @@ out_mode:
 out:
 	kfree(par->vbe_modes);
 
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+	if (par->pmi_code)
+		module_free_exec(NULL, par->pmi_code);
+#endif
+
 	framebuffer_release(info);
 	return err;
 }
@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platfor
 			kfree(par->vbe_modes);
 			kfree(par->vbe_state_orig);
 			kfree(par->vbe_state_saved);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+			if (par->pmi_code)
+				module_free_exec(NULL, par->pmi_code);
+#endif
+
 		}
 
 		framebuffer_release(info);
diff -ruNp linux-3.13.11/drivers/video/vesafb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/vesafb.c
--- linux-3.13.11/drivers/video/vesafb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/vesafb.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -52,8 +53,8 @@ static int   vram_remap;			/* Set amount
 static int   vram_total;			/* Set total amount of memory */
 static int   pmi_setpal __read_mostly = 1;	/* pmi for palette changes ??? */
 static int   ypan       __read_mostly;		/* 0..nothing, 1..ypan, 2..ywrap */
-static void  (*pmi_start)(void) __read_mostly;
-static void  (*pmi_pal)  (void) __read_mostly;
+static void  (*pmi_start)(void) __read_only;
+static void  (*pmi_pal)  (void) __read_only;
 static int   depth      __read_mostly;
 static int   vga_compat __read_mostly;
 /* --------------------------------------------------------------------- */
@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_
 	unsigned int size_remap;
 	unsigned int size_total;
 	char *option = NULL;
+	void *pmi_code = NULL;
 
 	/* ignore error return of fb_get_options */
 	fb_get_options("vesafb", &option);
@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_
 		size_remap = size_total;
 	vesafb_fix.smem_len = size_remap;
 
-#ifndef __i386__
-	screen_info.vesapm_seg = 0;
-#endif
-
 	if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
 		printk(KERN_WARNING
 		       "vesafb: cannot reserve video memory at 0x%lx\n",
@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_
 	printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
 	       vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
 
+#ifdef __i386__
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+	pmi_code = module_alloc_exec(screen_info.vesapm_size);
+	if (!pmi_code)
+#elif !defined(CONFIG_PAX_KERNEXEC)
+	if (0)
+#endif
+
+#endif
+	screen_info.vesapm_seg = 0;
+
 	if (screen_info.vesapm_seg) {
-		printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
-		       screen_info.vesapm_seg,screen_info.vesapm_off);
+		printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
+		       screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
 	}
 
 	if (screen_info.vesapm_seg < 0xc000)
@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_
 
 	if (ypan || pmi_setpal) {
 		unsigned short *pmi_base;
+
 		pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
-		pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
-		pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+		pax_open_kernel();
+		memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
+#else
+		pmi_code  = pmi_base;
+#endif
+
+		pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
+		pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+		pmi_start = ktva_ktla(pmi_start);
+		pmi_pal = ktva_ktla(pmi_pal);
+		pax_close_kernel();
+#endif
+
 		printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
 		if (pmi_base[3]) {
 			printk(KERN_INFO "vesafb: pmi: ports = ");
@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_
 	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
 		(ypan ? FBINFO_HWACCEL_YPAN : 0);
 
-	if (!ypan)
-		info->fbops->fb_pan_display = NULL;
+	if (!ypan) {
+		pax_open_kernel();
+		*(void **)&info->fbops->fb_pan_display = NULL;
+		pax_close_kernel();
+	}
 
 	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
 		err = -ENOMEM;
@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_
 	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	return 0;
 err:
+
+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+	module_free_exec(NULL, pmi_code);
+#endif
+
 	if (info->screen_base)
 		iounmap(info->screen_base);
 	framebuffer_release(info);
diff -ruNp linux-3.13.11/drivers/video/via/via_clock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/via/via_clock.h
--- linux-3.13.11/drivers/video/via/via_clock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/video/via/via_clock.h	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ struct via_clock {
 
 	void (*set_engine_pll_state)(u8 state);
 	void (*set_engine_pll)(struct via_pll_config config);
-};
+} __no_const;
 
 
 static inline u32 get_pll_internal_frequency(u32 ref_freq,
diff -ruNp linux-3.13.11/drivers/xen/xenfs/xenstored.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/xen/xenfs/xenstored.c
--- linux-3.13.11/drivers/xen/xenfs/xenstored.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/drivers/xen/xenfs/xenstored.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,12 @@ static int xsd_release(struct inode *ino
 static int xsd_kva_open(struct inode *inode, struct file *file)
 {
 	file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+					       NULL);
+#else
 					       xen_store_interface);
+#endif
+
 	if (!file->private_data)
 		return -ENOMEM;
 	return 0;
diff -ruNp linux-3.13.11/fs/9p/vfs_addr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/9p/vfs_addr.c
--- linux-3.13.11/fs/9p/vfs_addr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/9p/vfs_addr.c	2014-07-09 12:00:15.000000000 +0200
@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(str
 
 	retval = v9fs_file_write_internal(inode,
 					  v9inode->writeback_fid,
-					  (__force const char __user *)buffer,
+					  (const char __force_user *)buffer,
 					  len, &offset, 0);
 	if (retval > 0)
 		retval = 0;
diff -ruNp linux-3.13.11/fs/9p/vfs_inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/9p/vfs_inode.c
--- linux-3.13.11/fs/9p/vfs_inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/9p/vfs_inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -1306,7 +1306,7 @@ static void *v9fs_vfs_follow_link(struct
 void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
-	char *s = nd_get_link(nd);
+	const char *s = nd_get_link(nd);
 
 	p9_debug(P9_DEBUG_VFS, " %s %s\n",
 		 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
diff -ruNp linux-3.13.11/fs/Kconfig.binfmt linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/Kconfig.binfmt
--- linux-3.13.11/fs/Kconfig.binfmt	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/Kconfig.binfmt	2014-07-09 12:00:15.000000000 +0200
@@ -103,7 +103,7 @@ config HAVE_AOUT
 
 config BINFMT_AOUT
 	tristate "Kernel support for a.out and ECOFF binaries"
-	depends on HAVE_AOUT
+	depends on HAVE_AOUT && BROKEN
 	---help---
 	  A.out (Assembler.OUTput) is a set of formats for libraries and
 	  executables used in the earliest versions of UNIX.  Linux used
diff -ruNp linux-3.13.11/fs/afs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/afs/inode.c
--- linux-3.13.11/fs/afs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/afs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct i
 	struct afs_vnode *vnode;
 	struct super_block *sb;
 	struct inode *inode;
-	static atomic_t afs_autocell_ino;
+	static atomic_unchecked_t afs_autocell_ino;
 
 	_enter("{%x:%u},%*.*s,",
 	       AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct i
 	data.fid.unique = 0;
 	data.fid.vnode = 0;
 
-	inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+	inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
 			     afs_iget5_autocell_test, afs_iget5_set,
 			     &data);
 	if (!inode) {
diff -ruNp linux-3.13.11/fs/aio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/aio.c
--- linux-3.13.11/fs/aio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/aio.c	2014-07-09 12:00:15.000000000 +0200
@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx
 	size += sizeof(struct io_event) * nr_events;
 
 	nr_pages = PFN_UP(size);
-	if (nr_pages < 0)
+	if (nr_pages <= 0)
 		return -EINVAL;
 
 	file = aio_private_file(ctx, nr_pages);
diff -ruNp linux-3.13.11/fs/anon_inodes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/anon_inodes.c
--- linux-3.13.11/fs/anon_inodes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/anon_inodes.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,19 +41,8 @@ static const struct dentry_operations an
 static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	struct dentry *root;
-	root = mount_pseudo(fs_type, "anon_inode:", NULL,
+	return mount_pseudo(fs_type, "anon_inode:", NULL,
 			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
-	if (!IS_ERR(root)) {
-		struct super_block *s = root->d_sb;
-		anon_inode_inode = alloc_anon_inode(s);
-		if (IS_ERR(anon_inode_inode)) {
-			dput(root);
-			deactivate_locked_super(s);
-			root = ERR_CAST(anon_inode_inode);
-		}
-	}
-	return root;
 }
 
 static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
 
 static int __init anon_inode_init(void)
 {
-	int error;
-
-	error = register_filesystem(&anon_inode_fs_type);
-	if (error)
-		goto err_exit;
 	anon_inode_mnt = kern_mount(&anon_inode_fs_type);
-	if (IS_ERR(anon_inode_mnt)) {
-		error = PTR_ERR(anon_inode_mnt);
-		goto err_unregister_filesystem;
-	}
-	return 0;
+	if (IS_ERR(anon_inode_mnt))
+		panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
 
-err_unregister_filesystem:
-	unregister_filesystem(&anon_inode_fs_type);
-err_exit:
-	panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+	anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+	if (IS_ERR(anon_inode_inode))
+		panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+	return 0;
 }
 
 fs_initcall(anon_inode_init);
diff -ruNp linux-3.13.11/fs/attr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/attr.c
--- linux-3.13.11/fs/attr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/attr.c	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,9 @@
 #include <linux/security.h>
 #include <linux/evm.h>
 #include <linux/ima.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/vs_tag.h>
 
 /**
  * inode_change_ok - check if attribute changes to an inode are allowed
@@ -77,6 +80,10 @@ int inode_change_ok(const struct inode *
 			return -EPERM;
 	}
 
+	/* check for inode tag permission */
+	if (dx_permission(inode, MAY_WRITE))
+		return -EACCES;
+
 	return 0;
 }
 EXPORT_SYMBOL(inode_change_ok);
@@ -102,6 +109,7 @@ int inode_newsize_ok(const struct inode
 		unsigned long limit;
 
 		limit = rlimit(RLIMIT_FSIZE);
+		gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
 		if (limit != RLIM_INFINITY && offset > limit)
 			goto out_sig;
 		if (offset > inode->i_sb->s_maxbytes)
@@ -147,6 +155,8 @@ void setattr_copy(struct inode *inode, c
 		inode->i_uid = attr->ia_uid;
 	if (ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
+	if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+		inode->i_tag = attr->ia_tag;
 	if (ia_valid & ATTR_ATIME)
 		inode->i_atime = timespec_trunc(attr->ia_atime,
 						inode->i_sb->s_time_gran);
@@ -197,7 +207,8 @@ int notify_change(struct dentry * dentry
 
 	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
 
-	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
+	if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
+		ATTR_TAG | ATTR_TIMES_SET)) {
 		if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 			return -EPERM;
 	}
diff -ruNp linux-3.13.11/fs/autofs4/waitq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/autofs4/waitq.c
--- linux-3.13.11/fs/autofs4/waitq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/autofs4/waitq.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_s
 {
 	unsigned long sigpipe, flags;
 	mm_segment_t fs;
-	const char *data = (const char *)addr;
+	const char __user *data = (const char __force_user *)addr;
 	ssize_t wr = 0;
 
 	sigpipe = sigismember(&current->pending.signal, SIGPIPE);
@@ -340,6 +340,10 @@ static int validate_request(struct autof
 	return 1;
 }
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
+#endif
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		enum autofs_notify notify)
 {
@@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *
 
 	/* If this is a direct mount request create a dummy name */
 	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+		/* this name does get written to userland via autofs4_write() */
+		qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
+#else
 		qstr.len = sprintf(name, "%p", dentry);
+#endif
 	else {
 		qstr.len = autofs4_getpath(sbi, dentry, &name);
 		if (!qstr.len) {
diff -ruNp linux-3.13.11/fs/befs/endian.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/befs/endian.h
--- linux-3.13.11/fs/befs/endian.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/befs/endian.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@
 
 #include <asm/byteorder.h>
 
-static inline u64
+static inline u64 __intentional_overflow(-1)
 fs64_to_cpu(const struct super_block *sb, fs64 n)
 {
 	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb
 		return (__force fs64)cpu_to_be64(n);
 }
 
-static inline u32
+static inline u32 __intentional_overflow(-1)
 fs32_to_cpu(const struct super_block *sb, fs32 n)
 {
 	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb
 		return (__force fs32)cpu_to_be32(n);
 }
 
-static inline u16
+static inline u16 __intentional_overflow(-1)
 fs16_to_cpu(const struct super_block *sb, fs16 n)
 {
 	if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
diff -ruNp linux-3.13.11/fs/binfmt_aout.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_aout.c
--- linux-3.13.11/fs/binfmt_aout.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_aout.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/fs.h>
 #include <linux/file.h>
+#include <linux/security.h>
 #include <linux/stat.h>
 #include <linux/fcntl.h>
 #include <linux/ptrace.h>
@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredum
 #endif
 #       define START_STACK(u)   ((void __user *)u.start_stack)
 
+	memset(&dump, 0, sizeof(dump));
+
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	has_dumped = 1;
@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredum
 
 /* If the size of the dump file exceeds the rlimit, then see what would happen
    if we wrote the stack, but not the data area.  */
+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
 	if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
 		dump.u_dsize = 0;
 
 /* Make sure we have enough room to write the stack and data areas. */
+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
 	if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
 		dump.u_ssize = 0;
 
@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux
 	rlim = rlimit(RLIMIT_DATA);
 	if (rlim >= RLIM_INFINITY)
 		rlim = ~0;
+
+	gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
 	if (ex.a_data + ex.a_bss > rlim)
 		return -ENOMEM;
 
@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux
 
 	install_exec_creds(bprm);
 
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+	current->mm->pax_flags = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
+		current->mm->pax_flags |= MF_PAX_PAGEEXEC;
+
+#ifdef CONFIG_PAX_EMUTRAMP
+		if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
+			current->mm->pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+		if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
+			current->mm->pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+	}
+#endif
+
 	if (N_MAGIC(ex) == OMAGIC) {
 		unsigned long text_addr, map_size;
 		loff_t pos;
@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux
 		}
 
 		error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
-				PROT_READ | PROT_WRITE | PROT_EXEC,
+				PROT_READ | PROT_WRITE,
 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
 				fd_offset + ex.a_text);
 		if (error != N_DATADDR(ex)) {
diff -ruNp linux-3.13.11/fs/binfmt_elf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_elf.c
--- linux-3.13.11/fs/binfmt_elf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_elf.c	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,7 @@
 #include <linux/utsname.h>
 #include <linux/coredump.h>
 #include <linux/sched.h>
+#include <linux/xattr.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -48,7 +49,7 @@
 static int load_elf_binary(struct linux_binprm *bprm);
 static int load_elf_library(struct file *);
 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
-				int, int, unsigned long);
+				int, int, unsigned long) __intentional_overflow(-1);
 
 /*
  * If we don't support core dumping, then supply a NULL so we
@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump
 #define elf_core_dump	NULL
 #endif
 
+#ifdef CONFIG_PAX_MPROTECT
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+static void elf_handle_mmap(struct file *file);
+#endif
+
 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
 #define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
 #else
@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format =
 	.load_binary	= load_elf_binary,
 	.load_shlib	= load_elf_library,
 	.core_dump	= elf_core_dump,
+
+#ifdef CONFIG_PAX_MPROTECT
+	.handle_mprotect= elf_handle_mprotect,
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	.handle_mmap	= elf_handle_mmap,
+#endif
+
 	.min_coredump	= ELF_EXEC_PAGESIZE,
 };
 
@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format =
 
 static int set_brk(unsigned long start, unsigned long end)
 {
+	unsigned long e = end;
+
 	start = ELF_PAGEALIGN(start);
 	end = ELF_PAGEALIGN(end);
 	if (end > start) {
@@ -94,7 +114,7 @@ static int set_brk(unsigned long start,
 		if (BAD_ADDR(addr))
 			return addr;
 	}
-	current->mm->start_brk = current->mm->brk = end;
+	current->mm->start_brk = current->mm->brk = e;
 	return 0;
 }
 
@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *b
 	elf_addr_t __user *u_rand_bytes;
 	const char *k_platform = ELF_PLATFORM;
 	const char *k_base_platform = ELF_BASE_PLATFORM;
-	unsigned char k_rand_bytes[16];
+	u32 k_rand_bytes[4];
 	int items;
 	elf_addr_t *elf_info;
 	int ei_index = 0;
 	const struct cred *cred = current_cred();
 	struct vm_area_struct *vma;
+	unsigned long saved_auxv[AT_VECTOR_SIZE];
 
 	/*
 	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *b
 	 * Generate 16 random bytes for userspace PRNG seeding.
 	 */
 	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
-	u_rand_bytes = (elf_addr_t __user *)
-		       STACK_ALLOC(p, sizeof(k_rand_bytes));
+	prandom_seed(k_rand_bytes[0] ^ prandom_u32());
+	prandom_seed(k_rand_bytes[1] ^ prandom_u32());
+	prandom_seed(k_rand_bytes[2] ^ prandom_u32());
+	prandom_seed(k_rand_bytes[3] ^ prandom_u32());
+	p = STACK_ROUND(p, sizeof(k_rand_bytes));
+	u_rand_bytes = (elf_addr_t __user *) p;
 	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
 		return -EFAULT;
 
@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *b
 		return -EFAULT;
 	current->mm->env_end = p;
 
+	memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
+
 	/* Put the elf_info on the stack in the right place.  */
 	sp = (elf_addr_t __user *)envp + 1;
-	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
+	if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
 		return -EFAULT;
 	return 0;
 }
@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(
    an ELF header */
 
 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
-		struct file *interpreter, unsigned long *interp_map_addr,
-		unsigned long no_base)
+		struct file *interpreter, unsigned long no_base)
 {
 	struct elf_phdr *elf_phdata;
 	struct elf_phdr *eppnt;
-	unsigned long load_addr = 0;
+	unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
 	int load_addr_set = 0;
 	unsigned long last_bss = 0, elf_bss = 0;
-	unsigned long error = ~0UL;
+	unsigned long error = -EINVAL;
 	unsigned long total_size;
 	int retval, i, size;
 
@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(str
 		goto out_close;
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
 	eppnt = elf_phdata;
 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
 		if (eppnt->p_type == PT_LOAD) {
@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(str
 			map_addr = elf_map(interpreter, load_addr + vaddr,
 					eppnt, elf_prot, elf_type, total_size);
 			total_size = 0;
-			if (!*interp_map_addr)
-				*interp_map_addr = map_addr;
 			error = map_addr;
 			if (BAD_ADDR(map_addr))
 				goto out_close;
@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(str
 			k = load_addr + eppnt->p_vaddr;
 			if (BAD_ADDR(k) ||
 			    eppnt->p_filesz > eppnt->p_memsz ||
-			    eppnt->p_memsz > TASK_SIZE ||
-			    TASK_SIZE - eppnt->p_memsz < k) {
+			    eppnt->p_memsz > pax_task_size ||
+			    pax_task_size - eppnt->p_memsz < k) {
 				error = -ENOMEM;
 				goto out_close;
 			}
@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(str
 		elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
 
 		/* Map the last of the bss segment */
-		error = vm_brk(elf_bss, last_bss - elf_bss);
-		if (BAD_ADDR(error))
-			goto out_close;
+		if (last_bss > elf_bss) {
+			error = vm_brk(elf_bss, last_bss - elf_bss);
+			if (BAD_ADDR(error))
+				goto out_close;
+		}
 	}
 
 	error = load_addr;
@@ -538,6 +569,336 @@ out:
 	return error;
 }
 
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+#ifdef CONFIG_PAX_SOFTMODE
+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (elf_phdata->p_flags & PF_PAGEEXEC)
+		pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (elf_phdata->p_flags & PF_SEGMEXEC)
+		pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
+		pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (elf_phdata->p_flags & PF_MPROTECT)
+		pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+	if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
+		pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
+		pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
+		pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (!(elf_phdata->p_flags & PF_NOMPROTECT))
+		pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+	if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+#ifdef CONFIG_PAX_SOFTMODE
+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (pax_flags_softmode & MF_PAX_PAGEEXEC)
+		pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (pax_flags_softmode & MF_PAX_SEGMEXEC)
+		pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
+		pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (pax_flags_softmode & MF_PAX_MPROTECT)
+		pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+	if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
+		pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
+		pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
+		pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
+		pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
+	if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+	return pax_flags;
+}
+#endif
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static unsigned long pax_parse_defaults(void)
+{
+	unsigned long pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_SOFTMODE
+	if (pax_softmode)
+		return pax_flags;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (randomize_va_space)
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+	return pax_flags;
+}
+
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+	unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_EI_PAX
+
+#ifdef CONFIG_PAX_SOFTMODE
+	if (pax_softmode)
+		return pax_flags;
+#endif
+
+	pax_flags = 0UL;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
+		pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
+		pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
+		pax_flags |= MF_PAX_EMUTRAMP;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
+		pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+	if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
+		pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+#endif
+
+	return pax_flags;
+
+}
+
+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
+{
+
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+	unsigned long i;
+
+	for (i = 0UL; i < elf_ex->e_phnum; i++)
+		if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
+			if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
+			    ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
+			    ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
+			    ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
+			    ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
+				return PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_SOFTMODE
+			if (pax_softmode)
+				return pax_parse_pt_pax_softmode(&elf_phdata[i]);
+			else
+#endif
+
+				return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
+			break;
+		}
+#endif
+
+	return PAX_PARSE_FLAGS_FALLBACK;
+}
+
+static unsigned long pax_parse_xattr_pax(struct file * const file)
+{
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+	ssize_t xattr_size, i;
+	unsigned char xattr_value[sizeof("pemrs") - 1];
+	unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
+
+	xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
+	if (xattr_size < 0 || xattr_size > sizeof xattr_value)
+		return PAX_PARSE_FLAGS_FALLBACK;
+
+	for (i = 0; i < xattr_size; i++)
+		switch (xattr_value[i]) {
+		default:
+			return PAX_PARSE_FLAGS_FALLBACK;
+
+#define parse_flag(option1, option2, flag)			\
+		case option1:					\
+			if (pax_flags_hardmode & MF_PAX_##flag)	\
+				return PAX_PARSE_FLAGS_FALLBACK;\
+			pax_flags_hardmode |= MF_PAX_##flag;	\
+			break;					\
+		case option2:					\
+			if (pax_flags_softmode & MF_PAX_##flag)	\
+				return PAX_PARSE_FLAGS_FALLBACK;\
+			pax_flags_softmode |= MF_PAX_##flag;	\
+			break;
+
+		parse_flag('p', 'P', PAGEEXEC);
+		parse_flag('e', 'E', EMUTRAMP);
+		parse_flag('m', 'M', MPROTECT);
+		parse_flag('r', 'R', RANDMMAP);
+		parse_flag('s', 'S', SEGMEXEC);
+
+#undef parse_flag
+		}
+
+	if (pax_flags_hardmode & pax_flags_softmode)
+		return PAX_PARSE_FLAGS_FALLBACK;
+
+#ifdef CONFIG_PAX_SOFTMODE
+	if (pax_softmode)
+		return pax_parse_xattr_pax_softmode(pax_flags_softmode);
+	else
+#endif
+
+		return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
+#else
+	return PAX_PARSE_FLAGS_FALLBACK;
+#endif
+
+}
+
+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
+{
+	unsigned long pax_flags, ei_pax_flags,  pt_pax_flags, xattr_pax_flags;
+
+	pax_flags = pax_parse_defaults();
+	ei_pax_flags = pax_parse_ei_pax(elf_ex);
+	pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
+	xattr_pax_flags = pax_parse_xattr_pax(file);
+
+	if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+	    xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+	    pt_pax_flags != xattr_pax_flags)
+		return -EINVAL;
+	if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+		pax_flags = xattr_pax_flags;
+	else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+		pax_flags = pt_pax_flags;
+	else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+		pax_flags = ei_pax_flags;
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+		if ((__supported_pte_mask & _PAGE_NX))
+			pax_flags &= ~MF_PAX_SEGMEXEC;
+		else
+			pax_flags &= ~MF_PAX_PAGEEXEC;
+	}
+#endif
+
+	if (0 > pax_check_flags(&pax_flags))
+		return -EINVAL;
+
+	current->mm->pax_flags = pax_flags;
+	return 0;
+}
+#endif
+
 /*
  * These are the functions used to load ELF style executables and shared
  * libraries.  There is no binary dependent code anywhere else.
@@ -554,6 +915,11 @@ static unsigned long randomize_stack_top
 {
 	unsigned int random_variable = 0;
 
+#ifdef CONFIG_PAX_RANDUSTACK
+	if (current->mm->pax_flags & MF_PAX_RANDMMAP)
+		return stack_top - current->mm->delta_stack;
+#endif
+
 	if ((current->flags & PF_RANDOMIZE) &&
 		!(current->personality & ADDR_NO_RANDOMIZE)) {
 		random_variable = get_random_int() & STACK_RND_MASK;
@@ -572,7 +938,7 @@ static int load_elf_binary(struct linux_
  	unsigned long load_addr = 0, load_bias = 0;
 	int load_addr_set = 0;
 	char * elf_interpreter = NULL;
-	unsigned long error;
+	unsigned long error = 0;
 	struct elf_phdr *elf_ppnt, *elf_phdata;
 	unsigned long elf_bss, elf_brk;
 	int retval, i;
@@ -582,12 +948,12 @@ static int load_elf_binary(struct linux_
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long reloc_func_desc __maybe_unused = 0;
 	int executable_stack = EXSTACK_DEFAULT;
-	unsigned long def_flags = 0;
 	struct pt_regs *regs = current_pt_regs();
 	struct {
 		struct elfhdr elf_ex;
 		struct elfhdr interp_elf_ex;
 	} *loc;
+	unsigned long pax_task_size;
 
 	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
 	if (!loc) {
@@ -723,11 +1089,82 @@ static int load_elf_binary(struct linux_
 		goto out_free_dentry;
 
 	/* OK, This is the point of no return */
-	current->mm->def_flags = def_flags;
+	current->mm->def_flags = 0;
 
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
 	SET_PERSONALITY(loc->elf_ex);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+	current->mm->pax_flags = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_DLRESOLVE
+	current->mm->call_dl_resolve = 0UL;
+#endif
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
+	current->mm->call_syscall = 0UL;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+	current->mm->delta_mmap = 0UL;
+	current->mm->delta_stack = 0UL;
+#endif
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+	if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+		send_sig(SIGKILL, current, 0);
+		goto out_free_dentry;
+	}
+#endif
+
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+	pax_set_initial_flags(bprm);
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
+	if (pax_set_initial_flags_func)
+		(pax_set_initial_flags_func)(bprm);
+#endif
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+	if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
+		current->mm->context.user_cs_limit = PAGE_SIZE;
+		current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
+	}
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+		current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
+		current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+		current->mm->def_flags |= VM_NOHUGEPAGE;
+	} else
+#endif
+
+	pax_task_size = TASK_SIZE;
+
+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
+	if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+		set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
+		put_cpu();
+	}
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+	if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
+		current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
+		current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
+	}
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+	if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+		executable_stack = EXSTACK_DISABLE_X;
+		current->personality &= ~READ_IMPLIES_EXEC;
+	} else
+#endif
+
 	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
@@ -817,6 +1254,20 @@ static int load_elf_binary(struct linux_
 #else
 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
 #endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+			/* PaX: randomize base address at the default exe base if requested */
+			if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
+#ifdef CONFIG_SPARC64
+				load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
+#else
+				load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
+#endif
+				load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
+				elf_flags |= MAP_FIXED;
+			}
+#endif
+
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -849,9 +1300,9 @@ static int load_elf_binary(struct linux_
 		 * allowed task size. Note that p_filesz must always be
 		 * <= p_memsz so it is only necessary to check p_memsz.
 		 */
-		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
-		    elf_ppnt->p_memsz > TASK_SIZE ||
-		    TASK_SIZE - elf_ppnt->p_memsz < k) {
+		if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+		    elf_ppnt->p_memsz > pax_task_size ||
+		    pax_task_size - elf_ppnt->p_memsz < k) {
 			/* set_brk can never work. Avoid overflows. */
 			send_sig(SIGKILL, current, 0);
 			retval = -EINVAL;
@@ -890,17 +1341,45 @@ static int load_elf_binary(struct linux_
 		goto out_free_dentry;
 	}
 	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
-		send_sig(SIGSEGV, current, 0);
-		retval = -EFAULT; /* Nobody gets to see this, but.. */
-		goto out_free_dentry;
+		/*
+		 * This bss-zeroing can fail if the ELF
+		 * file specifies odd protections. So
+		 * we don't check the return value
+		 */
 	}
 
-	if (elf_interpreter) {
-		unsigned long interp_map_addr = 0;
+#ifdef CONFIG_PAX_RANDMMAP
+	if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
+		unsigned long start, size, flags;
+		vm_flags_t vm_flags;
+
+		start = ELF_PAGEALIGN(elf_brk);
+		size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
+		flags = MAP_FIXED | MAP_PRIVATE;
+		vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
+
+		down_write(&current->mm->mmap_sem);
+		start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
+		retval = -ENOMEM;
+		if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
+//			if (current->personality & ADDR_NO_RANDOMIZE)
+//				vm_flags |= VM_READ | VM_MAYREAD;
+			start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
+			retval = IS_ERR_VALUE(start) ? start : 0;
+		}
+		up_write(&current->mm->mmap_sem);
+		if (retval == 0)
+			retval = set_brk(start + size, start + size + PAGE_SIZE);
+		if (retval < 0) {
+			send_sig(SIGKILL, current, 0);
+			goto out_free_dentry;
+		}
+	}
+#endif
 
+	if (elf_interpreter) {
 		elf_entry = load_elf_interp(&loc->interp_elf_ex,
 					    interpreter,
-					    &interp_map_addr,
 					    load_bias);
 		if (!IS_ERR((void *)elf_entry)) {
 			/*
@@ -1122,7 +1601,7 @@ static bool always_dump_vma(struct vm_ar
  * Decide what to dump of a segment, part, all or none.
  */
 static unsigned long vma_dump_size(struct vm_area_struct *vma,
-				   unsigned long mm_flags)
+				   unsigned long mm_flags, long signr)
 {
 #define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))
 
@@ -1160,7 +1639,7 @@ static unsigned long vma_dump_size(struc
 	if (vma->vm_file == NULL)
 		return 0;
 
-	if (FILTER(MAPPED_PRIVATE))
+	if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
 		goto whole;
 
 	/*
@@ -1367,9 +1846,9 @@ static void fill_auxv_note(struct memelf
 {
 	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
 	int i = 0;
-	do
+	do {
 		i += 2;
-	while (auxv[i - 2] != AT_NULL);
+	} while (auxv[i - 2] != AT_NULL);
 	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
 }
 
@@ -1378,7 +1857,7 @@ static void fill_siginfo_note(struct mem
 {
 	mm_segment_t old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
+	copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
 	set_fs(old_fs);
 	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
 }
@@ -2002,14 +2481,14 @@ static void fill_extnum_info(struct elfh
 }
 
 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
-				     unsigned long mm_flags)
+				     struct coredump_params *cprm)
 {
 	struct vm_area_struct *vma;
 	size_t size = 0;
 
 	for (vma = first_vma(current, gate_vma); vma != NULL;
 	     vma = next_vma(vma, gate_vma))
-		size += vma_dump_size(vma, mm_flags);
+		size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
 	return size;
 }
 
@@ -2100,7 +2579,7 @@ static int elf_core_dump(struct coredump
 
 	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
-	offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
+	offset += elf_core_vma_data_size(gate_vma, cprm);
 	offset += elf_core_extra_data_size();
 	e_shoff = offset;
 
@@ -2128,7 +2607,7 @@ static int elf_core_dump(struct coredump
 		phdr.p_offset = offset;
 		phdr.p_vaddr = vma->vm_start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
+		phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
 		phdr.p_memsz = vma->vm_end - vma->vm_start;
 		offset += phdr.p_filesz;
 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
@@ -2161,7 +2640,7 @@ static int elf_core_dump(struct coredump
 		unsigned long addr;
 		unsigned long end;
 
-		end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
+		end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
 
 		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
 			struct page *page;
@@ -2202,6 +2681,167 @@ out:
 
 #endif		/* CONFIG_ELF_CORE */
 
+#ifdef CONFIG_PAX_MPROTECT
+/* PaX: non-PIC ELF libraries need relocations on their executable segments
+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
+ * we'll remove VM_MAYWRITE for good on RELRO segments.
+ *
+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
+ * basis because we want to allow the common case and not the special ones.
+ */
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
+{
+	struct elfhdr elf_h;
+	struct elf_phdr elf_p;
+	unsigned long i;
+	unsigned long oldflags;
+	bool is_textrel_rw, is_textrel_rx, is_relro;
+
+	if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
+		return;
+
+	oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
+	newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
+
+#ifdef CONFIG_PAX_ELFRELOCS
+	/* possible TEXTREL */
+	is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
+	is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
+#else
+	is_textrel_rw = false;
+	is_textrel_rx = false;
+#endif
+
+	/* possible RELRO */
+	is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
+
+	if (!is_textrel_rw && !is_textrel_rx && !is_relro)
+		return;
+
+	if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
+	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+
+#ifdef CONFIG_PAX_ETEXECRELOCS
+	    ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
+#else
+	    ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
+#endif
+
+	    (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
+	    !elf_check_arch(&elf_h) ||
+	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+	    elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
+		return;
+
+	for (i = 0UL; i < elf_h.e_phnum; i++) {
+		if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
+			return;
+		switch (elf_p.p_type) {
+		case PT_DYNAMIC:
+			if (!is_textrel_rw && !is_textrel_rx)
+				continue;
+			i = 0UL;
+			while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
+				elf_dyn dyn;
+
+				if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
+					break;
+				if (dyn.d_tag == DT_NULL)
+					break;
+				if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
+					gr_log_textrel(vma);
+					if (is_textrel_rw)
+						vma->vm_flags |= VM_MAYWRITE;
+					else
+						/* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
+						vma->vm_flags &= ~VM_MAYWRITE;
+					break;
+				}
+				i++;
+			}
+			is_textrel_rw = false;
+			is_textrel_rx = false;
+			continue;
+
+		case PT_GNU_RELRO:
+			if (!is_relro)
+				continue;
+			if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
+				vma->vm_flags &= ~VM_MAYWRITE;
+			is_relro = false;
+			continue;
+
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
+		case PT_PAX_FLAGS: {
+			const char *msg_mprotect = "", *msg_emutramp = "";
+			char *buffer_lib, *buffer_exe;
+
+			if (elf_p.p_flags & PF_NOMPROTECT)
+				msg_mprotect = "MPROTECT disabled";
+
+#ifdef CONFIG_PAX_EMUTRAMP
+			if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
+				msg_emutramp = "EMUTRAMP enabled";
+#endif
+
+			if (!msg_mprotect[0] && !msg_emutramp[0])
+				continue;
+
+			if (!printk_ratelimit())
+				continue;
+
+			buffer_lib = (char *)__get_free_page(GFP_KERNEL);
+			buffer_exe = (char *)__get_free_page(GFP_KERNEL);
+			if (buffer_lib && buffer_exe) {
+				char *path_lib, *path_exe;
+
+				path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
+				path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
+
+				pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
+					(msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
+
+			}
+			free_page((unsigned long)buffer_exe);
+			free_page((unsigned long)buffer_lib);
+			continue;
+		}
+#endif
+
+		}
+	}
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+
+extern int grsec_enable_log_rwxmaps;
+
+static void elf_handle_mmap(struct file *file)
+{
+	struct elfhdr elf_h;
+	struct elf_phdr elf_p;
+	unsigned long i;
+
+	if (!grsec_enable_log_rwxmaps)
+		return;
+
+	if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
+	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
+	    (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
+	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
+	    elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
+		return;
+
+	for (i = 0UL; i < elf_h.e_phnum; i++) {
+		if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
+			return;
+		if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
+			gr_log_ptgnustack(file);
+	}
+}
+#endif
+
 static int __init init_elf_binfmt(void)
 {
 	register_binfmt(&elf_format);
diff -ruNp linux-3.13.11/fs/binfmt_flat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_flat.c
--- linux-3.13.11/fs/binfmt_flat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/binfmt_flat.c	2014-07-09 12:00:15.000000000 +0200
@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_b
 				realdatastart = (unsigned long) -ENOMEM;
 			printk("Unable to allocate RAM for process data, errno %d\n",
 					(int)-realdatastart);
+			down_write(&current->mm->mmap_sem);
 			vm_munmap(textpos, text_len);
+			up_write(&current->mm->mmap_sem);
 			ret = realdatastart;
 			goto err;
 		}
@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_b
 		}
 		if (IS_ERR_VALUE(result)) {
 			printk("Unable to read data+bss, errno %d\n", (int)-result);
+			down_write(&current->mm->mmap_sem);
 			vm_munmap(textpos, text_len);
 			vm_munmap(realdatastart, len);
+			up_write(&current->mm->mmap_sem);
 			ret = result;
 			goto err;
 		}
@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_b
 		}
 		if (IS_ERR_VALUE(result)) {
 			printk("Unable to read code+data+bss, errno %d\n",(int)-result);
+			down_write(&current->mm->mmap_sem);
 			vm_munmap(textpos, text_len + data_len + extra +
 				MAX_SHARED_LIBS * sizeof(unsigned long));
+			up_write(&current->mm->mmap_sem);
 			ret = result;
 			goto err;
 		}
diff -ruNp linux-3.13.11/fs/bio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/bio.c
--- linux-3.13.11/fs/bio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/bio.c	2014-07-09 12:00:15.000000000 +0200
@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct req
 		/*
 		 * Overflow, abort
 		 */
-		if (end < start)
+		if (end < start || end - start > INT_MAX - nr_pages)
 			return ERR_PTR(-EINVAL);
 
 		nr_pages += end - start;
@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(st
 		/*
 		 * Overflow, abort
 		 */
-		if (end < start)
+		if (end < start || end - start > INT_MAX - nr_pages)
 			return ERR_PTR(-EINVAL);
 
 		nr_pages += end - start;
@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct b
 	const int read = bio_data_dir(bio) == READ;
 	struct bio_map_data *bmd = bio->bi_private;
 	int i;
-	char *p = bmd->sgvecs[0].iov_base;
+	char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
diff -ruNp linux-3.13.11/fs/block_dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/block_dev.c
--- linux-3.13.11/fs/block_dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/block_dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,7 @@
 #include <linux/log2.h>
 #include <linux/cleancache.h>
 #include <linux/aio.h>
+#include <linux/vs_device.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -515,6 +516,7 @@ struct block_device *bdget(dev_t dev)
 		bdev->bd_invalidated = 0;
 		inode->i_mode = S_IFBLK;
 		inode->i_rdev = dev;
+		inode->i_mdev = dev;
 		inode->i_bdev = bdev;
 		inode->i_data.a_ops = &def_blk_aops;
 		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
@@ -562,6 +564,11 @@ EXPORT_SYMBOL(bdput);
 static struct block_device *bd_acquire(struct inode *inode)
 {
 	struct block_device *bdev;
+	dev_t mdev;
+
+	if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN))
+		return NULL;
+	inode->i_mdev = mdev;
 
 	spin_lock(&bdev_lock);
 	bdev = inode->i_bdev;
@@ -572,7 +579,7 @@ static struct block_device *bd_acquire(s
 	}
 	spin_unlock(&bdev_lock);
 
-	bdev = bdget(inode->i_rdev);
+	bdev = bdget(mdev);
 	if (bdev) {
 		spin_lock(&bdev_lock);
 		if (!inode->i_bdev) {
@@ -637,7 +644,7 @@ static bool bd_may_claim(struct block_de
 	else if (bdev->bd_contains == bdev)
 		return true;  	 /* is a whole device which isn't held */
 
-	else if (whole->bd_holder == bd_may_claim)
+	else if (whole->bd_holder == (void *)bd_may_claim)
 		return true; 	 /* is a partition of a device that is being partitioned */
 	else if (whole->bd_holder != NULL)
 		return false;	 /* is a partition of a held device */
diff -ruNp linux-3.13.11/fs/btrfs/ctree.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ctree.c
--- linux-3.13.11/fs/btrfs/ctree.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ctree.c	2014-07-09 12:00:15.000000000 +0200
@@ -1217,9 +1217,12 @@ static noinline int __btrfs_cow_block(st
 		free_extent_buffer(buf);
 		add_root_to_dirty_list(root);
 	} else {
-		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
-			parent_start = parent->start;
-		else
+		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+			if (parent)
+				parent_start = parent->start;
+			else
+				parent_start = 0;
+		} else
 			parent_start = 0;
 
 		WARN_ON(trans->transid != btrfs_header_generation(parent));
diff -ruNp linux-3.13.11/fs/btrfs/ctree.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ctree.h
--- linux-3.13.11/fs/btrfs/ctree.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ctree.h	2014-07-09 12:00:15.000000000 +0200
@@ -718,11 +718,14 @@ struct btrfs_inode_item {
 	/* modification sequence number for NFS */
 	__le64 sequence;
 
+	__le16 tag;
 	/*
 	 * a little future expansion, for more than this we can
 	 * just grow the inode item and version it
 	 */
-	__le64 reserved[4];
+	__le16 reserved16;
+	__le32 reserved32;
+	__le64 reserved[3];
 	struct btrfs_timespec atime;
 	struct btrfs_timespec ctime;
 	struct btrfs_timespec mtime;
@@ -2000,6 +2003,8 @@ struct btrfs_ioctl_defrag_range_args {
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 
+#define BTRFS_MOUNT_TAGGED		(1 << 24)
+
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
 #define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
@@ -2269,6 +2274,7 @@ BTRFS_SETGET_FUNCS(inode_block_group, st
 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16);
 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
@@ -2341,6 +2347,10 @@ BTRFS_SETGET_FUNCS(extent_flags, struct
 
 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32);
 
+#define BTRFS_INODE_IXUNLINK		(1 << 24)
+#define BTRFS_INODE_BARRIER		(1 << 25)
+#define BTRFS_INODE_COW			(1 << 26)
+
 
 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8);
 
@@ -3722,6 +3732,7 @@ long btrfs_ioctl(struct file *file, unsi
 void btrfs_update_iflags(struct inode *inode);
 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
 int btrfs_is_empty_uuid(u8 *uuid);
+int btrfs_sync_flags(struct inode *inode, int, int);
 int btrfs_defrag_file(struct inode *inode, struct file *file,
 		      struct btrfs_ioctl_defrag_range_args *range,
 		      u64 newer_than, unsigned long max_pages);
diff -ruNp linux-3.13.11/fs/btrfs/delayed-inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/delayed-inode.c
--- linux-3.13.11/fs/btrfs/delayed-inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/delayed-inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_
 
 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
-	int seq = atomic_inc_return(&delayed_root->items_seq);
+	int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
 	if ((atomic_dec_return(&delayed_root->items) <
 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 	    waitqueue_active(&delayed_root->wait))
@@ -1379,7 +1379,7 @@ void btrfs_assert_delayed_root_empty(str
 static int refs_newer(struct btrfs_delayed_root *delayed_root,
 		      int seq, int count)
 {
-	int val = atomic_read(&delayed_root->items_seq);
+	int val = atomic_read_unchecked(&delayed_root->items_seq);
 
 	if (val < seq || val >= seq + count)
 		return 1;
@@ -1396,7 +1396,7 @@ void btrfs_balance_delayed_items(struct
 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;
 
-	seq = atomic_read(&delayed_root->items_seq);
+	seq = atomic_read_unchecked(&delayed_root->items_seq);
 
 	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
 		int ret;
diff -ruNp linux-3.13.11/fs/btrfs/delayed-inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/delayed-inode.h
--- linux-3.13.11/fs/btrfs/delayed-inode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/delayed-inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
 	 */
 	struct list_head prepare_list;
 	atomic_t items;		/* for delayed items */
-	atomic_t items_seq;	/* for delayed items */
+	atomic_unchecked_t items_seq;	/* for delayed items */
 	int nodes;		/* for delayed nodes */
 	wait_queue_head_t wait;
 };
@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_ro
 				struct btrfs_delayed_root *delayed_root)
 {
 	atomic_set(&delayed_root->items, 0);
-	atomic_set(&delayed_root->items_seq, 0);
+	atomic_set_unchecked(&delayed_root->items_seq, 0);
 	delayed_root->nodes = 0;
 	spin_lock_init(&delayed_root->lock);
 	init_waitqueue_head(&delayed_root->wait);
diff -ruNp linux-3.13.11/fs/btrfs/disk-io.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/disk-io.c
--- linux-3.13.11/fs/btrfs/disk-io.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/disk-io.c	2014-07-09 12:00:15.000000000 +0200
@@ -2388,6 +2388,9 @@ int open_ctree(struct super_block *sb,
 		goto fail_alloc;
 	}
 
+	if (btrfs_test_opt(tree_root, TAGGED))
+		sb->s_flags |= MS_TAGGED;
+
 	features = btrfs_super_incompat_flags(disk_super) &
 		~BTRFS_FEATURE_INCOMPAT_SUPP;
 	if (features) {
diff -ruNp linux-3.13.11/fs/btrfs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/inode.c
--- linux-3.13.11/fs/btrfs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,6 +43,7 @@
 #include <linux/btrfs.h>
 #include <linux/blkdev.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/vs_tag.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -3317,6 +3318,9 @@ static void btrfs_read_locked_inode(stru
 	struct btrfs_key location;
 	int maybe_acls;
 	u32 rdev;
+	kuid_t kuid;
+	kgid_t kgid;
+	ktag_t ktag;
 	int ret;
 	bool filled = false;
 
@@ -3344,8 +3348,14 @@ static void btrfs_read_locked_inode(stru
 				    struct btrfs_inode_item);
 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
 	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
-	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
-	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
+
+	kuid = make_kuid(&init_user_ns, btrfs_inode_uid(leaf, inode_item));
+	kgid = make_kgid(&init_user_ns, btrfs_inode_gid(leaf, inode_item));
+	ktag = make_ktag(&init_user_ns, btrfs_inode_tag(leaf, inode_item));
+
+	inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid);
+	inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid);
+	inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, ktag);
 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
 
 	tspec = btrfs_inode_atime(inode_item);
@@ -3436,11 +3446,18 @@ static void fill_inode_item(struct btrfs
 			    struct inode *inode)
 {
 	struct btrfs_map_token token;
+	uid_t uid = from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag));
+	gid_t gid = from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag));
 
 	btrfs_init_map_token(&token);
 
-	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
-	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+	btrfs_set_token_inode_uid(leaf, item, uid, &token);
+	btrfs_set_token_inode_gid(leaf, item, gid, &token);
+#ifdef CONFIG_TAGGING_INTERN
+	btrfs_set_token_inode_tag(leaf, item, i_tag_read(inode), &token);
+#endif
 	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
 				   &token);
 	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
@@ -8652,12 +8669,15 @@ static const struct inode_operations btr
 	.listxattr	= btrfs_listxattr,
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
+	.sync_flags	= btrfs_sync_flags,
 	.get_acl	= btrfs_get_acl,
 	.update_time	= btrfs_update_time,
 };
+
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
 	.lookup		= btrfs_lookup,
 	.permission	= btrfs_permission,
+	.sync_flags	= btrfs_sync_flags,
 	.get_acl	= btrfs_get_acl,
 	.update_time	= btrfs_update_time,
 };
@@ -8727,6 +8747,7 @@ static const struct inode_operations btr
 	.removexattr	= btrfs_removexattr,
 	.permission	= btrfs_permission,
 	.fiemap		= btrfs_fiemap,
+	.sync_flags	= btrfs_sync_flags,
 	.get_acl	= btrfs_get_acl,
 	.update_time	= btrfs_update_time,
 };
diff -ruNp linux-3.13.11/fs/btrfs/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ioctl.c
--- linux-3.13.11/fs/btrfs/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -78,10 +78,13 @@ static unsigned int btrfs_flags_to_ioctl
 {
 	unsigned int iflags = 0;
 
-	if (flags & BTRFS_INODE_SYNC)
-		iflags |= FS_SYNC_FL;
 	if (flags & BTRFS_INODE_IMMUTABLE)
 		iflags |= FS_IMMUTABLE_FL;
+	if (flags & BTRFS_INODE_IXUNLINK)
+		iflags |= FS_IXUNLINK_FL;
+
+	if (flags & BTRFS_INODE_SYNC)
+		iflags |= FS_SYNC_FL;
 	if (flags & BTRFS_INODE_APPEND)
 		iflags |= FS_APPEND_FL;
 	if (flags & BTRFS_INODE_NODUMP)
@@ -98,28 +101,78 @@ static unsigned int btrfs_flags_to_ioctl
 	else if (flags & BTRFS_INODE_NOCOMPRESS)
 		iflags |= FS_NOCOMP_FL;
 
+	if (flags & BTRFS_INODE_BARRIER)
+		iflags |= FS_BARRIER_FL;
+	if (flags & BTRFS_INODE_COW)
+		iflags |= FS_COW_FL;
 	return iflags;
 }
 
 /*
- * Update inode->i_flags based on the btrfs internal flags.
+ * Update inode->i_(v)flags based on the btrfs internal flags.
  */
 void btrfs_update_iflags(struct inode *inode)
 {
 	struct btrfs_inode *ip = BTRFS_I(inode);
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
-	if (ip->flags & BTRFS_INODE_SYNC)
-		inode->i_flags |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_IMMUTABLE)
 		inode->i_flags |= S_IMMUTABLE;
+	if (ip->flags & BTRFS_INODE_IXUNLINK)
+		inode->i_flags |= S_IXUNLINK;
+
+	if (ip->flags & BTRFS_INODE_SYNC)
+		inode->i_flags |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_APPEND)
 		inode->i_flags |= S_APPEND;
 	if (ip->flags & BTRFS_INODE_NOATIME)
 		inode->i_flags |= S_NOATIME;
 	if (ip->flags & BTRFS_INODE_DIRSYNC)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (ip->flags & BTRFS_INODE_BARRIER)
+		inode->i_vflags |= V_BARRIER;
+	if (ip->flags & BTRFS_INODE_COW)
+		inode->i_vflags |= V_COW;
+}
+
+/*
+ * Update btrfs internal flags from inode->i_(v)flags.
+ */
+void btrfs_update_flags(struct inode *inode)
+{
+	struct btrfs_inode *ip = BTRFS_I(inode);
+
+	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND |
+			BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK |
+			BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC |
+			BTRFS_INODE_BARRIER | BTRFS_INODE_COW);
+
+	if (flags & S_IMMUTABLE)
+		ip->flags |= BTRFS_INODE_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->flags |= BTRFS_INODE_IXUNLINK;
+
+	if (flags & S_SYNC)
+		ip->flags |= BTRFS_INODE_SYNC;
+	if (flags & S_APPEND)
+		ip->flags |= BTRFS_INODE_APPEND;
+	if (flags & S_NOATIME)
+		ip->flags |= BTRFS_INODE_NOATIME;
+	if (flags & S_DIRSYNC)
+		ip->flags |= BTRFS_INODE_DIRSYNC;
+
+	if (vflags & V_BARRIER)
+		ip->flags |= BTRFS_INODE_BARRIER;
+	if (vflags & V_COW)
+		ip->flags |= BTRFS_INODE_COW;
 }
 
 /*
@@ -135,6 +188,7 @@ void btrfs_inherit_iflags(struct inode *
 		return;
 
 	flags = BTRFS_I(dir)->flags;
+	flags &= ~BTRFS_INODE_BARRIER;
 
 	if (flags & BTRFS_INODE_NOCOMPRESS) {
 		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
@@ -153,6 +207,30 @@ void btrfs_inherit_iflags(struct inode *
 	btrfs_update_iflags(inode);
 }
 
+int btrfs_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct btrfs_inode *ip = BTRFS_I(inode);
+	struct btrfs_root *root = ip->root;
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	trans = btrfs_join_transaction(root);
+	BUG_ON(!trans);
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	btrfs_update_flags(inode);
+
+	ret = btrfs_update_inode(trans, root, inode);
+	BUG_ON(ret);
+
+	btrfs_update_iflags(inode);
+	inode->i_ctime = CURRENT_TIME;
+	btrfs_end_transaction(trans, root);
+
+	return 0;
+}
+
 static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
 {
 	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
@@ -215,21 +293,27 @@ static int btrfs_ioctl_setflags(struct f
 
 	flags = btrfs_mask_flags(inode->i_mode, flags);
 	oldflags = btrfs_flags_to_ioctl(ip->flags);
-	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
+	if ((flags ^ oldflags) & (FS_APPEND_FL |
+		FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) {
 		if (!capable(CAP_LINUX_IMMUTABLE)) {
 			ret = -EPERM;
 			goto out_unlock;
 		}
 	}
 
-	if (flags & FS_SYNC_FL)
-		ip->flags |= BTRFS_INODE_SYNC;
-	else
-		ip->flags &= ~BTRFS_INODE_SYNC;
 	if (flags & FS_IMMUTABLE_FL)
 		ip->flags |= BTRFS_INODE_IMMUTABLE;
 	else
 		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
+	if (flags & FS_IXUNLINK_FL)
+		ip->flags |= BTRFS_INODE_IXUNLINK;
+	else
+		ip->flags &= ~BTRFS_INODE_IXUNLINK;
+
+	if (flags & FS_SYNC_FL)
+		ip->flags |= BTRFS_INODE_SYNC;
+	else
+		ip->flags &= ~BTRFS_INODE_SYNC;
 	if (flags & FS_APPEND_FL)
 		ip->flags |= BTRFS_INODE_APPEND;
 	else
@@ -3457,9 +3541,12 @@ static long btrfs_ioctl_space_info(struc
 	for (i = 0; i < num_types; i++) {
 		struct btrfs_space_info *tmp;
 
+		/* Don't copy in more than we allocated */
 		if (!slot_count)
 			break;
 
+		slot_count--;
+
 		info = NULL;
 		rcu_read_lock();
 		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -3481,10 +3568,7 @@ static long btrfs_ioctl_space_info(struc
 				memcpy(dest, &space, sizeof(space));
 				dest++;
 				space_args.total_spaces++;
-				slot_count--;
 			}
-			if (!slot_count)
-				break;
 		}
 		up_read(&info->groups_sem);
 	}
diff -ruNp linux-3.13.11/fs/btrfs/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/super.c
--- linux-3.13.11/fs/btrfs/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/btrfs/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct bt
 		           function, line, errstr);
 		return;
 	}
-	ACCESS_ONCE(trans->transaction->aborted) = errno;
+	ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
 	/* Wake up anybody who may be waiting on this transaction */
 	wake_up(&root->fs_info->transaction_wait);
 	wake_up(&root->fs_info->transaction_blocked_wait);
@@ -323,7 +323,7 @@ enum {
 	Opt_check_integrity, Opt_check_integrity_including_extent_data,
 	Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree,
 	Opt_commit_interval,
-	Opt_err,
+	Opt_tag, Opt_notag, Opt_tagid, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -365,6 +365,9 @@ static match_table_t tokens = {
 	{Opt_rescan_uuid_tree, "rescan_uuid_tree"},
 	{Opt_fatal_errors, "fatal_errors=%s"},
 	{Opt_commit_interval, "commit=%d"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -674,6 +677,22 @@ int btrfs_parse_options(struct btrfs_roo
 				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 			}
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			printk(KERN_INFO "btrfs: use tagging\n");
+			btrfs_set_opt(info->mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			printk(KERN_INFO "btrfs: disabled tagging\n");
+			btrfs_clear_opt(info->mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			btrfs_set_opt(info->mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_err:
 			printk(KERN_INFO "btrfs: unrecognized mount option "
 			       "'%s'\n", p);
@@ -1320,6 +1339,12 @@ static int btrfs_remount(struct super_bl
 	btrfs_resize_thread_pool(fs_info,
 		fs_info->thread_pool_size, old_thread_pool_size);
 
+	if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+		printk("btrfs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		return -EINVAL;
+	}
+
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		goto out;
 
diff -ruNp linux-3.13.11/fs/buffer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/buffer.c
--- linux-3.13.11/fs/buffer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/buffer.c	2014-07-09 12:00:15.000000000 +0200
@@ -3428,7 +3428,7 @@ void __init buffer_init(void)
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-				SLAB_MEM_SPREAD),
+				SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
 				NULL);
 
 	/*
diff -ruNp linux-3.13.11/fs/cachefiles/bind.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/bind.c
--- linux-3.13.11/fs/cachefiles/bind.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/bind.c	2014-07-09 12:00:15.000000000 +0200
@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
 	       args);
 
 	/* start by checking things over */
-	ASSERT(cache->fstop_percent >= 0 &&
-	       cache->fstop_percent < cache->fcull_percent &&
+	ASSERT(cache->fstop_percent < cache->fcull_percent &&
 	       cache->fcull_percent < cache->frun_percent &&
 	       cache->frun_percent  < 100);
 
-	ASSERT(cache->bstop_percent >= 0 &&
-	       cache->bstop_percent < cache->bcull_percent &&
+	ASSERT(cache->bstop_percent < cache->bcull_percent &&
 	       cache->bcull_percent < cache->brun_percent &&
 	       cache->brun_percent  < 100);
 
diff -ruNp linux-3.13.11/fs/cachefiles/daemon.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/daemon.c
--- linux-3.13.11/fs/cachefiles/daemon.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/daemon.c	2014-07-09 12:00:15.000000000 +0200
@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
 	if (n > buflen)
 		return -EMSGSIZE;
 
-	if (copy_to_user(_buffer, buffer, n) != 0)
+	if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
 		return -EFAULT;
 
 	return n;
@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
 	if (test_bit(CACHEFILES_DEAD, &cache->flags))
 		return -EIO;
 
-	if (datalen < 0 || datalen > PAGE_SIZE - 1)
+	if (datalen > PAGE_SIZE - 1)
 		return -EOPNOTSUPP;
 
 	/* drag the command string into the kernel so we can parse it */
@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
 	if (args[0] != '%' || args[1] != '\0')
 		return -EINVAL;
 
-	if (fstop < 0 || fstop >= cache->fcull_percent)
+	if (fstop >= cache->fcull_percent)
 		return cachefiles_daemon_range_error(cache, args);
 
 	cache->fstop_percent = fstop;
@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
 	if (args[0] != '%' || args[1] != '\0')
 		return -EINVAL;
 
-	if (bstop < 0 || bstop >= cache->bcull_percent)
+	if (bstop >= cache->bcull_percent)
 		return cachefiles_daemon_range_error(cache, args);
 
 	cache->bstop_percent = bstop;
diff -ruNp linux-3.13.11/fs/cachefiles/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/internal.h
--- linux-3.13.11/fs/cachefiles/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -59,7 +59,7 @@ struct cachefiles_cache {
 	wait_queue_head_t		daemon_pollwq;	/* poll waitqueue for daemon */
 	struct rb_root			active_nodes;	/* active nodes (can't be culled) */
 	rwlock_t			active_lock;	/* lock for active_nodes */
-	atomic_t			gravecounter;	/* graveyard uniquifier */
+	atomic_unchecked_t		gravecounter;	/* graveyard uniquifier */
 	unsigned			frun_percent;	/* when to stop culling (% files) */
 	unsigned			fcull_percent;	/* when to start culling (% files) */
 	unsigned			fstop_percent;	/* when to stop allocating (% files) */
@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struc
  * proc.c
  */
 #ifdef CONFIG_CACHEFILES_HISTOGRAM
-extern atomic_t cachefiles_lookup_histogram[HZ];
-extern atomic_t cachefiles_mkdir_histogram[HZ];
-extern atomic_t cachefiles_create_histogram[HZ];
+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
 
 extern int __init cachefiles_proc_init(void);
 extern void cachefiles_proc_cleanup(void);
 static inline
-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
 {
 	unsigned long jif = jiffies - start_jif;
 	if (jif >= HZ)
 		jif = HZ - 1;
-	atomic_inc(&histogram[jif]);
+	atomic_inc_unchecked(&histogram[jif]);
 }
 
 #else
diff -ruNp linux-3.13.11/fs/cachefiles/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/namei.c
--- linux-3.13.11/fs/cachefiles/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -317,7 +317,7 @@ try_again:
 	/* first step is to make up a grave dentry in the graveyard */
 	sprintf(nbuffer, "%08x%08x",
 		(uint32_t) get_seconds(),
-		(uint32_t) atomic_inc_return(&cache->gravecounter));
+		(uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
 
 	/* do the multiway lock magic */
 	trap = lock_rename(cache->graveyard, dir);
diff -ruNp linux-3.13.11/fs/cachefiles/proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/proc.c
--- linux-3.13.11/fs/cachefiles/proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,9 +14,9 @@
 #include <linux/seq_file.h>
 #include "internal.h"
 
-atomic_t cachefiles_lookup_histogram[HZ];
-atomic_t cachefiles_mkdir_histogram[HZ];
-atomic_t cachefiles_create_histogram[HZ];
+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
+atomic_unchecked_t cachefiles_create_histogram[HZ];
 
 /*
  * display the latency histogram
@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
 		return 0;
 	default:
 		index = (unsigned long) v - 3;
-		x = atomic_read(&cachefiles_lookup_histogram[index]);
-		y = atomic_read(&cachefiles_mkdir_histogram[index]);
-		z = atomic_read(&cachefiles_create_histogram[index]);
+		x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
+		y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
+		z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
 		if (x == 0 && y == 0 && z == 0)
 			return 0;
 
diff -ruNp linux-3.13.11/fs/cachefiles/rdwr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/rdwr.c
--- linux-3.13.11/fs/cachefiles/rdwr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cachefiles/rdwr.c	2014-07-09 12:00:15.000000000 +0200
@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache
 			old_fs = get_fs();
 			set_fs(KERNEL_DS);
 			ret = file->f_op->write(
-				file, (const void __user *) data, len, &pos);
+				file, (const void __force_user *) data, len, &pos);
 			set_fs(old_fs);
 			kunmap(page);
 			file_end_write(file);
diff -ruNp linux-3.13.11/fs/ceph/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ceph/dir.c
--- linux-3.13.11/fs/ceph/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ceph/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -240,7 +240,7 @@ static int ceph_readdir(struct file *fil
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_mds_client *mdsc = fsc->mdsc;
 	unsigned frag = fpos_frag(ctx->pos);
-	int off = fpos_off(ctx->pos);
+	unsigned int off = fpos_off(ctx->pos);
 	int err;
 	u32 ftype;
 	struct ceph_mds_reply_info_parsed *rinfo;
diff -ruNp linux-3.13.11/fs/ceph/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ceph/super.c
--- linux-3.13.11/fs/ceph/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ceph/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -870,7 +870,7 @@ static int ceph_compare_super(struct sup
 /*
  * construct our own bdi so we can control readahead, etc.
  */
-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
 
 static int ceph_register_bdi(struct super_block *sb,
 			     struct ceph_fs_client *fsc)
@@ -887,7 +887,7 @@ static int ceph_register_bdi(struct supe
 			default_backing_dev_info.ra_pages;
 
 	err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
-			   atomic_long_inc_return(&bdi_seq));
+			   atomic_long_inc_return_unchecked(&bdi_seq));
 	if (!err)
 		sb->s_bdi = &fsc->backing_dev_info;
 	return err;
diff -ruNp linux-3.13.11/fs/char_dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/char_dev.c
--- linux-3.13.11/fs/char_dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/char_dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,8 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/tty.h>
+#include <linux/vs_context.h>
+#include <linux/vs_device.h>
 
 #include "internal.h"
 
@@ -372,14 +374,21 @@ static int chrdev_open(struct inode *ino
 	struct cdev *p;
 	struct cdev *new = NULL;
 	int ret = 0;
+	dev_t mdev;
+
+	if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN))
+		return -EPERM;
+	inode->i_mdev = mdev;
 
 	spin_lock(&cdev_lock);
 	p = inode->i_cdev;
 	if (!p) {
 		struct kobject *kobj;
 		int idx;
+
 		spin_unlock(&cdev_lock);
-		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
+
+		kobj = kobj_lookup(cdev_map, mdev, &idx);
 		if (!kobj)
 			return -ENXIO;
 		new = container_of(kobj, struct cdev, kobj);
diff -ruNp linux-3.13.11/fs/cifs/cifs_debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifs_debug.c
--- linux-3.13.11/fs/cifs/cifs_debug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifs_debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(str
 
 	if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
 #ifdef CONFIG_CIFS_STATS2
-		atomic_set(&totBufAllocCount, 0);
-		atomic_set(&totSmBufAllocCount, 0);
+		atomic_set_unchecked(&totBufAllocCount, 0);
+		atomic_set_unchecked(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
 		spin_lock(&cifs_tcp_ses_lock);
 		list_for_each(tmp1, &cifs_tcp_ses_list) {
@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(str
 					tcon = list_entry(tmp3,
 							  struct cifs_tcon,
 							  tcon_list);
-					atomic_set(&tcon->num_smbs_sent, 0);
+					atomic_set_unchecked(&tcon->num_smbs_sent, 0);
 					if (server->ops->clear_stats)
 						server->ops->clear_stats(tcon);
 				}
@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct s
 			smBufAllocCount.counter, cifs_min_small);
 #ifdef CONFIG_CIFS_STATS2
 	seq_printf(m, "Total Large %d Small %d Allocations\n",
-				atomic_read(&totBufAllocCount),
-				atomic_read(&totSmBufAllocCount));
+				atomic_read_unchecked(&totBufAllocCount),
+				atomic_read_unchecked(&totSmBufAllocCount));
 #endif /* CONFIG_CIFS_STATS2 */
 
 	seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct s
 				if (tcon->need_reconnect)
 					seq_puts(m, "\tDISCONNECTED ");
 				seq_printf(m, "\nSMBs: %d",
-					   atomic_read(&tcon->num_smbs_sent));
+					   atomic_read_unchecked(&tcon->num_smbs_sent));
 				if (server->ops->print_stats)
 					server->ops->print_stats(m, tcon);
 			}
diff -ruNp linux-3.13.11/fs/cifs/cifsfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifsfs.c
--- linux-3.13.11/fs/cifs/cifsfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifsfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
 */
 	cifs_req_cachep = kmem_cache_create("cifs_request",
 					    CIFSMaxBufSize + max_hdr_size, 0,
-					    SLAB_HWCACHE_ALIGN, NULL);
+					    SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
 	if (cifs_req_cachep == NULL)
 		return -ENOMEM;
 
@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
 	efficient to alloc 1 per page off the slab compared to 17K (5page)
 	alloc of large cifs buffers even when page debugging is on */
 	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
 			NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
@@ -1168,8 +1168,8 @@ init_cifs(void)
 	atomic_set(&bufAllocCount, 0);
 	atomic_set(&smBufAllocCount, 0);
 #ifdef CONFIG_CIFS_STATS2
-	atomic_set(&totBufAllocCount, 0);
-	atomic_set(&totSmBufAllocCount, 0);
+	atomic_set_unchecked(&totBufAllocCount, 0);
+	atomic_set_unchecked(&totSmBufAllocCount, 0);
 #endif /* CONFIG_CIFS_STATS2 */
 
 	atomic_set(&midCount, 0);
diff -ruNp linux-3.13.11/fs/cifs/cifsglob.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifsglob.h
--- linux-3.13.11/fs/cifs/cifsglob.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/cifsglob.h	2014-07-09 12:00:15.000000000 +0200
@@ -797,35 +797,35 @@ struct cifs_tcon {
 	__u16 Flags;		/* optional support bits */
 	enum statusEnum tidStatus;
 #ifdef CONFIG_CIFS_STATS
-	atomic_t num_smbs_sent;
+	atomic_unchecked_t num_smbs_sent;
 	union {
 		struct {
-			atomic_t num_writes;
-			atomic_t num_reads;
-			atomic_t num_flushes;
-			atomic_t num_oplock_brks;
-			atomic_t num_opens;
-			atomic_t num_closes;
-			atomic_t num_deletes;
-			atomic_t num_mkdirs;
-			atomic_t num_posixopens;
-			atomic_t num_posixmkdirs;
-			atomic_t num_rmdirs;
-			atomic_t num_renames;
-			atomic_t num_t2renames;
-			atomic_t num_ffirst;
-			atomic_t num_fnext;
-			atomic_t num_fclose;
-			atomic_t num_hardlinks;
-			atomic_t num_symlinks;
-			atomic_t num_locks;
-			atomic_t num_acl_get;
-			atomic_t num_acl_set;
+			atomic_unchecked_t num_writes;
+			atomic_unchecked_t num_reads;
+			atomic_unchecked_t num_flushes;
+			atomic_unchecked_t num_oplock_brks;
+			atomic_unchecked_t num_opens;
+			atomic_unchecked_t num_closes;
+			atomic_unchecked_t num_deletes;
+			atomic_unchecked_t num_mkdirs;
+			atomic_unchecked_t num_posixopens;
+			atomic_unchecked_t num_posixmkdirs;
+			atomic_unchecked_t num_rmdirs;
+			atomic_unchecked_t num_renames;
+			atomic_unchecked_t num_t2renames;
+			atomic_unchecked_t num_ffirst;
+			atomic_unchecked_t num_fnext;
+			atomic_unchecked_t num_fclose;
+			atomic_unchecked_t num_hardlinks;
+			atomic_unchecked_t num_symlinks;
+			atomic_unchecked_t num_locks;
+			atomic_unchecked_t num_acl_get;
+			atomic_unchecked_t num_acl_set;
 		} cifs_stats;
 #ifdef CONFIG_CIFS_SMB2
 		struct {
-			atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
-			atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+			atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+			atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
 		} smb2_stats;
 #endif /* CONFIG_CIFS_SMB2 */
 	} stats;
@@ -1155,7 +1155,7 @@ convert_delimiter(char *path, char delim
 }
 
 #ifdef CONFIG_CIFS_STATS
-#define cifs_stats_inc atomic_inc
+#define cifs_stats_inc atomic_inc_unchecked
 
 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
 					    unsigned int bytes)
@@ -1521,8 +1521,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect
 /* Various Debug counters */
 GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
 #ifdef CONFIG_CIFS_STATS2
-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
 #endif
 GLOBAL_EXTERN atomic_t smBufAllocCount;
 GLOBAL_EXTERN atomic_t midCount;
diff -ruNp linux-3.13.11/fs/cifs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/file.c
--- linux-3.13.11/fs/cifs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct addres
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
-		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
 			range_whole = true;
+			index = 0;
+			end = ULONG_MAX;
+		} else {
+			index = wbc->range_start >> PAGE_CACHE_SHIFT;
+			end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		}
 		scanned = true;
 	}
 retry:
diff -ruNp linux-3.13.11/fs/cifs/misc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/misc.c
--- linux-3.13.11/fs/cifs/misc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/misc.c	2014-07-09 12:00:15.000000000 +0200
@@ -170,7 +170,7 @@ cifs_buf_get(void)
 		memset(ret_buf, 0, buf_size + 3);
 		atomic_inc(&bufAllocCount);
 #ifdef CONFIG_CIFS_STATS2
-		atomic_inc(&totBufAllocCount);
+		atomic_inc_unchecked(&totBufAllocCount);
 #endif /* CONFIG_CIFS_STATS2 */
 	}
 
@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
 	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
 		atomic_inc(&smBufAllocCount);
 #ifdef CONFIG_CIFS_STATS2
-		atomic_inc(&totSmBufAllocCount);
+		atomic_inc_unchecked(&totSmBufAllocCount);
 #endif /* CONFIG_CIFS_STATS2 */
 
 	}
diff -ruNp linux-3.13.11/fs/cifs/smb1ops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb1ops.c
--- linux-3.13.11/fs/cifs/smb1ops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb1ops.c	2014-07-09 12:00:15.000000000 +0200
@@ -609,27 +609,27 @@ static void
 cifs_clear_stats(struct cifs_tcon *tcon)
 {
 #ifdef CONFIG_CIFS_STATS
-	atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
-	atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
+	atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
 #endif
 }
 
@@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, str
 {
 #ifdef CONFIG_CIFS_STATS
 	seq_printf(m, " Oplocks breaks: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
 	seq_printf(m, "\nReads:  %d Bytes: %llu",
-		   atomic_read(&tcon->stats.cifs_stats.num_reads),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
 		   (long long)(tcon->bytes_read));
 	seq_printf(m, "\nWrites: %d Bytes: %llu",
-		   atomic_read(&tcon->stats.cifs_stats.num_writes),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
 		   (long long)(tcon->bytes_written));
 	seq_printf(m, "\nFlushes: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_flushes));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
 	seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_locks),
-		   atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
-		   atomic_read(&tcon->stats.cifs_stats.num_symlinks));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
 	seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_opens),
-		   atomic_read(&tcon->stats.cifs_stats.num_closes),
-		   atomic_read(&tcon->stats.cifs_stats.num_deletes));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
 	seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_posixopens),
-		   atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
 	seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
-		   atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
 	seq_printf(m, "\nRenames: %d T2 Renames %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_renames),
-		   atomic_read(&tcon->stats.cifs_stats.num_t2renames));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
 	seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
-		   atomic_read(&tcon->stats.cifs_stats.num_ffirst),
-		   atomic_read(&tcon->stats.cifs_stats.num_fnext),
-		   atomic_read(&tcon->stats.cifs_stats.num_fclose));
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
+		   atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
 #endif
 }
 
diff -ruNp linux-3.13.11/fs/cifs/smb2ops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb2ops.c
--- linux-3.13.11/fs/cifs/smb2ops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb2ops.c	2014-07-09 12:00:15.000000000 +0200
@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
 #ifdef CONFIG_CIFS_STATS
 	int i;
 	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
-		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
-		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+		atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+		atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
 	}
 #endif
 }
@@ -405,65 +405,65 @@ static void
 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
 {
 #ifdef CONFIG_CIFS_STATS
-	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
-	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+	atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+	atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
 	seq_printf(m, "\nNegotiates: %d sent %d failed",
-		   atomic_read(&sent[SMB2_NEGOTIATE_HE]),
-		   atomic_read(&failed[SMB2_NEGOTIATE_HE]));
+		   atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
+		   atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
 	seq_printf(m, "\nSessionSetups: %d sent %d failed",
-		   atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
-		   atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
+		   atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
+		   atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
 	seq_printf(m, "\nLogoffs: %d sent %d failed",
-		   atomic_read(&sent[SMB2_LOGOFF_HE]),
-		   atomic_read(&failed[SMB2_LOGOFF_HE]));
+		   atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
+		   atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
 	seq_printf(m, "\nTreeConnects: %d sent %d failed",
-		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
-		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
+		   atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
+		   atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
 	seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
-		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
-		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
+		   atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
+		   atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
 	seq_printf(m, "\nCreates: %d sent %d failed",
-		   atomic_read(&sent[SMB2_CREATE_HE]),
-		   atomic_read(&failed[SMB2_CREATE_HE]));
+		   atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
+		   atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
 	seq_printf(m, "\nCloses: %d sent %d failed",
-		   atomic_read(&sent[SMB2_CLOSE_HE]),
-		   atomic_read(&failed[SMB2_CLOSE_HE]));
+		   atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
+		   atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
 	seq_printf(m, "\nFlushes: %d sent %d failed",
-		   atomic_read(&sent[SMB2_FLUSH_HE]),
-		   atomic_read(&failed[SMB2_FLUSH_HE]));
+		   atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
+		   atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
 	seq_printf(m, "\nReads: %d sent %d failed",
-		   atomic_read(&sent[SMB2_READ_HE]),
-		   atomic_read(&failed[SMB2_READ_HE]));
+		   atomic_read_unchecked(&sent[SMB2_READ_HE]),
+		   atomic_read_unchecked(&failed[SMB2_READ_HE]));
 	seq_printf(m, "\nWrites: %d sent %d failed",
-		   atomic_read(&sent[SMB2_WRITE_HE]),
-		   atomic_read(&failed[SMB2_WRITE_HE]));
+		   atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
+		   atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
 	seq_printf(m, "\nLocks: %d sent %d failed",
-		   atomic_read(&sent[SMB2_LOCK_HE]),
-		   atomic_read(&failed[SMB2_LOCK_HE]));
+		   atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
+		   atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
 	seq_printf(m, "\nIOCTLs: %d sent %d failed",
-		   atomic_read(&sent[SMB2_IOCTL_HE]),
-		   atomic_read(&failed[SMB2_IOCTL_HE]));
+		   atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
+		   atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
 	seq_printf(m, "\nCancels: %d sent %d failed",
-		   atomic_read(&sent[SMB2_CANCEL_HE]),
-		   atomic_read(&failed[SMB2_CANCEL_HE]));
+		   atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
+		   atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
 	seq_printf(m, "\nEchos: %d sent %d failed",
-		   atomic_read(&sent[SMB2_ECHO_HE]),
-		   atomic_read(&failed[SMB2_ECHO_HE]));
+		   atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
+		   atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
 	seq_printf(m, "\nQueryDirectories: %d sent %d failed",
-		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
-		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
+		   atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
+		   atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
 	seq_printf(m, "\nChangeNotifies: %d sent %d failed",
-		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
-		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
+		   atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
+		   atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
 	seq_printf(m, "\nQueryInfos: %d sent %d failed",
-		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
-		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
+		   atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
+		   atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
 	seq_printf(m, "\nSetInfos: %d sent %d failed",
-		   atomic_read(&sent[SMB2_SET_INFO_HE]),
-		   atomic_read(&failed[SMB2_SET_INFO_HE]));
+		   atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
+		   atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
 	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
-		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
-		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
+		   atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
+		   atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
 #endif
 }
 
diff -ruNp linux-3.13.11/fs/cifs/smb2pdu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb2pdu.c
--- linux-3.13.11/fs/cifs/smb2pdu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/cifs/smb2pdu.c	2014-07-09 12:00:15.000000000 +0200
@@ -2093,8 +2093,7 @@ SMB2_query_directory(const unsigned int
 	default:
 		cifs_dbg(VFS, "info level %u isn't supported\n",
 			 srch_inf->info_level);
-		rc = -EINVAL;
-		goto qdir_exit;
+		return -EINVAL;
 	}
 
 	req->FileIndex = cpu_to_le32(index);
diff -ruNp linux-3.13.11/fs/coda/cache.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/coda/cache.c
--- linux-3.13.11/fs/coda/cache.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/coda/cache.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@
 #include "coda_linux.h"
 #include "coda_cache.h"
 
-static atomic_t permission_epoch = ATOMIC_INIT(0);
+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
 
 /* replace or extend an acl cache hit */
 void coda_cache_enter(struct inode *inode, int mask)
@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
 	struct coda_inode_info *cii = ITOC(inode);
 
 	spin_lock(&cii->c_lock);
-	cii->c_cached_epoch = atomic_read(&permission_epoch);
+	cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
 	if (!uid_eq(cii->c_uid, current_fsuid())) {
 		cii->c_uid = current_fsuid();
                 cii->c_cached_perm = mask;
@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
 {
 	struct coda_inode_info *cii = ITOC(inode);
 	spin_lock(&cii->c_lock);
-	cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
+	cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
 	spin_unlock(&cii->c_lock);
 }
 
 /* remove all acl caches */
 void coda_cache_clear_all(struct super_block *sb)
 {
-	atomic_inc(&permission_epoch);
+	atomic_inc_unchecked(&permission_epoch);
 }
 
 
@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
 	spin_lock(&cii->c_lock);
 	hit = (mask & cii->c_cached_perm) == mask &&
 	    uid_eq(cii->c_uid, current_fsuid()) &&
-	    cii->c_cached_epoch == atomic_read(&permission_epoch);
+	    cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
 	spin_unlock(&cii->c_lock);
 
 	return hit;
diff -ruNp linux-3.13.11/fs/compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat.c
--- linux-3.13.11/fs/compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -54,7 +54,7 @@
 #include <asm/ioctls.h>
 #include "internal.h"
 
-int compat_log = 1;
+int compat_log = 0;
 
 int compat_printk(const char *fmt, ...)
 {
@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u3
 
 	set_fs(KERNEL_DS);
 	/* The __user pointer cast is valid because of the set_fs() */
-	ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
+	ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
 	set_fs(oldfs);
 	/* truncating is ok because it's a user address */
 	if (!ret)
@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int
 		goto out;
 
 	ret = -EINVAL;
-	if (nr_segs > UIO_MAXIOV || nr_segs < 0)
+	if (nr_segs > UIO_MAXIOV)
 		goto out;
 	if (nr_segs > fast_segs) {
 		ret = -ENOMEM;
@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
 struct compat_readdir_callback {
 	struct dir_context ctx;
 	struct compat_old_linux_dirent __user *dirent;
+	struct file * file;
 	int result;
 };
 
@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf
 		buf->result = -EOVERFLOW;
 		return -EOVERFLOW;
 	}
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	buf->result++;
 	dirent = buf->dirent;
 	if (!access_ok(VERIFY_WRITE, dirent,
@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(u
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (buf.result)
 		error = buf.result;
@@ -901,6 +907,7 @@ struct compat_getdents_callback {
 	struct dir_context ctx;
 	struct compat_linux_dirent __user *current_dir;
 	struct compat_linux_dirent __user *previous;
+	struct file * file;
 	int count;
 	int error;
 };
@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, c
 		buf->error = -EOVERFLOW;
 		return -EOVERFLOW;
 	}
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	dirent = buf->previous;
 	if (dirent) {
 		if (__put_user(offset, &dirent->d_off))
@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsi
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
 	struct dir_context ctx;
 	struct linux_dirent64 __user *current_dir;
 	struct linux_dirent64 __user *previous;
+	struct file * file;
 	int count;
 	int error;
 };
@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
 	buf->error = -EINVAL;	/* only used if we fail.. */
 	if (reclen > buf->count)
 		return -EINVAL;
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	dirent = buf->previous;
 
 	if (dirent) {
@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(un
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
diff -ruNp linux-3.13.11/fs/compat_binfmt_elf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat_binfmt_elf.c
--- linux-3.13.11/fs/compat_binfmt_elf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat_binfmt_elf.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,11 +30,13 @@
 #undef	elf_phdr
 #undef	elf_shdr
 #undef	elf_note
+#undef	elf_dyn
 #undef	elf_addr_t
 #define elfhdr		elf32_hdr
 #define elf_phdr	elf32_phdr
 #define elf_shdr	elf32_shdr
 #define elf_note	elf32_note
+#define elf_dyn		Elf32_Dyn
 #define elf_addr_t	Elf32_Addr
 
 /*
diff -ruNp linux-3.13.11/fs/compat_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat_ioctl.c
--- linux-3.13.11/fs/compat_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/compat_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned
 			return -EFAULT;
                 if (__get_user(udata, &ss32->iomem_base))
 			return -EFAULT;
-                ss.iomem_base = compat_ptr(udata);
+                ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
                 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
 		    __get_user(ss.port_high, &ss32->port_high))
 			return -EFAULT;
@@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned in
 	for (i = 0; i < nmsgs; i++) {
 		if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
 			return -EFAULT;
-		if (get_user(datap, &umsgs[i].buf) ||
-		    put_user(compat_ptr(datap), &tmsgs[i].buf))
+		if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
+		    put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
 			return -EFAULT;
 	}
 	return sys_ioctl(fd, cmd, (unsigned long)tdata);
@@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(stru
 	    copy_in_user(&p->l_len,	&p32->l_len,	sizeof(s64)) ||
 	    copy_in_user(&p->l_sysid,	&p32->l_sysid,	sizeof(s32)) ||
 	    copy_in_user(&p->l_pid,	&p32->l_pid,	sizeof(u32)) ||
-	    copy_in_user(&p->l_pad,	&p32->l_pad,	4*sizeof(u32)))
+	    copy_in_user(p->l_pad,	p32->l_pad,	4*sizeof(u32)))
 		return -EFAULT;
 
 	return ioctl_preallocate(file, p);
@@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigne
 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
 {
 	unsigned int a, b;
-	a = *(unsigned int *)p;
-	b = *(unsigned int *)q;
+	a = *(const unsigned int *)p;
+	b = *(const unsigned int *)q;
 	if (a > b)
 		return 1;
 	if (a < b)
diff -ruNp linux-3.13.11/fs/configfs/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/configfs/dir.c
--- linux-3.13.11/fs/configfs/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/configfs/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file
 	}
 	for (p = q->next; p != &parent_sd->s_children; p = p->next) {
 		struct configfs_dirent *next;
-		const char *name;
+		const unsigned char * name;
+		char d_name[sizeof(next->s_dentry->d_iname)];
 		int len;
 		struct inode *inode = NULL;
 
@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file
 			continue;
 
 		name = configfs_get_name(next);
-		len = strlen(name);
+		if (next->s_dentry && name == next->s_dentry->d_iname) {
+			len =  next->s_dentry->d_name.len;
+			memcpy(d_name, name, len);
+			name = d_name;
+		} else
+			len = strlen(name);
 
 		/*
 		 * We'll have a dentry and an inode for
diff -ruNp linux-3.13.11/fs/coredump.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/coredump.c
--- linux-3.13.11/fs/coredump.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/coredump.c	2014-07-09 12:00:15.000000000 +0200
@@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct
 	struct pipe_inode_info *pipe = file->private_data;
 
 	pipe_lock(pipe);
-	pipe->readers++;
-	pipe->writers--;
+	atomic_inc(&pipe->readers);
+	atomic_dec(&pipe->writers);
 	wake_up_interruptible_sync(&pipe->wait);
 	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	pipe_unlock(pipe);
@@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct
 	 * We actually want wait_event_freezable() but then we need
 	 * to clear TIF_SIGPENDING and improve dump_interrupted().
 	 */
-	wait_event_interruptible(pipe->wait, pipe->readers == 1);
+	wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
 
 	pipe_lock(pipe);
-	pipe->readers--;
-	pipe->writers++;
+	atomic_dec(&pipe->readers);
+	atomic_inc(&pipe->writers);
 	pipe_unlock(pipe);
 }
 
@@ -499,7 +499,9 @@ void do_coredump(const siginfo_t *siginf
 	struct files_struct *displaced;
 	bool need_nonrelative = false;
 	bool core_dumped = false;
-	static atomic_t core_dump_count = ATOMIC_INIT(0);
+	static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
+	long signr = siginfo->si_signo;
+	int dumpable;
 	struct coredump_params cprm = {
 		.siginfo = siginfo,
 		.regs = signal_pt_regs(),
@@ -512,12 +514,17 @@ void do_coredump(const siginfo_t *siginf
 		.mm_flags = mm->flags,
 	};
 
-	audit_core_dumps(siginfo->si_signo);
+	audit_core_dumps(signr);
+
+	dumpable = __get_dumpable(cprm.mm_flags);
+
+	if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
+		gr_handle_brute_attach(dumpable);
 
 	binfmt = mm->binfmt;
 	if (!binfmt || !binfmt->core_dump)
 		goto fail;
-	if (!__get_dumpable(cprm.mm_flags))
+	if (!dumpable)
 		goto fail;
 
 	cred = prepare_creds();
@@ -536,7 +543,7 @@ void do_coredump(const siginfo_t *siginf
 		need_nonrelative = true;
 	}
 
-	retval = coredump_wait(siginfo->si_signo, &core_state);
+	retval = coredump_wait(signr, &core_state);
 	if (retval < 0)
 		goto fail_creds;
 
@@ -579,7 +586,7 @@ void do_coredump(const siginfo_t *siginf
 		}
 		cprm.limit = RLIM_INFINITY;
 
-		dump_count = atomic_inc_return(&core_dump_count);
+		dump_count = atomic_inc_return_unchecked(&core_dump_count);
 		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
 			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
 			       task_tgid_vnr(current), current->comm);
@@ -611,6 +618,8 @@ void do_coredump(const siginfo_t *siginf
 	} else {
 		struct inode *inode;
 
+		gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
+
 		if (cprm.limit < binfmt->min_coredump)
 			goto fail_unlock;
 
@@ -669,7 +678,7 @@ close_fail:
 		filp_close(cprm.file, NULL);
 fail_dropcount:
 	if (ispipe)
-		atomic_dec(&core_dump_count);
+		atomic_dec_unchecked(&core_dump_count);
 fail_unlock:
 	kfree(cn.corename);
 	coredump_finish(mm, core_dumped);
@@ -690,6 +699,8 @@ int dump_emit(struct coredump_params *cp
 	struct file *file = cprm->file;
 	loff_t pos = file->f_pos;
 	ssize_t n;
+
+	gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
 	if (cprm->written + nr > cprm->limit)
 		return 0;
 	while (nr) {
diff -ruNp linux-3.13.11/fs/dcache.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/dcache.c
--- linux-3.13.11/fs/dcache.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/dcache.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,6 +38,7 @@
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
 #include <linux/list_lru.h>
+#include <linux/vs_limit.h>
 #include "internal.h"
 #include "mount.h"
 
@@ -640,6 +641,8 @@ int d_invalidate(struct dentry * dentry)
 		spin_lock(&dentry->d_lock);
 	}
 
+	vx_dentry_dec(dentry);
+
 	/*
 	 * Somebody else still using it?
 	 *
@@ -669,6 +672,7 @@ EXPORT_SYMBOL(d_invalidate);
 static inline void __dget_dlock(struct dentry *dentry)
 {
 	dentry->d_lockref.count++;
+	vx_dentry_inc(dentry);
 }
 
 static inline void __dget(struct dentry *dentry)
@@ -1483,6 +1487,9 @@ struct dentry *__d_alloc(struct super_bl
 	struct dentry *dentry;
 	char *dname;
 
+	if (!vx_dentry_avail(1))
+		return NULL;
+
 	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
 	if (!dentry)
 		return NULL;
@@ -1495,7 +1502,7 @@ struct dentry *__d_alloc(struct super_bl
 	 */
 	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
 	if (name->len > DNAME_INLINE_LEN-1) {
-		dname = kmalloc(name->len + 1, GFP_KERNEL);
+		dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
 		if (!dname) {
 			kmem_cache_free(dentry_cache, dentry); 
 			return NULL;
@@ -1515,6 +1522,7 @@ struct dentry *__d_alloc(struct super_bl
 
 	dentry->d_lockref.count = 1;
 	dentry->d_flags = 0;
+	vx_dentry_inc(dentry);
 	spin_lock_init(&dentry->d_lock);
 	seqcount_init(&dentry->d_seq);
 	dentry->d_inode = NULL;
@@ -2278,6 +2286,7 @@ struct dentry *__d_lookup(const struct d
 		}
 
 		dentry->d_lockref.count++;
+		vx_dentry_inc(dentry);
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;
@@ -3428,7 +3437,8 @@ void __init vfs_caches_init(unsigned lon
 	mempages -= reserve;
 
 	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
+			SLAB_NO_SANITIZE, NULL);
 
 	dcache_init();
 	inode_init();
diff -ruNp linux-3.13.11/fs/debugfs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/debugfs/inode.c
--- linux-3.13.11/fs/debugfs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/debugfs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
  */
 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
 {
+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
+	return __create_file(name, S_IFDIR | S_IRWXU,
+#else
 	return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+#endif
 				   parent, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_dir);
diff -ruNp linux-3.13.11/fs/devpts/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/devpts/inode.c
--- linux-3.13.11/fs/devpts/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/devpts/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@
 #include <linux/parser.h>
 #include <linux/fsnotify.h>
 #include <linux/seq_file.h>
+#include <linux/vs_base.h>
 
 #define DEVPTS_DEFAULT_MODE 0600
 /*
@@ -36,6 +37,21 @@
 #define DEVPTS_DEFAULT_PTMX_MODE 0000
 #define PTMX_MINOR	2
 
+static int devpts_permission(struct inode *inode, int mask)
+{
+	int ret = -EACCES;
+
+	/* devpts is xid tagged */
+	if (vx_check((vxid_t)i_tag_read(inode), VS_WATCH_P | VS_IDENT))
+		ret = generic_permission(inode, mask);
+	return ret;
+}
+
+static struct inode_operations devpts_file_inode_operations = {
+	.permission     = devpts_permission,
+};
+
+
 /*
  * sysctl support for setting limits on the number of Unix98 ptys allocated.
  * Otherwise one can eat up all kernel memory by opening /dev/ptmx repeatedly.
@@ -345,6 +361,34 @@ static int devpts_show_options(struct se
 	return 0;
 }
 
+static int devpts_filter(struct dentry *de)
+{
+	vxid_t xid = 0;
+
+	/* devpts is xid tagged */
+	if (de && de->d_inode)
+		xid = (vxid_t)i_tag_read(de->d_inode);
+#ifdef CONFIG_VSERVER_WARN_DEVPTS
+	else
+		vxwprintk_task(1, "devpts " VS_Q("%.*s") " without inode.",
+			de->d_name.len, de->d_name.name);
+#endif
+	return vx_check(xid, VS_WATCH_P | VS_IDENT);
+}
+
+static int devpts_readdir(struct file * filp, struct dir_context *ctx)
+{
+	return dcache_readdir_filter(filp, ctx, devpts_filter);
+}
+
+static struct file_operations devpts_dir_operations = {
+	.open		= dcache_dir_open,
+	.release	= dcache_dir_close,
+	.llseek		= dcache_dir_lseek,
+	.read		= generic_read_dir,
+	.iterate	= devpts_readdir,
+};
+
 static const struct super_operations devpts_sops = {
 	.statfs		= simple_statfs,
 	.remount_fs	= devpts_remount,
@@ -388,8 +432,10 @@ devpts_fill_super(struct super_block *s,
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
 	inode->i_op = &simple_dir_inode_operations;
-	inode->i_fop = &simple_dir_operations;
+	inode->i_fop = &devpts_dir_operations;
 	set_nlink(inode, 2);
+	/* devpts is xid tagged */
+	i_tag_write(inode, (vtag_t)vx_current_xid());
 
 	s->s_root = d_make_root(inode);
 	if (s->s_root)
@@ -593,6 +639,9 @@ struct inode *devpts_pty_new(struct inod
 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 	init_special_inode(inode, S_IFCHR|opts->mode, device);
+	/* devpts is xid tagged */
+	i_tag_write(inode, (vtag_t)vx_current_xid());
+	inode->i_op = &devpts_file_inode_operations;
 	inode->i_private = priv;
 
 	sprintf(s, "%d", index);
diff -ruNp linux-3.13.11/fs/ecryptfs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ecryptfs/inode.c
--- linux-3.13.11/fs/ecryptfs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ecryptfs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -675,7 +675,7 @@ static int ecryptfs_readlink_lower(struc
 	old_fs = get_fs();
 	set_fs(get_ds());
 	rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
-						   (char __user *)lower_buf,
+						   (char __force_user *)lower_buf,
 						   PATH_MAX);
 	set_fs(old_fs);
 	if (rc < 0)
diff -ruNp linux-3.13.11/fs/ecryptfs/miscdev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ecryptfs/miscdev.c
--- linux-3.13.11/fs/ecryptfs/miscdev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ecryptfs/miscdev.c	2014-07-09 12:00:15.000000000 +0200
@@ -304,7 +304,7 @@ check_list:
 		goto out_unlock_msg_ctx;
 	i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
 	if (msg_ctx->msg) {
-		if (copy_to_user(&buf[i], packet_length, packet_length_size))
+		if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
 			goto out_unlock_msg_ctx;
 		i += packet_length_size;
 		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff -ruNp linux-3.13.11/fs/exec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/exec.c
--- linux-3.13.11/fs/exec.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/exec.c	2014-07-09 12:00:15.000000000 +0200
@@ -55,8 +55,20 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/oom.h>
 #include <linux/compat.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+#include <linux/coredump.h>
+#include <linux/mman.h>
+
+#ifdef CONFIG_PAX_REFCOUNT
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#endif
+
+#include <trace/events/fs.h>
 
 #include <asm/uaccess.h>
+#include <asm/sections.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 
@@ -66,19 +78,34 @@
 
 #include <trace/events/sched.h>
 
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
+{
+	pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
+}
+#endif
+
+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+EXPORT_SYMBOL(pax_set_initial_flags_func);
+#endif
+
 int suid_dumpable = 0;
 
 static LIST_HEAD(formats);
 static DEFINE_RWLOCK(binfmt_lock);
 
+extern int gr_process_kernel_exec_ban(void);
+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
+
 void __register_binfmt(struct linux_binfmt * fmt, int insert)
 {
 	BUG_ON(!fmt);
 	if (WARN_ON(!fmt->load_binary))
 		return;
 	write_lock(&binfmt_lock);
-	insert ? list_add(&fmt->lh, &formats) :
-		 list_add_tail(&fmt->lh, &formats);
+	insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
+		 pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
 	write_unlock(&binfmt_lock);
 }
 
@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
 void unregister_binfmt(struct linux_binfmt * fmt)
 {
 	write_lock(&binfmt_lock);
-	list_del(&fmt->lh);
+	pax_list_del((struct list_head *)&fmt->lh);
 	write_unlock(&binfmt_lock);
 }
 
@@ -181,18 +208,10 @@ static struct page *get_arg_page(struct
 		int write)
 {
 	struct page *page;
-	int ret;
 
-#ifdef CONFIG_STACK_GROWSUP
-	if (write) {
-		ret = expand_downwards(bprm->vma, pos);
-		if (ret < 0)
-			return NULL;
-	}
-#endif
-	ret = get_user_pages(current, bprm->mm, pos,
-			1, write, 1, &page, NULL);
-	if (ret <= 0)
+	if (0 > expand_downwards(bprm->vma, pos))
+		return NULL;
+	if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
 		return NULL;
 
 	if (write) {
@@ -208,6 +227,17 @@ static struct page *get_arg_page(struct
 		if (size <= ARG_MAX)
 			return page;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		// only allow 512KB for argv+env on suid/sgid binaries
+		// to prevent easy ASLR exhaustion
+		if (((!uid_eq(bprm->cred->euid, current_euid())) ||
+		     (!gid_eq(bprm->cred->egid, current_egid()))) &&
+		    (size > (512 * 1024))) {
+			put_page(page);
+			return NULL;
+		}
+#endif
+
 		/*
 		 * Limit to 1/4-th the stack size for the argv+env strings.
 		 * This ensures that:
@@ -267,6 +297,11 @@ static int __bprm_mm_init(struct linux_b
 	vma->vm_end = STACK_TOP_MAX;
 	vma->vm_start = vma->vm_end - PAGE_SIZE;
 	vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
@@ -277,6 +312,12 @@ static int __bprm_mm_init(struct linux_b
 	mm->stack_vm = mm->total_vm = 1;
 	up_write(&mm->mmap_sem);
 	bprm->p = vma->vm_end - sizeof(void *);
+
+#ifdef CONFIG_PAX_RANDUSTACK
+	if (randomize_va_space)
+		bprm->p ^= prandom_u32() & ~PAGE_MASK;
+#endif
+
 	return 0;
 err:
 	up_write(&mm->mmap_sem);
@@ -397,7 +438,7 @@ struct user_arg_ptr {
 	} ptr;
 };
 
-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 {
 	const char __user *native;
 
@@ -406,14 +447,14 @@ static const char __user *get_user_arg_p
 		compat_uptr_t compat;
 
 		if (get_user(compat, argv.ptr.compat + nr))
-			return ERR_PTR(-EFAULT);
+			return (const char __force_user *)ERR_PTR(-EFAULT);
 
 		return compat_ptr(compat);
 	}
 #endif
 
 	if (get_user(native, argv.ptr.native + nr))
-		return ERR_PTR(-EFAULT);
+		return (const char __force_user *)ERR_PTR(-EFAULT);
 
 	return native;
 }
@@ -432,7 +473,7 @@ static int count(struct user_arg_ptr arg
 			if (!p)
 				break;
 
-			if (IS_ERR(p))
+			if (IS_ERR((const char __force_kernel *)p))
 				return -EFAULT;
 
 			if (i >= max)
@@ -467,7 +508,7 @@ static int copy_strings(int argc, struct
 
 		ret = -EFAULT;
 		str = get_user_arg_ptr(argv, argc);
-		if (IS_ERR(str))
+		if (IS_ERR((const char __force_kernel *)str))
 			goto out;
 
 		len = strnlen_user(str, MAX_ARG_STRLEN);
@@ -549,7 +590,7 @@ int copy_strings_kernel(int argc, const
 	int r;
 	mm_segment_t oldfs = get_fs();
 	struct user_arg_ptr argv = {
-		.ptr.native = (const char __user *const  __user *)__argv,
+		.ptr.native = (const char __user * const __force_user *)__argv,
 	};
 
 	set_fs(KERNEL_DS);
@@ -584,7 +625,8 @@ static int shift_arg_pages(struct vm_are
 	unsigned long new_end = old_end - shift;
 	struct mmu_gather tlb;
 
-	BUG_ON(new_start > new_end);
+	if (new_start >= new_end || new_start < mmap_min_addr)
+		return -ENOMEM;
 
 	/*
 	 * ensure there are no vmas between where we want to go
@@ -593,6 +635,10 @@ static int shift_arg_pages(struct vm_are
 	if (vma != find_vma(mm, new_start))
 		return -EFAULT;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	BUG_ON(pax_find_mirror_vma(vma));
+#endif
+
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
@@ -673,10 +719,6 @@ int setup_arg_pages(struct linux_binprm
 	stack_top = arch_align_stack(stack_top);
 	stack_top = PAGE_ALIGN(stack_top);
 
-	if (unlikely(stack_top < mmap_min_addr) ||
-	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
-		return -ENOMEM;
-
 	stack_shift = vma->vm_end - stack_top;
 
 	bprm->p -= stack_shift;
@@ -688,8 +730,28 @@ int setup_arg_pages(struct linux_binprm
 	bprm->exec -= stack_shift;
 
 	down_write(&mm->mmap_sem);
+
+	/* Move stack pages down in memory. */
+	if (stack_shift) {
+		ret = shift_arg_pages(vma, stack_shift);
+		if (ret)
+			goto out_unlock;
+	}
+
 	vm_flags = VM_STACK_FLAGS;
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+	if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+		vm_flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+		if (mm->pax_flags & MF_PAX_MPROTECT)
+			vm_flags &= ~VM_MAYEXEC;
+#endif
+
+	}
+#endif
+
 	/*
 	 * Adjust stack execute permissions; explicitly enable for
 	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
@@ -708,13 +770,6 @@ int setup_arg_pages(struct linux_binprm
 		goto out_unlock;
 	BUG_ON(prev != vma);
 
-	/* Move stack pages down in memory. */
-	if (stack_shift) {
-		ret = shift_arg_pages(vma, stack_shift);
-		if (ret)
-			goto out_unlock;
-	}
-
 	/* mprotect_fixup is overkill to remove the temporary stack flags */
 	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 
@@ -738,6 +793,27 @@ int setup_arg_pages(struct linux_binprm
 #endif
 	current->mm->start_stack = bprm->p;
 	ret = expand_stack(vma, stack_base);
+
+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
+	if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
+		unsigned long size;
+		vm_flags_t vm_flags;
+
+		size = STACK_TOP - vma->vm_end;
+		vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
+
+		ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
+
+#ifdef CONFIG_X86
+		if (!ret) {
+			size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
+			ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
+		}
+#endif
+
+	}
+#endif
+
 	if (ret)
 		ret = -EFAULT;
 
@@ -774,6 +850,8 @@ struct file *open_exec(const char *name)
 
 	fsnotify_open(file);
 
+	trace_open_exec(name);
+
 	err = deny_write_access(file);
 	if (err)
 		goto exit;
@@ -797,7 +875,7 @@ int kernel_read(struct file *file, loff_
 	old_fs = get_fs();
 	set_fs(get_ds());
 	/* The cast to a user pointer is valid due to the set_fs() */
-	result = vfs_read(file, (void __user *)addr, count, &pos);
+	result = vfs_read(file, (void __force_user *)addr, count, &pos);
 	set_fs(old_fs);
 	return result;
 }
@@ -1253,7 +1331,7 @@ static int check_unsafe_exec(struct linu
 	}
 	rcu_read_unlock();
 
-	if (p->fs->users > n_fs) {
+	if (atomic_read(&p->fs->users) > n_fs) {
 		bprm->unsafe |= LSM_UNSAFE_SHARE;
 	} else {
 		res = -EAGAIN;
@@ -1443,6 +1521,31 @@ static int exec_binprm(struct linux_binp
 	return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+static DEFINE_PER_CPU(u64, exec_counter);
+static int __init init_exec_counters(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(exec_counter, cpu) = (u64)cpu;
+	}
+
+	return 0;
+}
+early_initcall(init_exec_counters);
+static inline void increment_exec_counter(void)
+{
+	BUILD_BUG_ON(NR_CPUS > (1 << 16));
+	current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
+}
+#else
+static inline void increment_exec_counter(void) {}
+#endif
+
+extern void gr_handle_exec_args(struct linux_binprm *bprm,
+				struct user_arg_ptr argv);
+
 /*
  * sys_execve() executes a new program.
  */
@@ -1450,12 +1553,19 @@ static int do_execve_common(const char *
 				struct user_arg_ptr argv,
 				struct user_arg_ptr envp)
 {
+#ifdef CONFIG_GRKERNSEC
+	struct file *old_exec_file;
+	struct acl_subject_label *old_acl;
+	struct rlimit old_rlim[RLIM_NLIMITS];
+#endif
 	struct linux_binprm *bprm;
 	struct file *file;
 	struct files_struct *displaced;
 	bool clear_in_exec;
 	int retval;
 
+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
+
 	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs
@@ -1496,12 +1606,22 @@ static int do_execve_common(const char *
 	if (IS_ERR(file))
 		goto out_unmark;
 
+	if (gr_ptrace_readexec(file, bprm->unsafe)) {
+		retval = -EPERM;
+		goto out_file;
+	}
+
 	sched_exec();
 
 	bprm->file = file;
 	bprm->filename = filename;
 	bprm->interp = filename;
 
+	if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
+		retval = -EACCES;
+		goto out_file;
+	}
+
 	retval = bprm_mm_init(bprm);
 	if (retval)
 		goto out_file;
@@ -1518,24 +1638,70 @@ static int do_execve_common(const char *
 	if (retval < 0)
 		goto out;
 
+#ifdef CONFIG_GRKERNSEC
+	old_acl = current->acl;
+	memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
+	old_exec_file = current->exec_file;
+	get_file(file);
+	current->exec_file = file;
+#endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	/* limit suid stack to 8MB
+	 * we saved the old limits above and will restore them if this exec fails
+	 */
+	if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
+	    (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
+		current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
+#endif
+
+	if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
+		retval = -EPERM;
+		goto out_fail;
+	}
+
+	if (!gr_tpe_allow(file)) {
+		retval = -EACCES;
+		goto out_fail;
+	}
+
+	if (gr_check_crash_exec(file)) {
+		retval = -EACCES;
+		goto out_fail;
+	}
+
+	retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
+					bprm->unsafe);
+	if (retval < 0)
+		goto out_fail;
+
 	retval = copy_strings_kernel(1, &bprm->filename, bprm);
 	if (retval < 0)
-		goto out;
+		goto out_fail;
 
 	bprm->exec = bprm->p;
 	retval = copy_strings(bprm->envc, envp, bprm);
 	if (retval < 0)
-		goto out;
+		goto out_fail;
 
 	retval = copy_strings(bprm->argc, argv, bprm);
 	if (retval < 0)
-		goto out;
+		goto out_fail;
+
+	gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
+
+	gr_handle_exec_args(bprm, argv);
 
 	retval = exec_binprm(bprm);
 	if (retval < 0)
-		goto out;
+		goto out_fail;
+#ifdef CONFIG_GRKERNSEC
+	if (old_exec_file)
+		fput(old_exec_file);
+#endif
 
 	/* execve succeeded */
+
+	increment_exec_counter();
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
 	acct_update_integrals(current);
@@ -1545,6 +1711,14 @@ static int do_execve_common(const char *
 		put_files_struct(displaced);
 	return retval;
 
+out_fail:
+#ifdef CONFIG_GRKERNSEC
+	current->acl = old_acl;
+	memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
+	fput(current->exec_file);
+	current->exec_file = old_exec_file;
+#endif
+
 out:
 	if (bprm->mm) {
 		acct_arg_size(bprm, 0);
@@ -1699,3 +1873,295 @@ asmlinkage long compat_sys_execve(const
 	return error;
 }
 #endif
+
+int pax_check_flags(unsigned long *flags)
+{
+	int retval = 0;
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
+	if (*flags & MF_PAX_SEGMEXEC)
+	{
+		*flags &= ~MF_PAX_SEGMEXEC;
+	retval = -EINVAL;
+	}
+#endif
+
+	if ((*flags & MF_PAX_PAGEEXEC)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+	    &&  (*flags & MF_PAX_SEGMEXEC)
+#endif
+
+	   )
+	{
+		*flags &= ~MF_PAX_PAGEEXEC;
+		retval = -EINVAL;
+	}
+
+	if ((*flags & MF_PAX_MPROTECT)
+
+#ifdef CONFIG_PAX_MPROTECT
+	    && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
+#endif
+
+	   )
+	{
+		*flags &= ~MF_PAX_MPROTECT;
+	retval = -EINVAL;
+	}
+
+	if ((*flags & MF_PAX_EMUTRAMP)
+
+#ifdef CONFIG_PAX_EMUTRAMP
+	    && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
+#endif
+
+	   )
+	{
+		*flags &= ~MF_PAX_EMUTRAMP;
+		retval = -EINVAL;
+	}
+
+	return retval;
+}
+
+EXPORT_SYMBOL(pax_check_flags);
+
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+char *pax_get_path(const struct path *path, char *buf, int buflen)
+{
+	char *pathname = d_path(path, buf, buflen);
+
+	if (IS_ERR(pathname))
+		goto toolong;
+
+	pathname = mangle_path(buf, pathname, "\t\n\\");
+	if (!pathname)
+		goto toolong;
+
+	*pathname = 0;
+	return buf;
+
+toolong:
+	return "<path too long>";
+}
+EXPORT_SYMBOL(pax_get_path);
+
+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = current->mm;
+	char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
+	char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
+	char *path_exec = NULL;
+	char *path_fault = NULL;
+	unsigned long start = 0UL, end = 0UL, offset = 0UL;
+	siginfo_t info = { };
+
+	if (buffer_exec && buffer_fault) {
+		struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
+
+		down_read(&mm->mmap_sem);
+		vma = mm->mmap;
+		while (vma && (!vma_exec || !vma_fault)) {
+			if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
+				vma_exec = vma;
+			if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
+				vma_fault = vma;
+			vma = vma->vm_next;
+		}
+		if (vma_exec)
+			path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
+		if (vma_fault) {
+			start = vma_fault->vm_start;
+			end = vma_fault->vm_end;
+			offset = vma_fault->vm_pgoff << PAGE_SHIFT;
+			if (vma_fault->vm_file)
+				path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
+			else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
+				path_fault = "<heap>";
+			else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
+				path_fault = "<stack>";
+			else
+				path_fault = "<anonymous mapping>";
+		}
+		up_read(&mm->mmap_sem);
+	}
+	if (tsk->signal->curr_ip)
+		printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
+	else
+		printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
+	printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
+			from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
+	free_page((unsigned long)buffer_exec);
+	free_page((unsigned long)buffer_fault);
+	pax_report_insns(regs, pc, sp);
+	info.si_signo = SIGKILL;
+	info.si_errno = 0;
+	info.si_code = SI_KERNEL;
+	info.si_pid = 0;
+	info.si_uid = 0;
+	do_coredump(&info);
+}
+#endif
+
+#ifdef CONFIG_PAX_REFCOUNT
+void pax_report_refcount_overflow(struct pt_regs *regs)
+{
+	if (current->signal->curr_ip)
+		printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
+				&current->signal->curr_ip, current->comm, task_pid_nr(current),
+				from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+	else
+		printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
+				from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
+	preempt_disable();
+	show_regs(regs);
+	preempt_enable();
+	force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
+}
+#endif
+
+#ifdef CONFIG_PAX_USERCOPY
+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+	const void * const stack = task_stack_page(current);
+	const void * const stackend = stack + THREAD_SIZE;
+
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
+	const void *frame = NULL;
+	const void *oldframe;
+#endif
+
+	if (obj + len < obj)
+		return -1;
+
+	if (obj + len <= stack || stackend <= obj)
+		return 0;
+
+	if (obj < stack || stackend < obj + len)
+		return -1;
+
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
+	oldframe = __builtin_frame_address(1);
+	if (oldframe)
+		frame = __builtin_frame_address(2);
+	/*
+	  low ----------------------------------------------> high
+	  [saved bp][saved ip][args][local vars][saved bp][saved ip]
+			      ^----------------^
+			  allow copies only within here
+	*/
+	while (stack <= frame && frame < stackend) {
+		/* if obj + len extends past the last frame, this
+		   check won't pass and the next frame will be 0,
+		   causing us to bail out and correctly report
+		   the copy as invalid
+		*/
+		if (obj + len <= frame)
+			return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
+		oldframe = frame;
+		frame = *(const void * const *)frame;
+	}
+	return -1;
+#else
+	return 1;
+#endif
+}
+
+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
+{
+	if (current->signal->curr_ip)
+		printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+			&current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
+	else
+		printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+			to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
+	dump_stack();
+	gr_handle_kernel_exploit();
+	do_group_exit(SIGKILL);
+}
+#endif
+
+#ifdef CONFIG_PAX_USERCOPY
+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
+{
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	unsigned long textlow = ktla_ktva((unsigned long)_stext);
+#ifdef CONFIG_MODULES
+	unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
+#else
+	unsigned long texthigh = ktla_ktva((unsigned long)_etext);
+#endif
+
+#else
+	unsigned long textlow = (unsigned long)_stext;
+	unsigned long texthigh = (unsigned long)_etext;
+
+#ifdef CONFIG_X86_64
+	/* check against linear mapping as well */
+	if (high > (unsigned long)__va(__pa(textlow)) &&
+	    low < (unsigned long)__va(__pa(texthigh)))
+		return true;
+#endif
+
+#endif
+
+	if (high <= textlow || low >= texthigh)
+		return false;
+	else
+		return true;
+}
+#endif
+
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+
+#ifdef CONFIG_PAX_USERCOPY
+	const char *type;
+
+	if (!n)
+		return;
+
+	type = check_heap_object(ptr, n);
+	if (!type) {
+		int ret = check_stack_object(ptr, n);
+		if (ret == 1 || ret == 2)
+			return;
+		if (ret == 0) {
+			if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
+				type = "<kernel text>";
+			else
+				return;
+		} else
+			type = "<process stack>";
+	}
+
+	pax_report_usercopy(ptr, n, to_user, type);
+#endif
+
+}
+EXPORT_SYMBOL(__check_object_size);
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+void pax_track_stack(void)
+{
+	unsigned long sp = (unsigned long)&sp;
+	if (sp < current_thread_info()->lowest_stack &&
+	    sp > (unsigned long)task_stack_page(current))
+		current_thread_info()->lowest_stack = sp;
+}
+EXPORT_SYMBOL(pax_track_stack);
+#endif
+
+#ifdef CONFIG_PAX_SIZE_OVERFLOW
+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
+{
+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
+	dump_stack();
+	do_group_exit(SIGKILL);
+}
+EXPORT_SYMBOL(report_size_overflow);
+#endif
diff -ruNp linux-3.13.11/fs/ext2/balloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/balloc.c
--- linux-3.13.11/fs/ext2/balloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/balloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -693,7 +693,6 @@ ext2_try_to_allocate(struct super_block
 			start = 0;
 		end = EXT2_BLOCKS_PER_GROUP(sb);
 	}
-
 	BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
 
 repeat:
@@ -1184,10 +1183,10 @@ static int ext2_has_free_blocks(struct e
 
 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
 	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+	if (free_blocks < root_blocks + 1 &&
 		!uid_eq(sbi->s_resuid, current_fsuid()) &&
 		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-		 !in_group_p (sbi->s_resgid))) {
+		 !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
 		return 0;
 	}
 	return 1;
diff -ruNp linux-3.13.11/fs/ext2/ext2.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ext2.h
--- linux-3.13.11/fs/ext2/ext2.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ext2.h	2014-07-09 12:00:15.000000000 +0200
@@ -244,8 +244,12 @@ struct ext2_group_desc
 #define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
 #define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
 #define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
+#define EXT2_IXUNLINK_FL		FS_IXUNLINK_FL	/* Immutable invert on unlink */
 #define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
 
+#define EXT2_BARRIER_FL			FS_BARRIER_FL	/* Barrier for chroot() */
+#define EXT2_COW_FL			FS_COW_FL	/* Copy on Write marker */
+
 #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
 
@@ -329,7 +333,8 @@ struct ext2_inode {
 			__u16	i_pad1;
 			__le16	l_i_uid_high;	/* these 2 fields    */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_tag;	/* Context Tag */
+			__u16	l_i_reserved2;
 		} linux2;
 		struct {
 			__u8	h_i_frag;	/* Fragment number */
@@ -357,6 +362,7 @@ struct ext2_inode {
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 
 /*
@@ -384,6 +390,7 @@ struct ext2_inode {
 #define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
 #define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
 #define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
+#define EXT2_MOUNT_TAGGED		(1<<24)	  /* Enable Context Tags */
 
 
 #define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
@@ -757,6 +764,7 @@ extern void ext2_set_inode_flags(struct
 extern void ext2_get_inode_flags(struct ext2_inode_info *);
 extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len);
+extern int ext2_sync_flags(struct inode *, int, int);
 
 /* ioctl.c */
 extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
diff -ruNp linux-3.13.11/fs/ext2/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/file.c
--- linux-3.13.11/fs/ext2/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -104,4 +104,5 @@ const struct inode_operations ext2_file_
 	.setattr	= ext2_setattr,
 	.get_acl	= ext2_get_acl,
 	.fiemap		= ext2_fiemap,
+	.sync_flags	= ext2_sync_flags,
 };
diff -ruNp linux-3.13.11/fs/ext2/ialloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ialloc.c
--- linux-3.13.11/fs/ext2/ialloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ialloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -17,6 +17,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/random.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -546,6 +547,7 @@ got:
 		inode->i_mode = mode;
 		inode->i_uid = current_fsuid();
 		inode->i_gid = dir->i_gid;
+		i_tag_write(inode, dx_current_fstag(sb));
 	} else
 		inode_init_owner(inode, dir, mode);
 
diff -ruNp linux-3.13.11/fs/ext2/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/inode.c
--- linux-3.13.11/fs/ext2/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,6 +32,7 @@
 #include <linux/fiemap.h>
 #include <linux/namei.h>
 #include <linux/aio.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "acl.h"
 #include "xip.h"
@@ -1182,7 +1183,7 @@ static void ext2_truncate_blocks(struct
 		return;
 	if (ext2_inode_is_fast_symlink(inode))
 		return;
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return;
 	__ext2_truncate_blocks(inode, offset);
 }
@@ -1273,36 +1274,61 @@ void ext2_set_inode_flags(struct inode *
 {
 	unsigned int flags = EXT2_I(inode)->i_flags;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+
+	if (flags & EXT2_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT2_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
 	if (flags & EXT2_SYNC_FL)
 		inode->i_flags |= S_SYNC;
 	if (flags & EXT2_APPEND_FL)
 		inode->i_flags |= S_APPEND;
-	if (flags & EXT2_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
 	if (flags & EXT2_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & EXT2_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & EXT2_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & EXT2_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
 void ext2_get_inode_flags(struct ext2_inode_info *ei)
 {
 	unsigned int flags = ei->vfs_inode.i_flags;
+	unsigned int vflags = ei->vfs_inode.i_vflags;
+
+	ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL |
+			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL |
+			EXT2_NOATIME_FL | EXT2_DIRSYNC_FL |
+			EXT2_BARRIER_FL | EXT2_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT2_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		ei->i_flags |= EXT2_IXUNLINK_FL;
 
-	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
-			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		ei->i_flags |= EXT2_SYNC_FL;
 	if (flags & S_APPEND)
 		ei->i_flags |= EXT2_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT2_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		ei->i_flags |= EXT2_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		ei->i_flags |= EXT2_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		ei->i_flags |= EXT2_BARRIER_FL;
+	if (vflags & V_COW)
+		ei->i_flags |= EXT2_COW_FL;
 }
 
 struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
@@ -1338,8 +1364,10 @@ struct inode *ext2_iget (struct super_bl
 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
+	i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
+	i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
+	i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
+		le16_to_cpu(raw_inode->i_raw_tag)));
 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 	inode->i_size = le32_to_cpu(raw_inode->i_size);
 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -1437,8 +1465,10 @@ static int __ext2_write_inode(struct ino
 	struct ext2_inode_info *ei = EXT2_I(inode);
 	struct super_block *sb = inode->i_sb;
 	ino_t ino = inode->i_ino;
-	uid_t uid = i_uid_read(inode);
-	gid_t gid = i_gid_read(inode);
+	uid_t uid = from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag));
+	gid_t gid = from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag));
 	struct buffer_head * bh;
 	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
 	int n;
@@ -1474,6 +1504,9 @@ static int __ext2_write_inode(struct ino
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode));
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 	raw_inode->i_size = cpu_to_le32(inode->i_size);
 	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
@@ -1554,7 +1587,8 @@ int ext2_setattr(struct dentry *dentry,
 	if (is_quota_modification(inode, iattr))
 		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
-	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
+	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) ||
+	    (iattr->ia_valid & ATTR_TAG && !tag_eq(iattr->ia_tag, inode->i_tag))) {
 		error = dquot_transfer(inode, iattr);
 		if (error)
 			return error;
diff -ruNp linux-3.13.11/fs/ext2/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ioctl.c
--- linux-3.13.11/fs/ext2/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -17,6 +17,16 @@
 #include <asm/uaccess.h>
 
 
+int ext2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext2_get_inode_flags(EXT2_I(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return 0;
+}
+
 long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -51,6 +61,11 @@ long ext2_ioctl(struct file *filp, unsig
 
 		flags = ext2_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
 		if (IS_NOQUOTA(inode)) {
@@ -66,7 +81,9 @@ long ext2_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
+		if ((oldflags & EXT2_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT2_APPEND_FL |
+			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE)) {
 				mutex_unlock(&inode->i_mutex);
 				ret = -EPERM;
@@ -74,7 +91,7 @@ long ext2_ioctl(struct file *filp, unsig
 			}
 		}
 
-		flags = flags & EXT2_FL_USER_MODIFIABLE;
+		flags &= EXT2_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
 
diff -ruNp linux-3.13.11/fs/ext2/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/namei.c
--- linux-3.13.11/fs/ext2/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,6 +32,7 @@
 
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -73,6 +74,7 @@ static struct dentry *ext2_lookup(struct
 					(unsigned long) ino);
 			return ERR_PTR(-EIO);
 		}
+		dx_propagate_tag(nd, inode);
 	}
 	return d_splice_alias(inode, dentry);
 }
@@ -432,5 +434,6 @@ const struct inode_operations ext2_speci
 	.removexattr	= generic_removexattr,
 #endif
 	.setattr	= ext2_setattr,
+	.sync_flags	= ext2_sync_flags,
 	.get_acl	= ext2_get_acl,
 };
diff -ruNp linux-3.13.11/fs/ext2/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/super.c
--- linux-3.13.11/fs/ext2/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -395,7 +395,8 @@ enum {
 	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
 	Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
 	Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
-	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -423,6 +424,9 @@ static const match_table_t tokens = {
 	{Opt_acl, "acl"},
 	{Opt_noacl, "noacl"},
 	{Opt_xip, "xip"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_grpquota, "grpquota"},
 	{Opt_ignore, "noquota"},
 	{Opt_quota, "quota"},
@@ -506,6 +510,20 @@ static int parse_options(char *options,
 		case Opt_nouid32:
 			set_opt (sbi->s_mount_opt, NO_UID32);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			clear_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_nocheck:
 			clear_opt (sbi->s_mount_opt, CHECK);
 			break;
@@ -864,6 +882,8 @@ static int ext2_fill_super(struct super_
 	if (!parse_options((char *) data, sb))
 		goto failed_mount;
 
+	if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
 		 MS_POSIXACL : 0);
@@ -1269,6 +1289,14 @@ static int ext2_remount (struct super_bl
 		err = -EINVAL;
 		goto restore_opts;
 	}
+
+	if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk("EXT2-fs: %s: tagging not permitted on remount.\n",
+		       sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
diff -ruNp linux-3.13.11/fs/ext2/xattr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/xattr.c
--- linux-3.13.11/fs/ext2/xattr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext2/xattr.c	2014-07-09 12:00:15.000000000 +0200
@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, c
 	struct buffer_head *bh = NULL;
 	struct ext2_xattr_entry *entry;
 	char *end;
-	size_t rest = buffer_size;
+	size_t rest = buffer_size, total_size = 0;
 	int error;
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
@@ -305,9 +305,10 @@ bad_block:	ext2_error(inode->i_sb, "ext2
 				buffer += size;
 			}
 			rest -= size;
+			total_size += size;
 		}
 	}
-	error = buffer_size - rest;  /* total size */
+	error = total_size;
 
 cleanup:
 	brelse(bh);
diff -ruNp linux-3.13.11/fs/ext3/balloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/balloc.c
--- linux-3.13.11/fs/ext3/balloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/balloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct e
 
 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
 	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+	if (free_blocks < root_blocks + 1 &&
 		!use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
 		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-		 !in_group_p (sbi->s_resgid))) {
+		 !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
 		return 0;
 	}
 	return 1;
diff -ruNp linux-3.13.11/fs/ext3/ext3.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ext3.h
--- linux-3.13.11/fs/ext3/ext3.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ext3.h	2014-07-09 12:00:15.000000000 +0200
@@ -151,10 +151,14 @@ struct ext3_group_desc
 #define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
 #define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
 #define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
+#define EXT3_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 #define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
 
-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
+#define EXT3_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define EXT3_COW_FL			0x20000000 /* Copy on Write marker */
+
+#define EXT3_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define EXT3_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
@@ -290,7 +294,8 @@ struct ext3_inode {
 			__u16	i_pad1;
 			__le16	l_i_uid_high;	/* these 2 fields    */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
+			__le16	l_i_tag;	/* Context Tag */
+			__u16	l_i_reserved2;
 		} linux2;
 		struct {
 			__u8	h_i_frag;	/* Fragment number */
@@ -320,6 +325,7 @@ struct ext3_inode {
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_reserved2	osd2.linux2.l_i_reserved2
 
 /*
@@ -364,6 +370,7 @@ struct ext3_inode {
 #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
 #define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
 						  * error in ordered mode */
+#define EXT3_MOUNT_TAGGED		(1<<24) /* Enable Context Tags */
 
 /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
 #ifndef _LINUX_EXT2_FS_H
@@ -1061,6 +1068,7 @@ extern void ext3_get_inode_flags(struct
 extern void ext3_set_aops(struct inode *inode);
 extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len);
+extern int ext3_sync_flags(struct inode *, int, int);
 
 /* ioctl.c */
 extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
diff -ruNp linux-3.13.11/fs/ext3/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/file.c
--- linux-3.13.11/fs/ext3/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -76,5 +76,6 @@ const struct inode_operations ext3_file_
 #endif
 	.get_acl	= ext3_get_acl,
 	.fiemap		= ext3_fiemap,
+	.sync_flags	= ext3_sync_flags,
 };
 
diff -ruNp linux-3.13.11/fs/ext3/ialloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ialloc.c
--- linux-3.13.11/fs/ext3/ialloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ialloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 
 #include <linux/quotaops.h>
 #include <linux/random.h>
+#include <linux/vs_tag.h>
 
 #include "ext3.h"
 #include "xattr.h"
@@ -469,6 +470,7 @@ got:
 		inode->i_mode = mode;
 		inode->i_uid = current_fsuid();
 		inode->i_gid = dir->i_gid;
+		i_tag_write(inode, dx_current_fstag(sb));
 	} else
 		inode_init_owner(inode, dir, mode);
 
diff -ruNp linux-3.13.11/fs/ext3/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/inode.c
--- linux-3.13.11/fs/ext3/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,8 @@
 #include <linux/mpage.h>
 #include <linux/namei.h>
 #include <linux/aio.h>
+#include <linux/vs_tag.h>
+
 #include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
@@ -2855,36 +2857,60 @@ void ext3_set_inode_flags(struct inode *
 {
 	unsigned int flags = EXT3_I(inode)->i_flags;
 
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+	if (flags & EXT3_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+	if (flags & EXT3_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
 	if (flags & EXT3_SYNC_FL)
 		inode->i_flags |= S_SYNC;
 	if (flags & EXT3_APPEND_FL)
 		inode->i_flags |= S_APPEND;
-	if (flags & EXT3_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
 	if (flags & EXT3_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & EXT3_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & EXT3_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & EXT3_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
 void ext3_get_inode_flags(struct ext3_inode_info *ei)
 {
 	unsigned int flags = ei->vfs_inode.i_flags;
+	unsigned int vflags = ei->vfs_inode.i_vflags;
+
+	ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL |
+			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL |
+			EXT3_NOATIME_FL | EXT3_DIRSYNC_FL |
+			EXT3_BARRIER_FL | EXT3_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		ei->i_flags |= EXT3_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		ei->i_flags |= EXT3_IXUNLINK_FL;
 
-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		ei->i_flags |= EXT3_SYNC_FL;
 	if (flags & S_APPEND)
 		ei->i_flags |= EXT3_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT3_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		ei->i_flags |= EXT3_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		ei->i_flags |= EXT3_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		ei->i_flags |= EXT3_BARRIER_FL;
+	if (vflags & V_COW)
+		ei->i_flags |= EXT3_COW_FL;
 }
 
 struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
@@ -2922,8 +2948,10 @@ struct inode *ext3_iget(struct super_blo
 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
+	i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
+	i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
+	i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
+		le16_to_cpu(raw_inode->i_raw_tag)));
 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 	inode->i_size = le32_to_cpu(raw_inode->i_size);
 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -3095,8 +3123,10 @@ again:
 
 	ext3_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	i_uid = i_uid_read(inode);
-	i_gid = i_gid_read(inode);
+	i_uid = from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag));
+	i_gid = from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag));
 	if(!(test_opt(inode->i_sb, NO_UID32))) {
 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
@@ -3121,6 +3151,9 @@ again:
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode));
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 	disksize = cpu_to_le32(ei->i_disksize);
 	if (disksize != raw_inode->i_size) {
@@ -3289,7 +3322,8 @@ int ext3_setattr(struct dentry *dentry,
 	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
+	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) ||
+	    (ia_valid & ATTR_TAG && !tag_eq(attr->ia_tag, inode->i_tag))) {
 		handle_t *handle;
 
 		/* (user+group)*(old+new) structure, inode write (sb,
@@ -3311,6 +3345,8 @@ int ext3_setattr(struct dentry *dentry,
 			inode->i_uid = attr->ia_uid;
 		if (attr->ia_valid & ATTR_GID)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		error = ext3_mark_inode_dirty(handle, inode);
 		ext3_journal_stop(handle);
 	}
diff -ruNp linux-3.13.11/fs/ext3/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ioctl.c
--- linux-3.13.11/fs/ext3/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,34 @@
 #include <asm/uaccess.h>
 #include "ext3.h"
 
+
+int ext3_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	handle_t *handle = NULL;
+	struct ext3_iloc iloc;
+	int err;
+
+	handle = ext3_journal_start(inode, 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_SYNC(inode))
+		handle->h_sync = 1;
+	err = ext3_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto flags_err;
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext3_get_inode_flags(EXT3_I(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+	ext3_journal_stop(handle);
+	return err;
+}
+
 long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -45,6 +73,11 @@ long ext3_ioctl(struct file *filp, unsig
 
 		flags = ext3_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		mutex_lock(&inode->i_mutex);
 
 		/* Is it quota file? Do not allow user to mess with it */
@@ -63,7 +96,9 @@ long ext3_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
+		if ((oldflags & EXT3_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT3_APPEND_FL |
+			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE))
 				goto flags_out;
 		}
@@ -88,7 +123,7 @@ long ext3_ioctl(struct file *filp, unsig
 		if (err)
 			goto flags_err;
 
-		flags = flags & EXT3_FL_USER_MODIFIABLE;
+		flags &= EXT3_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
 		ei->i_flags = flags;
 
diff -ruNp linux-3.13.11/fs/ext3/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/namei.c
--- linux-3.13.11/fs/ext3/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,8 @@
  */
 
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
+
 #include "ext3.h"
 #include "namei.h"
 #include "xattr.h"
@@ -915,6 +917,7 @@ restart:
 					submit_bh(READ | REQ_META | REQ_PRIO,
 						  bh);
 				}
+		dx_propagate_tag(nd, inode);
 			}
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
@@ -2568,6 +2571,7 @@ const struct inode_operations ext3_dir_i
 	.listxattr	= ext3_listxattr,
 	.removexattr	= generic_removexattr,
 #endif
+	.sync_flags	= ext3_sync_flags,
 	.get_acl	= ext3_get_acl,
 };
 
diff -ruNp linux-3.13.11/fs/ext3/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/super.c
--- linux-3.13.11/fs/ext3/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -826,7 +826,8 @@ enum {
 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
 	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-	Opt_resize, Opt_usrquota, Opt_grpquota
+	Opt_resize, Opt_usrquota, Opt_grpquota,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -884,6 +885,9 @@ static const match_table_t tokens = {
 	{Opt_barrier, "barrier"},
 	{Opt_nobarrier, "nobarrier"},
 	{Opt_resize, "resize"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -1056,6 +1060,20 @@ static int parse_options (char *options,
 		case Opt_nouid32:
 			set_opt (sbi->s_mount_opt, NO_UID32);
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+		case Opt_notag:
+			clear_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			set_opt (sbi->s_mount_opt, TAGGED);
+			break;
+#endif
 		case Opt_nocheck:
 			clear_opt (sbi->s_mount_opt, CHECK);
 			break;
@@ -1788,6 +1806,9 @@ static int ext3_fill_super (struct super
 			    NULL, 0))
 		goto failed_mount;
 
+	if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
@@ -2683,6 +2704,14 @@ static int ext3_remount (struct super_bl
 	if (test_opt(sb, ABORT))
 		ext3_abort(sb, __func__, "Abort forced by user");
 
+	if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk("EXT3-fs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
diff -ruNp linux-3.13.11/fs/ext3/xattr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/xattr.c
--- linux-3.13.11/fs/ext3/xattr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext3/xattr.c	2014-07-09 12:00:15.000000000 +0200
@@ -330,7 +330,7 @@ static int
 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
 			char *buffer, size_t buffer_size)
 {
-	size_t rest = buffer_size;
+	size_t rest = buffer_size, total_size = 0;
 
 	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
 		const struct xattr_handler *handler =
@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *d
 				buffer += size;
 			}
 			rest -= size;
+			total_size += size;
 		}
 	}
-	return buffer_size - rest;
+	return total_size;
 }
 
 static int
diff -ruNp linux-3.13.11/fs/ext4/balloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/balloc.c
--- linux-3.13.11/fs/ext4/balloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/balloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct
 	/* Hm, nope.  Are (enough) root reserved clusters available? */
 	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
 	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
-	    capable(CAP_SYS_RESOURCE) ||
-	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+	    (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
+	    capable_nolog(CAP_SYS_RESOURCE)) {
 
 		if (free_clusters >= (nclusters + dirty_clusters +
 				      resv_clusters))
diff -ruNp linux-3.13.11/fs/ext4/ext4.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ext4.h
--- linux-3.13.11/fs/ext4/ext4.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ext4.h	2014-07-09 12:00:15.000000000 +0200
@@ -385,7 +385,10 @@ struct flex_groups {
 #define EXT4_EXTENTS_FL			0x00080000 /* Inode uses extents */
 #define EXT4_EA_INODE_FL	        0x00200000 /* Inode used for large EA */
 #define EXT4_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
+#define EXT4_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define EXT4_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 #define EXT4_INLINE_DATA_FL		0x10000000 /* Inode has inline data. */
+#define EXT4_COW_FL			0x20000000 /* Copy on Write marker */
 #define EXT4_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
 
 #define EXT4_FL_USER_VISIBLE		0x004BDFFF /* User visible flags */
@@ -670,7 +673,7 @@ struct ext4_inode {
 			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
 			__le16	l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
-			__le16	l_i_reserved;
+			__le16	l_i_tag;	/* Context Tag */
 		} linux2;
 		struct {
 			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
@@ -790,6 +793,7 @@ do {									       \
 #define i_gid_low	i_gid
 #define i_uid_high	osd2.linux2.l_i_uid_high
 #define i_gid_high	osd2.linux2.l_i_gid_high
+#define i_raw_tag	osd2.linux2.l_i_tag
 #define i_checksum_lo	osd2.linux2.l_i_checksum_lo
 
 #elif defined(__GNU__)
@@ -976,6 +980,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_POSIX_ACL		0x08000	/* POSIX Access Control Lists */
 #define EXT4_MOUNT_NO_AUTO_DA_ALLOC	0x10000	/* No auto delalloc mapping */
 #define EXT4_MOUNT_BARRIER		0x20000 /* Use block barriers */
+#define EXT4_MOUNT_TAGGED		0x40000 /* Enable Context Tags */
 #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
 #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
 #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
@@ -1269,19 +1274,19 @@ struct ext4_sb_info {
 	unsigned long s_mb_last_start;
 
 	/* stats for buddy allocator */
-	atomic_t s_bal_reqs;	/* number of reqs with len > 1 */
-	atomic_t s_bal_success;	/* we found long enough chunks */
-	atomic_t s_bal_allocated;	/* in blocks */
-	atomic_t s_bal_ex_scanned;	/* total extents scanned */
-	atomic_t s_bal_goals;	/* goal hits */
-	atomic_t s_bal_breaks;	/* too long searches */
-	atomic_t s_bal_2orders;	/* 2^order hits */
+	atomic_unchecked_t s_bal_reqs;	/* number of reqs with len > 1 */
+	atomic_unchecked_t s_bal_success;	/* we found long enough chunks */
+	atomic_unchecked_t s_bal_allocated;	/* in blocks */
+	atomic_unchecked_t s_bal_ex_scanned;	/* total extents scanned */
+	atomic_unchecked_t s_bal_goals;	/* goal hits */
+	atomic_unchecked_t s_bal_breaks;	/* too long searches */
+	atomic_unchecked_t s_bal_2orders;	/* 2^order hits */
 	spinlock_t s_bal_lock;
 	unsigned long s_mb_buddies_generated;
 	unsigned long long s_mb_generation_time;
-	atomic_t s_mb_lost_chunks;
-	atomic_t s_mb_preallocated;
-	atomic_t s_mb_discarded;
+	atomic_unchecked_t s_mb_lost_chunks;
+	atomic_unchecked_t s_mb_preallocated;
+	atomic_unchecked_t s_mb_discarded;
 	atomic_t s_lock_busy;
 
 	/* locality groups */
@@ -2653,6 +2658,7 @@ extern struct buffer_head *ext4_get_firs
 extern int ext4_inline_data_fiemap(struct inode *inode,
 				   struct fiemap_extent_info *fieinfo,
 				   int *has_inline);
+extern int ext4_sync_flags(struct inode *, int, int);
 extern int ext4_try_to_evict_inline_data(handle_t *handle,
 					 struct inode *inode,
 					 int needed);
diff -ruNp linux-3.13.11/fs/ext4/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/file.c
--- linux-3.13.11/fs/ext4/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -618,5 +618,6 @@ const struct inode_operations ext4_file_
 	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
 	.fiemap		= ext4_fiemap,
+	.sync_flags	= ext4_sync_flags,
 };
 
diff -ruNp linux-3.13.11/fs/ext4/ialloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ialloc.c
--- linux-3.13.11/fs/ext4/ialloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ialloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,7 @@
 #include <linux/random.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/vs_tag.h>
 #include <asm/byteorder.h>
 
 #include "ext4.h"
@@ -731,6 +732,7 @@ struct inode *__ext4_new_inode(handle_t
 		inode->i_mode = mode;
 		inode->i_uid = current_fsuid();
 		inode->i_gid = dir->i_gid;
+		i_tag_write(inode, dx_current_fstag(sb));
 	} else
 		inode_init_owner(inode, dir, mode);
 	dquot_initialize(inode);
diff -ruNp linux-3.13.11/fs/ext4/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/inode.c
--- linux-3.13.11/fs/ext4/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,7 @@
 #include <linux/ratelimit.h>
 #include <linux/aio.h>
 #include <linux/bitops.h>
+#include <linux/vs_tag.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -3929,42 +3930,67 @@ void ext4_set_inode_flags(struct inode *
 	unsigned int flags = EXT4_I(inode)->i_flags;
 	unsigned int new_fl = 0;
 
+	if (flags & EXT4_IMMUTABLE_FL)
+		new_fl |= S_IMMUTABLE;
+	if (flags & EXT4_IXUNLINK_FL)
+		new_fl |= S_IXUNLINK;
+
 	if (flags & EXT4_SYNC_FL)
 		new_fl |= S_SYNC;
 	if (flags & EXT4_APPEND_FL)
 		new_fl |= S_APPEND;
-	if (flags & EXT4_IMMUTABLE_FL)
-		new_fl |= S_IMMUTABLE;
 	if (flags & EXT4_NOATIME_FL)
 		new_fl |= S_NOATIME;
 	if (flags & EXT4_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
+
 	set_mask_bits(&inode->i_flags,
-		      S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+		S_IXUNLINK | S_IMMUTABLE |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC, new_fl);
+
+	new_fl = 0;
+	if (flags & EXT4_BARRIER_FL)
+		new_fl |= V_BARRIER;
+	if (flags & EXT4_COW_FL)
+		new_fl |= V_COW;
+
+	set_mask_bits(&inode->i_vflags,
+		V_BARRIER | V_COW, new_fl);
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
 void ext4_get_inode_flags(struct ext4_inode_info *ei)
 {
-	unsigned int vfs_fl;
+	unsigned int vfs_fl, vfs_vf;
 	unsigned long old_fl, new_fl;
 
 	do {
 		vfs_fl = ei->vfs_inode.i_flags;
+		vfs_vf = ei->vfs_inode.i_vflags;
 		old_fl = ei->i_flags;
 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
-				EXT4_DIRSYNC_FL);
+				EXT4_DIRSYNC_FL|EXT4_BARRIER_FL|
+				EXT4_COW_FL);
+
+		if (vfs_fl & S_IMMUTABLE)
+			new_fl |= EXT4_IMMUTABLE_FL;
+		if (vfs_fl & S_IXUNLINK)
+			new_fl |= EXT4_IXUNLINK_FL;
+
 		if (vfs_fl & S_SYNC)
 			new_fl |= EXT4_SYNC_FL;
 		if (vfs_fl & S_APPEND)
 			new_fl |= EXT4_APPEND_FL;
-		if (vfs_fl & S_IMMUTABLE)
-			new_fl |= EXT4_IMMUTABLE_FL;
 		if (vfs_fl & S_NOATIME)
 			new_fl |= EXT4_NOATIME_FL;
 		if (vfs_fl & S_DIRSYNC)
 			new_fl |= EXT4_DIRSYNC_FL;
+
+		if (vfs_vf & V_BARRIER)
+			new_fl |= EXT4_BARRIER_FL;
+		if (vfs_vf & V_COW)
+			new_fl |= EXT4_COW_FL;
 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
 }
 
@@ -4069,8 +4095,10 @@ struct inode *ext4_iget(struct super_blo
 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
 	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
+	i_uid_write(inode, INOTAG_UID(DX_TAG(inode), i_uid, i_gid));
+	i_gid_write(inode, INOTAG_GID(DX_TAG(inode), i_uid, i_gid));
+	i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), i_uid, i_gid,
+		le16_to_cpu(raw_inode->i_raw_tag)));
 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 
 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
@@ -4298,8 +4326,10 @@ static int ext4_do_update_inode(handle_t
 
 	ext4_get_inode_flags(ei);
 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	i_uid = i_uid_read(inode);
-	i_gid = i_gid_read(inode);
+	i_uid = from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag));
+	i_gid = from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag));
 	if (!(test_opt(inode->i_sb, NO_UID32))) {
 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
@@ -4322,6 +4352,9 @@ static int ext4_do_update_inode(handle_t
 		raw_inode->i_uid_high = 0;
 		raw_inode->i_gid_high = 0;
 	}
+#ifdef CONFIG_TAGGING_INTERN
+	raw_inode->i_raw_tag = cpu_to_le16(i_tag_read(inode));
+#endif
 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
 
 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
@@ -4553,7 +4586,8 @@ int ext4_setattr(struct dentry *dentry,
 	if (is_quota_modification(inode, attr))
 		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
+	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)) ||
+	    (ia_valid & ATTR_TAG && !tag_eq(attr->ia_tag, inode->i_tag))) {
 		handle_t *handle;
 
 		/* (user+group)*(old+new) structure, inode write (sb,
@@ -4576,6 +4610,8 @@ int ext4_setattr(struct dentry *dentry,
 			inode->i_uid = attr->ia_uid;
 		if (attr->ia_valid & ATTR_GID)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		error = ext4_mark_inode_dirty(handle, inode);
 		ext4_journal_stop(handle);
 	}
diff -ruNp linux-3.13.11/fs/ext4/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ioctl.c
--- linux-3.13.11/fs/ext4/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/compat.h>
 #include <linux/mount.h>
 #include <linux/file.h>
+#include <linux/vs_tag.h>
 #include <asm/uaccess.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -214,6 +215,33 @@ swap_boot_out:
 	return err;
 }
 
+int ext4_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	handle_t *handle = NULL;
+	struct ext4_iloc iloc;
+	int err;
+
+	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	if (IS_SYNC(inode))
+		ext4_handle_sync(handle);
+	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (err)
+		goto flags_err;
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ext4_get_inode_flags(EXT4_I(inode));
+	inode->i_ctime = ext4_current_time(inode);
+
+	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+	ext4_journal_stop(handle);
+	return err;
+}
+
 long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -247,6 +275,11 @@ long ext4_ioctl(struct file *filp, unsig
 
 		flags = ext4_mask_flags(inode->i_mode, flags);
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		err = -EPERM;
 		mutex_lock(&inode->i_mutex);
 		/* Is it quota file? Do not allow user to mess with it */
@@ -264,7 +297,9 @@ long ext4_ioctl(struct file *filp, unsig
 		 *
 		 * This test looks nicer. Thanks to Pauline Middelink
 		 */
-		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
+		if ((oldflags & EXT4_IMMUTABLE_FL) ||
+			((flags ^ oldflags) & (EXT4_APPEND_FL |
+			EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE))
 				goto flags_out;
 		}
diff -ruNp linux-3.13.11/fs/ext4/mballoc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/mballoc.c
--- linux-3.13.11/fs/ext4/mballoc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/mballoc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ex
 		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
 
 		if (EXT4_SB(sb)->s_mb_stats)
-			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
+			atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
 
 		break;
 	}
@@ -2189,7 +2189,7 @@ repeat:
 			ac->ac_status = AC_STATUS_CONTINUE;
 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
 			cr = 3;
-			atomic_inc(&sbi->s_mb_lost_chunks);
+			atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
 			goto repeat;
 		}
 	}
@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *
 	if (sbi->s_mb_stats) {
 		ext4_msg(sb, KERN_INFO,
 		       "mballoc: %u blocks %u reqs (%u success)",
-				atomic_read(&sbi->s_bal_allocated),
-				atomic_read(&sbi->s_bal_reqs),
-				atomic_read(&sbi->s_bal_success));
+				atomic_read_unchecked(&sbi->s_bal_allocated),
+				atomic_read_unchecked(&sbi->s_bal_reqs),
+				atomic_read_unchecked(&sbi->s_bal_success));
 		ext4_msg(sb, KERN_INFO,
 		      "mballoc: %u extents scanned, %u goal hits, "
 				"%u 2^N hits, %u breaks, %u lost",
-				atomic_read(&sbi->s_bal_ex_scanned),
-				atomic_read(&sbi->s_bal_goals),
-				atomic_read(&sbi->s_bal_2orders),
-				atomic_read(&sbi->s_bal_breaks),
-				atomic_read(&sbi->s_mb_lost_chunks));
+				atomic_read_unchecked(&sbi->s_bal_ex_scanned),
+				atomic_read_unchecked(&sbi->s_bal_goals),
+				atomic_read_unchecked(&sbi->s_bal_2orders),
+				atomic_read_unchecked(&sbi->s_bal_breaks),
+				atomic_read_unchecked(&sbi->s_mb_lost_chunks));
 		ext4_msg(sb, KERN_INFO,
 		       "mballoc: %lu generated and it took %Lu",
 				sbi->s_mb_buddies_generated,
 				sbi->s_mb_generation_time);
 		ext4_msg(sb, KERN_INFO,
 		       "mballoc: %u preallocated, %u discarded",
-				atomic_read(&sbi->s_mb_preallocated),
-				atomic_read(&sbi->s_mb_discarded));
+				atomic_read_unchecked(&sbi->s_mb_preallocated),
+				atomic_read_unchecked(&sbi->s_mb_discarded));
 	}
 
 	free_percpu(sbi->s_locality_groups);
@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct
 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
 
 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
-		atomic_inc(&sbi->s_bal_reqs);
-		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
+		atomic_inc_unchecked(&sbi->s_bal_reqs);
+		atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
-			atomic_inc(&sbi->s_bal_success);
-		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
+			atomic_inc_unchecked(&sbi->s_bal_success);
+		atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-			atomic_inc(&sbi->s_bal_goals);
+			atomic_inc_unchecked(&sbi->s_bal_goals);
 		if (ac->ac_found > sbi->s_mb_max_to_scan)
-			atomic_inc(&sbi->s_bal_breaks);
+			atomic_inc_unchecked(&sbi->s_bal_breaks);
 	}
 
 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
@@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
 	trace_ext4_mb_new_inode_pa(ac, pa);
 
 	ext4_mb_use_inode_pa(ac, pa);
-	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
+	atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
 
 	ei = EXT4_I(ac->ac_inode);
 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
@@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
 	trace_ext4_mb_new_group_pa(ac, pa);
 
 	ext4_mb_use_group_pa(ac, pa);
-	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
+	atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
 
 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
 	lg = ac->ac_lg;
@@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
 		 * from the bitmap and continue.
 		 */
 	}
-	atomic_add(free, &sbi->s_mb_discarded);
+	atomic_add_unchecked(free, &sbi->s_mb_discarded);
 
 	return err;
 }
@@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_bud
 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
-	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+	atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
 
 	return 0;
diff -ruNp linux-3.13.11/fs/ext4/mmp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/mmp.c
--- linux-3.13.11/fs/ext4/mmp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/mmp.c	2014-07-09 12:00:15.000000000 +0200
@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_b
 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
 		    const char *function, unsigned int line, const char *msg)
 {
-	__ext4_warning(sb, function, line, msg);
+	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
 		       "MMP failure info: last update time: %llu, last update "
 		       "node: %s, last update device: %s\n",
diff -ruNp linux-3.13.11/fs/ext4/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/namei.c
--- linux-3.13.11/fs/ext4/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,7 @@
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
+#include <linux/vs_tag.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 
@@ -1299,6 +1300,7 @@ restart:
 					ll_rw_block(READ | REQ_META | REQ_PRIO,
 						    1, &bh);
 			}
+		dx_propagate_tag(nd, inode);
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
 			goto next;
@@ -3226,6 +3228,7 @@ const struct inode_operations ext4_dir_i
 	.removexattr	= generic_removexattr,
 	.get_acl	= ext4_get_acl,
 	.fiemap         = ext4_fiemap,
+	.sync_flags	= ext4_sync_flags,
 };
 
 const struct inode_operations ext4_special_inode_operations = {
diff -ruNp linux-3.13.11/fs/ext4/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/super.c
--- linux-3.13.11/fs/ext4/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -1162,7 +1162,7 @@ enum {
 	Opt_inode_readahead_blks, Opt_journal_ioprio,
 	Opt_dioread_nolock, Opt_dioread_lock,
 	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
-	Opt_max_dir_size_kb,
+	Opt_max_dir_size_kb, Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -1243,6 +1243,9 @@ static const match_table_t tokens = {
 	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
 	{Opt_removed, "noreservation"}, /* mount option from ext2/3 */
 	{Opt_removed, "journal=%u"},	/* mount option from ext2/3 */
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL},
 };
 
@@ -1270,7 +1273,7 @@ static ext4_fsblk_t get_sb_block(void **
 }
 
 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
 	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
 
 #ifdef CONFIG_QUOTA
@@ -1475,6 +1478,20 @@ static int handle_mount_opt(struct super
 	case Opt_i_version:
 		sb->s_flags |= MS_I_VERSION;
 		return 1;
+#ifndef CONFIG_TAGGING_NONE
+	case Opt_tag:
+		set_opt(sb, TAGGED);
+		return 1;
+	case Opt_notag:
+		clear_opt(sb, TAGGED);
+		return 1;
+#endif
+#ifdef CONFIG_PROPAGATE
+	case Opt_tagid:
+		/* use args[0] */
+		set_opt(sb, TAGGED);
+		return 1;
+#endif
 	}
 
 	for (m = ext4_mount_opts; m->token != Opt_err; m++)
@@ -2450,7 +2467,7 @@ struct ext4_attr {
 		int offset;
 		int deprecated_val;
 	} u;
-};
+} __do_const;
 
 static int parse_strtoull(const char *buf,
 		unsigned long long max, unsigned long long *value)
@@ -3564,6 +3581,9 @@ static int ext4_fill_super(struct super_
 			clear_opt(sb, DELALLOC);
 	}
 
+	if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
@@ -4818,6 +4838,14 @@ static int ext4_remount(struct super_blo
 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
 		ext4_abort(sb, "Abort forced by user");
 
+	if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) &&
+		!(sb->s_flags & MS_TAGGED)) {
+		printk("EXT4-fs: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
diff -ruNp linux-3.13.11/fs/ext4/xattr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/xattr.c
--- linux-3.13.11/fs/ext4/xattr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ext4/xattr.c	2014-07-09 12:00:15.000000000 +0200
@@ -381,7 +381,7 @@ static int
 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
 			char *buffer, size_t buffer_size)
 {
-	size_t rest = buffer_size;
+	size_t rest = buffer_size, total_size = 0;
 
 	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
 		const struct xattr_handler *handler =
@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *d
 				buffer += size;
 			}
 			rest -= size;
+			total_size += size;
 		}
 	}
-	return buffer_size - rest;
+	return total_size;
 }
 
 static int
diff -ruNp linux-3.13.11/fs/fcntl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fcntl.c
--- linux-3.13.11/fs/fcntl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fcntl.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
 #include <linux/rcupdate.h>
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_limit.h>
 
 #include <asm/poll.h>
 #include <asm/siginfo.h>
@@ -106,6 +107,11 @@ int __f_setown(struct file *filp, struct
 	if (err)
 		return err;
 
+	if (gr_handle_chroot_fowner(pid, type))
+		return -ENOENT;
+	if (gr_check_protected_task_fowner(pid, type))
+		return -EACCES;
+
 	f_modown(filp, pid, type, force);
 	return 0;
 }
@@ -377,6 +383,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, f
 
 	if (!f.file)
 		goto out;
+	if (!vx_files_avail(1))
+		goto out;
 
 	if (unlikely(f.file->f_mode & FMODE_PATH)) {
 		if (!check_fcntl_cmd(cmd))
diff -ruNp linux-3.13.11/fs/fhandle.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fhandle.c
--- linux-3.13.11/fs/fhandle.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fhandle.c	2014-07-09 12:00:15.000000000 +0200
@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct
 	} else
 		retval = 0;
 	/* copy the mount id */
-	if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
-			 sizeof(*mnt_id)) ||
+	if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
 	    copy_to_user(ufh, handle,
 			 sizeof(struct file_handle) + handle_bytes))
 		retval = -EFAULT;
diff -ruNp linux-3.13.11/fs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/file.c
--- linux-3.13.11/fs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,12 +16,14 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/file.h>
+#include <linux/security.h>
 #include <linux/fdtable.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>
+#include <linux/vs_limit.h>
 
 int sysctl_nr_open __read_mostly = 1024*1024;
 int sysctl_nr_open_min = BITS_PER_LONG;
@@ -141,7 +143,7 @@ out:
  * Return <0 error code on error; 1 on successful completion.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_fdtable(struct files_struct *files, int nr)
+static int expand_fdtable(struct files_struct *files, unsigned int nr)
 	__releases(files->file_lock)
 	__acquires(files->file_lock)
 {
@@ -186,7 +188,7 @@ static int expand_fdtable(struct files_s
  * expanded and execution may have blocked.
  * The files->file_lock should be held on entry, and will be held on exit.
  */
-static int expand_files(struct files_struct *files, int nr)
+static int expand_files(struct files_struct *files, unsigned int nr)
 {
 	struct fdtable *fdt;
 
@@ -311,6 +313,8 @@ struct files_struct *dup_fd(struct files
 		struct file *f = *old_fds++;
 		if (f) {
 			get_file(f);
+			/* TODO: sum it first for check and performance */
+			vx_openfd_inc(open_files - i);
 		} else {
 			/*
 			 * The fd may be claimed in the fd bitmap but not yet
@@ -376,9 +380,11 @@ static void close_files(struct files_str
 					filp_close(file, files);
 					cond_resched();
 				}
+				vx_openfd_dec(i);
 			}
 			i++;
 			set >>= 1;
+			cond_resched();
 		}
 	}
 }
@@ -503,6 +509,7 @@ repeat:
 	else
 		__clear_close_on_exec(fd, fdt);
 	error = fd;
+	vx_openfd_inc(fd);
 #if 1
 	/* Sanity check */
 	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
@@ -533,6 +540,7 @@ static void __put_unused_fd(struct files
 	__clear_open_fd(fd, fdt);
 	if (fd < files->next_fd)
 		files->next_fd = fd;
+	vx_openfd_dec(fd);
 }
 
 void put_unused_fd(unsigned int fd)
@@ -812,6 +820,8 @@ static int do_dup2(struct files_struct *
 
 	if (tofree)
 		filp_close(tofree, files);
+	else
+		vx_openfd_inc(fd);	/* fd was unused */
 
 	return fd;
 
@@ -828,6 +838,7 @@ int replace_fd(unsigned fd, struct file
 	if (!file)
 		return __close_fd(files, fd);
 
+	gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
 	if (fd >= rlimit(RLIMIT_NOFILE))
 		return -EBADF;
 
@@ -854,6 +865,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldf
 	if (unlikely(oldfd == newfd))
 		return -EINVAL;
 
+	gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
 	if (newfd >= rlimit(RLIMIT_NOFILE))
 		return -EBADF;
 
@@ -909,6 +921,7 @@ SYSCALL_DEFINE1(dup, unsigned int, filde
 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
 {
 	int err;
+	gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
 	if (from >= rlimit(RLIMIT_NOFILE))
 		return -EINVAL;
 	err = alloc_fd(from, flags);
diff -ruNp linux-3.13.11/fs/file_table.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/file_table.c
--- linux-3.13.11/fs/file_table.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/file_table.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,6 +26,8 @@
 #include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
 
 #include <linux/atomic.h>
 
@@ -137,6 +139,8 @@ struct file *get_empty_filp(void)
 	spin_lock_init(&f->f_lock);
 	eventpoll_init_file(f);
 	/* f->f_version: 0 */
+	f->f_xid = vx_current_xid();
+	vx_files_inc(f);
 	return f;
 
 over:
@@ -254,6 +258,8 @@ static void __fput(struct file *file)
 		i_readcount_dec(inode);
 	if (file->f_mode & FMODE_WRITE)
 		drop_file_write_access(file);
+	vx_files_dec(file);
+	file->f_xid = 0;
 	file->f_path.dentry = NULL;
 	file->f_path.mnt = NULL;
 	file->f_inode = NULL;
@@ -340,6 +346,8 @@ void put_filp(struct file *file)
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		security_file_free(file);
+		vx_files_dec(file);
+		file->f_xid = 0;
 		file_free(file);
 	}
 }
diff -ruNp linux-3.13.11/fs/filesystems.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/filesystems.c
--- linux-3.13.11/fs/filesystems.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/filesystems.c	2014-07-09 12:00:15.000000000 +0200
@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(con
 	int len = dot ? dot - name : strlen(name);
 
 	fs = __get_fs_type(name, len);
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
+#else
 	if (!fs && (request_module("fs-%.*s", len, name) == 0))
+#endif
 		fs = __get_fs_type(name, len);
 
 	if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
diff -ruNp linux-3.13.11/fs/fs_struct.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fs_struct.c
--- linux-3.13.11/fs/fs_struct.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fs_struct.c	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,8 @@
 #include <linux/path.h>
 #include <linux/slab.h>
 #include <linux/fs_struct.h>
+#include <linux/vserver/global.h>
+#include <linux/grsecurity.h>
 #include "internal.h"
 
 /*
@@ -19,6 +21,7 @@ void set_fs_root(struct fs_struct *fs, c
 	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
+	gr_set_chroot_entries(current, path);
 	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
@@ -67,6 +70,10 @@ void chroot_fs_refs(const struct path *o
 			int hits = 0;
 			spin_lock(&fs->lock);
 			write_seqcount_begin(&fs->seq);
+			/* this root replacement is only done by pivot_root,
+			   leave grsec's chroot tagging alone for this task
+			   so that a pivoted root isn't treated as a chroot
+			*/
 			hits += replace_path(&fs->root, old_root, new_root);
 			hits += replace_path(&fs->pwd, old_root, new_root);
 			write_seqcount_end(&fs->seq);
@@ -87,6 +94,7 @@ void free_fs_struct(struct fs_struct *fs
 {
 	path_put(&fs->root);
 	path_put(&fs->pwd);
+	atomic_dec(&vs_global_fs);
 	kmem_cache_free(fs_cachep, fs);
 }
 
@@ -99,7 +107,8 @@ void exit_fs(struct task_struct *tsk)
 		task_lock(tsk);
 		spin_lock(&fs->lock);
 		tsk->fs = NULL;
-		kill = !--fs->users;
+		gr_clear_chroot_entries(tsk);
+		kill = !atomic_dec_return(&fs->users);
 		spin_unlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
@@ -112,7 +121,7 @@ struct fs_struct *copy_fs_struct(struct
 	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
 	/* We don't need to lock fs - think why ;-) */
 	if (fs) {
-		fs->users = 1;
+		atomic_set(&fs->users, 1);
 		fs->in_exec = 0;
 		spin_lock_init(&fs->lock);
 		seqcount_init(&fs->seq);
@@ -121,9 +130,13 @@ struct fs_struct *copy_fs_struct(struct
 		spin_lock(&old->lock);
 		fs->root = old->root;
 		path_get(&fs->root);
+		/* instead of calling gr_set_chroot_entries here,
+		   we call it from every caller of this function
+		*/
 		fs->pwd = old->pwd;
 		path_get(&fs->pwd);
 		spin_unlock(&old->lock);
+		atomic_inc(&vs_global_fs);
 	}
 	return fs;
 }
@@ -139,8 +152,9 @@ int unshare_fs_struct(void)
 
 	task_lock(current);
 	spin_lock(&fs->lock);
-	kill = !--fs->users;
+	kill = !atomic_dec_return(&fs->users);
 	current->fs = new_fs;
+	gr_set_chroot_entries(current, &new_fs->root);
 	spin_unlock(&fs->lock);
 	task_unlock(current);
 
@@ -153,13 +167,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
 
 int current_umask(void)
 {
-	return current->fs->umask;
+	return current->fs->umask | gr_acl_umask();
 }
 EXPORT_SYMBOL(current_umask);
 
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
-	.users		= 1,
+	.users		= ATOMIC_INIT(1),
 	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
 	.seq		= SEQCNT_ZERO(init_fs.seq),
 	.umask		= 0022,
diff -ruNp linux-3.13.11/fs/fscache/cookie.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/cookie.c
--- linux-3.13.11/fs/fscache/cookie.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/cookie.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,7 +19,7 @@
 
 struct kmem_cache *fscache_cookie_jar;
 
-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
 
 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
 static int fscache_alloc_object(struct fscache_cache *cache,
@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire
 	       parent ? (char *) parent->def->name : "<no-parent>",
 	       def->name, netfs_data, enable);
 
-	fscache_stat(&fscache_n_acquires);
+	fscache_stat_unchecked(&fscache_n_acquires);
 
 	/* if there's no parent cookie, then we don't create one here either */
 	if (!parent) {
-		fscache_stat(&fscache_n_acquires_null);
+		fscache_stat_unchecked(&fscache_n_acquires_null);
 		_leave(" [no parent]");
 		return NULL;
 	}
@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire
 	/* allocate and initialise a cookie */
 	cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
 	if (!cookie) {
-		fscache_stat(&fscache_n_acquires_oom);
+		fscache_stat_unchecked(&fscache_n_acquires_oom);
 		_leave(" [ENOMEM]");
 		return NULL;
 	}
@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire
 
 	switch (cookie->def->type) {
 	case FSCACHE_COOKIE_TYPE_INDEX:
-		fscache_stat(&fscache_n_cookie_index);
+		fscache_stat_unchecked(&fscache_n_cookie_index);
 		break;
 	case FSCACHE_COOKIE_TYPE_DATAFILE:
-		fscache_stat(&fscache_n_cookie_data);
+		fscache_stat_unchecked(&fscache_n_cookie_data);
 		break;
 	default:
-		fscache_stat(&fscache_n_cookie_special);
+		fscache_stat_unchecked(&fscache_n_cookie_special);
 		break;
 	}
 
@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire
 			} else {
 				atomic_dec(&parent->n_children);
 				__fscache_cookie_put(cookie);
-				fscache_stat(&fscache_n_acquires_nobufs);
+				fscache_stat_unchecked(&fscache_n_acquires_nobufs);
 				_leave(" = NULL");
 				return NULL;
 			}
@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire
 		}
 	}
 
-	fscache_stat(&fscache_n_acquires_ok);
+	fscache_stat_unchecked(&fscache_n_acquires_ok);
 	_leave(" = %p", cookie);
 	return cookie;
 }
@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_coo
 	cache = fscache_select_cache_for_object(cookie->parent);
 	if (!cache) {
 		up_read(&fscache_addremove_sem);
-		fscache_stat(&fscache_n_acquires_no_cache);
+		fscache_stat_unchecked(&fscache_n_acquires_no_cache);
 		_leave(" = -ENOMEDIUM [no cache]");
 		return -ENOMEDIUM;
 	}
@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct f
 	object = cache->ops->alloc_object(cache, cookie);
 	fscache_stat_d(&fscache_n_cop_alloc_object);
 	if (IS_ERR(object)) {
-		fscache_stat(&fscache_n_object_no_alloc);
+		fscache_stat_unchecked(&fscache_n_object_no_alloc);
 		ret = PTR_ERR(object);
 		goto error;
 	}
 
-	fscache_stat(&fscache_n_object_alloc);
+	fscache_stat_unchecked(&fscache_n_object_alloc);
 
-	object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+	object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
 
 	_debug("ALLOC OBJ%x: %s {%lx}",
 	       object->debug_id, cookie->def->name, object->events);
@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache
 
 	_enter("{%s}", cookie->def->name);
 
-	fscache_stat(&fscache_n_invalidates);
+	fscache_stat_unchecked(&fscache_n_invalidates);
 
 	/* Only permit invalidation of data files.  Invalidating an index will
 	 * require the caller to release all its attachments to the tree rooted
@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fsca
 {
 	struct fscache_object *object;
 
-	fscache_stat(&fscache_n_updates);
+	fscache_stat_unchecked(&fscache_n_updates);
 
 	if (!cookie) {
-		fscache_stat(&fscache_n_updates_null);
+		fscache_stat_unchecked(&fscache_n_updates_null);
 		_leave(" [no cookie]");
 		return;
 	}
@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
  */
 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 {
-	fscache_stat(&fscache_n_relinquishes);
+	fscache_stat_unchecked(&fscache_n_relinquishes);
 	if (retire)
-		fscache_stat(&fscache_n_relinquishes_retire);
+		fscache_stat_unchecked(&fscache_n_relinquishes_retire);
 
 	if (!cookie) {
-		fscache_stat(&fscache_n_relinquishes_null);
+		fscache_stat_unchecked(&fscache_n_relinquishes_null);
 		_leave(" [no cookie]");
 		return;
 	}
@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct f
 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
 		goto inconsistent;
 
-	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
 
 	__fscache_use_cookie(cookie);
 	if (fscache_submit_op(object, op) < 0)
diff -ruNp linux-3.13.11/fs/fscache/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/internal.h
--- linux-3.13.11/fs/fscache/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct
 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
 extern int fscache_wait_for_operation_activation(struct fscache_object *,
 						 struct fscache_operation *,
-						 atomic_t *,
-						 atomic_t *,
+						 atomic_unchecked_t *,
+						 atomic_unchecked_t *,
 						 void (*)(struct fscache_operation *));
 extern void fscache_invalidate_writes(struct fscache_cookie *);
 
@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
  * stats.c
  */
 #ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
 
-extern atomic_t fscache_n_op_pend;
-extern atomic_t fscache_n_op_run;
-extern atomic_t fscache_n_op_enqueue;
-extern atomic_t fscache_n_op_deferred_release;
-extern atomic_t fscache_n_op_release;
-extern atomic_t fscache_n_op_gc;
-extern atomic_t fscache_n_op_cancelled;
-extern atomic_t fscache_n_op_rejected;
-
-extern atomic_t fscache_n_attr_changed;
-extern atomic_t fscache_n_attr_changed_ok;
-extern atomic_t fscache_n_attr_changed_nobufs;
-extern atomic_t fscache_n_attr_changed_nomem;
-extern atomic_t fscache_n_attr_changed_calls;
-
-extern atomic_t fscache_n_allocs;
-extern atomic_t fscache_n_allocs_ok;
-extern atomic_t fscache_n_allocs_wait;
-extern atomic_t fscache_n_allocs_nobufs;
-extern atomic_t fscache_n_allocs_intr;
-extern atomic_t fscache_n_allocs_object_dead;
-extern atomic_t fscache_n_alloc_ops;
-extern atomic_t fscache_n_alloc_op_waits;
-
-extern atomic_t fscache_n_retrievals;
-extern atomic_t fscache_n_retrievals_ok;
-extern atomic_t fscache_n_retrievals_wait;
-extern atomic_t fscache_n_retrievals_nodata;
-extern atomic_t fscache_n_retrievals_nobufs;
-extern atomic_t fscache_n_retrievals_intr;
-extern atomic_t fscache_n_retrievals_nomem;
-extern atomic_t fscache_n_retrievals_object_dead;
-extern atomic_t fscache_n_retrieval_ops;
-extern atomic_t fscache_n_retrieval_op_waits;
-
-extern atomic_t fscache_n_stores;
-extern atomic_t fscache_n_stores_ok;
-extern atomic_t fscache_n_stores_again;
-extern atomic_t fscache_n_stores_nobufs;
-extern atomic_t fscache_n_stores_oom;
-extern atomic_t fscache_n_store_ops;
-extern atomic_t fscache_n_store_calls;
-extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
-extern atomic_t fscache_n_store_pages_over_limit;
-
-extern atomic_t fscache_n_store_vmscan_not_storing;
-extern atomic_t fscache_n_store_vmscan_gone;
-extern atomic_t fscache_n_store_vmscan_busy;
-extern atomic_t fscache_n_store_vmscan_cancelled;
-extern atomic_t fscache_n_store_vmscan_wait;
-
-extern atomic_t fscache_n_marks;
-extern atomic_t fscache_n_uncaches;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_null;
-extern atomic_t fscache_n_acquires_no_cache;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_nobufs;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-extern atomic_t fscache_n_invalidates_run;
-
-extern atomic_t fscache_n_updates;
-extern atomic_t fscache_n_updates_null;
-extern atomic_t fscache_n_updates_run;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_null;
-extern atomic_t fscache_n_relinquishes_waitcrt;
-extern atomic_t fscache_n_relinquishes_retire;
-
-extern atomic_t fscache_n_cookie_index;
-extern atomic_t fscache_n_cookie_data;
-extern atomic_t fscache_n_cookie_special;
-
-extern atomic_t fscache_n_object_alloc;
-extern atomic_t fscache_n_object_no_alloc;
-extern atomic_t fscache_n_object_lookups;
-extern atomic_t fscache_n_object_lookups_negative;
-extern atomic_t fscache_n_object_lookups_positive;
-extern atomic_t fscache_n_object_lookups_timed_out;
-extern atomic_t fscache_n_object_created;
-extern atomic_t fscache_n_object_avail;
-extern atomic_t fscache_n_object_dead;
-
-extern atomic_t fscache_n_checkaux_none;
-extern atomic_t fscache_n_checkaux_okay;
-extern atomic_t fscache_n_checkaux_update;
-extern atomic_t fscache_n_checkaux_obsolete;
+extern atomic_unchecked_t fscache_n_op_pend;
+extern atomic_unchecked_t fscache_n_op_run;
+extern atomic_unchecked_t fscache_n_op_enqueue;
+extern atomic_unchecked_t fscache_n_op_deferred_release;
+extern atomic_unchecked_t fscache_n_op_release;
+extern atomic_unchecked_t fscache_n_op_gc;
+extern atomic_unchecked_t fscache_n_op_cancelled;
+extern atomic_unchecked_t fscache_n_op_rejected;
+
+extern atomic_unchecked_t fscache_n_attr_changed;
+extern atomic_unchecked_t fscache_n_attr_changed_ok;
+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
+extern atomic_unchecked_t fscache_n_attr_changed_calls;
+
+extern atomic_unchecked_t fscache_n_allocs;
+extern atomic_unchecked_t fscache_n_allocs_ok;
+extern atomic_unchecked_t fscache_n_allocs_wait;
+extern atomic_unchecked_t fscache_n_allocs_nobufs;
+extern atomic_unchecked_t fscache_n_allocs_intr;
+extern atomic_unchecked_t fscache_n_allocs_object_dead;
+extern atomic_unchecked_t fscache_n_alloc_ops;
+extern atomic_unchecked_t fscache_n_alloc_op_waits;
+
+extern atomic_unchecked_t fscache_n_retrievals;
+extern atomic_unchecked_t fscache_n_retrievals_ok;
+extern atomic_unchecked_t fscache_n_retrievals_wait;
+extern atomic_unchecked_t fscache_n_retrievals_nodata;
+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
+extern atomic_unchecked_t fscache_n_retrievals_intr;
+extern atomic_unchecked_t fscache_n_retrievals_nomem;
+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
+extern atomic_unchecked_t fscache_n_retrieval_ops;
+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+extern atomic_unchecked_t fscache_n_stores;
+extern atomic_unchecked_t fscache_n_stores_ok;
+extern atomic_unchecked_t fscache_n_stores_again;
+extern atomic_unchecked_t fscache_n_stores_nobufs;
+extern atomic_unchecked_t fscache_n_stores_oom;
+extern atomic_unchecked_t fscache_n_store_ops;
+extern atomic_unchecked_t fscache_n_store_calls;
+extern atomic_unchecked_t fscache_n_store_pages;
+extern atomic_unchecked_t fscache_n_store_radix_deletes;
+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
+
+extern atomic_unchecked_t fscache_n_marks;
+extern atomic_unchecked_t fscache_n_uncaches;
+
+extern atomic_unchecked_t fscache_n_acquires;
+extern atomic_unchecked_t fscache_n_acquires_null;
+extern atomic_unchecked_t fscache_n_acquires_no_cache;
+extern atomic_unchecked_t fscache_n_acquires_ok;
+extern atomic_unchecked_t fscache_n_acquires_nobufs;
+extern atomic_unchecked_t fscache_n_acquires_oom;
+
+extern atomic_unchecked_t fscache_n_invalidates;
+extern atomic_unchecked_t fscache_n_invalidates_run;
+
+extern atomic_unchecked_t fscache_n_updates;
+extern atomic_unchecked_t fscache_n_updates_null;
+extern atomic_unchecked_t fscache_n_updates_run;
+
+extern atomic_unchecked_t fscache_n_relinquishes;
+extern atomic_unchecked_t fscache_n_relinquishes_null;
+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
+extern atomic_unchecked_t fscache_n_relinquishes_retire;
+
+extern atomic_unchecked_t fscache_n_cookie_index;
+extern atomic_unchecked_t fscache_n_cookie_data;
+extern atomic_unchecked_t fscache_n_cookie_special;
+
+extern atomic_unchecked_t fscache_n_object_alloc;
+extern atomic_unchecked_t fscache_n_object_no_alloc;
+extern atomic_unchecked_t fscache_n_object_lookups;
+extern atomic_unchecked_t fscache_n_object_lookups_negative;
+extern atomic_unchecked_t fscache_n_object_lookups_positive;
+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
+extern atomic_unchecked_t fscache_n_object_created;
+extern atomic_unchecked_t fscache_n_object_avail;
+extern atomic_unchecked_t fscache_n_object_dead;
+
+extern atomic_unchecked_t fscache_n_checkaux_none;
+extern atomic_unchecked_t fscache_n_checkaux_okay;
+extern atomic_unchecked_t fscache_n_checkaux_update;
+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
 
 extern atomic_t fscache_n_cop_alloc_object;
 extern atomic_t fscache_n_cop_lookup_object;
@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t
 	atomic_inc(stat);
 }
 
+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
+{
+	atomic_inc_unchecked(stat);
+}
+
 static inline void fscache_stat_d(atomic_t *stat)
 {
 	atomic_dec(stat);
@@ -284,6 +289,7 @@ extern const struct file_operations fsca
 
 #define __fscache_stat(stat) (NULL)
 #define fscache_stat(stat) do {} while (0)
+#define fscache_stat_unchecked(stat) do {} while (0)
 #define fscache_stat_d(stat) do {} while (0)
 #endif
 
diff -ruNp linux-3.13.11/fs/fscache/object.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/object.c
--- linux-3.13.11/fs/fscache/object.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/object.c	2014-07-09 12:00:15.000000000 +0200
@@ -451,7 +451,7 @@ static const struct fscache_state *fscac
 	_debug("LOOKUP \"%s\" in \"%s\"",
 	       cookie->def->name, object->cache->tag->name);
 
-	fscache_stat(&fscache_n_object_lookups);
+	fscache_stat_unchecked(&fscache_n_object_lookups);
 	fscache_stat(&fscache_n_cop_lookup_object);
 	ret = object->cache->ops->lookup_object(object);
 	fscache_stat_d(&fscache_n_cop_lookup_object);
@@ -461,7 +461,7 @@ static const struct fscache_state *fscac
 	if (ret == -ETIMEDOUT) {
 		/* probably stuck behind another object, so move this one to
 		 * the back of the queue */
-		fscache_stat(&fscache_n_object_lookups_timed_out);
+		fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
 		_leave(" [timeout]");
 		return NO_TRANSIT;
 	}
@@ -489,7 +489,7 @@ void fscache_object_lookup_negative(stru
 	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);
 
 	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
-		fscache_stat(&fscache_n_object_lookups_negative);
+		fscache_stat_unchecked(&fscache_n_object_lookups_negative);
 
 		/* Allow write requests to begin stacking up and read requests to begin
 		 * returning ENODATA.
@@ -524,7 +524,7 @@ void fscache_obtained_object(struct fsca
 	/* if we were still looking up, then we must have a positive lookup
 	 * result, in which case there may be data available */
 	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
-		fscache_stat(&fscache_n_object_lookups_positive);
+		fscache_stat_unchecked(&fscache_n_object_lookups_positive);
 
 		/* We do (presumably) have data */
 		clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
@@ -536,7 +536,7 @@ void fscache_obtained_object(struct fsca
 		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
 		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
 	} else {
-		fscache_stat(&fscache_n_object_created);
+		fscache_stat_unchecked(&fscache_n_object_created);
 	}
 
 	set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
@@ -572,7 +572,7 @@ static const struct fscache_state *fscac
 	fscache_stat_d(&fscache_n_cop_lookup_complete);
 
 	fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
-	fscache_stat(&fscache_n_object_avail);
+	fscache_stat_unchecked(&fscache_n_object_avail);
 
 	_leave("");
 	return transit_to(JUMPSTART_DEPS);
@@ -719,7 +719,7 @@ static const struct fscache_state *fscac
 
 	/* this just shifts the object release to the work processor */
 	fscache_put_object(object);
-	fscache_stat(&fscache_n_object_dead);
+	fscache_stat_unchecked(&fscache_n_object_dead);
 
 	_leave("");
 	return transit_to(OBJECT_DEAD);
@@ -884,7 +884,7 @@ enum fscache_checkaux fscache_check_aux(
 	enum fscache_checkaux result;
 
 	if (!object->cookie->def->check_aux) {
-		fscache_stat(&fscache_n_checkaux_none);
+		fscache_stat_unchecked(&fscache_n_checkaux_none);
 		return FSCACHE_CHECKAUX_OKAY;
 	}
 
@@ -893,17 +893,17 @@ enum fscache_checkaux fscache_check_aux(
 	switch (result) {
 		/* entry okay as is */
 	case FSCACHE_CHECKAUX_OKAY:
-		fscache_stat(&fscache_n_checkaux_okay);
+		fscache_stat_unchecked(&fscache_n_checkaux_okay);
 		break;
 
 		/* entry requires update */
 	case FSCACHE_CHECKAUX_NEEDS_UPDATE:
-		fscache_stat(&fscache_n_checkaux_update);
+		fscache_stat_unchecked(&fscache_n_checkaux_update);
 		break;
 
 		/* entry requires deletion */
 	case FSCACHE_CHECKAUX_OBSOLETE:
-		fscache_stat(&fscache_n_checkaux_obsolete);
+		fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
 		break;
 
 	default:
@@ -989,7 +989,7 @@ static const struct fscache_state *fscac
 {
 	const struct fscache_state *s;
 
-	fscache_stat(&fscache_n_invalidates_run);
+	fscache_stat_unchecked(&fscache_n_invalidates_run);
 	fscache_stat(&fscache_n_cop_invalidate_object);
 	s = _fscache_invalidate_object(object, event);
 	fscache_stat_d(&fscache_n_cop_invalidate_object);
@@ -1004,7 +1004,7 @@ static const struct fscache_state *fscac
 {
 	_enter("{OBJ%x},%d", object->debug_id, event);
 
-	fscache_stat(&fscache_n_updates_run);
+	fscache_stat_unchecked(&fscache_n_updates_run);
 	fscache_stat(&fscache_n_cop_update_object);
 	object->cache->ops->update_object(object);
 	fscache_stat_d(&fscache_n_cop_update_object);
diff -ruNp linux-3.13.11/fs/fscache/operation.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/operation.c
--- linux-3.13.11/fs/fscache/operation.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/operation.c	2014-07-09 12:00:15.000000000 +0200
@@ -17,7 +17,7 @@
 #include <linux/slab.h>
 #include "internal.h"
 
-atomic_t fscache_op_debug_id;
+atomic_unchecked_t fscache_op_debug_id;
 EXPORT_SYMBOL(fscache_op_debug_id);
 
 /**
@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
 
-	fscache_stat(&fscache_n_op_enqueue);
+	fscache_stat_unchecked(&fscache_n_op_enqueue);
 	switch (op->flags & FSCACHE_OP_TYPE) {
 	case FSCACHE_OP_ASYNC:
 		_debug("queue async");
@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
 		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
 	if (op->processor)
 		fscache_enqueue_operation(op);
-	fscache_stat(&fscache_n_op_run);
+	fscache_stat_unchecked(&fscache_n_op_run);
 }
 
 /*
@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct f
 		if (object->n_in_progress > 0) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
+			fscache_stat_unchecked(&fscache_n_op_pend);
 		} else if (!list_empty(&object->pending_ops)) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
+			fscache_stat_unchecked(&fscache_n_op_pend);
 			fscache_start_operations(object);
 		} else {
 			ASSERTCMP(object->n_in_progress, ==, 0);
@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct f
 		object->n_exclusive++;	/* reads and writes must wait */
 		atomic_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
-		fscache_stat(&fscache_n_op_pend);
+		fscache_stat_unchecked(&fscache_n_op_pend);
 		ret = 0;
 	} else {
 		/* If we're in any other state, there must have been an I/O
@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_obj
 		if (object->n_exclusive > 0) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
+			fscache_stat_unchecked(&fscache_n_op_pend);
 		} else if (!list_empty(&object->pending_ops)) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
-			fscache_stat(&fscache_n_op_pend);
+			fscache_stat_unchecked(&fscache_n_op_pend);
 			fscache_start_operations(object);
 		} else {
 			ASSERTCMP(object->n_exclusive, ==, 0);
@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_obj
 		object->n_ops++;
 		atomic_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
-		fscache_stat(&fscache_n_op_pend);
+		fscache_stat_unchecked(&fscache_n_op_pend);
 		ret = 0;
 	} else if (fscache_object_is_dying(object)) {
-		fscache_stat(&fscache_n_op_rejected);
+		fscache_stat_unchecked(&fscache_n_op_rejected);
 		op->state = FSCACHE_OP_ST_CANCELLED;
 		ret = -ENOBUFS;
 	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_ope
 	ret = -EBUSY;
 	if (op->state == FSCACHE_OP_ST_PENDING) {
 		ASSERT(!list_empty(&op->pend_link));
-		fscache_stat(&fscache_n_op_cancelled);
+		fscache_stat_unchecked(&fscache_n_op_cancelled);
 		list_del_init(&op->pend_link);
 		if (do_cancel)
 			do_cancel(op);
@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscac
 	while (!list_empty(&object->pending_ops)) {
 		op = list_entry(object->pending_ops.next,
 				struct fscache_operation, pend_link);
-		fscache_stat(&fscache_n_op_cancelled);
+		fscache_stat_unchecked(&fscache_n_op_cancelled);
 		list_del_init(&op->pend_link);
 
 		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscach
 		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
 	op->state = FSCACHE_OP_ST_DEAD;
 
-	fscache_stat(&fscache_n_op_release);
+	fscache_stat_unchecked(&fscache_n_op_release);
 
 	if (op->release) {
 		op->release(op);
@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscach
 	 * lock, and defer it otherwise */
 	if (!spin_trylock(&object->lock)) {
 		_debug("defer put");
-		fscache_stat(&fscache_n_op_deferred_release);
+		fscache_stat_unchecked(&fscache_n_op_deferred_release);
 
 		cache = object->cache;
 		spin_lock(&cache->op_gc_list_lock);
@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_st
 
 		_debug("GC DEFERRED REL OBJ%x OP%x",
 		       object->debug_id, op->debug_id);
-		fscache_stat(&fscache_n_op_gc);
+		fscache_stat_unchecked(&fscache_n_op_gc);
 
 		ASSERTCMP(atomic_read(&op->usage), ==, 0);
 		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
diff -ruNp linux-3.13.11/fs/fscache/page.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/page.c
--- linux-3.13.11/fs/fscache/page.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/page.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,7 +61,7 @@ try_again:
 	val = radix_tree_lookup(&cookie->stores, page->index);
 	if (!val) {
 		rcu_read_unlock();
-		fscache_stat(&fscache_n_store_vmscan_not_storing);
+		fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
 		__fscache_uncache_page(cookie, page);
 		return true;
 	}
@@ -91,11 +91,11 @@ try_again:
 	spin_unlock(&cookie->stores_lock);
 
 	if (xpage) {
-		fscache_stat(&fscache_n_store_vmscan_cancelled);
-		fscache_stat(&fscache_n_store_radix_deletes);
+		fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
+		fscache_stat_unchecked(&fscache_n_store_radix_deletes);
 		ASSERTCMP(xpage, ==, page);
 	} else {
-		fscache_stat(&fscache_n_store_vmscan_gone);
+		fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
 	}
 
 	wake_up_bit(&cookie->flags, 0);
@@ -110,11 +110,11 @@ page_busy:
 	 * sleeping on memory allocation, so we may need to impose a timeout
 	 * too. */
 	if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
-		fscache_stat(&fscache_n_store_vmscan_busy);
+		fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
 		return false;
 	}
 
-	fscache_stat(&fscache_n_store_vmscan_wait);
+	fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
 	__fscache_wait_on_page_write(cookie, page);
 	gfp &= ~__GFP_WAIT;
 	goto try_again;
@@ -140,7 +140,7 @@ static void fscache_end_page_write(struc
 				     FSCACHE_COOKIE_STORING_TAG);
 		if (!radix_tree_tag_get(&cookie->stores, page->index,
 					FSCACHE_COOKIE_PENDING_TAG)) {
-			fscache_stat(&fscache_n_store_radix_deletes);
+			fscache_stat_unchecked(&fscache_n_store_radix_deletes);
 			xpage = radix_tree_delete(&cookie->stores, page->index);
 		}
 		spin_unlock(&cookie->stores_lock);
@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(stru
 
 	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
 
-	fscache_stat(&fscache_n_attr_changed_calls);
+	fscache_stat_unchecked(&fscache_n_attr_changed_calls);
 
 	if (fscache_object_is_active(object)) {
 		fscache_stat(&fscache_n_cop_attr_changed);
@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscach
 
 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
 
-	fscache_stat(&fscache_n_attr_changed);
+	fscache_stat_unchecked(&fscache_n_attr_changed);
 
 	op = kzalloc(sizeof(*op), GFP_KERNEL);
 	if (!op) {
-		fscache_stat(&fscache_n_attr_changed_nomem);
+		fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
 		_leave(" = -ENOMEM");
 		return -ENOMEM;
 	}
@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscach
 	if (fscache_submit_exclusive_op(object, op) < 0)
 		goto nobufs;
 	spin_unlock(&cookie->lock);
-	fscache_stat(&fscache_n_attr_changed_ok);
+	fscache_stat_unchecked(&fscache_n_attr_changed_ok);
 	fscache_put_operation(op);
 	_leave(" = 0");
 	return 0;
@@ -225,7 +225,7 @@ nobufs:
 	kfree(op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
-	fscache_stat(&fscache_n_attr_changed_nobufs);
+	fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
 	_leave(" = %d", -ENOBUFS);
 	return -ENOBUFS;
 }
@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
 	/* allocate a retrieval operation and attempt to submit it */
 	op = kzalloc(sizeof(*op), GFP_NOIO);
 	if (!op) {
-		fscache_stat(&fscache_n_retrievals_nomem);
+		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
 		return NULL;
 	}
 
@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(str
 		return 0;
 	}
 
-	fscache_stat(&fscache_n_retrievals_wait);
+	fscache_stat_unchecked(&fscache_n_retrievals_wait);
 
 	jif = jiffies;
 	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
 			fscache_wait_bit_interruptible,
 			TASK_INTERRUPTIBLE) != 0) {
-		fscache_stat(&fscache_n_retrievals_intr);
+		fscache_stat_unchecked(&fscache_n_retrievals_intr);
 		_leave(" = -ERESTARTSYS");
 		return -ERESTARTSYS;
 	}
@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(
  */
 int fscache_wait_for_operation_activation(struct fscache_object *object,
 					  struct fscache_operation *op,
-					  atomic_t *stat_op_waits,
-					  atomic_t *stat_object_dead,
+					  atomic_unchecked_t *stat_op_waits,
+					  atomic_unchecked_t *stat_object_dead,
 					  void (*do_cancel)(struct fscache_operation *))
 {
 	int ret;
@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activatio
 
 	_debug(">>> WT");
 	if (stat_op_waits)
-		fscache_stat(stat_op_waits);
+		fscache_stat_unchecked(stat_op_waits);
 	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 			fscache_wait_bit_interruptible,
 			TASK_INTERRUPTIBLE) != 0) {
@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activatio
 check_if_dead:
 	if (op->state == FSCACHE_OP_ST_CANCELLED) {
 		if (stat_object_dead)
-			fscache_stat(stat_object_dead);
+			fscache_stat_unchecked(stat_object_dead);
 		_leave(" = -ENOBUFS [cancelled]");
 		return -ENOBUFS;
 	}
@@ -366,7 +366,7 @@ check_if_dead:
 		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
 		fscache_cancel_op(op, do_cancel);
 		if (stat_object_dead)
-			fscache_stat(stat_object_dead);
+			fscache_stat_unchecked(stat_object_dead);
 		return -ENOBUFS;
 	}
 	return 0;
@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct
 
 	_enter("%p,%p,,,", cookie, page);
 
-	fscache_stat(&fscache_n_retrievals);
+	fscache_stat_unchecked(&fscache_n_retrievals);
 
 	if (hlist_empty(&cookie->backing_objects))
 		goto nobufs;
@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct
 		goto nobufs_unlock_dec;
 	spin_unlock(&cookie->lock);
 
-	fscache_stat(&fscache_n_retrieval_ops);
+	fscache_stat_unchecked(&fscache_n_retrieval_ops);
 
 	/* pin the netfs read context in case we need to do the actual netfs
 	 * read because we've encountered a cache read failure */
@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct
 
 error:
 	if (ret == -ENOMEM)
-		fscache_stat(&fscache_n_retrievals_nomem);
+		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
-		fscache_stat(&fscache_n_retrievals_intr);
+		fscache_stat_unchecked(&fscache_n_retrievals_intr);
 	else if (ret == -ENODATA)
-		fscache_stat(&fscache_n_retrievals_nodata);
+		fscache_stat_unchecked(&fscache_n_retrievals_nodata);
 	else if (ret < 0)
-		fscache_stat(&fscache_n_retrievals_nobufs);
+		fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
 	else
-		fscache_stat(&fscache_n_retrievals_ok);
+		fscache_stat_unchecked(&fscache_n_retrievals_ok);
 
 	fscache_put_retrieval(op);
 	_leave(" = %d", ret);
@@ -490,7 +490,7 @@ nobufs_unlock:
 		__fscache_wake_unused_cookie(cookie);
 	kfree(op);
 nobufs:
-	fscache_stat(&fscache_n_retrievals_nobufs);
+	fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 }
@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct
 
 	_enter("%p,,%d,,,", cookie, *nr_pages);
 
-	fscache_stat(&fscache_n_retrievals);
+	fscache_stat_unchecked(&fscache_n_retrievals);
 
 	if (hlist_empty(&cookie->backing_objects))
 		goto nobufs;
@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct
 		goto nobufs_unlock_dec;
 	spin_unlock(&cookie->lock);
 
-	fscache_stat(&fscache_n_retrieval_ops);
+	fscache_stat_unchecked(&fscache_n_retrieval_ops);
 
 	/* pin the netfs read context in case we need to do the actual netfs
 	 * read because we've encountered a cache read failure */
@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct
 
 error:
 	if (ret == -ENOMEM)
-		fscache_stat(&fscache_n_retrievals_nomem);
+		fscache_stat_unchecked(&fscache_n_retrievals_nomem);
 	else if (ret == -ERESTARTSYS)
-		fscache_stat(&fscache_n_retrievals_intr);
+		fscache_stat_unchecked(&fscache_n_retrievals_intr);
 	else if (ret == -ENODATA)
-		fscache_stat(&fscache_n_retrievals_nodata);
+		fscache_stat_unchecked(&fscache_n_retrievals_nodata);
 	else if (ret < 0)
-		fscache_stat(&fscache_n_retrievals_nobufs);
+		fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
 	else
-		fscache_stat(&fscache_n_retrievals_ok);
+		fscache_stat_unchecked(&fscache_n_retrievals_ok);
 
 	fscache_put_retrieval(op);
 	_leave(" = %d", ret);
@@ -621,7 +621,7 @@ nobufs_unlock:
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 nobufs:
-	fscache_stat(&fscache_n_retrievals_nobufs);
+	fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 }
@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_
 
 	_enter("%p,%p,,,", cookie, page);
 
-	fscache_stat(&fscache_n_allocs);
+	fscache_stat_unchecked(&fscache_n_allocs);
 
 	if (hlist_empty(&cookie->backing_objects))
 		goto nobufs;
@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_
 		goto nobufs_unlock_dec;
 	spin_unlock(&cookie->lock);
 
-	fscache_stat(&fscache_n_alloc_ops);
+	fscache_stat_unchecked(&fscache_n_alloc_ops);
 
 	ret = fscache_wait_for_operation_activation(
 		object, &op->op,
@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_
 
 error:
 	if (ret == -ERESTARTSYS)
-		fscache_stat(&fscache_n_allocs_intr);
+		fscache_stat_unchecked(&fscache_n_allocs_intr);
 	else if (ret < 0)
-		fscache_stat(&fscache_n_allocs_nobufs);
+		fscache_stat_unchecked(&fscache_n_allocs_nobufs);
 	else
-		fscache_stat(&fscache_n_allocs_ok);
+		fscache_stat_unchecked(&fscache_n_allocs_ok);
 
 	fscache_put_retrieval(op);
 	_leave(" = %d", ret);
@@ -715,7 +715,7 @@ nobufs_unlock:
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
 nobufs:
-	fscache_stat(&fscache_n_allocs_nobufs);
+	fscache_stat_unchecked(&fscache_n_allocs_nobufs);
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 }
@@ -791,7 +791,7 @@ static void fscache_write_op(struct fsca
 
 	spin_lock(&cookie->stores_lock);
 
-	fscache_stat(&fscache_n_store_calls);
+	fscache_stat_unchecked(&fscache_n_store_calls);
 
 	/* find a page to store */
 	page = NULL;
@@ -802,7 +802,7 @@ static void fscache_write_op(struct fsca
 	page = results[0];
 	_debug("gang %d [%lx]", n, page->index);
 	if (page->index > op->store_limit) {
-		fscache_stat(&fscache_n_store_pages_over_limit);
+		fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
 		goto superseded;
 	}
 
@@ -814,7 +814,7 @@ static void fscache_write_op(struct fsca
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
-	fscache_stat(&fscache_n_store_pages);
+	fscache_stat_unchecked(&fscache_n_store_pages);
 	fscache_stat(&fscache_n_cop_write_page);
 	ret = object->cache->ops->write_page(op, page);
 	fscache_stat_d(&fscache_n_cop_write_page);
@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_
 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
 	ASSERT(PageFsCache(page));
 
-	fscache_stat(&fscache_n_stores);
+	fscache_stat_unchecked(&fscache_n_stores);
 
 	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
 		_leave(" = -ENOBUFS [invalidating]");
@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
-	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
+	op->op.debug_id	= atomic_inc_return_unchecked(&fscache_op_debug_id);
 	op->store_limit = object->store_limit;
 
 	__fscache_use_cookie(cookie);
@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_
 
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
-	fscache_stat(&fscache_n_store_ops);
-	fscache_stat(&fscache_n_stores_ok);
+	fscache_stat_unchecked(&fscache_n_store_ops);
+	fscache_stat_unchecked(&fscache_n_stores_ok);
 
 	/* the work queue now carries its own ref on the object */
 	fscache_put_operation(&op->op);
@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_
 	return 0;
 
 already_queued:
-	fscache_stat(&fscache_n_stores_again);
+	fscache_stat_unchecked(&fscache_n_stores_again);
 already_pending:
 	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
 	radix_tree_preload_end();
 	kfree(op);
-	fscache_stat(&fscache_n_stores_ok);
+	fscache_stat_unchecked(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
 
@@ -1024,14 +1024,14 @@ nobufs:
 	kfree(op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
-	fscache_stat(&fscache_n_stores_nobufs);
+	fscache_stat_unchecked(&fscache_n_stores_nobufs);
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 
 nomem_free:
 	kfree(op);
 nomem:
-	fscache_stat(&fscache_n_stores_oom);
+	fscache_stat_unchecked(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
 	return -ENOMEM;
 }
@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscac
 	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
 	ASSERTCMP(page, !=, NULL);
 
-	fscache_stat(&fscache_n_uncaches);
+	fscache_stat_unchecked(&fscache_n_uncaches);
 
 	/* cache withdrawal may beat us to it */
 	if (!PageFsCache(page))
@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fsc
 	struct fscache_cookie *cookie = op->op.object->cookie;
 
 #ifdef CONFIG_FSCACHE_STATS
-	atomic_inc(&fscache_n_marks);
+	atomic_inc_unchecked(&fscache_n_marks);
 #endif
 
 	_debug("- mark %p{%lx}", page, page->index);
diff -ruNp linux-3.13.11/fs/fscache/stats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/stats.c
--- linux-3.13.11/fs/fscache/stats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fscache/stats.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,99 +18,99 @@
 /*
  * operation counters
  */
-atomic_t fscache_n_op_pend;
-atomic_t fscache_n_op_run;
-atomic_t fscache_n_op_enqueue;
-atomic_t fscache_n_op_requeue;
-atomic_t fscache_n_op_deferred_release;
-atomic_t fscache_n_op_release;
-atomic_t fscache_n_op_gc;
-atomic_t fscache_n_op_cancelled;
-atomic_t fscache_n_op_rejected;
-
-atomic_t fscache_n_attr_changed;
-atomic_t fscache_n_attr_changed_ok;
-atomic_t fscache_n_attr_changed_nobufs;
-atomic_t fscache_n_attr_changed_nomem;
-atomic_t fscache_n_attr_changed_calls;
-
-atomic_t fscache_n_allocs;
-atomic_t fscache_n_allocs_ok;
-atomic_t fscache_n_allocs_wait;
-atomic_t fscache_n_allocs_nobufs;
-atomic_t fscache_n_allocs_intr;
-atomic_t fscache_n_allocs_object_dead;
-atomic_t fscache_n_alloc_ops;
-atomic_t fscache_n_alloc_op_waits;
-
-atomic_t fscache_n_retrievals;
-atomic_t fscache_n_retrievals_ok;
-atomic_t fscache_n_retrievals_wait;
-atomic_t fscache_n_retrievals_nodata;
-atomic_t fscache_n_retrievals_nobufs;
-atomic_t fscache_n_retrievals_intr;
-atomic_t fscache_n_retrievals_nomem;
-atomic_t fscache_n_retrievals_object_dead;
-atomic_t fscache_n_retrieval_ops;
-atomic_t fscache_n_retrieval_op_waits;
-
-atomic_t fscache_n_stores;
-atomic_t fscache_n_stores_ok;
-atomic_t fscache_n_stores_again;
-atomic_t fscache_n_stores_nobufs;
-atomic_t fscache_n_stores_oom;
-atomic_t fscache_n_store_ops;
-atomic_t fscache_n_store_calls;
-atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
-atomic_t fscache_n_store_pages_over_limit;
-
-atomic_t fscache_n_store_vmscan_not_storing;
-atomic_t fscache_n_store_vmscan_gone;
-atomic_t fscache_n_store_vmscan_busy;
-atomic_t fscache_n_store_vmscan_cancelled;
-atomic_t fscache_n_store_vmscan_wait;
-
-atomic_t fscache_n_marks;
-atomic_t fscache_n_uncaches;
-
-atomic_t fscache_n_acquires;
-atomic_t fscache_n_acquires_null;
-atomic_t fscache_n_acquires_no_cache;
-atomic_t fscache_n_acquires_ok;
-atomic_t fscache_n_acquires_nobufs;
-atomic_t fscache_n_acquires_oom;
-
-atomic_t fscache_n_invalidates;
-atomic_t fscache_n_invalidates_run;
-
-atomic_t fscache_n_updates;
-atomic_t fscache_n_updates_null;
-atomic_t fscache_n_updates_run;
-
-atomic_t fscache_n_relinquishes;
-atomic_t fscache_n_relinquishes_null;
-atomic_t fscache_n_relinquishes_waitcrt;
-atomic_t fscache_n_relinquishes_retire;
-
-atomic_t fscache_n_cookie_index;
-atomic_t fscache_n_cookie_data;
-atomic_t fscache_n_cookie_special;
-
-atomic_t fscache_n_object_alloc;
-atomic_t fscache_n_object_no_alloc;
-atomic_t fscache_n_object_lookups;
-atomic_t fscache_n_object_lookups_negative;
-atomic_t fscache_n_object_lookups_positive;
-atomic_t fscache_n_object_lookups_timed_out;
-atomic_t fscache_n_object_created;
-atomic_t fscache_n_object_avail;
-atomic_t fscache_n_object_dead;
-
-atomic_t fscache_n_checkaux_none;
-atomic_t fscache_n_checkaux_okay;
-atomic_t fscache_n_checkaux_update;
-atomic_t fscache_n_checkaux_obsolete;
+atomic_unchecked_t fscache_n_op_pend;
+atomic_unchecked_t fscache_n_op_run;
+atomic_unchecked_t fscache_n_op_enqueue;
+atomic_unchecked_t fscache_n_op_requeue;
+atomic_unchecked_t fscache_n_op_deferred_release;
+atomic_unchecked_t fscache_n_op_release;
+atomic_unchecked_t fscache_n_op_gc;
+atomic_unchecked_t fscache_n_op_cancelled;
+atomic_unchecked_t fscache_n_op_rejected;
+
+atomic_unchecked_t fscache_n_attr_changed;
+atomic_unchecked_t fscache_n_attr_changed_ok;
+atomic_unchecked_t fscache_n_attr_changed_nobufs;
+atomic_unchecked_t fscache_n_attr_changed_nomem;
+atomic_unchecked_t fscache_n_attr_changed_calls;
+
+atomic_unchecked_t fscache_n_allocs;
+atomic_unchecked_t fscache_n_allocs_ok;
+atomic_unchecked_t fscache_n_allocs_wait;
+atomic_unchecked_t fscache_n_allocs_nobufs;
+atomic_unchecked_t fscache_n_allocs_intr;
+atomic_unchecked_t fscache_n_allocs_object_dead;
+atomic_unchecked_t fscache_n_alloc_ops;
+atomic_unchecked_t fscache_n_alloc_op_waits;
+
+atomic_unchecked_t fscache_n_retrievals;
+atomic_unchecked_t fscache_n_retrievals_ok;
+atomic_unchecked_t fscache_n_retrievals_wait;
+atomic_unchecked_t fscache_n_retrievals_nodata;
+atomic_unchecked_t fscache_n_retrievals_nobufs;
+atomic_unchecked_t fscache_n_retrievals_intr;
+atomic_unchecked_t fscache_n_retrievals_nomem;
+atomic_unchecked_t fscache_n_retrievals_object_dead;
+atomic_unchecked_t fscache_n_retrieval_ops;
+atomic_unchecked_t fscache_n_retrieval_op_waits;
+
+atomic_unchecked_t fscache_n_stores;
+atomic_unchecked_t fscache_n_stores_ok;
+atomic_unchecked_t fscache_n_stores_again;
+atomic_unchecked_t fscache_n_stores_nobufs;
+atomic_unchecked_t fscache_n_stores_oom;
+atomic_unchecked_t fscache_n_store_ops;
+atomic_unchecked_t fscache_n_store_calls;
+atomic_unchecked_t fscache_n_store_pages;
+atomic_unchecked_t fscache_n_store_radix_deletes;
+atomic_unchecked_t fscache_n_store_pages_over_limit;
+
+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
+atomic_unchecked_t fscache_n_store_vmscan_gone;
+atomic_unchecked_t fscache_n_store_vmscan_busy;
+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
+atomic_unchecked_t fscache_n_store_vmscan_wait;
+
+atomic_unchecked_t fscache_n_marks;
+atomic_unchecked_t fscache_n_uncaches;
+
+atomic_unchecked_t fscache_n_acquires;
+atomic_unchecked_t fscache_n_acquires_null;
+atomic_unchecked_t fscache_n_acquires_no_cache;
+atomic_unchecked_t fscache_n_acquires_ok;
+atomic_unchecked_t fscache_n_acquires_nobufs;
+atomic_unchecked_t fscache_n_acquires_oom;
+
+atomic_unchecked_t fscache_n_invalidates;
+atomic_unchecked_t fscache_n_invalidates_run;
+
+atomic_unchecked_t fscache_n_updates;
+atomic_unchecked_t fscache_n_updates_null;
+atomic_unchecked_t fscache_n_updates_run;
+
+atomic_unchecked_t fscache_n_relinquishes;
+atomic_unchecked_t fscache_n_relinquishes_null;
+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
+atomic_unchecked_t fscache_n_relinquishes_retire;
+
+atomic_unchecked_t fscache_n_cookie_index;
+atomic_unchecked_t fscache_n_cookie_data;
+atomic_unchecked_t fscache_n_cookie_special;
+
+atomic_unchecked_t fscache_n_object_alloc;
+atomic_unchecked_t fscache_n_object_no_alloc;
+atomic_unchecked_t fscache_n_object_lookups;
+atomic_unchecked_t fscache_n_object_lookups_negative;
+atomic_unchecked_t fscache_n_object_lookups_positive;
+atomic_unchecked_t fscache_n_object_lookups_timed_out;
+atomic_unchecked_t fscache_n_object_created;
+atomic_unchecked_t fscache_n_object_avail;
+atomic_unchecked_t fscache_n_object_dead;
+
+atomic_unchecked_t fscache_n_checkaux_none;
+atomic_unchecked_t fscache_n_checkaux_okay;
+atomic_unchecked_t fscache_n_checkaux_update;
+atomic_unchecked_t fscache_n_checkaux_obsolete;
 
 atomic_t fscache_n_cop_alloc_object;
 atomic_t fscache_n_cop_lookup_object;
@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq
 	seq_puts(m, "FS-Cache statistics\n");
 
 	seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
-		   atomic_read(&fscache_n_cookie_index),
-		   atomic_read(&fscache_n_cookie_data),
-		   atomic_read(&fscache_n_cookie_special));
+		   atomic_read_unchecked(&fscache_n_cookie_index),
+		   atomic_read_unchecked(&fscache_n_cookie_data),
+		   atomic_read_unchecked(&fscache_n_cookie_special));
 
 	seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
-		   atomic_read(&fscache_n_object_alloc),
-		   atomic_read(&fscache_n_object_no_alloc),
-		   atomic_read(&fscache_n_object_avail),
-		   atomic_read(&fscache_n_object_dead));
+		   atomic_read_unchecked(&fscache_n_object_alloc),
+		   atomic_read_unchecked(&fscache_n_object_no_alloc),
+		   atomic_read_unchecked(&fscache_n_object_avail),
+		   atomic_read_unchecked(&fscache_n_object_dead));
 	seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
-		   atomic_read(&fscache_n_checkaux_none),
-		   atomic_read(&fscache_n_checkaux_okay),
-		   atomic_read(&fscache_n_checkaux_update),
-		   atomic_read(&fscache_n_checkaux_obsolete));
+		   atomic_read_unchecked(&fscache_n_checkaux_none),
+		   atomic_read_unchecked(&fscache_n_checkaux_okay),
+		   atomic_read_unchecked(&fscache_n_checkaux_update),
+		   atomic_read_unchecked(&fscache_n_checkaux_obsolete));
 
 	seq_printf(m, "Pages  : mrk=%u unc=%u\n",
-		   atomic_read(&fscache_n_marks),
-		   atomic_read(&fscache_n_uncaches));
+		   atomic_read_unchecked(&fscache_n_marks),
+		   atomic_read_unchecked(&fscache_n_uncaches));
 
 	seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
 		   " oom=%u\n",
-		   atomic_read(&fscache_n_acquires),
-		   atomic_read(&fscache_n_acquires_null),
-		   atomic_read(&fscache_n_acquires_no_cache),
-		   atomic_read(&fscache_n_acquires_ok),
-		   atomic_read(&fscache_n_acquires_nobufs),
-		   atomic_read(&fscache_n_acquires_oom));
+		   atomic_read_unchecked(&fscache_n_acquires),
+		   atomic_read_unchecked(&fscache_n_acquires_null),
+		   atomic_read_unchecked(&fscache_n_acquires_no_cache),
+		   atomic_read_unchecked(&fscache_n_acquires_ok),
+		   atomic_read_unchecked(&fscache_n_acquires_nobufs),
+		   atomic_read_unchecked(&fscache_n_acquires_oom));
 
 	seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
-		   atomic_read(&fscache_n_object_lookups),
-		   atomic_read(&fscache_n_object_lookups_negative),
-		   atomic_read(&fscache_n_object_lookups_positive),
-		   atomic_read(&fscache_n_object_created),
-		   atomic_read(&fscache_n_object_lookups_timed_out));
+		   atomic_read_unchecked(&fscache_n_object_lookups),
+		   atomic_read_unchecked(&fscache_n_object_lookups_negative),
+		   atomic_read_unchecked(&fscache_n_object_lookups_positive),
+		   atomic_read_unchecked(&fscache_n_object_created),
+		   atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
 
 	seq_printf(m, "Invals : n=%u run=%u\n",
-		   atomic_read(&fscache_n_invalidates),
-		   atomic_read(&fscache_n_invalidates_run));
+		   atomic_read_unchecked(&fscache_n_invalidates),
+		   atomic_read_unchecked(&fscache_n_invalidates_run));
 
 	seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
-		   atomic_read(&fscache_n_updates),
-		   atomic_read(&fscache_n_updates_null),
-		   atomic_read(&fscache_n_updates_run));
+		   atomic_read_unchecked(&fscache_n_updates),
+		   atomic_read_unchecked(&fscache_n_updates_null),
+		   atomic_read_unchecked(&fscache_n_updates_run));
 
 	seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
-		   atomic_read(&fscache_n_relinquishes),
-		   atomic_read(&fscache_n_relinquishes_null),
-		   atomic_read(&fscache_n_relinquishes_waitcrt),
-		   atomic_read(&fscache_n_relinquishes_retire));
+		   atomic_read_unchecked(&fscache_n_relinquishes),
+		   atomic_read_unchecked(&fscache_n_relinquishes_null),
+		   atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
+		   atomic_read_unchecked(&fscache_n_relinquishes_retire));
 
 	seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
-		   atomic_read(&fscache_n_attr_changed),
-		   atomic_read(&fscache_n_attr_changed_ok),
-		   atomic_read(&fscache_n_attr_changed_nobufs),
-		   atomic_read(&fscache_n_attr_changed_nomem),
-		   atomic_read(&fscache_n_attr_changed_calls));
+		   atomic_read_unchecked(&fscache_n_attr_changed),
+		   atomic_read_unchecked(&fscache_n_attr_changed_ok),
+		   atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
+		   atomic_read_unchecked(&fscache_n_attr_changed_nomem),
+		   atomic_read_unchecked(&fscache_n_attr_changed_calls));
 
 	seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
-		   atomic_read(&fscache_n_allocs),
-		   atomic_read(&fscache_n_allocs_ok),
-		   atomic_read(&fscache_n_allocs_wait),
-		   atomic_read(&fscache_n_allocs_nobufs),
-		   atomic_read(&fscache_n_allocs_intr));
+		   atomic_read_unchecked(&fscache_n_allocs),
+		   atomic_read_unchecked(&fscache_n_allocs_ok),
+		   atomic_read_unchecked(&fscache_n_allocs_wait),
+		   atomic_read_unchecked(&fscache_n_allocs_nobufs),
+		   atomic_read_unchecked(&fscache_n_allocs_intr));
 	seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
-		   atomic_read(&fscache_n_alloc_ops),
-		   atomic_read(&fscache_n_alloc_op_waits),
-		   atomic_read(&fscache_n_allocs_object_dead));
+		   atomic_read_unchecked(&fscache_n_alloc_ops),
+		   atomic_read_unchecked(&fscache_n_alloc_op_waits),
+		   atomic_read_unchecked(&fscache_n_allocs_object_dead));
 
 	seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
 		   " int=%u oom=%u\n",
-		   atomic_read(&fscache_n_retrievals),
-		   atomic_read(&fscache_n_retrievals_ok),
-		   atomic_read(&fscache_n_retrievals_wait),
-		   atomic_read(&fscache_n_retrievals_nodata),
-		   atomic_read(&fscache_n_retrievals_nobufs),
-		   atomic_read(&fscache_n_retrievals_intr),
-		   atomic_read(&fscache_n_retrievals_nomem));
+		   atomic_read_unchecked(&fscache_n_retrievals),
+		   atomic_read_unchecked(&fscache_n_retrievals_ok),
+		   atomic_read_unchecked(&fscache_n_retrievals_wait),
+		   atomic_read_unchecked(&fscache_n_retrievals_nodata),
+		   atomic_read_unchecked(&fscache_n_retrievals_nobufs),
+		   atomic_read_unchecked(&fscache_n_retrievals_intr),
+		   atomic_read_unchecked(&fscache_n_retrievals_nomem));
 	seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
-		   atomic_read(&fscache_n_retrieval_ops),
-		   atomic_read(&fscache_n_retrieval_op_waits),
-		   atomic_read(&fscache_n_retrievals_object_dead));
+		   atomic_read_unchecked(&fscache_n_retrieval_ops),
+		   atomic_read_unchecked(&fscache_n_retrieval_op_waits),
+		   atomic_read_unchecked(&fscache_n_retrievals_object_dead));
 
 	seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
-		   atomic_read(&fscache_n_stores),
-		   atomic_read(&fscache_n_stores_ok),
-		   atomic_read(&fscache_n_stores_again),
-		   atomic_read(&fscache_n_stores_nobufs),
-		   atomic_read(&fscache_n_stores_oom));
+		   atomic_read_unchecked(&fscache_n_stores),
+		   atomic_read_unchecked(&fscache_n_stores_ok),
+		   atomic_read_unchecked(&fscache_n_stores_again),
+		   atomic_read_unchecked(&fscache_n_stores_nobufs),
+		   atomic_read_unchecked(&fscache_n_stores_oom));
 	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
-		   atomic_read(&fscache_n_store_ops),
-		   atomic_read(&fscache_n_store_calls),
-		   atomic_read(&fscache_n_store_pages),
-		   atomic_read(&fscache_n_store_radix_deletes),
-		   atomic_read(&fscache_n_store_pages_over_limit));
+		   atomic_read_unchecked(&fscache_n_store_ops),
+		   atomic_read_unchecked(&fscache_n_store_calls),
+		   atomic_read_unchecked(&fscache_n_store_pages),
+		   atomic_read_unchecked(&fscache_n_store_radix_deletes),
+		   atomic_read_unchecked(&fscache_n_store_pages_over_limit));
 
 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
-		   atomic_read(&fscache_n_store_vmscan_not_storing),
-		   atomic_read(&fscache_n_store_vmscan_gone),
-		   atomic_read(&fscache_n_store_vmscan_busy),
-		   atomic_read(&fscache_n_store_vmscan_cancelled),
-		   atomic_read(&fscache_n_store_vmscan_wait));
+		   atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
+		   atomic_read_unchecked(&fscache_n_store_vmscan_gone),
+		   atomic_read_unchecked(&fscache_n_store_vmscan_busy),
+		   atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
+		   atomic_read_unchecked(&fscache_n_store_vmscan_wait));
 
 	seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
-		   atomic_read(&fscache_n_op_pend),
-		   atomic_read(&fscache_n_op_run),
-		   atomic_read(&fscache_n_op_enqueue),
-		   atomic_read(&fscache_n_op_cancelled),
-		   atomic_read(&fscache_n_op_rejected));
+		   atomic_read_unchecked(&fscache_n_op_pend),
+		   atomic_read_unchecked(&fscache_n_op_run),
+		   atomic_read_unchecked(&fscache_n_op_enqueue),
+		   atomic_read_unchecked(&fscache_n_op_cancelled),
+		   atomic_read_unchecked(&fscache_n_op_rejected));
 	seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
-		   atomic_read(&fscache_n_op_deferred_release),
-		   atomic_read(&fscache_n_op_release),
-		   atomic_read(&fscache_n_op_gc));
+		   atomic_read_unchecked(&fscache_n_op_deferred_release),
+		   atomic_read_unchecked(&fscache_n_op_release),
+		   atomic_read_unchecked(&fscache_n_op_gc));
 
 	seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
 		   atomic_read(&fscache_n_cop_alloc_object),
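
A note on the fscache hunks above: grsecurity/PaX instruments atomic_t so that a wrapping increment is treated as a reference-count overflow, which is unwanted for pure statistics that may legitimately wrap, so the stat counters are switched to atomic_unchecked_t and fscache_stat_unchecked() while the fscache_n_cop_* operation counters stay checked. Below is a minimal userspace sketch of that checked/unchecked split; the types and helpers are illustrative only, not the kernel implementation.

/* Userspace model of the checked vs. unchecked counter split used by
 * PaX/grsecurity; the real kernel types and helpers differ. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { atomic_int v; } counter_t;            /* like atomic_t */
typedef struct { atomic_int v; } counter_unchecked_t;  /* like atomic_unchecked_t */

/* Reference-count style increment: overflowing is treated as a bug. */
static void counter_inc_checked(counter_t *c)
{
	int old = atomic_fetch_add(&c->v, 1);
	if (old == INT_MAX) {                   /* wrapped: refuse to continue */
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

/* Statistics-style increment: wrapping is harmless, so no check. */
static void counter_inc_unchecked(counter_unchecked_t *c)
{
	atomic_fetch_add(&c->v, 1);
}

int main(void)
{
	counter_t ref = { 0 };                  /* stays "checked", like fscache_n_cop_* */
	counter_unchecked_t n_stores = { 0 };   /* pure statistic, like fscache_n_stores */

	counter_inc_checked(&ref);
	for (int i = 0; i < 1000; i++)
		counter_inc_unchecked(&n_stores);

	printf("Stores : n=%d (cop refs=%d)\n",
	       atomic_load(&n_stores.v), atomic_load(&ref.v));
	return 0;
}
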
diff -ruNp linux-3.13.11/fs/fuse/cuse.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/cuse.c
--- linux-3.13.11/fs/fuse/cuse.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/cuse.c	2014-07-09 12:00:15.000000000 +0200
@@ -606,10 +606,12 @@ static int __init cuse_init(void)
 		INIT_LIST_HEAD(&cuse_conntbl[i]);
 
 	/* inherit and extend fuse_dev_operations */
-	cuse_channel_fops		= fuse_dev_operations;
-	cuse_channel_fops.owner		= THIS_MODULE;
-	cuse_channel_fops.open		= cuse_channel_open;
-	cuse_channel_fops.release	= cuse_channel_release;
+	pax_open_kernel();
+	memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
+	*(void **)&cuse_channel_fops.owner	= THIS_MODULE;
+	*(void **)&cuse_channel_fops.open	= cuse_channel_open;
+	*(void **)&cuse_channel_fops.release	= cuse_channel_release;
+	pax_close_kernel();
 
 	cuse_class = class_create(THIS_MODULE, "cuse");
 	if (IS_ERR(cuse_class))
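
The cuse_init() change reflects PaX's structure constification: with file_operations instances read-only at runtime, direct member assignment would fault, so the patch copies fuse_dev_operations and patches the three members inside a pax_open_kernel()/pax_close_kernel() window. The sketch below models the same idea in userspace with mprotect() on a page holding an ops structure; it is an illustration of the pattern, not the kernel mechanism, and the struct and helper names are invented.

/* Userspace model of writing to an otherwise read-only ops structure by
 * temporarily opening a write window, in the spirit of
 * pax_open_kernel()/pax_close_kernel().  Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct dev_ops {
	int (*open)(void);
	int (*release)(void);
};

static int chan_open(void)    { return 0; }
static int chan_release(void) { return 0; }

static void set_page_rw(void *p, size_t len, int writable)
{
	long page = sysconf(_SC_PAGESIZE);
	void *base = (void *)((uintptr_t)p & ~(uintptr_t)(page - 1));
	size_t span = ((uintptr_t)p + len) - (uintptr_t)base;

	if (mprotect(base, span, writable ? PROT_READ | PROT_WRITE : PROT_READ))
		perror("mprotect");
}

int main(void)
{
	/* Stand-in for the constified cuse_channel_fops object. */
	struct dev_ops *chan_ops = mmap(NULL, sysconf(_SC_PAGESIZE),
					PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (chan_ops == MAP_FAILED)
		return 1;

	struct dev_ops template = { 0 };        /* plays the role of fuse_dev_operations */

	set_page_rw(chan_ops, sizeof(*chan_ops), 1);    /* "pax_open_kernel()" */
	memcpy(chan_ops, &template, sizeof(template));
	chan_ops->open = chan_open;
	chan_ops->release = chan_release;
	set_page_rw(chan_ops, sizeof(*chan_ops), 0);    /* "pax_close_kernel()" */

	printf("open() -> %d\n", chan_ops->open());
	return 0;
}
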
diff -ruNp linux-3.13.11/fs/fuse/dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/dev.c
--- linux-3.13.11/fs/fuse/dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(stru
 	ret = 0;
 	pipe_lock(pipe);
 
-	if (!pipe->readers) {
+	if (!atomic_read(&pipe->readers)) {
 		send_sig(SIGPIPE, current, 0);
 		if (!ret)
 			ret = -EPIPE;
@@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(stru
 		page_nr++;
 		ret += buf->len;
 
-		if (pipe->files)
+		if (atomic_read(&pipe->files))
 			do_wakeup = 1;
 	}
 
diff -ruNp linux-3.13.11/fs/fuse/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/dir.c
--- linux-3.13.11/fs/fuse/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/fuse/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -1408,7 +1408,7 @@ static char *read_link(struct dentry *de
 	return link;
 }
 
-static void free_link(char *link)
+static void free_link(const char *link)
 {
 	if (!IS_ERR(link))
 		free_page((unsigned long) link);
diff -ruNp linux-3.13.11/fs/gfs2/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/gfs2/file.c
--- linux-3.13.11/fs/gfs2/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/gfs2/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -137,6 +137,9 @@ static const u32 fsflags_to_gfs2[32] = {
 	[12] = GFS2_DIF_EXHASH,
 	[14] = GFS2_DIF_INHERIT_JDATA,
 	[17] = GFS2_DIF_TOPDIR,
+	[27] = GFS2_DIF_IXUNLINK,
+	[26] = GFS2_DIF_BARRIER,
+	[29] = GFS2_DIF_COW,
 };
 
 static const u32 gfs2_to_fsflags[32] = {
@@ -147,6 +150,9 @@ static const u32 gfs2_to_fsflags[32] = {
 	[gfs2fl_ExHash] = FS_INDEX_FL,
 	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
+	[gfs2fl_IXUnlink] = FS_IXUNLINK_FL,
+	[gfs2fl_Barrier] = FS_BARRIER_FL,
+	[gfs2fl_Cow] = FS_COW_FL,
 };
 
 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
@@ -177,12 +183,18 @@ void gfs2_set_inode_flags(struct inode *
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC | S_NOSEC);
 
-	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
 		inode->i_flags |= S_NOSEC;
 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
 		flags |= S_IMMUTABLE;
+	if (ip->i_diskflags & GFS2_DIF_IXUNLINK)
+		flags |= S_IXUNLINK;
+
 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
 		flags |= S_APPEND;
 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
@@ -190,6 +202,43 @@ void gfs2_set_inode_flags(struct inode *
 	if (ip->i_diskflags & GFS2_DIF_SYNC)
 		flags |= S_SYNC;
 	inode->i_flags = flags;
+
+	vflags &= ~(V_BARRIER | V_COW);
+
+	if (ip->i_diskflags & GFS2_DIF_BARRIER)
+		vflags |= V_BARRIER;
+	if (ip->i_diskflags & GFS2_DIF_COW)
+		vflags |= V_COW;
+	inode->i_vflags = vflags;
+}
+
+void gfs2_get_inode_flags(struct inode *inode)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	unsigned int flags = inode->i_flags;
+	unsigned int vflags = inode->i_vflags;
+
+	ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY |
+			GFS2_DIF_NOATIME | GFS2_DIF_SYNC |
+			GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK |
+			GFS2_DIF_BARRIER | GFS2_DIF_COW);
+
+	if (flags & S_IMMUTABLE)
+		ip->i_diskflags |= GFS2_DIF_IMMUTABLE;
+	if (flags & S_IXUNLINK)
+		ip->i_diskflags |= GFS2_DIF_IXUNLINK;
+
+	if (flags & S_APPEND)
+		ip->i_diskflags |= GFS2_DIF_APPENDONLY;
+	if (flags & S_NOATIME)
+		ip->i_diskflags |= GFS2_DIF_NOATIME;
+	if (flags & S_SYNC)
+		ip->i_diskflags |= GFS2_DIF_SYNC;
+
+	if (vflags & V_BARRIER)
+		ip->i_diskflags |= GFS2_DIF_BARRIER;
+	if (vflags & V_COW)
+		ip->i_diskflags |= GFS2_DIF_COW;
 }
 
 /* Flags that can be set by user space */
@@ -303,6 +352,37 @@ static int gfs2_set_flags(struct file *f
 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
 }
 
+int gfs2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	struct buffer_head *bh;
+	struct gfs2_holder gh;
+	int error;
+
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	if (error)
+		return error;
+	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+	if (error)
+		goto out;
+	error = gfs2_meta_inode_buffer(ip, &bh);
+	if (error)
+		goto out_trans_end;
+	gfs2_trans_add_meta(ip->i_gl, bh);
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	gfs2_get_inode_flags(inode);
+	gfs2_dinode_out(ip, bh->b_data);
+	brelse(bh);
+	gfs2_set_aops(inode);
+out_trans_end:
+	gfs2_trans_end(sdp);
+out:
+	gfs2_glock_dq_uninit(&gh);
+	return error;
+}
+
 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch(cmd) {
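
The gfs2 hunks above (and the matching jfs ones further down) all follow the same two-way convention: gfs2_set_inode_flags() projects on-disk GFS2_DIF_* bits onto the in-core inode->i_flags and the vserver-specific inode->i_vflags, while the new gfs2_get_inode_flags()/gfs2_sync_flags() pair maps the in-core bits back into the dinode under a transaction. A table-driven userspace sketch of that bidirectional bit mapping follows; all flag values here are invented for the example.

/* Illustration of the disk-flag <-> in-core-flag round trip used by the
 * gfs2/jfs hunks; the flag values below are made up for the example. */
#include <assert.h>
#include <stdio.h>

/* "on-disk" bits (in the role of GFS2_DIF_*) */
#define DIF_IMMUTABLE  0x0001
#define DIF_APPEND     0x0002
#define DIF_IXUNLINK   0x0004
#define DIF_BARRIER    0x0008
#define DIF_COW        0x0010

/* "in-core" bits (in the role of S_* and V_*) */
#define F_IMMUTABLE    0x0001
#define F_APPEND       0x0002
#define F_IXUNLINK     0x0004
#define VF_BARRIER     0x0001
#define VF_COW         0x0002

static const struct { unsigned disk, flag, vflag; } map[] = {
	{ DIF_IMMUTABLE, F_IMMUTABLE, 0 },
	{ DIF_APPEND,    F_APPEND,    0 },
	{ DIF_IXUNLINK,  F_IXUNLINK,  0 },
	{ DIF_BARRIER,   0,           VF_BARRIER },
	{ DIF_COW,       0,           VF_COW },
};

/* like gfs2_set_inode_flags(): disk -> in-core */
static void set_inode_flags(unsigned disk, unsigned *flags, unsigned *vflags)
{
	*flags = 0;
	*vflags = 0;
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (disk & map[i].disk) {
			*flags  |= map[i].flag;
			*vflags |= map[i].vflag;
		}
}

/* like gfs2_get_inode_flags(): in-core -> disk */
static unsigned get_inode_flags(unsigned flags, unsigned vflags)
{
	unsigned disk = 0;

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if ((flags & map[i].flag) || (vflags & map[i].vflag))
			disk |= map[i].disk;
	return disk;
}

int main(void)
{
	unsigned flags, vflags;
	unsigned disk = DIF_IMMUTABLE | DIF_BARRIER;

	set_inode_flags(disk, &flags, &vflags);
	assert(get_inode_flags(flags, vflags) == disk);   /* round trip holds */
	printf("flags=%#x vflags=%#x\n", flags, vflags);
	return 0;
}
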
diff -ruNp linux-3.13.11/fs/gfs2/inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/gfs2/inode.h
--- linux-3.13.11/fs/gfs2/inode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/gfs2/inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -118,6 +118,7 @@ extern const struct file_operations gfs2
 extern const struct file_operations gfs2_dir_fops_nolock;
 
 extern void gfs2_set_inode_flags(struct inode *inode);
+extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags);
  
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 extern const struct file_operations gfs2_file_fops;
diff -ruNp linux-3.13.11/fs/hostfs/hostfs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hostfs/hostfs.h
--- linux-3.13.11/fs/hostfs/hostfs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hostfs/hostfs.h	2014-07-09 12:00:15.000000000 +0200
@@ -42,6 +42,7 @@ struct hostfs_iattr {
 	unsigned short	ia_mode;
 	uid_t		ia_uid;
 	gid_t		ia_gid;
+	vtag_t		ia_tag;
 	loff_t		ia_size;
 	struct timespec	ia_atime;
 	struct timespec	ia_mtime;
diff -ruNp linux-3.13.11/fs/hostfs/hostfs_kern.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hostfs/hostfs_kern.c
--- linux-3.13.11/fs/hostfs/hostfs_kern.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hostfs/hostfs_kern.c	2014-07-09 12:00:15.000000000 +0200
@@ -895,7 +895,7 @@ static void *hostfs_follow_link(struct d
 
 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-	char *s = nd_get_link(nd);
+	const char *s = nd_get_link(nd);
 	if (!IS_ERR(s))
 		__putname(s);
 }
diff -ruNp linux-3.13.11/fs/hugetlbfs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hugetlbfs/inode.c
--- linux-3.13.11/fs/hugetlbfs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/hugetlbfs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *f
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct hstate *h = hstate_file(file);
+	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 	struct vm_unmapped_area_info info;
 
 	if (len & ~huge_page_mask(h))
@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *f
 		return addr;
 	}
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP)
+		info.low_limit += mm->delta_mmap;
+#endif
+
 	info.high_limit = TASK_SIZE;
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs
 };
 MODULE_ALIAS_FS("hugetlbfs");
 
-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
 
 static int can_do_hugetlb_shm(void)
 {
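
hugetlb_get_unmapped_area() picks up the same grsec/PaX conventions the patch applies to the other get_unmapped_area() implementations: the caller-supplied hint is skipped under MF_PAX_RANDMMAP, and otherwise honoured only if a randomized gap (from gr_rand_threadstack_offset()) still fits before the next VMA; under PAX_RANDMMAP the search floor is also shifted by mm->delta_mmap. The standalone sketch below shows the gap test in isolation; the helper is an illustration of the idea, not the kernel's check_heap_stack_gap().

/* Toy model of "does [addr, addr+len) fit below the next VMA with a
 * randomized guard gap?" -- an illustration only, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

static bool fits_with_gap(const struct vma *next, unsigned long addr,
			  unsigned long len, unsigned long gap)
{
	if (!next)                      /* nothing mapped above the hint */
		return true;
	if (addr + len < addr)          /* address range would wrap */
		return false;
	return addr + len + gap <= next->start;
}

int main(void)
{
	struct vma next = { .start = 0x7f0000000000UL, .end = 0x7f0000200000UL };
	unsigned long hint = 0x7effffc00000UL;
	unsigned long len  = 0x200000;          /* one 2 MiB huge page */
	unsigned long gap  = 0x100000;          /* pretend randomized offset */

	printf("hint %#lx %s\n", hint,
	       fits_with_gap(&next, hint, len, gap) ? "accepted" : "rejected");
	return 0;
}
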
diff -ruNp linux-3.13.11/fs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/inode.c
--- linux-3.13.11/fs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <linux/buffer_head.h> /* for inode_has_buffers */
 #include <linux/ratelimit.h>
 #include <linux/list_lru.h>
+#include <linux/vs_tag.h>
 #include "internal.h"
 
 /*
@@ -129,6 +130,8 @@ int inode_init_always(struct super_block
 	struct address_space *const mapping = &inode->i_data;
 
 	inode->i_sb = sb;
+
+	/* essential because of inode slab reuse */
 	inode->i_blkbits = sb->s_blocksize_bits;
 	inode->i_flags = 0;
 	atomic_set(&inode->i_count, 1);
@@ -138,6 +141,7 @@ int inode_init_always(struct super_block
 	inode->i_opflags = 0;
 	i_uid_write(inode, 0);
 	i_gid_write(inode, 0);
+	i_tag_write(inode, 0);
 	atomic_set(&inode->i_writecount, 0);
 	inode->i_size = 0;
 	inode->i_blocks = 0;
@@ -150,6 +154,7 @@ int inode_init_always(struct super_block
 	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_rdev = 0;
+	inode->i_mdev = 0;
 	inode->dirtied_when = 0;
 
 	if (security_inode_alloc(inode))
@@ -477,6 +482,8 @@ void __insert_inode_hash(struct inode *i
 }
 EXPORT_SYMBOL(__insert_inode_hash);
 
+EXPORT_SYMBOL_GPL(__iget);
+
 /**
  *	__remove_inode_hash - remove an inode from the hash
  *	@inode: inode to unhash
@@ -841,8 +848,8 @@ unsigned int get_next_ino(void)
 
 #ifdef CONFIG_SMP
 	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
-		static atomic_t shared_last_ino;
-		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+		static atomic_unchecked_t shared_last_ino;
+		int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
 
 		res = next - LAST_INO_BATCH;
 	}
@@ -1802,9 +1809,11 @@ void init_special_inode(struct inode *in
 	if (S_ISCHR(mode)) {
 		inode->i_fop = &def_chr_fops;
 		inode->i_rdev = rdev;
+		inode->i_mdev = rdev;
 	} else if (S_ISBLK(mode)) {
 		inode->i_fop = &def_blk_fops;
 		inode->i_rdev = rdev;
+		inode->i_mdev = rdev;
 	} else if (S_ISFIFO(mode))
 		inode->i_fop = &pipefifo_fops;
 	else if (S_ISSOCK(mode))
@@ -1833,6 +1842,7 @@ void inode_init_owner(struct inode *inod
 	} else
 		inode->i_gid = current_fsgid();
 	inode->i_mode = mode;
+	i_tag_write(inode, dx_current_fstag(inode->i_sb));
 }
 EXPORT_SYMBOL(inode_init_owner);
 
diff -ruNp linux-3.13.11/fs/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ioctl.c
--- linux-3.13.11/fs/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,9 @@
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>
 #include <linux/falloc.h>
+#include <linux/proc_fs.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_tag.h>
 
 #include <asm/ioctls.h>
 
diff -ruNp linux-3.13.11/fs/ioprio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ioprio.c
--- linux-3.13.11/fs/ioprio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ioprio.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,7 @@
 #include <linux/syscalls.h>
 #include <linux/security.h>
 #include <linux/pid_namespace.h>
+#include <linux/vs_base.h>
 
 int set_task_ioprio(struct task_struct *task, int ioprio)
 {
@@ -105,6 +106,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which,
 			else
 				pgrp = find_vpid(who);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
 					break;
@@ -198,6 +201,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
 			else
 				pgrp = find_vpid(who);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
 					continue;
diff -ruNp linux-3.13.11/fs/jffs2/erase.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jffs2/erase.c
--- linux-3.13.11/fs/jffs2/erase.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jffs2/erase.c	2014-07-09 12:00:15.000000000 +0200
@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(stru
 		struct jffs2_unknown_node marker = {
 			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
 			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-			.totlen =	cpu_to_je32(c->cleanmarker_size)
+			.totlen =	cpu_to_je32(c->cleanmarker_size),
+			.hdr_crc =	cpu_to_je32(0)
 		};
 
 		jffs2_prealloc_raw_node_refs(c, jeb, 1);
diff -ruNp linux-3.13.11/fs/jffs2/wbuf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jffs2/wbuf.c
--- linux-3.13.11/fs/jffs2/wbuf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jffs2/wbuf.c	2014-07-09 12:00:15.000000000 +0200
@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node o
 {
 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
-	.totlen = constant_cpu_to_je32(8)
+	.totlen = constant_cpu_to_je32(8),
+	.hdr_crc = constant_cpu_to_je32(0)
 };
 
 /*
diff -ruNp linux-3.13.11/fs/jfs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/file.c
--- linux-3.13.11/fs/jfs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -109,7 +109,8 @@ int jfs_setattr(struct dentry *dentry, s
 	if (is_quota_modification(inode, iattr))
 		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
-	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
+	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) ||
+	    (iattr->ia_valid & ATTR_TAG && !tag_eq(iattr->ia_tag, inode->i_tag))) {
 		rc = dquot_transfer(inode, iattr);
 		if (rc)
 			return rc;
@@ -144,6 +145,7 @@ const struct inode_operations jfs_file_i
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
 #endif
+	.sync_flags	= jfs_sync_flags,
 };
 
 const struct file_operations jfs_file_operations = {
diff -ruNp linux-3.13.11/fs/jfs/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/ioctl.c
--- linux-3.13.11/fs/jfs/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -12,6 +12,7 @@
 #include <linux/time.h>
 #include <linux/sched.h>
 #include <linux/blkdev.h>
+#include <linux/mount.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 
@@ -56,6 +57,16 @@ static long jfs_map_ext2(unsigned long f
 }
 
 
+int jfs_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	jfs_get_inode_flags(JFS_IP(inode));
+	inode->i_ctime = CURRENT_TIME_SEC;
+	mark_inode_dirty(inode);
+	return 0;
+}
+
 long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -89,6 +100,11 @@ long jfs_ioctl(struct file *filp, unsign
 		if (!S_ISDIR(inode->i_mode))
 			flags &= ~JFS_DIRSYNC_FL;
 
+		if (IS_BARRIER(inode)) {
+			vxwprintk_task(1, "messing with the barrier.");
+			return -EACCES;
+		}
+
 		/* Is it quota file? Do not allow user to mess with it */
 		if (IS_NOQUOTA(inode)) {
 			err = -EPERM;
@@ -106,8 +122,8 @@ long jfs_ioctl(struct file *filp, unsign
 		 * the relevant capability.
 		 */
 		if ((oldflags & JFS_IMMUTABLE_FL) ||
-			((flags ^ oldflags) &
-			(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
+			((flags ^ oldflags) & (JFS_APPEND_FL |
+			JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) {
 			if (!capable(CAP_LINUX_IMMUTABLE)) {
 				mutex_unlock(&inode->i_mutex);
 				err = -EPERM;
@@ -115,7 +131,7 @@ long jfs_ioctl(struct file *filp, unsign
 			}
 		}
 
-		flags = flags & JFS_FL_USER_MODIFIABLE;
+		flags &= JFS_FL_USER_MODIFIABLE;
 		flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
 		jfs_inode->mode2 = flags;
 
diff -ruNp linux-3.13.11/fs/jfs/jfs_dinode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_dinode.h
--- linux-3.13.11/fs/jfs/jfs_dinode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_dinode.h	2014-07-09 12:00:15.000000000 +0200
@@ -161,9 +161,13 @@ struct dinode {
 
 #define JFS_APPEND_FL		0x01000000 /* writes to file may only append */
 #define JFS_IMMUTABLE_FL	0x02000000 /* Immutable file */
+#define JFS_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
 
-#define JFS_FL_USER_VISIBLE	0x03F80000
-#define JFS_FL_USER_MODIFIABLE	0x03F80000
+#define JFS_BARRIER_FL		0x04000000 /* Barrier for chroot() */
+#define JFS_COW_FL		0x20000000 /* Copy on Write marker */
+
+#define JFS_FL_USER_VISIBLE	0x07F80000
+#define JFS_FL_USER_MODIFIABLE	0x07F80000
 #define JFS_FL_INHERIT		0x03C80000
 
 /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
diff -ruNp linux-3.13.11/fs/jfs/jfs_filsys.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_filsys.h
--- linux-3.13.11/fs/jfs/jfs_filsys.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_filsys.h	2014-07-09 12:00:15.000000000 +0200
@@ -266,6 +266,7 @@
 #define JFS_NAME_MAX	255
 #define JFS_PATH_MAX	BPSIZE
 
+#define JFS_TAGGED		0x00800000	/* Context Tagging */
 
 /*
  *	file system state (superblock state)
diff -ruNp linux-3.13.11/fs/jfs/jfs_imap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_imap.c
--- linux-3.13.11/fs/jfs/jfs_imap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_imap.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,6 +46,7 @@
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
 #include <linux/slab.h>
+#include <linux/vs_tag.h>
 
 #include "jfs_incore.h"
 #include "jfs_inode.h"
@@ -3047,6 +3048,8 @@ static int copy_from_dinode(struct dinod
 {
 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+	kuid_t kuid;
+	kgid_t kgid;
 
 	jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
 	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
@@ -3067,14 +3070,18 @@ static int copy_from_dinode(struct dinod
 	}
 	set_nlink(ip, le32_to_cpu(dip->di_nlink));
 
-	jfs_ip->saved_uid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
+	kuid = make_kuid(&init_user_ns, le32_to_cpu(dip->di_uid));
+	kgid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
+	ip->i_tag = INOTAG_KTAG(DX_TAG(ip), kuid, kgid, GLOBAL_ROOT_TAG);
+
+	jfs_ip->saved_uid = INOTAG_KUID(DX_TAG(ip), kuid, kgid);
 	if (!uid_valid(sbi->uid))
 		ip->i_uid = jfs_ip->saved_uid;
 	else {
 		ip->i_uid = sbi->uid;
 	}
 
-	jfs_ip->saved_gid = make_kgid(&init_user_ns, le32_to_cpu(dip->di_gid));
+	jfs_ip->saved_gid = INOTAG_KGID(DX_TAG(ip), kuid, kgid);
 	if (!gid_valid(sbi->gid))
 		ip->i_gid = jfs_ip->saved_gid;
 	else {
@@ -3139,16 +3146,14 @@ static void copy_to_dinode(struct dinode
 	dip->di_size = cpu_to_le64(ip->i_size);
 	dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
 	dip->di_nlink = cpu_to_le32(ip->i_nlink);
-	if (!uid_valid(sbi->uid))
-		dip->di_uid = cpu_to_le32(i_uid_read(ip));
-	else
-		dip->di_uid =cpu_to_le32(from_kuid(&init_user_ns,
-						   jfs_ip->saved_uid));
-	if (!gid_valid(sbi->gid))
-		dip->di_gid = cpu_to_le32(i_gid_read(ip));
-	else
-		dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
-						    jfs_ip->saved_gid));
+	dip->di_uid = cpu_to_le32(from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(ip),
+		!uid_valid(sbi->uid) ? ip->i_uid : jfs_ip->saved_uid,
+		ip->i_tag)));
+	dip->di_gid = cpu_to_le32(from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(ip),
+		!gid_valid(sbi->gid) ? ip->i_gid : jfs_ip->saved_gid,
+		ip->i_tag)));
 	jfs_get_inode_flags(jfs_ip);
 	/*
 	 * mode2 is only needed for storing the higher order bits.
diff -ruNp linux-3.13.11/fs/jfs/jfs_inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_inode.c
--- linux-3.13.11/fs/jfs/jfs_inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 
 #include <linux/fs.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_filsys.h"
@@ -30,29 +31,46 @@ void jfs_set_inode_flags(struct inode *i
 {
 	unsigned int flags = JFS_IP(inode)->mode2;
 
-	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
-		S_NOATIME | S_DIRSYNC | S_SYNC);
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
 	if (flags & JFS_IMMUTABLE_FL)
 		inode->i_flags |= S_IMMUTABLE;
+	if (flags & JFS_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
+
+	if (flags & JFS_SYNC_FL)
+		inode->i_flags |= S_SYNC;
 	if (flags & JFS_APPEND_FL)
 		inode->i_flags |= S_APPEND;
 	if (flags & JFS_NOATIME_FL)
 		inode->i_flags |= S_NOATIME;
 	if (flags & JFS_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
-	if (flags & JFS_SYNC_FL)
-		inode->i_flags |= S_SYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & JFS_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & JFS_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
 {
 	unsigned int flags = jfs_ip->vfs_inode.i_flags;
+	unsigned int vflags = jfs_ip->vfs_inode.i_vflags;
+
+	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL |
+			   JFS_APPEND_FL | JFS_NOATIME_FL |
+			   JFS_DIRSYNC_FL | JFS_SYNC_FL |
+			   JFS_BARRIER_FL | JFS_COW_FL);
 
-	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL |
-			   JFS_DIRSYNC_FL | JFS_SYNC_FL);
 	if (flags & S_IMMUTABLE)
 		jfs_ip->mode2 |= JFS_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		jfs_ip->mode2 |= JFS_IXUNLINK_FL;
+
 	if (flags & S_APPEND)
 		jfs_ip->mode2 |= JFS_APPEND_FL;
 	if (flags & S_NOATIME)
@@ -61,6 +79,11 @@ void jfs_get_inode_flags(struct jfs_inod
 		jfs_ip->mode2 |= JFS_DIRSYNC_FL;
 	if (flags & S_SYNC)
 		jfs_ip->mode2 |= JFS_SYNC_FL;
+
+	if (vflags & V_BARRIER)
+		jfs_ip->mode2 |= JFS_BARRIER_FL;
+	if (vflags & V_COW)
+		jfs_ip->mode2 |= JFS_COW_FL;
 }
 
 /*
diff -ruNp linux-3.13.11/fs/jfs/jfs_inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_inode.h
--- linux-3.13.11/fs/jfs/jfs_inode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/jfs_inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s
 extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
 	int fh_len, int fh_type);
 extern void jfs_set_inode_flags(struct inode *);
+extern int jfs_sync_flags(struct inode *, int, int);
 extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 extern int jfs_setattr(struct dentry *, struct iattr *);
 
diff -ruNp linux-3.13.11/fs/jfs/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/namei.c
--- linux-3.13.11/fs/jfs/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,7 @@
 #include <linux/ctype.h>
 #include <linux/quotaops.h>
 #include <linux/exportfs.h>
+#include <linux/vs_tag.h>
 #include "jfs_incore.h"
 #include "jfs_superblock.h"
 #include "jfs_inode.h"
@@ -1461,6 +1462,7 @@ static struct dentry *jfs_lookup(struct
 			jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum);
 	}
 
+	dx_propagate_tag(nd, ip);
 	return d_splice_alias(ip, dentry);
 }
 
@@ -1525,6 +1527,7 @@ const struct inode_operations jfs_dir_in
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
 #endif
+	.sync_flags	= jfs_sync_flags,
 };
 
 const struct file_operations jfs_dir_operations = {
diff -ruNp linux-3.13.11/fs/jfs/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/super.c
--- linux-3.13.11/fs/jfs/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/jfs/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -203,7 +203,8 @@ enum {
 	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
 	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
 	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
-	Opt_discard, Opt_nodiscard, Opt_discard_minblk
+	Opt_discard, Opt_nodiscard, Opt_discard_minblk,
+	Opt_tag, Opt_notag, Opt_tagid
 };
 
 static const match_table_t tokens = {
@@ -213,6 +214,10 @@ static const match_table_t tokens = {
 	{Opt_resize, "resize=%u"},
 	{Opt_resize_nosize, "resize"},
 	{Opt_errors, "errors=%s"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
+	{Opt_tag, "tagxid"},
 	{Opt_ignore, "noquota"},
 	{Opt_ignore, "quota"},
 	{Opt_usrquota, "usrquota"},
@@ -389,7 +394,20 @@ static int parse_options(char *options,
 			}
 			break;
 		}
-
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			*flag |= JFS_TAGGED;
+			break;
+		case Opt_notag:
+			*flag &= ~JFS_TAGGED;
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			*flag |= JFS_TAGGED;
+			break;
+#endif
 		default:
 			printk("jfs: Unrecognized mount option \"%s\" "
 					" or missing value\n", p);
@@ -421,6 +439,12 @@ static int jfs_remount(struct super_bloc
 		return -EINVAL;
 	}
 
+	if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+		printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
+			sb->s_id);
+		return -EINVAL;
+	}
+
 	if (newLVSize) {
 		if (sb->s_flags & MS_RDONLY) {
 			pr_err("JFS: resize requires volume" \
@@ -506,6 +530,9 @@ static int jfs_fill_super(struct super_b
 #ifdef CONFIG_JFS_POSIX_ACL
 	sb->s_flags |= MS_POSIXACL;
 #endif
+	/* map mount option tagxid */
+	if (sbi->flag & JFS_TAGGED)
+		sb->s_flags |= MS_TAGGED;
 
 	if (newLVSize) {
 		pr_err("resize option for remount only\n");
@@ -882,7 +909,7 @@ static int __init init_jfs_fs(void)
 
 	jfs_inode_cachep =
 	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
-			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
+			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
 			    init_once);
 	if (jfs_inode_cachep == NULL)
 		return -ENOMEM;
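
The super.c hunk adds tag, notag, tagid=<n> and the legacy tagxid alias to JFS's match_table_t parser: tag/tagid set JFS_TAGGED, notag clears it, and jfs_fill_super() then raises MS_TAGGED on the superblock, while the remount path refuses to enable tagging on a superblock that was not mounted tagged. Below is a small userspace sketch of that kind of keyword/value option parsing; the parser and flag names are invented for illustration (the kernel uses match_token(), not strtok()).

/* Toy mount-option parser in the spirit of the tag/notag/tagid handling;
 * not the kernel's match_table_t machinery. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OPT_TAGGED 0x1

static int parse_options(char *options, unsigned *flags, unsigned *tagid)
{
	for (char *p = strtok(options, ","); p; p = strtok(NULL, ",")) {
		if (!strcmp(p, "tag") || !strcmp(p, "tagxid")) {
			*flags |= OPT_TAGGED;
		} else if (!strcmp(p, "notag")) {
			*flags &= ~OPT_TAGGED;              /* clear the bit */
		} else if (!strncmp(p, "tagid=", 6)) {
			*tagid = (unsigned)strtoul(p + 6, NULL, 0);
			*flags |= OPT_TAGGED;
		} else {
			fprintf(stderr, "unrecognized option \"%s\"\n", p);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	char opts[] = "tag,tagid=42";
	unsigned flags = 0, tagid = 0;

	if (parse_options(opts, &flags, &tagid))
		return 1;
	printf("tagged=%d tagid=%u\n", !!(flags & OPT_TAGGED), tagid);
	return 0;
}
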
diff -ruNp linux-3.13.11/fs/libfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/libfs.c
--- linux-3.13.11/fs/libfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/libfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -145,13 +145,14 @@ static inline unsigned char dt_type(stru
  * both impossible due to the lock on directory.
  */
 
-int dcache_readdir(struct file *file, struct dir_context *ctx)
+static inline int do_dcache_readdir_filter(struct file *filp,
+	struct dir_context *ctx, int (*filter)(struct dentry *dentry))
 {
-	struct dentry *dentry = file->f_path.dentry;
-	struct dentry *cursor = file->private_data;
+	struct dentry *dentry = filp->f_path.dentry;
+	struct dentry *cursor = filp->private_data;
 	struct list_head *p, *q = &cursor->d_u.d_child;
 
-	if (!dir_emit_dots(file, ctx))
+	if (!dir_emit_dots(filp, ctx))
 		return 0;
 	spin_lock(&dentry->d_lock);
 	if (ctx->pos == 2)
@@ -159,6 +160,12 @@ int dcache_readdir(struct file *file, st
 
 	for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
 		struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
+		char d_name[sizeof(next->d_iname)];
+		const unsigned char *name;
+
+		if (filter && !filter(next))
+			continue;
+
 		spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!simple_positive(next)) {
 			spin_unlock(&next->d_lock);
@@ -167,7 +174,12 @@ int dcache_readdir(struct file *file, st
 
 		spin_unlock(&next->d_lock);
 		spin_unlock(&dentry->d_lock);
-		if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
+		name = next->d_name.name;
+		if (name == next->d_iname) {
+			memcpy(d_name, name, next->d_name.len);
+			name = d_name;
+		}
+		if (!dir_emit(ctx, name, next->d_name.len,
 			      next->d_inode->i_ino, dt_type(next->d_inode)))
 			return 0;
 		spin_lock(&dentry->d_lock);
@@ -181,8 +193,22 @@ int dcache_readdir(struct file *file, st
 	spin_unlock(&dentry->d_lock);
 	return 0;
 }
+
 EXPORT_SYMBOL(dcache_readdir);
 
+int dcache_readdir(struct file *filp, struct dir_context *ctx)
+{
+	return do_dcache_readdir_filter(filp, ctx, NULL);
+}
+
+EXPORT_SYMBOL(dcache_readdir_filter);
+
+int dcache_readdir_filter(struct file *filp, struct dir_context *ctx,
+	int (*filter)(struct dentry *))
+{
+	return do_dcache_readdir_filter(filp, ctx, filter);
+}
+
 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
 {
 	return -EISDIR;
@@ -999,7 +1025,7 @@ EXPORT_SYMBOL(noop_fsync);
 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
 				void *cookie)
 {
-	char *s = nd_get_link(nd);
+	const char *s = nd_get_link(nd);
 	if (!IS_ERR(s))
 		kfree(s);
 }
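
The libfs.c change refactors dcache_readdir() into do_dcache_readdir_filter(), which walks the child list and skips any entry rejected by an optional filter callback; dcache_readdir_filter() exposes that hook so other parts of the patch can hide per-context entries, and the copy into the local d_name buffer appears to guard against a short name changing under the unlocked dir_emit() call. A generic userspace sketch of the optional-predicate iteration pattern follows; the types and names are illustrative.

/* Generic "emit each entry unless a filter rejects it" walk, modelled on
 * do_dcache_readdir_filter(); purely illustrative userspace code. */
#include <stdbool.h>
#include <stdio.h>

struct entry {
	const char *name;
	int owner_ctx;          /* stand-in for a per-entry context tag */
};

typedef bool (*entry_filter_t)(const struct entry *);

static void list_entries(const struct entry *ents, size_t n,
			 entry_filter_t filter)
{
	for (size_t i = 0; i < n; i++) {
		if (filter && !filter(&ents[i]))
			continue;               /* hidden from this caller */
		printf("%s\n", ents[i].name);
	}
}

static bool visible_in_ctx42(const struct entry *e)
{
	return e->owner_ctx == 0 || e->owner_ctx == 42;
}

int main(void)
{
	const struct entry ents[] = {
		{ "shared",    0 },
		{ "guest-42", 42 },
		{ "guest-43", 43 },
	};
	size_t n = sizeof(ents) / sizeof(ents[0]);

	puts("-- unfiltered --");
	list_entries(ents, n, NULL);
	puts("-- context 42 --");
	list_entries(ents, n, visible_in_ctx42);
	return 0;
}
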
diff -ruNp linux-3.13.11/fs/lockd/clntproc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/lockd/clntproc.c
--- linux-3.13.11/fs/lockd/clntproc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/lockd/clntproc.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
 /*
  * Cookie counter for NLM requests
  */
-static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);
+static atomic_unchecked_t	nlm_cookie = ATOMIC_INIT(0x1234);
 
 void nlmclnt_next_cookie(struct nlm_cookie *c)
 {
-	u32	cookie = atomic_inc_return(&nlm_cookie);
+	u32	cookie = atomic_inc_return_unchecked(&nlm_cookie);
 
 	memcpy(c->data, &cookie, 4);
 	c->len=4;
diff -ruNp linux-3.13.11/fs/locks.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/locks.c
--- linux-3.13.11/fs/locks.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/locks.c	2014-07-09 12:00:15.000000000 +0200
@@ -129,6 +129,8 @@
 #include <linux/hashtable.h>
 #include <linux/percpu.h>
 #include <linux/lglock.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 
@@ -210,11 +212,17 @@ static void locks_init_lock_heads(struct
 /* Allocate an empty lock structure. */
 struct file_lock *locks_alloc_lock(void)
 {
-	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
+	struct file_lock *fl;
 
-	if (fl)
-		locks_init_lock_heads(fl);
+	if (!vx_locks_avail(1))
+		return NULL;
+
+	fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 
+	if (fl) {
+		locks_init_lock_heads(fl);
+		fl->fl_xid = -1;
+	}
 	return fl;
 }
 EXPORT_SYMBOL_GPL(locks_alloc_lock);
@@ -238,6 +246,7 @@ void locks_free_lock(struct file_lock *f
 	BUG_ON(!list_empty(&fl->fl_block));
 	BUG_ON(!hlist_unhashed(&fl->fl_link));
 
+	vx_locks_dec(fl);
 	locks_release_private(fl);
 	kmem_cache_free(filelock_cache, fl);
 }
@@ -247,6 +256,7 @@ void locks_init_lock(struct file_lock *f
 {
 	memset(fl, 0, sizeof(struct file_lock));
 	locks_init_lock_heads(fl);
+	fl->fl_xid = -1;
 }
 
 EXPORT_SYMBOL(locks_init_lock);
@@ -287,6 +297,7 @@ void locks_copy_lock(struct file_lock *n
 	new->fl_file = fl->fl_file;
 	new->fl_ops = fl->fl_ops;
 	new->fl_lmops = fl->fl_lmops;
+	new->fl_xid = fl->fl_xid;
 
 	locks_copy_private(new, fl);
 }
@@ -325,6 +336,11 @@ static int flock_make_lock(struct file *
 	fl->fl_flags = FL_FLOCK;
 	fl->fl_type = type;
 	fl->fl_end = OFFSET_MAX;
+
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	fl->fl_xid = filp->f_xid;
+	vx_locks_inc(fl);
 	
 	*lock = fl;
 	return 0;
@@ -464,6 +480,7 @@ static int lease_init(struct file *filp,
 
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
+	fl->fl_xid = vx_current_xid();
 
 	fl->fl_file = filp;
 	fl->fl_flags = FL_LEASE;
@@ -483,6 +500,11 @@ static struct file_lock *lease_alloc(str
 	if (fl == NULL)
 		return ERR_PTR(error);
 
+	fl->fl_xid = vx_current_xid();
+	if (filp)
+		vxd_assert(filp->f_xid == fl->fl_xid,
+			"f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
+	vx_locks_inc(fl);
 	error = lease_init(filp, type, fl);
 	if (error) {
 		locks_free_lock(fl);
@@ -858,6 +880,7 @@ static int flock_lock_file(struct file *
 		spin_lock(&inode->i_lock);
 	}
 
+	new_fl->fl_xid = -1;
 find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
@@ -878,6 +901,7 @@ find_conflict:
 		goto out;
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(before, new_fl);
+	vx_locks_inc(new_fl);
 	new_fl = NULL;
 	error = 0;
 
@@ -888,7 +912,8 @@ out:
 	return error;
 }
 
-static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
+static int __posix_lock_file(struct inode *inode, struct file_lock *request,
+	struct file_lock *conflock, vxid_t xid)
 {
 	struct file_lock *fl;
 	struct file_lock *new_fl = NULL;
@@ -899,6 +924,8 @@ static int __posix_lock_file(struct inod
 	int error;
 	bool added = false;
 
+	vxd_assert(xid == vx_current_xid(),
+		"xid(%d) == current(%d)", xid, vx_current_xid());
 	/*
 	 * We may need two file_lock structures for this operation,
 	 * so we get them in advance to avoid races.
@@ -909,7 +936,11 @@ static int __posix_lock_file(struct inod
 	    (request->fl_type != F_UNLCK ||
 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
 		new_fl = locks_alloc_lock();
+		new_fl->fl_xid = xid;
+		vx_locks_inc(new_fl);
 		new_fl2 = locks_alloc_lock();
+		new_fl2->fl_xid = xid;
+		vx_locks_inc(new_fl2);
 	}
 
 	spin_lock(&inode->i_lock);
@@ -1118,7 +1149,8 @@ static int __posix_lock_file(struct inod
 int posix_lock_file(struct file *filp, struct file_lock *fl,
 			struct file_lock *conflock)
 {
-	return __posix_lock_file(file_inode(filp), fl, conflock);
+	return __posix_lock_file(file_inode(filp),
+		fl, conflock, filp->f_xid);
 }
 EXPORT_SYMBOL(posix_lock_file);
 
@@ -1208,7 +1240,7 @@ int locks_mandatory_area(int read_write,
 	fl.fl_end = offset + count - 1;
 
 	for (;;) {
-		error = __posix_lock_file(inode, &fl, NULL);
+		error = __posix_lock_file(inode, &fl, NULL, filp->f_xid);
 		if (error != FILE_LOCK_DEFERRED)
 			break;
 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
@@ -1549,6 +1581,7 @@ static int generic_add_lease(struct file
 		goto out;
 
 	locks_insert_lock(before, lease);
+	vx_locks_inc(lease);
 	error = 0;
 out:
 	if (is_deleg)
@@ -1991,6 +2024,11 @@ int fcntl_setlk(unsigned int fd, struct
 	if (file_lock == NULL)
 		return -ENOLCK;
 
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	file_lock->fl_xid = filp->f_xid;
+	vx_locks_inc(file_lock);
+
 	/*
 	 * This might block, so we do it before checking the inode.
 	 */
@@ -2109,6 +2147,11 @@ int fcntl_setlk64(unsigned int fd, struc
 	if (file_lock == NULL)
 		return -ENOLCK;
 
+	vxd_assert(filp->f_xid == vx_current_xid(),
+		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+	file_lock->fl_xid = filp->f_xid;
+	vx_locks_inc(file_lock);
+
 	/*
 	 * This might block, so we do it before checking the inode.
 	 */
@@ -2219,16 +2262,16 @@ void locks_remove_flock(struct file *fil
 		return;
 
 	if (filp->f_op->flock) {
-		struct file_lock fl = {
+		struct file_lock flock = {
 			.fl_pid = current->tgid,
 			.fl_file = filp,
 			.fl_flags = FL_FLOCK,
 			.fl_type = F_UNLCK,
 			.fl_end = OFFSET_MAX,
 		};
-		filp->f_op->flock(filp, F_SETLKW, &fl);
-		if (fl.fl_ops && fl.fl_ops->fl_release_private)
-			fl.fl_ops->fl_release_private(&fl);
+		filp->f_op->flock(filp, F_SETLKW, &flock);
+		if (flock.fl_ops && flock.fl_ops->fl_release_private)
+			flock.fl_ops->fl_release_private(&flock);
 	}
 
 	spin_lock(&inode->i_lock);
@@ -2378,8 +2421,11 @@ static int locks_show(struct seq_file *f
 
 	lock_get_status(f, fl, iter->li_pos, "");
 
-	list_for_each_entry(bfl, &fl->fl_block, fl_block)
+	list_for_each_entry(bfl, &fl->fl_block, fl_block) {
+		if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT))
+			continue;
 		lock_get_status(f, bfl, iter->li_pos, " ->");
+	}
 
 	return 0;
 }
diff -ruNp linux-3.13.11/fs/mount.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/mount.h
--- linux-3.13.11/fs/mount.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/mount.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@ struct mnt_namespace {
 	u64			seq;	/* Sequence number to prevent loops */
 	wait_queue_head_t poll;
 	int event;
-};
+} __randomize_layout;
 
 struct mnt_pcp {
 	int mnt_count;
@@ -57,7 +57,8 @@ struct mount {
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	int mnt_pinned;
 	struct path mnt_ex_mountpoint;
-};
+	vtag_t mnt_tag;			/* tagging used for vfsmount */
+} __randomize_layout;
 
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
 
diff -ruNp linux-3.13.11/fs/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/namei.c
--- linux-3.13.11/fs/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/namei.c	2014-07-09 12:04:19.000000000 +0200
@@ -34,9 +34,19 @@
 #include <linux/device_cgroup.h>
 #include <linux/fs_struct.h>
 #include <linux/posix_acl.h>
+#include <linux/proc_fs.h>
+#include <linux/magic.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
+#include <linux/vs_device.h>
+#include <linux/vs_context.h>
+#include <linux/pid_namespace.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
+#include "proc/internal.h"
 #include "mount.h"
 
 /* [Feb-1997 T. Schoebel-Theuer]
@@ -266,6 +276,89 @@ static int check_acl(struct inode *inode
 	return -EAGAIN;
 }
 
+static inline int dx_barrier(const struct inode *inode)
+{
+	if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) {
+		vxwprintk_task(1, "did hit the barrier.");
+		return 1;
+	}
+	return 0;
+}
+
+static int __dx_permission(const struct inode *inode, int mask)
+{
+	if (dx_barrier(inode))
+		return -EACCES;
+
+	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
+		/* devpts is xid tagged */
+		if (S_ISDIR(inode->i_mode) ||
+		    vx_check((vxid_t)i_tag_read(inode), VS_IDENT | VS_WATCH_P))
+			return 0;
+
+		/* just pretend we didn't find anything */
+		return -ENOENT;
+	}
+	else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
+		struct proc_dir_entry *de = PDE(inode);
+
+		if (de && !vx_hide_check(0, de->vx_flags))
+			goto out;
+
+		if ((mask & (MAY_WRITE | MAY_APPEND))) {
+			struct pid *pid;
+			struct task_struct *tsk;
+
+			if (vx_check(0, VS_ADMIN | VS_WATCH_P) ||
+			    vx_flags(VXF_STATE_SETUP, 0))
+				return 0;
+
+			pid = PROC_I(inode)->pid;
+			if (!pid)
+				goto out;
+
+			rcu_read_lock();
+			tsk = pid_task(pid, PIDTYPE_PID);
+			vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]",
+				  tsk, (tsk ? vx_task_xid(tsk) : 0));
+			if (tsk &&
+				vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P)) {
+				rcu_read_unlock();
+				return 0;
+			}
+			rcu_read_unlock();
+		}
+		else {
+			/* FIXME: Should we block some entries here? */
+			return 0;
+		}
+	}
+	else {
+		if (dx_notagcheck(inode->i_sb) ||
+		    dx_check((vxid_t)i_tag_read(inode),
+			DX_HOSTID | DX_ADMIN | DX_WATCH | DX_IDENT))
+			return 0;
+	}
+
+out:
+	return -EACCES;
+}
+
+int dx_permission(const struct inode *inode, int mask)
+{
+	int ret = __dx_permission(inode, mask);
+	if (unlikely(ret)) {
+#ifndef	CONFIG_VSERVER_WARN_DEVPTS
+		if (inode->i_sb->s_magic != DEVPTS_SUPER_MAGIC)
+#endif
+		    vxwprintk_task(1,
+			"denied [0x%x] access to inode %s:%p[#%d,%lu]",
+			mask, inode->i_sb->s_id, inode,
+			i_tag_read(inode), inode->i_ino);
+	}
+	return ret;
+}
+
 /*
  * This does the basic permission checking
  */
@@ -319,16 +412,32 @@ int generic_permission(struct inode *ino
 	if (ret != -EACCES)
 		return ret;
 
+#ifdef CONFIG_GRKERNSEC
+	/* we'll block if we have to log due to a denied capability use */
+	if (mask & MAY_NOT_BLOCK)
+		return -ECHILD;
+#endif
+
 	if (S_ISDIR(inode->i_mode)) {
 		/* DACs are overridable for directories */
-		if (inode_capable(inode, CAP_DAC_OVERRIDE))
-			return 0;
 		if (!(mask & MAY_WRITE))
-			if (inode_capable(inode, CAP_DAC_READ_SEARCH))
+			if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
+			    inode_capable(inode, CAP_DAC_READ_SEARCH))
 				return 0;
+		if (inode_capable(inode, CAP_DAC_OVERRIDE))
+			return 0;
 		return -EACCES;
 	}
 	/*
+	 * Searching includes executable on directories, else just read.
+	 */
+	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
+	if (mask == MAY_READ)
+		if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
+		    inode_capable(inode, CAP_DAC_READ_SEARCH))
+			return 0;
+
+	/*
 	 * Read/write DACs are always overridable.
 	 * Executable DACs are overridable when there is
 	 * at least one exec bit set.
@@ -337,14 +446,6 @@ int generic_permission(struct inode *ino
 		if (inode_capable(inode, CAP_DAC_OVERRIDE))
 			return 0;
 
-	/*
-	 * Searching includes executable on directories, else just read.
-	 */
-	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
-	if (mask == MAY_READ)
-		if (inode_capable(inode, CAP_DAC_READ_SEARCH))
-			return 0;
-
 	return -EACCES;
 }
 
@@ -388,10 +489,14 @@ int __inode_permission(struct inode *ino
 		/*
 		 * Nobody gets write access to an immutable file.
 		 */
-		if (IS_IMMUTABLE(inode))
+		if (IS_IMMUTABLE(inode) && !IS_COW(inode))
 			return -EACCES;
 	}
 
+	retval = dx_permission(inode, mask);
+	if (retval)
+		return retval;
+
 	retval = do_inode_permission(inode, mask);
 	if (retval)
 		return retval;
@@ -810,7 +915,7 @@ follow_link(struct path *link, struct na
 {
 	struct dentry *dentry = link->dentry;
 	int error;
-	char *s;
+	const char *s;
 
 	BUG_ON(nd->flags & LOOKUP_RCU);
 
@@ -831,6 +936,12 @@ follow_link(struct path *link, struct na
 	if (error)
 		goto out_put_nd_path;
 
+	if (gr_handle_follow_link(dentry->d_parent->d_inode,
+				  dentry->d_inode, dentry, nd->path.mnt)) {
+		error = -EACCES;
+		goto out_put_nd_path;
+	}	
+
 	nd->last_type = LAST_BIND;
 	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
 	error = PTR_ERR(*p);
@@ -1383,6 +1494,9 @@ static int lookup_fast(struct nameidata
 				goto unlazy;
 			}
 		}
+
+		/* FIXME: check dx permission */
+
 		path->mnt = mnt;
 		path->dentry = dentry;
 		if (unlikely(!__follow_mount_rcu(nd, path, inode)))
@@ -1413,6 +1527,8 @@ unlazy:
 		}
 	}
 
+	/* FIXME: check dx permission */
+
 	path->mnt = mnt;
 	path->dentry = dentry;
 	err = follow_managed(path, nd->flags);
@@ -1579,6 +1695,8 @@ static inline int nested_symlink(struct
 		if (res)
 			break;
 		res = walk_component(nd, path, LOOKUP_FOLLOW);
+		if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
+			res = -EACCES;
 		put_link(nd, &link, cookie);
 	} while (res > 0);
 
@@ -1652,7 +1770,7 @@ EXPORT_SYMBOL(full_name_hash);
 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
 {
 	unsigned long a, b, adata, bdata, mask, hash, len;
-	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+	static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 
 	hash = a = 0;
 	len = -sizeof(unsigned long);
@@ -1936,6 +2054,8 @@ static int path_lookupat(int dfd, const
 			if (err)
 				break;
 			err = lookup_last(nd, &path);
+			if (!err && gr_handle_symlink_owner(&link, nd->inode))
+				err = -EACCES;
 			put_link(nd, &link, cookie);
 		}
 	}
@@ -1943,6 +2063,13 @@ static int path_lookupat(int dfd, const
 	if (!err)
 		err = complete_walk(nd);
 
+	if (!err && !(nd->flags & LOOKUP_PARENT)) {
+		if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+			path_put(&nd->path);
+			err = -ENOENT;
+		}
+	}
+
 	if (!err && nd->flags & LOOKUP_DIRECTORY) {
 		if (!d_is_directory(nd->path.dentry)) {
 			path_put(&nd->path);
@@ -1970,8 +2097,15 @@ static int filename_lookup(int dfd, stru
 		retval = path_lookupat(dfd, name->name,
 						flags | LOOKUP_REVAL, nd);
 
-	if (likely(!retval))
+	if (likely(!retval)) {
 		audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
+		if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
+			if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
+				path_put(&nd->path);
+				return -ENOENT;
+			}
+		}
+	}
 	return retval;
 }
 
@@ -2400,7 +2534,7 @@ static int may_delete(struct inode *dir,
 		return -EPERM;
 
 	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
-	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
+		IS_IXORUNLINK(inode) || IS_SWAPFILE(inode))
 		return -EPERM;
 	if (isdir) {
 		if (!d_is_directory(victim) && !d_is_autodir(victim))
@@ -2480,19 +2614,25 @@ int vfs_create(struct inode *dir, struct
 		bool want_excl)
 {
 	int error = may_create(dir, dentry);
-	if (error)
+	if (error) {
+		vxdprintk(VXD_CBIT(misc, 3), "may_create failed with %d", error);
 		return error;
+	}
 
 	if (!dir->i_op->create)
 		return -EACCES;	/* shouldn't it be ENOSYS? */
 	mode &= S_IALLUGO;
 	mode |= S_IFREG;
 	error = security_inode_create(dir, dentry, mode);
-	if (error)
+	if (error) {
+		vxdprintk(VXD_CBIT(misc, 3), "security_inode_create failed with %d", error);
 		return error;
+	}
 	error = dir->i_op->create(dir, dentry, mode, want_excl);
 	if (!error)
 		fsnotify_create(dir, dentry);
+	else
+		vxdprintk(VXD_CBIT(misc, 3), "i_op->create failed with %d", error);
 	return error;
 }
 
@@ -2527,6 +2667,15 @@ static int may_open(struct path *path, i
 		break;
 	}
 
+#ifdef	CONFIG_VSERVER_COWBL
+	if (IS_COW(inode) &&
+		((flag & O_ACCMODE) != O_RDONLY)) {
+		if (IS_COW_LINK(inode))
+			return -EMLINK;
+		inode->i_flags &= ~(S_IXUNLINK|S_IMMUTABLE);
+		mark_inode_dirty(inode);
+	}
+#endif
 	error = inode_permission(inode, acc_mode);
 	if (error)
 		return error;
@@ -2545,6 +2694,13 @@ static int may_open(struct path *path, i
 	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
 		return -EPERM;
 
+	if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
+		return -EPERM;
+	if (gr_handle_rawio(inode))
+		return -EPERM;
+	if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
+		return -EACCES;
+
 	return 0;
 }
 
@@ -2776,7 +2932,7 @@ looked_up:
  * cleared otherwise prior to returning.
  */
 static int lookup_open(struct nameidata *nd, struct path *path,
-			struct file *file,
+			struct path *link, struct file *file,
 			const struct open_flags *op,
 			bool got_write, int *opened)
 {
@@ -2811,6 +2967,17 @@ static int lookup_open(struct nameidata
 	/* Negative dentry, just create the file */
 	if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
 		umode_t mode = op->mode;
+
+		if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
+			error = -EACCES;
+			goto out_dput;
+		}
+
+		if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
+			error = -EACCES;
+			goto out_dput;
+		}
+
 		if (!IS_POSIXACL(dir->d_inode))
 			mode &= ~current_umask();
 		/*
@@ -2832,6 +2999,8 @@ static int lookup_open(struct nameidata
 				   nd->flags & LOOKUP_EXCL);
 		if (error)
 			goto out_dput;
+		else
+			gr_handle_create(dentry, nd->path.mnt);
 	}
 out_no_open:
 	path->dentry = dentry;
@@ -2846,7 +3015,7 @@ out_dput:
 /*
  * Handle the last step of open()
  */
-static int do_last(struct nameidata *nd, struct path *path,
+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
 		   struct file *file, const struct open_flags *op,
 		   int *opened, struct filename *name)
 {
@@ -2896,6 +3065,15 @@ static int do_last(struct nameidata *nd,
 		if (error)
 			return error;
 
+		if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
+			error = -ENOENT;
+			goto out;
+		}
+		if (link && gr_handle_symlink_owner(link, nd->inode)) {
+			error = -EACCES;
+			goto out;
+		}
+
 		audit_inode(name, dir, LOOKUP_PARENT);
 		error = -EISDIR;
 		/* trailing slashes? */
@@ -2915,7 +3093,7 @@ retry_lookup:
 		 */
 	}
 	mutex_lock(&dir->d_inode->i_mutex);
-	error = lookup_open(nd, path, file, op, got_write, opened);
+	error = lookup_open(nd, path, link, file, op, got_write, opened);
 	mutex_unlock(&dir->d_inode->i_mutex);
 
 	if (error <= 0) {
@@ -2939,11 +3117,28 @@ retry_lookup:
 		goto finish_open_created;
 	}
 
+	if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
+		error = -ENOENT;
+		goto exit_dput;
+	}
+	if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
+		error = -EACCES;
+		goto exit_dput;
+	}
+
 	/*
 	 * create/update audit record if it already exists.
 	 */
-	if (d_is_positive(path->dentry))
+	if (d_is_positive(path->dentry)) {
+		/* only check if O_CREAT is specified, all other checks need to go
+		   into may_open */
+		if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
+			error = -EACCES;
+			goto exit_dput;
+		}
+
 		audit_inode(name, path->dentry, 0);
+	}
 
 	/*
 	 * If atomic_open() acquired write access it is dropped now due to
@@ -2984,6 +3179,11 @@ finish_lookup:
 			}
 		}
 		BUG_ON(inode != path->dentry->d_inode);
+		/* if we're resolving a symlink to another symlink */
+		if (link && gr_handle_symlink_owner(link, inode)) {
+			error = -EACCES;
+			goto out;
+		}
 		return 1;
 	}
 
@@ -2993,7 +3193,6 @@ finish_lookup:
 		save_parent.dentry = nd->path.dentry;
 		save_parent.mnt = mntget(path->mnt);
 		nd->path.dentry = path->dentry;
-
 	}
 	nd->inode = inode;
 	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
@@ -3003,7 +3202,18 @@ finish_open:
 		path_put(&save_parent);
 		return error;
 	}
+
+	if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
+		error = -ENOENT;
+		goto out;
+	}
+	if (link && gr_handle_symlink_owner(link, nd->inode)) {
+		error = -EACCES;
+		goto out;
+	}
+
 	audit_inode(name, nd->path.dentry, 0);
+
 	error = -EISDIR;
 	if ((open_flag & O_CREAT) &&
 	    (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
@@ -3022,6 +3232,16 @@ finish_open:
 	}
 finish_open_created:
 	error = may_open(&nd->path, acc_mode, open_flag);
+#ifdef	CONFIG_VSERVER_COWBL
+	if (error == -EMLINK) {
+		struct dentry *dentry;
+		dentry = cow_break_link(name->name);
+		if (IS_ERR(dentry))
+			error = PTR_ERR(dentry);
+		else
+			dput(dentry);
+	}
+#endif
 	if (error)
 		goto out;
 	file->f_path.mnt = nd->path.mnt;
@@ -3147,6 +3367,7 @@ static struct file *path_openat(int dfd,
 	int opened = 0;
 	int error;
 
+restart:
 	file = get_empty_filp();
 	if (IS_ERR(file))
 		return file;
@@ -3167,7 +3388,7 @@ static struct file *path_openat(int dfd,
 	if (unlikely(error))
 		goto out;
 
-	error = do_last(nd, &path, file, op, &opened, pathname);
+	error = do_last(nd, &path, NULL, file, op, &opened, pathname);
 	while (unlikely(error > 0)) { /* trailing symlink */
 		struct path link = path;
 		void *cookie;
@@ -3185,9 +3406,19 @@ static struct file *path_openat(int dfd,
 		error = follow_link(&link, nd, &cookie);
 		if (unlikely(error))
 			break;
-		error = do_last(nd, &path, file, op, &opened, pathname);
+		error = do_last(nd, &path, &link, file, op, &opened, pathname);
 		put_link(nd, &link, cookie);
 	}
+
+#ifdef	CONFIG_VSERVER_COWBL
+	if (error == -EMLINK) {
+		if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
+			path_put(&nd->root);
+		if (base)
+			fput(base);
+		goto restart;
+	}
+#endif
 out:
 	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
 		path_put(&nd->root);
@@ -3285,9 +3516,11 @@ struct dentry *kern_path_create(int dfd,
 		goto unlock;
 
 	error = -EEXIST;
-	if (d_is_positive(dentry))
+	if (d_is_positive(dentry)) {
+		if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
+			error = -ENOENT;
 		goto fail;
-
+	}
 	/*
 	 * Special case - lookup gave negative, but... we had foo/bar/
 	 * From the vfs_mknod() POV we just have a negative dentry -
@@ -3303,6 +3536,11 @@ struct dentry *kern_path_create(int dfd,
 		goto fail;
 	}
 	*path = nd.path;
+	vxdprintk(VXD_CBIT(misc, 3), "kern_path_create path.dentry = %p (%.*s), dentry = %p (%.*s), d_inode = %p",
+		path->dentry, path->dentry->d_name.len,
+		path->dentry->d_name.name, dentry,
+		dentry->d_name.len, dentry->d_name.name,
+		path->dentry->d_inode);
 	return dentry;
 fail:
 	dput(dentry);
@@ -3339,6 +3577,20 @@ struct dentry *user_path_create(int dfd,
 }
 EXPORT_SYMBOL(user_path_create);
 
+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
+{
+	struct filename *tmp = getname(pathname);
+	struct dentry *res;
+	if (IS_ERR(tmp))
+		return ERR_CAST(tmp);
+	res = kern_path_create(dfd, tmp->name, path, lookup_flags);
+	if (IS_ERR(res))
+		putname(tmp);
+	else
+		*to = tmp;
+	return res;
+}
+
 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	int error = may_create(dir, dentry);
@@ -3401,6 +3653,17 @@ retry:
 
 	if (!IS_POSIXACL(path.dentry->d_inode))
 		mode &= ~current_umask();
+
+	if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
+		error = -EPERM;
+		goto out;
+	}
+
+	if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
+		error = -EACCES;
+		goto out;
+	}
+
 	error = security_path_mknod(&path, dentry, mode, dev);
 	if (error)
 		goto out;
@@ -3417,6 +3680,8 @@ retry:
 			break;
 	}
 out:
+	if (!error)
+		gr_handle_create(dentry, path.mnt);
 	done_path_create(&path, dentry);
 	if (retry_estale(error, lookup_flags)) {
 		lookup_flags |= LOOKUP_REVAL;
@@ -3469,9 +3734,16 @@ retry:
 
 	if (!IS_POSIXACL(path.dentry->d_inode))
 		mode &= ~current_umask();
+	if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
+		error = -EACCES;
+		goto out;
+	}
 	error = security_path_mkdir(&path, dentry, mode);
 	if (!error)
 		error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+	if (!error)
+		gr_handle_create(dentry, path.mnt);
+out:
 	done_path_create(&path, dentry);
 	if (retry_estale(error, lookup_flags)) {
 		lookup_flags |= LOOKUP_REVAL;
@@ -3552,6 +3824,8 @@ static long do_rmdir(int dfd, const char
 	struct filename *name;
 	struct dentry *dentry;
 	struct nameidata nd;
+	ino_t saved_ino = 0;
+	dev_t saved_dev = 0;
 	unsigned int lookup_flags = 0;
 retry:
 	name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3584,10 +3858,21 @@ retry:
 		error = -ENOENT;
 		goto exit3;
 	}
+
+	saved_ino = dentry->d_inode->i_ino;
+	saved_dev = gr_get_dev_from_dentry(dentry);
+
+	if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
+		error = -EACCES;
+		goto exit3;
+	}
+
 	error = security_path_rmdir(&nd.path, dentry);
 	if (error)
 		goto exit3;
 	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+	if (!error && (saved_dev || saved_ino))
+		gr_handle_delete(saved_ino, saved_dev);
 exit3:
 	dput(dentry);
 exit2:
@@ -3677,6 +3962,8 @@ static long do_unlinkat(int dfd, const c
 	struct nameidata nd;
 	struct inode *inode = NULL;
 	struct inode *delegated_inode = NULL;
+	ino_t saved_ino = 0;
+	dev_t saved_dev = 0;
 	unsigned int lookup_flags = 0;
 retry:
 	name = user_path_parent(dfd, pathname, &nd, lookup_flags);
@@ -3703,10 +3990,22 @@ retry_deleg:
 		if (d_is_negative(dentry))
 			goto slashes;
 		ihold(inode);
+
+		if (inode->i_nlink <= 1) {
+			saved_ino = inode->i_ino;
+			saved_dev = gr_get_dev_from_dentry(dentry);
+		}
+		if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
+			error = -EACCES;
+			goto exit2;
+		}
+
 		error = security_path_unlink(&nd.path, dentry);
 		if (error)
 			goto exit2;
 		error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
+		if (!error && (saved_ino || saved_dev))
+			gr_handle_delete(saved_ino, saved_dev);
 exit2:
 		dput(dentry);
 	}
@@ -3794,9 +4093,17 @@ retry:
 	if (IS_ERR(dentry))
 		goto out_putname;
 
+	if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
+		error = -EACCES;
+		goto out;
+	}
+
 	error = security_path_symlink(&path, dentry, from->name);
 	if (!error)
 		error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+	if (!error)
+		gr_handle_create(dentry, path.mnt);
+out:
 	done_path_create(&path, dentry);
 	if (retry_estale(error, lookup_flags)) {
 		lookup_flags |= LOOKUP_REVAL;
@@ -3850,7 +4157,7 @@ int vfs_link(struct dentry *old_dentry,
 	/*
 	 * A link to an append-only or immutable file cannot be created.
 	 */
-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
 		return -EPERM;
 	if (!dir->i_op->link)
 		return -EPERM;
@@ -3899,6 +4206,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
 	struct dentry *new_dentry;
 	struct path old_path, new_path;
 	struct inode *delegated_inode = NULL;
+	struct filename *to = NULL;
 	int how = 0;
 	int error;
 
@@ -3922,7 +4230,7 @@ retry:
 	if (error)
 		return error;
 
-	new_dentry = user_path_create(newdfd, newname, &new_path,
+	new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
 					(how & LOOKUP_REVAL));
 	error = PTR_ERR(new_dentry);
 	if (IS_ERR(new_dentry))
@@ -3934,11 +4242,28 @@ retry:
 	error = may_linkat(&old_path);
 	if (unlikely(error))
 		goto out_dput;
+
+	if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
+			       old_path.dentry->d_inode,
+			       old_path.dentry->d_inode->i_mode, to)) {
+		error = -EACCES;
+		goto out_dput;
+	}
+
+	if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
+				old_path.dentry, old_path.mnt, to)) {
+		error = -EACCES;
+		goto out_dput;
+	}
+
 	error = security_path_link(old_path.dentry, &new_path, new_dentry);
 	if (error)
 		goto out_dput;
 	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
+	if (!error)
+		gr_handle_create(new_dentry, new_path.mnt);
 out_dput:
+	putname(to);
 	done_path_create(&new_path, new_dentry);
 	if (delegated_inode) {
 		error = break_deleg_wait(&delegated_inode);
@@ -4225,6 +4550,12 @@ retry_deleg:
 	if (new_dentry == trap)
 		goto exit5;
 
+	error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
+				     old_dentry, old_dir->d_inode, oldnd.path.mnt,
+				     to);
+	if (error)
+		goto exit5;
+
 	error = security_path_rename(&oldnd.path, old_dentry,
 				     &newnd.path, new_dentry);
 	if (error)
@@ -4232,6 +4563,9 @@ retry_deleg:
 	error = vfs_rename(old_dir->d_inode, old_dentry,
 				   new_dir->d_inode, new_dentry,
 				   &delegated_inode);
+	if (!error)
+		gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
+				 new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
 exit5:
 	dput(new_dentry);
 exit4:
@@ -4268,6 +4602,8 @@ SYSCALL_DEFINE2(rename, const char __use
 
 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
 {
+	char tmpbuf[64];
+	const char *newlink;
 	int len;
 
 	len = PTR_ERR(link);
@@ -4277,7 +4613,14 @@ int vfs_readlink(struct dentry *dentry,
 	len = strlen(link);
 	if (len > (unsigned) buflen)
 		len = buflen;
-	if (copy_to_user(buffer, link, len))
+
+	if (len < sizeof(tmpbuf)) {
+		memcpy(tmpbuf, link, len);
+		newlink = tmpbuf;
+	} else
+		newlink = link;
+
+	if (copy_to_user(buffer, newlink, len))
 		len = -EFAULT;
 out:
 	return len;
@@ -4305,6 +4648,287 @@ int generic_readlink(struct dentry *dent
 	return res;
 }
 
+
+#ifdef	CONFIG_VSERVER_COWBL
+
+static inline
+long do_cow_splice(struct file *in, struct file *out, size_t len)
+{
+	loff_t ppos = 0;
+	loff_t opos = 0;
+
+	return do_splice_direct(in, &ppos, out, &opos, len, 0);
+}
+
+struct dentry *cow_break_link(const char *pathname)
+{
+	int ret, mode, pathlen, redo = 0, drop = 1;
+	struct nameidata old_nd, dir_nd;
+	struct path dir_path, *old_path, *new_path;
+	struct dentry *dir, *old_dentry, *new_dentry = NULL;
+	struct file *old_file;
+	struct file *new_file;
+	char *to, *path, pad='\251';
+	loff_t size;
+
+	vxdprintk(VXD_CBIT(misc, 1),
+		"cow_break_link(" VS_Q("%s") ")", pathname);
+
+	path = kmalloc(PATH_MAX, GFP_KERNEL);
+	ret = -ENOMEM;
+	if (!path)
+		goto out;
+
+	/* old_nd.path will have refs to dentry and mnt */
+	ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"do_path_lookup(old): %d", ret);
+	if (ret < 0)
+		goto out_free_path;
+
+	/* dentry/mnt refs handed over to old_path */
+	old_path = &old_nd.path;
+	/* no explicit reference for old_dentry here */
+	old_dentry = old_path->dentry;
+
+	mode = old_dentry->d_inode->i_mode;
+	to = d_path(old_path, path, PATH_MAX-2);
+	pathlen = strlen(to);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"old path " VS_Q("%s") " [%p:" VS_Q("%.*s") ":%d]", to,
+		old_dentry,
+		old_dentry->d_name.len, old_dentry->d_name.name,
+		old_dentry->d_name.len);
+
+	to[pathlen + 1] = 0;
+retry:
+	new_dentry = NULL;
+	to[pathlen] = pad--;
+	ret = -ELOOP;
+	if (pad <= '\240')
+		goto out_rel_old;
+
+	vxdprintk(VXD_CBIT(misc, 1), "temp copy " VS_Q("%s"), to);
+
+	/* dir_nd.path will have refs to dentry and mnt */
+	ret = do_path_lookup(AT_FDCWD, to,
+		LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd);
+	vxdprintk(VXD_CBIT(misc, 2), "do_path_lookup(new): %d", ret);
+	if (ret < 0)
+		goto retry;
+
+	/* this puppy downs the dir inode mutex if successful.
+	   dir_path will hold refs to dentry and mnt and
+	   we'll have write access to the mnt */
+	new_dentry = kern_path_create(AT_FDCWD, to, &dir_path, 0);
+	if (!new_dentry || IS_ERR(new_dentry)) {
+		path_put(&dir_nd.path);
+		vxdprintk(VXD_CBIT(misc, 2),
+			"kern_path_create(new) failed with %ld",
+			PTR_ERR(new_dentry));
+		goto retry;
+	}
+	vxdprintk(VXD_CBIT(misc, 2),
+		"kern_path_create(new): %p [" VS_Q("%.*s") ":%d]",
+		new_dentry,
+		new_dentry->d_name.len, new_dentry->d_name.name,
+		new_dentry->d_name.len);
+
+	/* take a reference on new_dentry */
+	dget(new_dentry);
+
+	/* dentry/mnt refs handed over to new_path */
+	new_path = &dir_path;
+
+	/* dentry for old/new dir */
+	dir = dir_nd.path.dentry;
+
+	/* give up reference on dir */
+	dput(new_path->dentry);
+
+	/* new_dentry already has a reference */
+	new_path->dentry = new_dentry;
+
+	ret = vfs_create(dir->d_inode, new_dentry, mode, 1);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"vfs_create(new): %d", ret);
+	if (ret == -EEXIST) {
+		path_put(&dir_nd.path);
+		mutex_unlock(&dir->d_inode->i_mutex);
+		mnt_drop_write(new_path->mnt);
+		path_put(new_path);
+		new_dentry = NULL;
+		goto retry;
+	}
+	else if (ret < 0)
+		goto out_unlock_new;
+
+	/* drop out early, ret passes ENOENT */
+	ret = -ENOENT;
+	if ((redo = d_unhashed(old_dentry)))
+		goto out_unlock_new;
+
+	/* doesn't change refs for old_path */
+	old_file = dentry_open(old_path, O_RDONLY, current_cred());
+	vxdprintk(VXD_CBIT(misc, 2),
+		"dentry_open(old): %p", old_file);
+	if (IS_ERR(old_file)) {
+		ret = PTR_ERR(old_file);
+		goto out_unlock_new;
+	}
+
+	/* doesn't change refs for new_path */
+	new_file = dentry_open(new_path, O_WRONLY, current_cred());
+	vxdprintk(VXD_CBIT(misc, 2),
+		"dentry_open(new): %p", new_file);
+	if (IS_ERR(new_file)) {
+		ret = PTR_ERR(new_file);
+		goto out_fput_old;
+	}
+
+	/* unlock the inode mutex from kern_path_create() */
+	mutex_unlock(&dir->d_inode->i_mutex);
+
+	/* drop write access to mnt */
+	mnt_drop_write(new_path->mnt);
+
+	drop = 0;
+
+	size = i_size_read(old_file->f_dentry->d_inode);
+	ret = do_cow_splice(old_file, new_file, size);
+	vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
+	if (ret < 0) {
+		goto out_fput_both;
+	} else if (ret < size) {
+		ret = -ENOSPC;
+		goto out_fput_both;
+	} else {
+		struct inode *old_inode = old_dentry->d_inode;
+		struct inode *new_inode = new_dentry->d_inode;
+		struct iattr attr = {
+			.ia_uid = old_inode->i_uid,
+			.ia_gid = old_inode->i_gid,
+			.ia_valid = ATTR_UID | ATTR_GID
+			};
+
+		setattr_copy(new_inode, &attr);
+		mark_inode_dirty(new_inode);
+	}
+
+	/* lock rename mutex */
+	mutex_lock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+	/* drop out late */
+	ret = -ENOENT;
+	if ((redo = d_unhashed(old_dentry)))
+		goto out_unlock;
+
+	vxdprintk(VXD_CBIT(misc, 2),
+		"vfs_rename: [" VS_Q("%*s") ":%d] -> [" VS_Q("%*s") ":%d]",
+		new_dentry->d_name.len, new_dentry->d_name.name,
+		new_dentry->d_name.len,
+		old_dentry->d_name.len, old_dentry->d_name.name,
+		old_dentry->d_name.len);
+	ret = vfs_rename(dir_nd.path.dentry->d_inode, new_dentry,
+		old_dentry->d_parent->d_inode, old_dentry, NULL);
+	vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
+
+out_unlock:
+	mutex_unlock(&old_dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+out_fput_both:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(new_file=%p[#%ld])", new_file,
+		atomic_long_read(&new_file->f_count));
+	fput(new_file);
+
+out_fput_old:
+	vxdprintk(VXD_CBIT(misc, 3),
+		"fput(old_file=%p[#%ld])", old_file,
+		atomic_long_read(&old_file->f_count));
+	fput(old_file);
+
+out_unlock_new:
+	/* drop references from dir_nd.path */
+	path_put(&dir_nd.path);
+
+	if (drop) {
+		/* unlock the inode mutex from kern_path_create() */
+		mutex_unlock(&dir->d_inode->i_mutex);
+
+		/* drop write access to mnt */
+		mnt_drop_write(new_path->mnt);
+	}
+
+	if (!ret)
+		goto out_redo;
+
+	/* error path cleanup */
+	vfs_unlink(dir->d_inode, new_dentry, NULL);
+
+out_redo:
+	if (!redo)
+		goto out_rel_both;
+
+	/* lookup dentry once again
+	   old_nd.path will be freed as old_path in out_rel_old */
+	ret = do_path_lookup(AT_FDCWD, pathname, LOOKUP_FOLLOW, &old_nd);
+	if (ret)
+		goto out_rel_both;
+
+	/* drop reference on new_dentry */
+	dput(new_dentry);
+	new_dentry = old_path->dentry;
+	dget(new_dentry);
+	vxdprintk(VXD_CBIT(misc, 2),
+		"do_path_lookup(redo): %p [" VS_Q("%.*s") ":%d]",
+		new_dentry,
+		new_dentry->d_name.len, new_dentry->d_name.name,
+		new_dentry->d_name.len);
+
+out_rel_both:
+	if (new_path)
+		path_put(new_path);
+out_rel_old:
+	path_put(old_path);
+out_free_path:
+	kfree(path);
+out:
+	if (ret) {
+		dput(new_dentry);
+		new_dentry = ERR_PTR(ret);
+	}
+	vxdprintk(VXD_CBIT(misc, 3),
+		"cow_break_link returning with %p", new_dentry);
+	return new_dentry;
+}
+
+#endif
+
+int	vx_info_mnt_namespace(struct mnt_namespace *ns, char *buffer)
+{
+	struct path path;
+	struct vfsmount *vmnt;
+	char *pstr, *root;
+	int length = 0;
+
+	pstr = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!pstr)
+		return 0;
+
+	vmnt = &ns->root->mnt;
+	path.mnt = vmnt;
+	path.dentry = vmnt->mnt_root;
+	root = d_path(&path, pstr, PATH_MAX - 2);
+	length = sprintf(buffer + length,
+		"Namespace:\t%p [#%u]\n"
+		"RootPath:\t%s\n",
+		ns, atomic_read(&ns->count),
+		root);
+	kfree(pstr);
+	return length;
+}
+
 /* get the link contents into pagecache */
 static char *page_getlink(struct dentry * dentry, struct page **ppage)
 {
@@ -4427,3 +5051,4 @@ EXPORT_SYMBOL(vfs_symlink);
 EXPORT_SYMBOL(vfs_unlink);
 EXPORT_SYMBOL(dentry_unhash);
 EXPORT_SYMBOL(generic_readlink);
+EXPORT_SYMBOL(vx_info_mnt_namespace);
diff -ruNp linux-3.13.11/fs/namespace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/namespace.c
--- linux-3.13.11/fs/namespace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/namespace.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,11 @@
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/bootmem.h>
+#include <linux/vs_base.h>
+#include <linux/vs_context.h>
+#include <linux/vs_tag.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/global.h>
 #include "pnode.h"
 #include "internal.h"
 
@@ -839,6 +844,10 @@ vfs_kern_mount(struct file_system_type *
 	if (!type)
 		return ERR_PTR(-ENODEV);
 
+	if ((type->fs_flags & FS_BINARY_MOUNTDATA) &&
+		!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT))
+		return ERR_PTR(-EPERM);
+
 	mnt = alloc_vfsmnt(name);
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
@@ -899,6 +908,7 @@ static struct mount *clone_mnt(struct mo
 	mnt->mnt.mnt_root = dget(root);
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
+	mnt->mnt_tag = old->mnt_tag;
 	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
 	unlock_mount_hash();
@@ -1339,6 +1349,9 @@ static int do_umount(struct mount *mnt,
 		if (!(sb->s_flags & MS_RDONLY))
 			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
 		up_write(&sb->s_umount);
+
+		gr_log_remount(mnt->mnt_devname, retval);
+
 		return retval;
 	}
 
@@ -1361,6 +1374,9 @@ static int do_umount(struct mount *mnt,
 	}
 	unlock_mount_hash();
 	namespace_unlock();
+
+	gr_log_unmount(mnt->mnt_devname, retval);
+
 	return retval;
 }
 
@@ -1369,7 +1385,8 @@ static int do_umount(struct mount *mnt,
  */
 static inline bool may_mount(void)
 {
-	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
+	return vx_ns_capable(current->nsproxy->mnt_ns->user_ns,
+		CAP_SYS_ADMIN, VXC_SECURE_MOUNT);
 }
 
 /*
@@ -1380,7 +1397,7 @@ static inline bool may_mount(void)
  * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
  */
 
-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
 {
 	struct path path;
 	struct mount *mnt;
@@ -1422,7 +1439,7 @@ out:
 /*
  *	The 2.0 compatible umount. No flags.
  */
-SYSCALL_DEFINE1(oldumount, char __user *, name)
+SYSCALL_DEFINE1(oldumount, const char __user *, name)
 {
 	return sys_umount(name, 0);
 }
@@ -1785,6 +1802,7 @@ static int do_change_type(struct path *p
 		if (err)
 			goto out_unlock;
 	}
+	// mnt->mnt_flags = mnt_flags;
 
 	lock_mount_hash();
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
@@ -1813,12 +1831,14 @@ static bool has_locked_children(struct m
  * do loopback mount.
  */
 static int do_loopback(struct path *path, const char *old_name,
-				int recurse)
+	vtag_t tag, unsigned long flags, int mnt_flags)
 {
 	struct path old_path;
 	struct mount *mnt = NULL, *old, *parent;
 	struct mountpoint *mp;
+	int recurse = flags & MS_REC;
 	int err;
+
 	if (!old_name || !*old_name)
 		return -EINVAL;
 	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
@@ -1898,7 +1918,7 @@ static int change_mount_flags(struct vfs
  * on it - tough luck.
  */
 static int do_remount(struct path *path, int flags, int mnt_flags,
-		      void *data)
+	void *data, vxid_t xid)
 {
 	int err;
 	struct super_block *sb = path->mnt->mnt_sb;
@@ -2377,6 +2397,7 @@ long do_mount(const char *dev_name, cons
 	struct path path;
 	int retval = 0;
 	int mnt_flags = 0;
+	vtag_t tag = 0;
 
 	/* Discard magic */
 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
@@ -2406,6 +2427,12 @@ long do_mount(const char *dev_name, cons
 	if (!(flags & MS_NOATIME))
 		mnt_flags |= MNT_RELATIME;
 
+	if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) {
+		/* FIXME: bind and re-mounts get the tag flag? */
+		if (flags & (MS_BIND|MS_REMOUNT))
+			flags |= MS_TAGID;
+	}
+
 	/* Separate the per-mountpoint flags */
 	if (flags & MS_NOSUID)
 		mnt_flags |= MNT_NOSUID;
@@ -2422,15 +2449,27 @@ long do_mount(const char *dev_name, cons
 	if (flags & MS_RDONLY)
 		mnt_flags |= MNT_READONLY;
 
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_DEV_MOUNT))
+		mnt_flags |= MNT_NODEV;
 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
 		   MS_STRICTATIME);
 
+	if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
+		retval = -EPERM;
+		goto dput_out;
+	}
+
+	if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
+		retval = -EPERM;
+		goto dput_out;
+	}
+
 	if (flags & MS_REMOUNT)
 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
-				    data_page);
+				    data_page, tag);
 	else if (flags & MS_BIND)
-		retval = do_loopback(&path, dev_name, flags & MS_REC);
+		retval = do_loopback(&path, dev_name, tag, flags, mnt_flags);
 	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
 		retval = do_change_type(&path, flags);
 	else if (flags & MS_MOVE)
@@ -2440,6 +2479,9 @@ long do_mount(const char *dev_name, cons
 				      dev_name, data_page);
 dput_out:
 	path_put(&path);
+
+	gr_log_mount(dev_name, dir_name, retval);
+
 	return retval;
 }
 
@@ -2457,7 +2499,7 @@ static void free_mnt_ns(struct mnt_names
  * number incrementing at 10Ghz will take 12,427 years to wrap which
  * is effectively never, so we can ignore the possibility.
  */
-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
 
 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 {
@@ -2472,7 +2514,7 @@ static struct mnt_namespace *alloc_mnt_n
 		kfree(new_ns);
 		return ERR_PTR(ret);
 	}
-	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
+	new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
 	atomic_set(&new_ns->count, 1);
 	new_ns->root = NULL;
 	INIT_LIST_HEAD(&new_ns->list);
@@ -2482,7 +2524,7 @@ static struct mnt_namespace *alloc_mnt_n
 	return new_ns;
 }
 
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
 	struct mnt_namespace *new_ns;
@@ -2546,6 +2588,7 @@ struct mnt_namespace *copy_mnt_ns(unsign
 			p = next_mnt(p, old);
 	}
 	namespace_unlock();
+	atomic_inc(&vs_global_mnt_ns);
 
 	if (rootmnt)
 		mntput(rootmnt);
@@ -2603,8 +2646,8 @@ struct dentry *mount_subtree(struct vfsm
 }
 EXPORT_SYMBOL(mount_subtree);
 
-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
-		char __user *, type, unsigned long, flags, void __user *, data)
+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
+		const char __user *, type, unsigned long, flags, void __user *, data)
 {
 	int ret;
 	char *kernel_type;
@@ -2717,6 +2760,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
 	if (error)
 		goto out2;
 
+	if (gr_handle_chroot_pivot()) {
+		error = -EPERM;
+		goto out2;
+	}
+
 	get_fs_root(current->fs, &root);
 	old_mp = lock_mount(&old);
 	error = PTR_ERR(old_mp);
@@ -2727,9 +2775,10 @@ SYSCALL_DEFINE2(pivot_root, const char _
 	new_mnt = real_mount(new.mnt);
 	root_mnt = real_mount(root.mnt);
 	old_mnt = real_mount(old.mnt);
-	if (IS_MNT_SHARED(old_mnt) ||
+	if ((IS_MNT_SHARED(old_mnt) ||
 		IS_MNT_SHARED(new_mnt->mnt_parent) ||
-		IS_MNT_SHARED(root_mnt->mnt_parent))
+		IS_MNT_SHARED(root_mnt->mnt_parent)) &&
+		!vx_flags(VXF_STATE_SETUP, 0))
 		goto out4;
 	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
 		goto out4;
@@ -2859,6 +2908,7 @@ void put_mnt_ns(struct mnt_namespace *ns
 	if (!atomic_dec_and_test(&ns->count))
 		return;
 	drop_collected_mounts(&ns->root->mnt);
+	atomic_dec(&vs_global_mnt_ns);
 	free_mnt_ns(ns);
 }
 
@@ -2983,7 +3033,7 @@ static int mntns_install(struct nsproxy
 	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (fs->users != 1)
+	if (atomic_read(&fs->users) != 1)
 		return -EINVAL;
 
 	get_mnt_ns(mnt_ns);
diff -ruNp linux-3.13.11/fs/nfs/callback_xdr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/callback_xdr.c
--- linux-3.13.11/fs/nfs/callback_xdr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/callback_xdr.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ struct callback_op {
 	callback_decode_arg_t decode_args;
 	callback_encode_res_t encode_res;
 	long res_maxsize;
-};
+} __do_const;
 
 static struct callback_op callback_ops[];
 
diff -ruNp linux-3.13.11/fs/nfs/client.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/client.c
--- linux-3.13.11/fs/nfs/client.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/client.c	2014-07-09 12:00:15.000000000 +0200
@@ -685,6 +685,9 @@ int nfs_init_server_rpcclient(struct nfs
 	if (server->flags & NFS_MOUNT_SOFT)
 		server->client->cl_softrtry = 1;
 
+	server->client->cl_tag = 0;
+	if (server->flags & NFS_MOUNT_TAGGED)
+		server->client->cl_tag = 1;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
@@ -863,6 +866,10 @@ static void nfs_server_set_fsinfo(struct
 		server->acdirmin = server->acdirmax = 0;
 	}
 
+	/* FIXME: needs fsinfo
+	if (server->flags & NFS_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;	*/
+
 	server->maxfilesize = fsinfo->maxfilesize;
 
 	server->time_delta = fsinfo->time_delta;
diff -ruNp linux-3.13.11/fs/nfs/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/dir.c
--- linux-3.13.11/fs/nfs/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,7 @@
 #include <linux/sched.h>
 #include <linux/kmemleak.h>
 #include <linux/xattr.h>
+#include <linux/vs_tag.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -1319,6 +1320,7 @@ struct dentry *nfs_lookup(struct inode *
 	/* Success: notify readdir to use READDIRPLUS */
 	nfs_advise_use_readdirplus(dir);
 
+	dx_propagate_tag(nd, inode);
 no_entry:
 	res = d_materialise_unique(dentry, inode);
 	if (res != NULL) {
diff -ruNp linux-3.13.11/fs/nfs/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/inode.c
--- linux-3.13.11/fs/nfs/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/compat.h>
 #include <linux/freezer.h>
+#include <linux/vs_tag.h>
 
 #include <asm/uaccess.h>
 
@@ -359,6 +360,8 @@ nfs_fhget(struct super_block *sb, struct
 	if (inode->i_state & I_NEW) {
 		struct nfs_inode *nfsi = NFS_I(inode);
 		unsigned long now = jiffies;
+		kuid_t kuid;
+		kgid_t kgid;
 
 		/* We set i_ino for the few things that still rely on it,
 		 * such as stat(2) */
@@ -403,8 +406,8 @@ nfs_fhget(struct super_block *sb, struct
 		inode->i_version = 0;
 		inode->i_size = 0;
 		clear_nlink(inode);
-		inode->i_uid = make_kuid(&init_user_ns, -2);
-		inode->i_gid = make_kgid(&init_user_ns, -2);
+		kuid = make_kuid(&init_user_ns, -2);
+		kgid = make_kgid(&init_user_ns, -2);
 		inode->i_blocks = 0;
 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
 		nfsi->write_io = 0;
@@ -438,11 +441,11 @@ nfs_fhget(struct super_block *sb, struct
 		else if (nfs_server_capable(inode, NFS_CAP_NLINK))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_OWNER)
-			inode->i_uid = fattr->uid;
+			kuid = fattr->uid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_GROUP)
-			inode->i_gid = fattr->gid;
+			kgid = fattr->gid;
 		else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
 		if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
@@ -453,6 +456,10 @@ nfs_fhget(struct super_block *sb, struct
 			 */
 			inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
 		}
+		inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid);
+		inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid);
+		inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, GLOBAL_ROOT_TAG);
+				/* maybe fattr->xid someday */
 
 		nfs_setsecurity(inode, fattr, label);
 
@@ -578,6 +585,8 @@ void nfs_setattr_update_inode(struct ino
 			inode->i_uid = attr->ia_uid;
 		if ((attr->ia_valid & ATTR_GID) != 0)
 			inode->i_gid = attr->ia_gid;
+		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+			inode->i_tag = attr->ia_tag;
 		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
 		spin_unlock(&inode->i_lock);
 	}
@@ -1092,7 +1101,9 @@ static int nfs_check_inode_attributes(st
 	struct nfs_inode *nfsi = NFS_I(inode);
 	loff_t cur_size, new_isize;
 	unsigned long invalid = 0;
-
+	kuid_t kuid;
+	kgid_t kgid;
+	ktag_t ktag;
 
 	if (nfs_have_delegated_attributes(inode))
 		return 0;
@@ -1117,13 +1128,18 @@ static int nfs_check_inode_attributes(st
 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 	}
 
+	kuid = INOTAG_KUID(DX_TAG(inode), fattr->uid, fattr->gid);
+	kgid = INOTAG_KGID(DX_TAG(inode), fattr->uid, fattr->gid);
+	ktag = INOTAG_KTAG(DX_TAG(inode), fattr->uid, fattr->gid, GLOBAL_ROOT_TAG);
+
 	/* Have any file permissions changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
-	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
+	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, kuid))
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
-	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
+	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, kgid))
 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+		/* maybe check for tag too? */
 
 	/* Has the link count changed? */
 	if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
@@ -1153,16 +1169,16 @@ static int nfs_size_need_update(const st
 	return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
 }
 
-static atomic_long_t nfs_attr_generation_counter;
+static atomic_long_unchecked_t nfs_attr_generation_counter;
 
 static unsigned long nfs_read_attr_generation_counter(void)
 {
-	return atomic_long_read(&nfs_attr_generation_counter);
+	return atomic_long_read_unchecked(&nfs_attr_generation_counter);
 }
 
 unsigned long nfs_inc_attr_generation_counter(void)
 {
-	return atomic_long_inc_return(&nfs_attr_generation_counter);
+	return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
 }
 
 void nfs_fattr_init(struct nfs_fattr *fattr)
@@ -1440,6 +1456,9 @@ static int nfs_update_inode(struct inode
 	unsigned long invalid = 0;
 	unsigned long now = jiffies;
 	unsigned long save_cache_validity;
+	kuid_t kuid;
+	kgid_t kgid;
+	ktag_t ktag;
 
 	dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n",
 			__func__, inode->i_sb->s_id, inode->i_ino,
@@ -1541,6 +1560,9 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_REVAL_PAGECACHE
 				| NFS_INO_REVAL_FORCED);
 
+	kuid = TAGINO_KUID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+	kgid = TAGINO_KGID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+	ktag = TAGINO_KTAG(DX_TAG(inode), inode->i_tag);
 
 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
@@ -1583,6 +1605,10 @@ static int nfs_update_inode(struct inode
 				| NFS_INO_INVALID_ACL
 				| NFS_INO_REVAL_FORCED);
 
+	inode->i_uid = INOTAG_KUID(DX_TAG(inode), kuid, kgid);
+	inode->i_gid = INOTAG_KGID(DX_TAG(inode), kuid, kgid);
+	inode->i_tag = INOTAG_KTAG(DX_TAG(inode), kuid, kgid, ktag);
+
 	if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
 		if (inode->i_nlink != fattr->nlink) {
 			invalid |= NFS_INO_INVALID_ATTR;
diff -ruNp linux-3.13.11/fs/nfs/nfs3xdr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/nfs3xdr.c
--- linux-3.13.11/fs/nfs/nfs3xdr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/nfs3xdr.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,7 @@
 #include <linux/nfs3.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfsacl.h>
+#include <linux/vs_tag.h>
 #include "internal.h"
 
 #define NFSDBG_FACILITY		NFSDBG_XDR
@@ -558,7 +559,8 @@ static __be32 *xdr_decode_nfstime3(__be3
  *		set_mtime	mtime;
  *	};
  */
-static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+static void encode_sattr3(struct xdr_stream *xdr,
+	const struct iattr *attr, int tag)
 {
 	u32 nbytes;
 	__be32 *p;
@@ -590,15 +592,19 @@ static void encode_sattr3(struct xdr_str
 	} else
 		*p++ = xdr_zero;
 
-	if (attr->ia_valid & ATTR_UID) {
+	if (attr->ia_valid & ATTR_UID ||
+		(tag && (attr->ia_valid & ATTR_TAG))) {
 		*p++ = xdr_one;
-		*p++ = cpu_to_be32(from_kuid(&init_user_ns, attr->ia_uid));
+		*p++ = cpu_to_be32(from_kuid(&init_user_ns,
+			TAGINO_KUID(tag, attr->ia_uid, attr->ia_tag)));
 	} else
 		*p++ = xdr_zero;
 
-	if (attr->ia_valid & ATTR_GID) {
+	if (attr->ia_valid & ATTR_GID ||
+		(tag && (attr->ia_valid & ATTR_TAG))) {
 		*p++ = xdr_one;
-		*p++ = cpu_to_be32(from_kgid(&init_user_ns, attr->ia_gid));
+		*p++ = cpu_to_be32(from_kgid(&init_user_ns,
+			TAGINO_KGID(tag, attr->ia_gid, attr->ia_tag)));
 	} else
 		*p++ = xdr_zero;
 
@@ -887,7 +893,7 @@ static void nfs3_xdr_enc_setattr3args(st
 				      const struct nfs3_sattrargs *args)
 {
 	encode_nfs_fh3(xdr, args->fh);
-	encode_sattr3(xdr, args->sattr);
+	encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag);
 	encode_sattrguard3(xdr, args);
 }
 
@@ -1037,13 +1043,13 @@ static void nfs3_xdr_enc_write3args(stru
  *	};
  */
 static void encode_createhow3(struct xdr_stream *xdr,
-			      const struct nfs3_createargs *args)
+	const struct nfs3_createargs *args, int tag)
 {
 	encode_uint32(xdr, args->createmode);
 	switch (args->createmode) {
 	case NFS3_CREATE_UNCHECKED:
 	case NFS3_CREATE_GUARDED:
-		encode_sattr3(xdr, args->sattr);
+		encode_sattr3(xdr, args->sattr, tag);
 		break;
 	case NFS3_CREATE_EXCLUSIVE:
 		encode_createverf3(xdr, args->verifier);
@@ -1058,7 +1064,7 @@ static void nfs3_xdr_enc_create3args(str
 				     const struct nfs3_createargs *args)
 {
 	encode_diropargs3(xdr, args->fh, args->name, args->len);
-	encode_createhow3(xdr, args);
+	encode_createhow3(xdr, args, req->rq_task->tk_client->cl_tag);
 }
 
 /*
@@ -1074,7 +1080,7 @@ static void nfs3_xdr_enc_mkdir3args(stru
 				    const struct nfs3_mkdirargs *args)
 {
 	encode_diropargs3(xdr, args->fh, args->name, args->len);
-	encode_sattr3(xdr, args->sattr);
+	encode_sattr3(xdr, args->sattr, req->rq_task->tk_client->cl_tag);
 }
 
 /*
@@ -1091,9 +1097,9 @@ static void nfs3_xdr_enc_mkdir3args(stru
  *	};
  */
 static void encode_symlinkdata3(struct xdr_stream *xdr,
-				const struct nfs3_symlinkargs *args)
+	const struct nfs3_symlinkargs *args, int tag)
 {
-	encode_sattr3(xdr, args->sattr);
+	encode_sattr3(xdr, args->sattr, tag);
 	encode_nfspath3(xdr, args->pages, args->pathlen);
 }
 
@@ -1102,7 +1108,7 @@ static void nfs3_xdr_enc_symlink3args(st
 				      const struct nfs3_symlinkargs *args)
 {
 	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
-	encode_symlinkdata3(xdr, args);
+	encode_symlinkdata3(xdr, args, req->rq_task->tk_client->cl_tag);
 }
 
 /*
@@ -1130,24 +1136,24 @@ static void nfs3_xdr_enc_symlink3args(st
  *	};
  */
 static void encode_devicedata3(struct xdr_stream *xdr,
-			       const struct nfs3_mknodargs *args)
+	const struct nfs3_mknodargs *args, int tag)
 {
-	encode_sattr3(xdr, args->sattr);
+	encode_sattr3(xdr, args->sattr, tag);
 	encode_specdata3(xdr, args->rdev);
 }
 
 static void encode_mknoddata3(struct xdr_stream *xdr,
-			      const struct nfs3_mknodargs *args)
+	const struct nfs3_mknodargs *args, int tag)
 {
 	encode_ftype3(xdr, args->type);
 	switch (args->type) {
 	case NF3CHR:
 	case NF3BLK:
-		encode_devicedata3(xdr, args);
+		encode_devicedata3(xdr, args, tag);
 		break;
 	case NF3SOCK:
 	case NF3FIFO:
-		encode_sattr3(xdr, args->sattr);
+		encode_sattr3(xdr, args->sattr, tag);
 		break;
 	case NF3REG:
 	case NF3DIR:
@@ -1162,7 +1168,7 @@ static void nfs3_xdr_enc_mknod3args(stru
 				    const struct nfs3_mknodargs *args)
 {
 	encode_diropargs3(xdr, args->fh, args->name, args->len);
-	encode_mknoddata3(xdr, args);
+	encode_mknoddata3(xdr, args, req->rq_task->tk_client->cl_tag);
 }
 
 /*
diff -ruNp linux-3.13.11/fs/nfs/nfs4proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/nfs4proc.c
--- linux-3.13.11/fs/nfs/nfs4proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/nfs4proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1070,6 +1070,7 @@ static void nfs4_opendata_free(struct kr
 	dput(p->dentry);
 	nfs_sb_deactive(sb);
 	nfs_fattr_free_names(&p->f_attr);
+	kfree(p->f_attr.mdsthreshold);
 	kfree(p);
 }
 
@@ -2246,10 +2247,12 @@ static int _nfs4_do_open(struct inode *d
 		}
 	}
 
-	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
-		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
-		if (!opendata->f_attr.mdsthreshold)
-			goto err_free_label;
+	if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+		if (!opendata->f_attr.mdsthreshold) {
+			opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+			if (!opendata->f_attr.mdsthreshold)
+				goto err_free_label;
+		}
 		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
 	}
 	if (dentry->d_inode != NULL)
@@ -2277,11 +2280,10 @@ static int _nfs4_do_open(struct inode *d
 	if (opendata->file_created)
 		*opened |= FILE_CREATED;
 
-	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
 		*ctx_th = opendata->f_attr.mdsthreshold;
-	else
-		kfree(opendata->f_attr.mdsthreshold);
-	opendata->f_attr.mdsthreshold = NULL;
+		opendata->f_attr.mdsthreshold = NULL;
+	}
 
 	nfs4_label_free(olabel);
 
@@ -2291,7 +2293,6 @@ static int _nfs4_do_open(struct inode *d
 err_free_label:
 	nfs4_label_free(olabel);
 err_opendata_put:
-	kfree(opendata->f_attr.mdsthreshold);
 	nfs4_opendata_put(opendata);
 err_put_state_owner:
 	nfs4_put_state_owner(sp);
diff -ruNp linux-3.13.11/fs/nfs/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/super.c
--- linux-3.13.11/fs/nfs/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfs/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -55,6 +55,7 @@
 #include <linux/parser.h>
 #include <linux/nsproxy.h>
 #include <linux/rcupdate.h>
+#include <linux/vs_tag.h>
 
 #include <asm/uaccess.h>
 
@@ -103,6 +104,7 @@ enum {
 	Opt_mountport,
 	Opt_mountvers,
 	Opt_minorversion,
+	Opt_tagid,
 
 	/* Mount options that take string arguments */
 	Opt_nfsvers,
@@ -115,6 +117,9 @@ enum {
 	/* Special mount options */
 	Opt_userspace, Opt_deprecated, Opt_sloppy,
 
+	/* Linux-VServer tagging options */
+	Opt_tag, Opt_notag,
+
 	Opt_err
 };
 
@@ -184,6 +189,10 @@ static const match_table_t nfs_mount_opt
 	{ Opt_fscache_uniq, "fsc=%s" },
 	{ Opt_local_lock, "local_lock=%s" },
 
+	{ Opt_tag, "tag" },
+	{ Opt_notag, "notag" },
+	{ Opt_tagid, "tagid=%u" },
+
 	/* The following needs to be listed after all other options */
 	{ Opt_nfsvers, "v%s" },
 
@@ -638,6 +647,7 @@ static void nfs_show_mount_options(struc
 		{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
 		{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
 		{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+		{ NFS_MOUNT_TAGGED, ",tag", "" },
 		{ 0, NULL, NULL }
 	};
 	const struct proc_nfs_info *nfs_infop;
@@ -1321,6 +1331,14 @@ static int nfs_parse_mount_options(char
 		case Opt_nomigration:
 			mnt->options &= NFS_OPTION_MIGRATION;
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			mnt->flags |= NFS_MOUNT_TAGGED;
+			break;
+		case Opt_notag:
+			mnt->flags &= ~NFS_MOUNT_TAGGED;
+			break;
+#endif
 
 		/*
 		 * options that take numeric values
@@ -1407,6 +1425,12 @@ static int nfs_parse_mount_options(char
 				goto out_invalid_value;
 			mnt->minorversion = option;
 			break;
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			nfs_data.flags |= NFS_MOUNT_TAGGED;
+			break;
+#endif
 
 		/*
 		 * options that take text values
diff -ruNp linux-3.13.11/fs/nfsd/auth.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/auth.c
--- linux-3.13.11/fs/nfsd/auth.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/auth.c	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,7 @@
 
 #include <linux/sched.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_tag.h>
 #include "nfsd.h"
 #include "auth.h"
 
@@ -37,6 +38,9 @@ int nfsd_setuser(struct svc_rqst *rqstp,
 
 	new->fsuid = rqstp->rq_cred.cr_uid;
 	new->fsgid = rqstp->rq_cred.cr_gid;
+	/* FIXME: this desperately needs a tag :)
+	new->xid = (vxid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0);
+			*/
 
 	rqgi = rqstp->rq_cred.cr_group_info;
 
diff -ruNp linux-3.13.11/fs/nfsd/nfs3xdr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs3xdr.c
--- linux-3.13.11/fs/nfsd/nfs3xdr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs3xdr.c	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,7 @@
 
 #include <linux/namei.h>
 #include <linux/sunrpc/svc_xprt.h>
+#include <linux/vs_tag.h>
 #include "xdr3.h"
 #include "auth.h"
 #include "netns.h"
@@ -98,6 +99,8 @@ static __be32 *
 decode_sattr3(__be32 *p, struct iattr *iap)
 {
 	u32	tmp;
+	kuid_t	kuid = GLOBAL_ROOT_UID;
+	kgid_t	kgid = GLOBAL_ROOT_GID;
 
 	iap->ia_valid = 0;
 
@@ -106,15 +109,18 @@ decode_sattr3(__be32 *p, struct iattr *i
 		iap->ia_mode = ntohl(*p++);
 	}
 	if (*p++) {
-		iap->ia_uid = make_kuid(&init_user_ns, ntohl(*p++));
+		kuid = make_kuid(&init_user_ns, ntohl(*p++));
 		if (uid_valid(iap->ia_uid))
 			iap->ia_valid |= ATTR_UID;
 	}
 	if (*p++) {
-		iap->ia_gid = make_kgid(&init_user_ns, ntohl(*p++));
+		kgid = make_kgid(&init_user_ns, ntohl(*p++));
 		if (gid_valid(iap->ia_gid))
 			iap->ia_valid |= ATTR_GID;
 	}
+	iap->ia_uid = INOTAG_KUID(DX_TAG_NFSD, kuid, kgid);
+	iap->ia_gid = INOTAG_KGID(DX_TAG_NFSD, kuid, kgid);
+	iap->ia_tag = INOTAG_KTAG(DX_TAG_NFSD, kuid, kgid, GLOBAL_ROOT_TAG);
 	if (*p++) {
 		u64	newsize;
 
@@ -170,8 +176,12 @@ encode_fattr3(struct svc_rqst *rqstp, __
 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
 	*p++ = htonl((u32) stat->mode);
 	*p++ = htonl((u32) stat->nlink);
-	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
-	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
+	*p++ = htonl((u32) from_kuid(&init_user_ns,
+		TAGINO_KUID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+		stat->uid, stat->tag)));
+	*p++ = htonl((u32) from_kgid(&init_user_ns,
+		TAGINO_KGID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+		stat->gid, stat->tag)));
 	if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
 		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
 	} else {
diff -ruNp linux-3.13.11/fs/nfsd/nfs4proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs4proc.c
--- linux-3.13.11/fs/nfsd/nfs4proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs4proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1168,7 +1168,7 @@ struct nfsd4_operation {
 	nfsd4op_rsize op_rsize_bop;
 	stateid_getter op_get_currentstateid;
 	stateid_setter op_set_currentstateid;
-};
+} __do_const;
 
 static struct nfsd4_operation nfsd4_ops[];
 
diff -ruNp linux-3.13.11/fs/nfsd/nfs4xdr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs4xdr.c
--- linux-3.13.11/fs/nfsd/nfs4xdr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfs4xdr.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,6 +46,7 @@
 #include <linux/utsname.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/svcauth_gss.h>
+#include <linux/vs_tag.h>
 
 #include "idmap.h"
 #include "acl.h"
@@ -1523,7 +1524,7 @@ nfsd4_decode_notsupp(struct nfsd4_compou
 
 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
 
-static nfsd4_dec nfsd4_dec_ops[] = {
+static const nfsd4_dec nfsd4_dec_ops[] = {
 	[OP_ACCESS]		= (nfsd4_dec)nfsd4_decode_access,
 	[OP_CLOSE]		= (nfsd4_dec)nfsd4_decode_close,
 	[OP_COMMIT]		= (nfsd4_dec)nfsd4_decode_commit,
@@ -2430,14 +2431,18 @@ out_acl:
 		WRITE32(stat.nlink);
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER) {
-		status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
+		status = nfsd4_encode_user(rqstp,
+			TAGINO_KUID(DX_TAG(dentry->d_inode),
+			stat.uid, stat.tag), &p, &buflen);
 		if (status == nfserr_resource)
 			goto out_resource;
 		if (status)
 			goto out;
 	}
 	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
-		status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
+		status = nfsd4_encode_group(rqstp,
+			TAGINO_KGID(DX_TAG(dentry->d_inode),
+			stat.gid, stat.tag), &p, &buflen);
 		if (status == nfserr_resource)
 			goto out_resource;
 		if (status)
diff -ruNp linux-3.13.11/fs/nfsd/nfscache.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfscache.c
--- linux-3.13.11/fs/nfsd/nfscache.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfscache.c	2014-07-09 12:00:15.000000000 +0200
@@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp
 {
 	struct svc_cacherep *rp = rqstp->rq_cacherep;
 	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
-	int		len;
+	long		len;
 	size_t		bufsize = 0;
 
 	if (!rp)
 		return;
 
-	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
-	len >>= 2;
+	if (statp) {
+		len = (char*)statp - (char*)resv->iov_base;
+		len = resv->iov_len - len;
+		len >>= 2;
+	}
 
 	/* Don't cache excessive amounts of data and XDR failures */
 	if (!statp || len > (256 >> 2)) {
diff -ruNp linux-3.13.11/fs/nfsd/nfsxdr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfsxdr.c
--- linux-3.13.11/fs/nfsd/nfsxdr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/nfsxdr.c	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,7 @@
 #include "vfs.h"
 #include "xdr.h"
 #include "auth.h"
+#include <linux/vs_tag.h>
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
@@ -89,6 +90,8 @@ static __be32 *
 decode_sattr(__be32 *p, struct iattr *iap)
 {
 	u32	tmp, tmp1;
+	kuid_t	kuid = GLOBAL_ROOT_UID;
+	kgid_t	kgid = GLOBAL_ROOT_GID;
 
 	iap->ia_valid = 0;
 
@@ -101,15 +104,18 @@ decode_sattr(__be32 *p, struct iattr *ia
 		iap->ia_mode = tmp;
 	}
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
-		iap->ia_uid = make_kuid(&init_user_ns, tmp);
+		kuid = make_kuid(&init_user_ns, tmp);
 		if (uid_valid(iap->ia_uid))
 			iap->ia_valid |= ATTR_UID;
 	}
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
-		iap->ia_gid = make_kgid(&init_user_ns, tmp);
+		kgid = make_kgid(&init_user_ns, tmp);
 		if (gid_valid(iap->ia_gid))
 			iap->ia_valid |= ATTR_GID;
 	}
+	iap->ia_uid = INOTAG_KUID(DX_TAG_NFSD, kuid, kgid);
+	iap->ia_gid = INOTAG_KGID(DX_TAG_NFSD, kuid, kgid);
+	iap->ia_tag = INOTAG_KTAG(DX_TAG_NFSD, kuid, kgid, GLOBAL_ROOT_TAG);
 	if ((tmp = ntohl(*p++)) != (u32)-1) {
 		iap->ia_valid |= ATTR_SIZE;
 		iap->ia_size = tmp;
@@ -154,8 +160,10 @@ encode_fattr(struct svc_rqst *rqstp, __b
 	*p++ = htonl(nfs_ftypes[type >> 12]);
 	*p++ = htonl((u32) stat->mode);
 	*p++ = htonl((u32) stat->nlink);
-	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
-	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
+	*p++ = htonl((u32) from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(dentry->d_inode), stat->uid, stat->tag)));
+	*p++ = htonl((u32) from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(dentry->d_inode), stat->gid, stat->tag)));
 
 	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
 		*p++ = htonl(NFS_MAXPATHLEN);
diff -ruNp linux-3.13.11/fs/nfsd/vfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/vfs.c
--- linux-3.13.11/fs/nfsd/vfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nfsd/vfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
 	} else {
 		oldfs = get_fs();
 		set_fs(KERNEL_DS);
-		host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
+		host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
 		set_fs(oldfs);
 	}
 
@@ -1084,7 +1084,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
 
 	/* Write the data. */
 	oldfs = get_fs(); set_fs(KERNEL_DS);
-	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
+	host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
 	set_fs(oldfs);
 	if (host_err < 0)
 		goto out_nfserr;
@@ -1629,7 +1629,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
 	 */
 
 	oldfs = get_fs(); set_fs(KERNEL_DS);
-	host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
+	host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
 	set_fs(oldfs);
 
 	if (host_err < 0)
diff -ruNp linux-3.13.11/fs/nls/nls_base.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_base.c
--- linux-3.13.11/fs/nls/nls_base.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_base.c	2014-07-09 12:00:15.000000000 +0200
@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
 
 int register_nls(struct nls_table * nls)
 {
-	struct nls_table ** tmp = &tables;
+	struct nls_table *tmp = tables;
 
 	if (nls->next)
 		return -EBUSY;
 
 	spin_lock(&nls_lock);
-	while (*tmp) {
-		if (nls == *tmp) {
+	while (tmp) {
+		if (nls == tmp) {
 			spin_unlock(&nls_lock);
 			return -EBUSY;
 		}
-		tmp = &(*tmp)->next;
+		tmp = tmp->next;
 	}
-	nls->next = tables;
+	pax_open_kernel();
+	*(struct nls_table **)&nls->next = tables;
+	pax_close_kernel();
 	tables = nls;
 	spin_unlock(&nls_lock);
 	return 0;	
@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
 
 int unregister_nls(struct nls_table * nls)
 {
-	struct nls_table ** tmp = &tables;
+	struct nls_table * const * tmp = &tables;
 
 	spin_lock(&nls_lock);
 	while (*tmp) {
 		if (nls == *tmp) {
-			*tmp = nls->next;
+			pax_open_kernel();
+			*(struct nls_table **)tmp = nls->next;
+			pax_close_kernel();
 			spin_unlock(&nls_lock);
 			return 0;
 		}
diff -ruNp linux-3.13.11/fs/nls/nls_euc-jp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_euc-jp.c
--- linux-3.13.11/fs/nls/nls_euc-jp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_euc-jp.c	2014-07-09 12:00:15.000000000 +0200
@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
 	p_nls = load_nls("cp932");
 
 	if (p_nls) {
-		table.charset2upper = p_nls->charset2upper;
-		table.charset2lower = p_nls->charset2lower;
+		pax_open_kernel();
+		*(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+		*(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+		pax_close_kernel();
 		return register_nls(&table);
 	}
 
diff -ruNp linux-3.13.11/fs/nls/nls_koi8-ru.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_koi8-ru.c
--- linux-3.13.11/fs/nls/nls_koi8-ru.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/nls/nls_koi8-ru.c	2014-07-09 12:00:15.000000000 +0200
@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
 	p_nls = load_nls("koi8-u");
 
 	if (p_nls) {
-		table.charset2upper = p_nls->charset2upper;
-		table.charset2lower = p_nls->charset2lower;
+		pax_open_kernel();
+		*(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
+		*(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
+		pax_close_kernel();
 		return register_nls(&table);
 	}
 
diff -ruNp linux-3.13.11/fs/notify/fanotify/fanotify_user.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/notify/fanotify/fanotify_user.c
--- linux-3.13.11/fs/notify/fanotify/fanotify_user.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/notify/fanotify/fanotify_user.c	2014-07-09 12:00:15.000000000 +0200
@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct
 
 	fd = fanotify_event_metadata.fd;
 	ret = -EFAULT;
-	if (copy_to_user(buf, &fanotify_event_metadata,
-			 fanotify_event_metadata.event_len))
+	if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
+	    copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
 		goto out_close_fd;
 
 	ret = prepare_for_access_response(group, event, fd);
diff -ruNp linux-3.13.11/fs/notify/notification.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/notify/notification.c
--- linux-3.13.11/fs/notify/notification.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/notify/notification.c	2014-07-09 12:00:15.000000000 +0200
@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
  * get set to 0 so it will never get 'freed'
  */
 static struct fsnotify_event *q_overflow_event;
-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
 
 /**
  * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
  */
 u32 fsnotify_get_cookie(void)
 {
-	return atomic_inc_return(&fsnotify_sync_cookie);
+	return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
 }
 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
 
diff -ruNp linux-3.13.11/fs/ntfs/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/dir.c
--- linux-3.13.11/fs/ntfs/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -1310,7 +1310,7 @@ find_next_index_buffer:
 	ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
 			~(s64)(ndir->itype.index.block_size - 1)));
 	/* Bounds checks. */
-	if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+	if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 				"inode 0x%lx or driver bug.", vdir->i_ino);
 		goto err_out;
diff -ruNp linux-3.13.11/fs/ntfs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/file.c
--- linux-3.13.11/fs/ntfs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user
 	char *addr;
 	size_t total = 0;
 	unsigned len;
-	int left;
+	unsigned left;
 
 	do {
 		len = PAGE_CACHE_SIZE - ofs;
diff -ruNp linux-3.13.11/fs/ntfs/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/super.c
--- linux-3.13.11/fs/ntfs/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ntfs/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boo
 		if (!silent)
 			ntfs_error(sb, "Primary boot sector is invalid.");
 	} else if (!silent)
-		ntfs_error(sb, read_err_str, "primary");
+		ntfs_error(sb, read_err_str, "%s", "primary");
 	if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
 		if (bh_primary)
 			brelse(bh_primary);
@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boo
 			goto hotfix_primary_boot_sector;
 		brelse(bh_backup);
 	} else if (!silent)
-		ntfs_error(sb, read_err_str, "backup");
+		ntfs_error(sb, read_err_str, "%s", "backup");
 	/* Try to read NT3.51- backup boot sector. */
 	if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
 		if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boo
 					"sector.");
 		brelse(bh_backup);
 	} else if (!silent)
-		ntfs_error(sb, read_err_str, "backup");
+		ntfs_error(sb, read_err_str, "%s", "backup");
 	/* We failed. Cleanup and return. */
 	if (bh_primary)
 		brelse(bh_primary);
diff -ruNp linux-3.13.11/fs/ocfs2/dlmglue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/dlmglue.c
--- linux-3.13.11/fs/ocfs2/dlmglue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/dlmglue.c	2014-07-09 12:00:15.000000000 +0200
@@ -2047,6 +2047,7 @@ static void __ocfs2_stuff_meta_lvb(struc
 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
 	lvb->lvb_iuid      = cpu_to_be32(i_uid_read(inode));
 	lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
+	lvb->lvb_itag      = cpu_to_be16(i_tag_read(inode));
 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
 	lvb->lvb_iatime_packed  =
@@ -2097,6 +2098,7 @@ static void ocfs2_refresh_inode_from_lvb
 
 	i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
 	i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
+	i_tag_write(inode, be16_to_cpu(lvb->lvb_itag));
 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
 	set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
 	ocfs2_unpack_timespec(&inode->i_atime,
diff -ruNp linux-3.13.11/fs/ocfs2/dlmglue.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/dlmglue.h
--- linux-3.13.11/fs/ocfs2/dlmglue.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/dlmglue.h	2014-07-09 12:00:15.000000000 +0200
@@ -46,7 +46,8 @@ struct ocfs2_meta_lvb {
 	__be16       lvb_inlink;
 	__be32       lvb_iattr;
 	__be32       lvb_igeneration;
-	__be32       lvb_reserved2;
+	__be16       lvb_itag;
+	__be16       lvb_reserved2;
 };
 
 #define OCFS2_QINFO_LVB_VERSION 1
diff -ruNp linux-3.13.11/fs/ocfs2/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/file.c
--- linux-3.13.11/fs/ocfs2/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -1119,7 +1119,7 @@ int ocfs2_setattr(struct dentry *dentry,
 		attr->ia_valid &= ~ATTR_SIZE;
 
 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
-			   | ATTR_GID | ATTR_UID | ATTR_MODE)
+			   | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE)
 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
 		return 0;
 
diff -ruNp linux-3.13.11/fs/ocfs2/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/inode.c
--- linux-3.13.11/fs/ocfs2/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,6 +28,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #include <asm/byteorder.h>
 
@@ -78,11 +79,13 @@ void ocfs2_set_inode_flags(struct inode
 {
 	unsigned int flags = OCFS2_I(inode)->ip_attr;
 
-	inode->i_flags &= ~(S_IMMUTABLE |
+	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
 		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
 
 	if (flags & OCFS2_IMMUTABLE_FL)
 		inode->i_flags |= S_IMMUTABLE;
+	if (flags & OCFS2_IXUNLINK_FL)
+		inode->i_flags |= S_IXUNLINK;
 
 	if (flags & OCFS2_SYNC_FL)
 		inode->i_flags |= S_SYNC;
@@ -92,25 +95,44 @@ void ocfs2_set_inode_flags(struct inode
 		inode->i_flags |= S_NOATIME;
 	if (flags & OCFS2_DIRSYNC_FL)
 		inode->i_flags |= S_DIRSYNC;
+
+	inode->i_vflags &= ~(V_BARRIER | V_COW);
+
+	if (flags & OCFS2_BARRIER_FL)
+		inode->i_vflags |= V_BARRIER;
+	if (flags & OCFS2_COW_FL)
+		inode->i_vflags |= V_COW;
 }
 
 /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
 void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
 {
 	unsigned int flags = oi->vfs_inode.i_flags;
+	unsigned int vflags = oi->vfs_inode.i_vflags;
+
+	oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL |
+			OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL |
+			OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL |
+			OCFS2_BARRIER_FL | OCFS2_COW_FL);
+
+	if (flags & S_IMMUTABLE)
+		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
+	if (flags & S_IXUNLINK)
+		oi->ip_attr |= OCFS2_IXUNLINK_FL;
 
-	oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL|
-			OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL);
 	if (flags & S_SYNC)
 		oi->ip_attr |= OCFS2_SYNC_FL;
 	if (flags & S_APPEND)
 		oi->ip_attr |= OCFS2_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
 	if (flags & S_NOATIME)
 		oi->ip_attr |= OCFS2_NOATIME_FL;
 	if (flags & S_DIRSYNC)
 		oi->ip_attr |= OCFS2_DIRSYNC_FL;
+
+	if (vflags & V_BARRIER)
+		oi->ip_attr |= OCFS2_BARRIER_FL;
+	if (vflags & V_COW)
+		oi->ip_attr |= OCFS2_COW_FL;
 }
 
 struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
@@ -241,6 +263,8 @@ void ocfs2_populate_inode(struct inode *
 	struct super_block *sb;
 	struct ocfs2_super *osb;
 	int use_plocks = 1;
+	uid_t uid;
+	gid_t gid;
 
 	sb = inode->i_sb;
 	osb = OCFS2_SB(sb);
@@ -269,8 +293,12 @@ void ocfs2_populate_inode(struct inode *
 	inode->i_generation = le32_to_cpu(fe->i_generation);
 	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
 	inode->i_mode = le16_to_cpu(fe->i_mode);
-	i_uid_write(inode, le32_to_cpu(fe->i_uid));
-	i_gid_write(inode, le32_to_cpu(fe->i_gid));
+	uid = le32_to_cpu(fe->i_uid);
+	gid = le32_to_cpu(fe->i_gid);
+	i_uid_write(inode, INOTAG_UID(DX_TAG(inode), uid, gid));
+	i_gid_write(inode, INOTAG_GID(DX_TAG(inode), uid, gid));
+	i_tag_write(inode, INOTAG_TAG(DX_TAG(inode), uid, gid,
+		/* le16_to_cpu(raw_inode->i_raw_tag) */ 0));
 
 	/* Fast symlinks will have i_size but no allocated clusters. */
 	if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
diff -ruNp linux-3.13.11/fs/ocfs2/inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/inode.h
--- linux-3.13.11/fs/ocfs2/inode.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -152,6 +152,7 @@ struct buffer_head *ocfs2_bread(struct i
 
 void ocfs2_set_inode_flags(struct inode *inode);
 void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
+int ocfs2_sync_flags(struct inode *inode, int, int);
 
 static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
 {
diff -ruNp linux-3.13.11/fs/ocfs2/ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ioctl.c
--- linux-3.13.11/fs/ocfs2/ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -76,7 +76,41 @@ static int ocfs2_get_inode_attr(struct i
 	return status;
 }
 
-static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+int ocfs2_sync_flags(struct inode *inode, int flags, int vflags)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct buffer_head *bh = NULL;
+	handle_t *handle = NULL;
+	int status;
+
+	status = ocfs2_inode_lock(inode, &bh, 1);
+	if (status < 0) {
+		mlog_errno(status);
+		return status;
+	}
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		status = PTR_ERR(handle);
+		mlog_errno(status);
+		goto bail_unlock;
+	}
+
+	inode->i_flags = flags;
+	inode->i_vflags = vflags;
+	ocfs2_get_inode_flags(OCFS2_I(inode));
+
+	status = ocfs2_mark_inode_dirty(handle, inode, bh);
+	if (status < 0)
+		mlog_errno(status);
+
+	ocfs2_commit_trans(osb, handle);
+bail_unlock:
+	ocfs2_inode_unlock(inode, 1);
+	brelse(bh);
+	return status;
+}
+
+int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
 				unsigned mask)
 {
 	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
@@ -116,6 +150,11 @@ static int ocfs2_set_inode_attr(struct i
 			goto bail_unlock;
 	}
 
+	if (IS_BARRIER(inode)) {
+		vxwprintk_task(1, "messing with the barrier.");
+		goto bail_unlock;
+	}
+
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		status = PTR_ERR(handle);
@@ -881,6 +920,7 @@ bail:
 	return status;
 }
 
+
 long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
diff -ruNp linux-3.13.11/fs/ocfs2/localalloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/localalloc.c
--- linux-3.13.11/fs/ocfs2/localalloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/localalloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_windo
 		goto bail;
 	}
 
-	atomic_inc(&osb->alloc_stats.moves);
+	atomic_inc_unchecked(&osb->alloc_stats.moves);
 
 bail:
 	if (handle)
diff -ruNp linux-3.13.11/fs/ocfs2/namei.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/namei.c
--- linux-3.13.11/fs/ocfs2/namei.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/namei.c	2014-07-09 12:00:15.000000000 +0200
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/quotaops.h>
+#include <linux/vs_tag.h>
 
 #include <cluster/masklog.h>
 
@@ -475,6 +476,7 @@ static int __ocfs2_mknod_locked(struct i
 	struct ocfs2_dinode *fe = NULL;
 	struct ocfs2_extent_list *fel;
 	u16 feat;
+	ktag_t ktag;
 
 	*new_fe_bh = NULL;
 
@@ -512,8 +514,13 @@ static int __ocfs2_mknod_locked(struct i
 	fe->i_suballoc_loc = cpu_to_le64(suballoc_loc);
 	fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
 	fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
-	fe->i_uid = cpu_to_le32(i_uid_read(inode));
-	fe->i_gid = cpu_to_le32(i_gid_read(inode));
+
+	ktag = make_ktag(&init_user_ns, dx_current_fstag(osb->sb));
+	fe->i_uid = cpu_to_le32(from_kuid(&init_user_ns,
+		TAGINO_KUID(DX_TAG(inode), inode->i_uid, ktag)));
+	fe->i_gid = cpu_to_le32(from_kgid(&init_user_ns,
+		TAGINO_KGID(DX_TAG(inode), inode->i_gid, ktag)));
+	inode->i_tag = ktag; /* is this correct? */
 	fe->i_mode = cpu_to_le16(inode->i_mode);
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
diff -ruNp linux-3.13.11/fs/ocfs2/ocfs2.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ocfs2.h
--- linux-3.13.11/fs/ocfs2/ocfs2.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ocfs2.h	2014-07-09 12:00:15.000000000 +0200
@@ -235,11 +235,11 @@ enum ocfs2_vol_state
 
 struct ocfs2_alloc_stats
 {
-	atomic_t moves;
-	atomic_t local_data;
-	atomic_t bitmap_data;
-	atomic_t bg_allocs;
-	atomic_t bg_extends;
+	atomic_unchecked_t moves;
+	atomic_unchecked_t local_data;
+	atomic_unchecked_t bitmap_data;
+	atomic_unchecked_t bg_allocs;
+	atomic_unchecked_t bg_extends;
 };
 
 enum ocfs2_local_alloc_state
@@ -272,6 +272,7 @@ enum ocfs2_mount_options
 						     writes */
 	OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
 	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
+	OCFS2_MOUNT_TAGGED = 1 << 15, /* use tagging */
 };
 
 #define OCFS2_OSB_SOFT_RO			0x0001
diff -ruNp linux-3.13.11/fs/ocfs2/ocfs2_fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ocfs2_fs.h
--- linux-3.13.11/fs/ocfs2/ocfs2_fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/ocfs2_fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -266,6 +266,11 @@
 #define OCFS2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
 #define OCFS2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
 
+#define OCFS2_IXUNLINK_FL		FS_IXUNLINK_FL	/* Immutable invert on unlink */
+
+#define OCFS2_BARRIER_FL		FS_BARRIER_FL	/* Barrier for chroot() */
+#define OCFS2_COW_FL			FS_COW_FL	/* Copy on Write marker */
+
 #define OCFS2_FL_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
 #define OCFS2_FL_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
 
diff -ruNp linux-3.13.11/fs/ocfs2/suballoc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/suballoc.c
--- linux-3.13.11/fs/ocfs2/suballoc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/suballoc.c	2014-07-09 12:00:15.000000000 +0200
@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
 				mlog_errno(status);
 			goto bail;
 		}
-		atomic_inc(&osb->alloc_stats.bg_extends);
+		atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
 
 		/* You should never ask for this much metadata */
 		BUG_ON(bits_wanted >
@@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handl
 		mlog_errno(status);
 		goto bail;
 	}
-	atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+	atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
 	*suballoc_loc = res.sr_bg_blkno;
 	*suballoc_bit_start = res.sr_bit_offset;
@@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
 	trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
 					   res->sr_bits);
 
-	atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+	atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
 	BUG_ON(res->sr_bits != 1);
 
@@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *hand
 		mlog_errno(status);
 		goto bail;
 	}
-	atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+	atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
 
 	BUG_ON(res.sr_bits != 1);
 
@@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *han
 						      cluster_start,
 						      num_clusters);
 		if (!status)
-			atomic_inc(&osb->alloc_stats.local_data);
+			atomic_inc_unchecked(&osb->alloc_stats.local_data);
 	} else {
 		if (min_clusters > (osb->bitmap_cpg - 1)) {
 			/* The only paths asking for contiguousness
@@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *han
 				ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
 								 res.sr_bg_blkno,
 								 res.sr_bit_offset);
-			atomic_inc(&osb->alloc_stats.bitmap_data);
+			atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
 			*num_clusters = res.sr_bits;
 		}
 	}
diff -ruNp linux-3.13.11/fs/ocfs2/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/super.c
--- linux-3.13.11/fs/ocfs2/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ocfs2/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -185,6 +185,7 @@ enum {
 	Opt_coherency_full,
 	Opt_resv_level,
 	Opt_dir_resv_level,
+	Opt_tag, Opt_notag, Opt_tagid,
 	Opt_err,
 };
 
@@ -216,6 +217,9 @@ static const match_table_t tokens = {
 	{Opt_coherency_full, "coherency=full"},
 	{Opt_resv_level, "resv_level=%u"},
 	{Opt_dir_resv_level, "dir_resv_level=%u"},
+	{Opt_tag, "tag"},
+	{Opt_notag, "notag"},
+	{Opt_tagid, "tagid=%u"},
 	{Opt_err, NULL}
 };
 
@@ -300,11 +304,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
 			"%10s => GlobalAllocs: %d  LocalAllocs: %d  "
 			"SubAllocs: %d  LAWinMoves: %d  SAExtends: %d\n",
 			"Stats",
-			atomic_read(&osb->alloc_stats.bitmap_data),
-			atomic_read(&osb->alloc_stats.local_data),
-			atomic_read(&osb->alloc_stats.bg_allocs),
-			atomic_read(&osb->alloc_stats.moves),
-			atomic_read(&osb->alloc_stats.bg_extends));
+			atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
+			atomic_read_unchecked(&osb->alloc_stats.local_data),
+			atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
+			atomic_read_unchecked(&osb->alloc_stats.moves),
+			atomic_read_unchecked(&osb->alloc_stats.bg_extends));
 
 	out += snprintf(buf + out, len - out,
 			"%10s => State: %u  Descriptor: %llu  Size: %u bits  "
@@ -661,6 +665,13 @@ static int ocfs2_remount(struct super_bl
 		goto out;
 	}
 
+	if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) !=
+	    (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) {
+		ret = -EINVAL;
+		mlog(ML_ERROR, "Cannot change tagging on remount\n");
+		goto out;
+	}
+
 	/* We're going to/from readonly mode. */
 	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
 		/* Disable quota accounting before remounting RO */
@@ -1176,6 +1187,9 @@ static int ocfs2_fill_super(struct super
 
 	ocfs2_complete_mount_recovery(osb);
 
+	if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED)
+		sb->s_flags |= MS_TAGGED;
+
 	if (ocfs2_mount_local(osb))
 		snprintf(nodestr, sizeof(nodestr), "local");
 	else
@@ -1503,6 +1517,20 @@ static int ocfs2_parse_options(struct su
 			    option < OCFS2_MAX_RESV_LEVEL)
 				mopt->dir_resv_level = option;
 			break;
+#ifndef CONFIG_TAGGING_NONE
+		case Opt_tag:
+			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+			break;
+		case Opt_notag:
+			mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED;
+			break;
+#endif
+#ifdef CONFIG_PROPAGATE
+		case Opt_tagid:
+			/* use args[0] */
+			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+			break;
+#endif
 		default:
 			mlog(ML_ERROR,
 			     "Unrecognized mount option \"%s\" "
@@ -2121,11 +2149,11 @@ static int ocfs2_initialize_super(struct
 	spin_lock_init(&osb->osb_xattr_lock);
 	ocfs2_init_steal_slots(osb);
 
-	atomic_set(&osb->alloc_stats.moves, 0);
-	atomic_set(&osb->alloc_stats.local_data, 0);
-	atomic_set(&osb->alloc_stats.bitmap_data, 0);
-	atomic_set(&osb->alloc_stats.bg_allocs, 0);
-	atomic_set(&osb->alloc_stats.bg_extends, 0);
+	atomic_set_unchecked(&osb->alloc_stats.moves, 0);
+	atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
+	atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
+	atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
+	atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
 
 	/* Copy the blockcheck stats from the superblock probe */
 	osb->osb_ecc_stats = *stats;
diff -ruNp linux-3.13.11/fs/open.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/open.c
--- linux-3.13.11/fs/open.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/open.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,7 +31,14 @@
 #include <linux/ima.h>
 #include <linux/dnotify.h>
 #include <linux/compat.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
+#include <linux/vserver/dlimit.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/fs.h>
 #include "internal.h"
 
 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
@@ -68,6 +75,11 @@ long vfs_truncate(struct path *path, lof
 	struct inode *inode;
 	long error;
 
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(path);
+	if (error)
+		goto out;
+#endif
 	inode = path->dentry->d_inode;
 
 	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
@@ -103,6 +115,8 @@ long vfs_truncate(struct path *path, lof
 	error = locks_verify_truncate(inode, NULL, length);
 	if (!error)
 		error = security_path_truncate(path);
+	if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
+		error = -EACCES;
 	if (!error)
 		error = do_truncate(path->dentry, length, 0, NULL);
 
@@ -187,6 +201,8 @@ static long do_sys_ftruncate(unsigned in
 	error = locks_verify_truncate(inode, f.file, length);
 	if (!error)
 		error = security_path_truncate(&f.file->f_path);
+	if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
+		error = -EACCES;
 	if (!error)
 		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
 	sb_end_write(inode->i_sb);
@@ -361,6 +377,9 @@ retry:
 	if (__mnt_is_readonly(path.mnt))
 		res = -EROFS;
 
+	if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
+		res = -EACCES;
+
 out_path_release:
 	path_put(&path);
 	if (retry_estale(res, lookup_flags)) {
@@ -392,6 +411,8 @@ retry:
 	if (error)
 		goto dput_and_out;
 
+	gr_log_chdir(path.dentry, path.mnt);
+
 	set_fs_pwd(current->fs, &path);
 
 dput_and_out:
@@ -421,6 +442,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
 		goto out_putf;
 
 	error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
+
+	if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
+		error = -EPERM;
+
+	if (!error)
+		gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
+
 	if (!error)
 		set_fs_pwd(current->fs, &f.file->f_path);
 out_putf:
@@ -450,7 +478,13 @@ retry:
 	if (error)
 		goto dput_and_out;
 
+	if (gr_handle_chroot_chroot(path.dentry, path.mnt))
+		goto dput_and_out;
+
 	set_fs_root(current->fs, &path);
+
+	gr_handle_chroot_chdir(&path);
+
 	error = 0;
 dput_and_out:
 	path_put(&path);
@@ -474,6 +508,16 @@ static int chmod_common(struct path *pat
 		return error;
 retry_deleg:
 	mutex_lock(&inode->i_mutex);
+
+	if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
+		error = -EACCES;
+		goto out_unlock;
+	}
+	if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
+		error = -EACCES;
+		goto out_unlock;
+	}
+
 	error = security_path_chmod(path, mode);
 	if (error)
 		goto out_unlock;
@@ -511,6 +555,13 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
 	unsigned int lookup_flags = LOOKUP_FOLLOW;
 retry:
 	error = user_path_at(dfd, filename, lookup_flags, &path);
+#ifdef CONFIG_VSERVER_COWBL
+	if (!error) {
+		error = cow_check_and_break(&path);
+		if (error)
+			path_put(&path);
+	}
+#endif
 	if (!error) {
 		error = chmod_common(&path, mode);
 		path_put(&path);
@@ -539,18 +590,23 @@ static int chown_common(struct path *pat
 	uid = make_kuid(current_user_ns(), user);
 	gid = make_kgid(current_user_ns(), group);
 
+	if (!gr_acl_handle_chown(path->dentry, path->mnt))
+		return -EACCES;
+
 	newattrs.ia_valid =  ATTR_CTIME;
 	if (user != (uid_t) -1) {
 		if (!uid_valid(uid))
 			return -EINVAL;
 		newattrs.ia_valid |= ATTR_UID;
-		newattrs.ia_uid = uid;
+		newattrs.ia_uid = make_kuid(&init_user_ns,
+			dx_map_uid(user));
 	}
 	if (group != (gid_t) -1) {
 		if (!gid_valid(gid))
 			return -EINVAL;
 		newattrs.ia_valid |= ATTR_GID;
-		newattrs.ia_gid = gid;
+		newattrs.ia_gid = make_kgid(&init_user_ns,
+			dx_map_gid(group));
 	}
 	if (!S_ISDIR(inode->i_mode))
 		newattrs.ia_valid |=
@@ -589,6 +645,18 @@ retry:
 	error = mnt_want_write(path.mnt);
 	if (error)
 		goto out_release;
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
+#ifdef CONFIG_VSERVER_COWBL
+	error = cow_check_and_break(&path);
+	if (!error)
+#endif
 	error = chown_common(&path, user, group);
 	mnt_drop_write(path.mnt);
 out_release:
@@ -990,6 +1058,7 @@ long do_sys_open(int dfd, const char __u
 		} else {
 			fsnotify_open(f);
 			fd_install(fd, f);
+			trace_do_sys_open(tmp->name, flags, mode);
 		}
 	}
 	putname(tmp);
diff -ruNp linux-3.13.11/fs/pipe.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/pipe.c
--- linux-3.13.11/fs/pipe.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/pipe.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
 
 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
 {
-	if (pipe->files)
+	if (atomic_read(&pipe->files))
 		mutex_lock_nested(&pipe->mutex, subclass);
 }
 
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
 
 void pipe_unlock(struct pipe_inode_info *pipe)
 {
-	if (pipe->files)
+	if (atomic_read(&pipe->files))
 		mutex_unlock(&pipe->mutex);
 }
 EXPORT_SYMBOL(pipe_unlock);
@@ -449,9 +449,9 @@ redo:
 		}
 		if (bufs)	/* More to do? */
 			continue;
-		if (!pipe->writers)
+		if (!atomic_read(&pipe->writers))
 			break;
-		if (!pipe->waiting_writers) {
+		if (!atomic_read(&pipe->waiting_writers)) {
 			/* syscall merging: Usually we must not sleep
 			 * if O_NONBLOCK is set, or if we got some data.
 			 * But if a writer sleeps in kernel space, then
@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const str
 	ret = 0;
 	__pipe_lock(pipe);
 
-	if (!pipe->readers) {
+	if (!atomic_read(&pipe->readers)) {
 		send_sig(SIGPIPE, current, 0);
 		ret = -EPIPE;
 		goto out;
@@ -562,7 +562,7 @@ redo1:
 	for (;;) {
 		int bufs;
 
-		if (!pipe->readers) {
+		if (!atomic_read(&pipe->readers)) {
 			send_sig(SIGPIPE, current, 0);
 			if (!ret)
 				ret = -EPIPE;
@@ -653,9 +653,9 @@ redo2:
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 			do_wakeup = 0;
 		}
-		pipe->waiting_writers++;
+		atomic_inc(&pipe->waiting_writers);
 		pipe_wait(pipe);
-		pipe->waiting_writers--;
+		atomic_dec(&pipe->waiting_writers);
 	}
 out:
 	__pipe_unlock(pipe);
@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table
 	mask = 0;
 	if (filp->f_mode & FMODE_READ) {
 		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
-		if (!pipe->writers && filp->f_version != pipe->w_counter)
+		if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
 			mask |= POLLHUP;
 	}
 
@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table
 		 * Most Unices do not set POLLERR for FIFOs but on Linux they
 		 * behave exactly like pipes for poll().
 		 */
-		if (!pipe->readers)
+		if (!atomic_read(&pipe->readers))
 			mask |= POLLERR;
 	}
 
@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *
 	int kill = 0;
 
 	spin_lock(&inode->i_lock);
-	if (!--pipe->files) {
+	if (atomic_dec_and_test(&pipe->files)) {
 		inode->i_pipe = NULL;
 		kill = 1;
 	}
@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct
 
 	__pipe_lock(pipe);
 	if (file->f_mode & FMODE_READ)
-		pipe->readers--;
+		atomic_dec(&pipe->readers);
 	if (file->f_mode & FMODE_WRITE)
-		pipe->writers--;
+		atomic_dec(&pipe->writers);
 
-	if (pipe->readers || pipe->writers) {
+	if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
 		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_in
 	kfree(pipe);
 }
 
-static struct vfsmount *pipe_mnt __read_mostly;
+struct vfsmount *pipe_mnt __read_mostly;
 
 /*
  * pipefs_dname() is called from d_path().
@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(voi
 		goto fail_iput;
 
 	inode->i_pipe = pipe;
-	pipe->files = 2;
-	pipe->readers = pipe->writers = 1;
+	atomic_set(&pipe->files, 2);
+	atomic_set(&pipe->readers, 1);
+	atomic_set(&pipe->writers, 1);
 	inode->i_fop = &pipefifo_fops;
 
 	/*
@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode
 	spin_lock(&inode->i_lock);
 	if (inode->i_pipe) {
 		pipe = inode->i_pipe;
-		pipe->files++;
+		atomic_inc(&pipe->files);
 		spin_unlock(&inode->i_lock);
 	} else {
 		spin_unlock(&inode->i_lock);
 		pipe = alloc_pipe_info();
 		if (!pipe)
 			return -ENOMEM;
-		pipe->files = 1;
+		atomic_set(&pipe->files, 1);
 		spin_lock(&inode->i_lock);
 		if (unlikely(inode->i_pipe)) {
-			inode->i_pipe->files++;
+			atomic_inc(&inode->i_pipe->files);
 			spin_unlock(&inode->i_lock);
 			free_pipe_info(pipe);
 			pipe = inode->i_pipe;
@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode
 	 *  opened, even when there is no process writing the FIFO.
 	 */
 		pipe->r_counter++;
-		if (pipe->readers++ == 0)
+		if (atomic_inc_return(&pipe->readers) == 1)
 			wake_up_partner(pipe);
 
-		if (!is_pipe && !pipe->writers) {
+		if (!is_pipe && !atomic_read(&pipe->writers)) {
 			if ((filp->f_flags & O_NONBLOCK)) {
 				/* suppress POLLHUP until we have
 				 * seen a writer */
@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode
 	 *  errno=ENXIO when there is no process reading the FIFO.
 	 */
 		ret = -ENXIO;
-		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
+		if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
 			goto err;
 
 		pipe->w_counter++;
-		if (!pipe->writers++)
+		if (atomic_inc_return(&pipe->writers) == 1)
 			wake_up_partner(pipe);
 
-		if (!is_pipe && !pipe->readers) {
+		if (!is_pipe && !atomic_read(&pipe->readers)) {
 			if (wait_for_partner(pipe, &pipe->r_counter))
 				goto err_wr;
 		}
@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode
 	 *  the process can at least talk to itself.
 	 */
 
-		pipe->readers++;
-		pipe->writers++;
+		atomic_inc(&pipe->readers);
+		atomic_inc(&pipe->writers);
 		pipe->r_counter++;
 		pipe->w_counter++;
-		if (pipe->readers == 1 || pipe->writers == 1)
+		if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
 			wake_up_partner(pipe);
 		break;
 
@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode
 	return 0;
 
 err_rd:
-	if (!--pipe->readers)
+	if (atomic_dec_and_test(&pipe->readers))
 		wake_up_interruptible(&pipe->wait);
 	ret = -ERESTARTSYS;
 	goto err;
 
 err_wr:
-	if (!--pipe->writers)
+	if (atomic_dec_and_test(&pipe->writers))
 		wake_up_interruptible(&pipe->wait);
 	ret = -ERESTARTSYS;
 	goto err;
diff -ruNp linux-3.13.11/fs/posix_acl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/posix_acl.c
--- linux-3.13.11/fs/posix_acl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/posix_acl.c	2014-07-09 12:00:15.000000000 +0200
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/posix_acl.h>
 #include <linux/export.h>
+#include <linux/grsecurity.h>
 
 #include <linux/errno.h>
 
@@ -183,7 +184,7 @@ posix_acl_equiv_mode(const struct posix_
 		}
 	}
         if (mode_p)
-                *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+                *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
         return not_equiv;
 }
 
@@ -331,7 +332,7 @@ static int posix_acl_create_masq(struct
 		mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
 	}
 
-	*mode_p = (*mode_p & ~S_IRWXUGO) | mode;
+	*mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
         return not_equiv;
 }
 
@@ -389,6 +390,8 @@ posix_acl_create(struct posix_acl **acl,
 	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
 	int err = -ENOMEM;
 	if (clone) {
+		*mode_p &= ~gr_acl_umask();
+
 		err = posix_acl_create_masq(clone, mode_p);
 		if (err < 0) {
 			posix_acl_release(clone);
diff -ruNp linux-3.13.11/fs/proc/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/Kconfig
--- linux-3.13.11/fs/proc/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -30,7 +30,7 @@ config PROC_FS
 
 config PROC_KCORE
 	bool "/proc/kcore support" if !ARM
-	depends on PROC_FS && MMU
+	depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
 	help
 	  Provides a virtual ELF core file of the live kernel.  This can
 	  be read with gdb and other ELF tools.  No modifications can be
@@ -38,8 +38,8 @@ config PROC_KCORE
 
 config PROC_VMCORE
 	bool "/proc/vmcore support"
-	depends on PROC_FS && CRASH_DUMP
-	default y
+	depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
+	default n
         help
         Exports the dump image of crashed kernel in ELF format.
 
@@ -63,8 +63,8 @@ config PROC_SYSCTL
 	  limited in memory.
 
 config PROC_PAGE_MONITOR
- 	default y
-	depends on PROC_FS && MMU
+ 	default n
+	depends on PROC_FS && MMU && !GRKERNSEC
 	bool "Enable /proc page monitoring" if EXPERT
  	help
 	  Various /proc files exist to monitor process memory utilization:
diff -ruNp linux-3.13.11/fs/proc/array.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/array.c
--- linux-3.13.11/fs/proc/array.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/array.c	2014-07-09 12:00:15.000000000 +0200
@@ -60,6 +60,7 @@
 #include <linux/tty.h>
 #include <linux/string.h>
 #include <linux/mman.h>
+#include <linux/grsecurity.h>
 #include <linux/proc_fs.h>
 #include <linux/ioport.h>
 #include <linux/uaccess.h>
@@ -82,6 +83,8 @@
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -173,6 +176,9 @@ static inline void task_state(struct seq
 	rcu_read_lock();
 	ppid = pid_alive(p) ?
 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
+	if (unlikely(vx_current_initpid(p->pid)))
+		ppid = 0;
+
 	tpid = 0;
 	if (pid_alive(p)) {
 		struct task_struct *tracer = ptrace_parent(p);
@@ -299,7 +305,7 @@ static inline void task_sig(struct seq_f
 }
 
 static void render_cap_t(struct seq_file *m, const char *header,
-			kernel_cap_t *a)
+			struct vx_info *vxi, kernel_cap_t *a)
 {
 	unsigned __capi;
 
@@ -333,10 +339,11 @@ static inline void task_cap(struct seq_f
 	NORM_CAPS(cap_effective);
 	NORM_CAPS(cap_bset);
 
-	render_cap_t(m, "CapInh:\t", &cap_inheritable);
-	render_cap_t(m, "CapPrm:\t", &cap_permitted);
-	render_cap_t(m, "CapEff:\t", &cap_effective);
-	render_cap_t(m, "CapBnd:\t", &cap_bset);
+	/* FIXME: maybe move the p->vx_info masking to __task_cred() ? */
+	render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable);
+	render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted);
+	render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective);
+	render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset);
 }
 
 static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
@@ -365,6 +372,58 @@ static void task_cpus_allowed(struct seq
 	seq_putc(m, '\n');
 }
 
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static inline void task_pax(struct seq_file *m, struct task_struct *p)
+{
+	if (p->mm)
+		seq_printf(m, "PaX:\t%c%c%c%c%c\n",
+			   p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
+			   p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
+			   p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
+			   p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
+			   p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
+	else
+		seq_printf(m, "PaX:\t-----\n");
+}
+#endif
+
+int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+			struct pid *pid, struct task_struct *task)
+{
+	seq_printf(m,	"Proxy:\t%p(%c)\n"
+			"Count:\t%u\n"
+			"uts:\t%p(%c)\n"
+			"ipc:\t%p(%c)\n"
+			"mnt:\t%p(%c)\n"
+			"pid:\t%p(%c)\n"
+			"net:\t%p(%c)\n",
+			task->nsproxy,
+			(task->nsproxy == init_task.nsproxy ? 'I' : '-'),
+			atomic_read(&task->nsproxy->count),
+			task->nsproxy->uts_ns,
+			(task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'),
+			task->nsproxy->ipc_ns,
+			(task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'),
+			task->nsproxy->mnt_ns,
+			(task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'),
+			task->nsproxy->pid_ns_for_children,
+			(task->nsproxy->pid_ns_for_children ==
+				init_task.nsproxy->pid_ns_for_children ? 'I' : '-'),
+			task->nsproxy->net_ns,
+			(task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 'I' : '-'));
+	return 0;
+}
+
+void task_vs_id(struct seq_file *m, struct task_struct *task)
+{
+	if (task_vx_flags(task, VXF_HIDE_VINFO, 0))
+		return;
+
+	seq_printf(m, "VxID: %d\n", vx_task_xid(task));
+	seq_printf(m, "NxID: %d\n", nx_task_nid(task));
+}
+
+
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
@@ -382,10 +441,26 @@ int proc_pid_status(struct seq_file *m,
 	task_seccomp(m, task);
 	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
+	task_vs_id(m, task);
 	task_context_switch_counts(m, task);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+	task_pax(m, task);
+#endif
+
+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
+	task_grsec_rbac(m, task);
+#endif
+
 	return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+			     (_mm->pax_flags & MF_PAX_RANDMMAP || \
+			      _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task, int whole)
 {
@@ -407,6 +482,13 @@ static int do_task_stat(struct seq_file
 	char tcomm[sizeof(task->comm)];
 	unsigned long flags;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (current->exec_id != m->exec_id) {
+		gr_log_badprocpid("stat");
+		return 0;
+	}
+#endif
+
 	state = *get_task_state(task);
 	vsize = eip = esp = 0;
 	permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
@@ -478,6 +560,19 @@ static int do_task_stat(struct seq_file
 		gtime = task_gtime(task);
 	}
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (PAX_RAND_FLAGS(mm)) {
+		eip = 0;
+		esp = 0;
+		wchan = 0;
+	}
+#endif
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	wchan = 0;
+	eip =0;
+	esp =0;
+#endif
+
 	/* scale priority and nice values from timeslices to -20..20 */
 	/* to make it look like a "normal" Unix priority/nice value  */
 	priority = task_prio(task);
@@ -491,6 +586,17 @@ static int do_task_stat(struct seq_file
 	/* convert nsec -> ticks */
 	start_time = nsec_to_clock_t(start_time);
 
+	/* fixup start time for virt uptime */
+	if (vx_flags(VXF_VIRT_UPTIME, 0)) {
+		unsigned long long bias =
+			current->vx_info->cvirt.bias_clock;
+
+		if (start_time > bias)
+			start_time -= bias;
+		else
+			start_time = 0;
+	}
+
 	seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
 	seq_put_decimal_ll(m, ' ', ppid);
 	seq_put_decimal_ll(m, ' ', pgid);
@@ -514,9 +620,15 @@ static int do_task_stat(struct seq_file
 	seq_put_decimal_ull(m, ' ', vsize);
 	seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
 	seq_put_decimal_ull(m, ' ', rsslim);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
+	seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
+	seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
+#else
 	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
 	seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
 	seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
+#endif
 	seq_put_decimal_ull(m, ' ', esp);
 	seq_put_decimal_ull(m, ' ', eip);
 	/* The signal information here is obsolete.
@@ -538,7 +650,11 @@ static int do_task_stat(struct seq_file
 	seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
 	seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
 
-	if (mm && permitted) {
+	if (mm && permitted
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		&& !PAX_RAND_FLAGS(mm)
+#endif
+	   ) {
 		seq_put_decimal_ull(m, ' ', mm->start_data);
 		seq_put_decimal_ull(m, ' ', mm->end_data);
 		seq_put_decimal_ull(m, ' ', mm->start_brk);
@@ -576,8 +692,15 @@ int proc_pid_statm(struct seq_file *m, s
 			struct pid *pid, struct task_struct *task)
 {
 	unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
-	struct mm_struct *mm = get_task_mm(task);
+	struct mm_struct *mm;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (current->exec_id != m->exec_id) {
+		gr_log_badprocpid("statm");
+		return 0;
+	}
+#endif
+	mm = get_task_mm(task);
 	if (mm) {
 		size = task_statm(mm, &shared, &text, &data, &resident);
 		mmput(mm);
@@ -600,6 +723,13 @@ int proc_pid_statm(struct seq_file *m, s
 	return 0;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
+{
+	return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
+}
+#endif
+
 #ifdef CONFIG_CHECKPOINT_RESTORE
 static struct pid *
 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
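
As a quick way to see what the GRKERNSEC_PROC_MEMMAP/HIDESYM hunks above do to
/proc/<pid>/stat, here is a small user-space sketch (not part of the patch; field
numbers taken from proc(5)) that prints the fields they blank out: startcode,
endcode, startstack, kstkesp and kstkeip. For a PAX_RAND_FLAGS target read by
another task those should come back as 1/1/0/0/0.

/* stat_fields.c - illustrative only, not part of the patch */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], buf[4096], *p, *end;
	unsigned long long field[64] = { 0 };
	int i;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/stat",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);

	/* skip "pid (comm) " - comm may itself contain spaces or ')' */
	p = strrchr(buf, ')');
	if (!p)
		return 1;
	p += 2;			/* now at field 3 (state) */
	p = strchr(p, ' ');	/* step over the state character */

	for (i = 4; i < 64 && p && *p; i++) {
		field[i] = strtoull(p, &end, 10);
		if (end == p)
			break;
		p = end;
	}

	/* fields 26..30: startcode endcode startstack kstkesp kstkeip */
	printf("startcode=%llu endcode=%llu startstack=%llu "
	       "kstkesp=%llu kstkeip=%llu\n",
	       field[26], field[27], field[28], field[29], field[30]);
	return 0;
}

Build it with e.g. gcc -o stat_fields stat_fields.c and point it at a pid you do
not own (the file name is just an example).
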
diff -ruNp linux-3.13.11/fs/proc/base.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/base.c
--- linux-3.13.11/fs/proc/base.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/base.c	2014-07-09 12:00:15.000000000 +0200
@@ -87,6 +87,8 @@
 #include <linux/slab.h>
 #include <linux/flex_array.h>
 #include <linux/posix-timers.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
@@ -113,6 +115,14 @@ struct pid_entry {
 	union proc_op op;
 };
 
+struct getdents_callback {
+	struct linux_dirent __user * current_dir;
+	struct linux_dirent __user * previous;
+	struct file * file;
+	int count;
+	int error;
+};
+
 #define NOD(NAME, MODE, IOP, FOP, OP) {			\
 	.name = (NAME),					\
 	.len  = sizeof(NAME) - 1,			\
@@ -210,6 +220,9 @@ static int proc_pid_cmdline(struct task_
 	if (!mm->arg_end)
 		goto out_mm;	/* Shh! No looking before we're done */
 
+	if (gr_acl_handle_procpidmem(task))
+		goto out_mm;
+
  	len = mm->arg_end - mm->arg_start;
  
 	if (len > PAGE_SIZE)
@@ -237,12 +250,28 @@ out:
 	return res;
 }
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+			     (_mm->pax_flags & MF_PAX_RANDMMAP || \
+			      _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
 	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
 	int res = PTR_ERR(mm);
 	if (mm && !IS_ERR(mm)) {
 		unsigned int nwords = 0;
+
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		/* allow if we're currently ptracing this task */
+		if (PAX_RAND_FLAGS(mm) &&
+		    (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
+			mmput(mm);
+			return 0;
+		}
+#endif
+
 		do {
 			nwords += 2;
 		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
@@ -256,7 +285,7 @@ static int proc_pid_auxv(struct task_str
 }
 
 
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 /*
  * Provides a wchan file via kallsyms in a proper one-value-per-file format.
  * Returns the resolved symbol.  If that fails, simply return the address.
@@ -295,7 +324,7 @@ static void unlock_trace(struct task_str
 	mutex_unlock(&task->signal->cred_guard_mutex);
 }
 
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 
 #define MAX_STACK_TRACE_DEPTH	64
 
@@ -518,7 +547,7 @@ static int proc_pid_limits(struct task_s
 	return count;
 }
 
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
 static int proc_pid_syscall(struct task_struct *task, char *buffer)
 {
 	long nr;
@@ -547,7 +576,7 @@ static int proc_pid_syscall(struct task_
 /************************************************************************/
 
 /* permission checks */
-static int proc_fd_access_allowed(struct inode *inode)
+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
 {
 	struct task_struct *task;
 	int allowed = 0;
@@ -557,7 +586,10 @@ static int proc_fd_access_allowed(struct
 	 */
 	task = get_proc_task(inode);
 	if (task) {
-		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+		if (log)
+			allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+		else
+			allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
 		put_task_struct(task);
 	}
 	return allowed;
@@ -588,10 +620,35 @@ static bool has_pid_permissions(struct p
 				 struct task_struct *task,
 				 int hide_pid_min)
 {
+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+		return false;
+
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	rcu_read_lock();
+	{
+		const struct cred *tmpcred = current_cred();
+		const struct cred *cred = __task_cred(task);
+
+		if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+			|| in_group_p(grsec_proc_gid)
+#endif
+		) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!pid->hide_pid)
+		return false;
+#endif
+
 	if (pid->hide_pid < hide_pid_min)
 		return true;
 	if (in_group_p(pid->pid_gid))
 		return true;
+
 	return ptrace_may_access(task, PTRACE_MODE_READ);
 }
 
@@ -609,7 +666,11 @@ static int proc_pid_permission(struct in
 	put_task_struct(task);
 
 	if (!has_perms) {
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		{
+#else
 		if (pid->hide_pid == 2) {
+#endif
 			/*
 			 * Let's make getdents(), stat(), and open()
 			 * consistent with each other.  If a process
@@ -707,6 +768,11 @@ static int __mem_open(struct inode *inod
 	if (!task)
 		return -ESRCH;
 
+	if (gr_acl_handle_procpidmem(task)) {
+		put_task_struct(task);
+		return -EPERM;
+	}
+
 	mm = mm_access(task, mode);
 	put_task_struct(task);
 
@@ -722,6 +788,10 @@ static int __mem_open(struct inode *inod
 
 	file->private_data = mm;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	file->f_version = current->exec_id;
+#endif
+
 	return 0;
 }
 
@@ -743,6 +813,17 @@ static ssize_t mem_rw(struct file *file,
 	ssize_t copied;
 	char *page;
 
+#ifdef CONFIG_GRKERNSEC
+	if (write)
+		return -EPERM;
+#endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (file->f_version != current->exec_id) {
+		gr_log_badprocpid("mem");
+		return 0;
+	}
+#endif
+
 	if (!mm)
 		return 0;
 
@@ -755,7 +836,7 @@ static ssize_t mem_rw(struct file *file,
 		goto free;
 
 	while (count > 0) {
-		int this_len = min_t(int, count, PAGE_SIZE);
+		ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
 
 		if (write && copy_from_user(page, buf, this_len)) {
 			copied = -EFAULT;
@@ -847,6 +928,13 @@ static ssize_t environ_read(struct file
 	if (!mm)
 		return 0;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (file->f_version != current->exec_id) {
+		gr_log_badprocpid("environ");
+		return 0;
+	}
+#endif
+
 	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		return -ENOMEM;
@@ -856,7 +944,7 @@ static ssize_t environ_read(struct file
 		goto free;
 	while (count > 0) {
 		size_t this_len, max_len;
-		int retval;
+		ssize_t retval;
 
 		if (src >= (mm->env_end - mm->env_start))
 			break;
@@ -976,11 +1064,15 @@ static ssize_t oom_adj_write(struct file
 		oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
 
 	if (oom_adj < task->signal->oom_score_adj &&
-	    !capable(CAP_SYS_RESOURCE)) {
+	    !vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) {
 		err = -EACCES;
 		goto err_sighand;
 	}
 
+	/* prevent guest processes from circumventing the oom killer */
+	if (vx_current_xid() && (oom_adj == OOM_DISABLE))
+		oom_adj = OOM_ADJUST_MIN;
+
 	/*
 	 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
 	 * /proc/pid/oom_score_adj instead.
@@ -1467,7 +1559,7 @@ static void *proc_pid_follow_link(struct
 	int error = -EACCES;
 
 	/* Are we allowed to snoop on the tasks file descriptors? */
-	if (!proc_fd_access_allowed(inode))
+	if (!proc_fd_access_allowed(inode, 0))
 		goto out;
 
 	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
@@ -1511,8 +1603,18 @@ static int proc_pid_readlink(struct dent
 	struct path path;
 
 	/* Are we allowed to snoop on the tasks file descriptors? */
-	if (!proc_fd_access_allowed(inode))
-		goto out;
+	/* logging this is needed for learning on chromium to work properly,
+	   but we don't want to flood the logs from 'ps' which does a readlink
+	   on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
+	   CAP_SYS_PTRACE as it's not necessary for its basic functionality
+	 */
+	if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
+		if (!proc_fd_access_allowed(inode,0))
+			goto out;
+	} else {
+		if (!proc_fd_access_allowed(inode,1))
+			goto out;
+	}
 
 	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
 	if (error)
@@ -1562,9 +1664,15 @@ struct inode *proc_pid_make_inode(struct
 		rcu_read_lock();
 		cred = __task_cred(task);
 		inode->i_uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+		inode->i_gid = grsec_proc_gid;
+#else
 		inode->i_gid = cred->egid;
+#endif
 		rcu_read_unlock();
 	}
+	/* procfs is xid tagged */
+	i_tag_write(inode, (vtag_t)vx_task_xid(task));
 	security_task_to_inode(task, inode);
 
 out:
@@ -1598,10 +1706,19 @@ int pid_getattr(struct vfsmount *mnt, st
 			return -ENOENT;
 		}
 		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
+#endif
 		    task_dumpable(task)) {
 			cred = __task_cred(task);
 			stat->uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+			stat->gid = grsec_proc_gid;
+#else
 			stat->gid = cred->egid;
+#endif
 		}
 	}
 	rcu_read_unlock();
@@ -1610,6 +1727,8 @@ int pid_getattr(struct vfsmount *mnt, st
 
 /* dentry stuff */
 
+static unsigned name_to_int(struct dentry *dentry);
+
 /*
  *	Exceptional case: normally we are not allowed to unhash a busy
  * directory. In this case, however, we can do it - no aliasing problems
@@ -1638,12 +1757,27 @@ int pid_revalidate(struct dentry *dentry
 	task = get_proc_task(inode);
 
 	if (task) {
+		unsigned pid = name_to_int(dentry);
+
+		if (pid != ~0U && pid != vx_map_pid(task->pid)) {
+			put_task_struct(task);
+			goto drop;
+		}
 		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
+#endif
 		    task_dumpable(task)) {
 			rcu_read_lock();
 			cred = __task_cred(task);
 			inode->i_uid = cred->euid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+			inode->i_gid = grsec_proc_gid;
+#else
 			inode->i_gid = cred->egid;
+#endif
 			rcu_read_unlock();
 		} else {
 			inode->i_uid = GLOBAL_ROOT_UID;
@@ -1654,6 +1788,7 @@ int pid_revalidate(struct dentry *dentry
 		put_task_struct(task);
 		return 1;
 	}
+drop:
 	d_drop(dentry);
 	return 0;
 }
@@ -2173,6 +2308,16 @@ static struct dentry *proc_pident_lookup
 	if (!task)
 		goto out_no_task;
 
+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+		goto out;
+
+	/* TODO: maybe we can come up with a generic approach? */
+	if (task_vx_flags(task, VXF_HIDE_VINFO, 0) &&
+		(dentry->d_name.len == 5) &&
+		(!memcmp(dentry->d_name.name, "vinfo", 5) ||
+		!memcmp(dentry->d_name.name, "ninfo", 5)))
+		goto out;
+
 	/*
 	 * Yes, it does not scale. And it should not. Don't add
 	 * new entries into /proc/<tgid>/ without very good reasons.
@@ -2203,6 +2348,9 @@ static int proc_pident_readdir(struct fi
 	if (!task)
 		return -ENOENT;
 
+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+		goto out;
+
 	if (!dir_emit_dots(file, ctx))
 		goto out;
 
@@ -2569,6 +2717,9 @@ static int proc_pid_personality(struct s
 static const struct file_operations proc_task_operations;
 static const struct inode_operations proc_task_inode_operations;
 
+extern int proc_pid_vx_info(struct task_struct *, char *);
+extern int proc_pid_nx_info(struct task_struct *, char *);
+
 static const struct pid_entry tgid_base_stuff[] = {
 	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
 	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
@@ -2592,7 +2743,7 @@ static const struct pid_entry tgid_base_
 	REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
 #endif
 	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
 	INF("syscall",    S_IRUGO, proc_pid_syscall),
 #endif
 	INF("cmdline",    S_IRUGO, proc_pid_cmdline),
@@ -2617,10 +2768,10 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_SECURITY
 	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 	INF("wchan",      S_IRUGO, proc_pid_wchan),
 #endif
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 	ONE("stack",      S_IRUGO, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -2635,6 +2786,8 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_CGROUPS
 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
+	INF("vinfo",      S_IRUGO, proc_pid_vx_info),
+	INF("ninfo",	  S_IRUGO, proc_pid_nx_info),
 	INF("oom_score",  S_IRUGO, proc_oom_score),
 	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
 	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
@@ -2654,6 +2807,9 @@ static const struct pid_entry tgid_base_
 #ifdef CONFIG_HARDWALL
 	INF("hardwall",   S_IRUGO, proc_pid_hardwall),
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+	INF("ipaddr",     S_IRUSR, proc_pid_ipaddr),
+#endif
 #ifdef CONFIG_USER_NS
 	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
 	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
@@ -2784,7 +2940,14 @@ static int proc_pid_instantiate(struct i
 	if (!inode)
 		goto out;
 
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	inode->i_gid = grsec_proc_gid;
+	inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
+#else
 	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
+#endif
 	inode->i_op = &proc_tgid_base_inode_operations;
 	inode->i_fop = &proc_tgid_base_operations;
 	inode->i_flags|=S_IMMUTABLE;
@@ -2822,7 +2985,11 @@ struct dentry *proc_pid_lookup(struct in
 	if (!task)
 		goto out;
 
+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+		goto out_put_task;
+
 	result = proc_pid_instantiate(dir, dentry, task, NULL);
+out_put_task:
 	put_task_struct(task);
 out:
 	return ERR_PTR(result);
@@ -2847,7 +3014,7 @@ retry:
 	iter.task = NULL;
 	pid = find_ge_pid(iter.tgid, ns);
 	if (pid) {
-		iter.tgid = pid_nr_ns(pid, ns);
+		iter.tgid = pid_unmapped_nr_ns(pid, ns);
 		iter.task = pid_task(pid, PIDTYPE_PID);
 		/* What we to know is if the pid we have find is the
 		 * pid of a thread_group_leader.  Testing for task
@@ -2900,8 +3067,10 @@ int proc_pid_readdir(struct file *file,
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;
 
-		len = snprintf(name, sizeof(name), "%d", iter.tgid);
+		len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid));
 		ctx->pos = iter.tgid + TGID_OFFSET;
+		if (!vx_proc_task_visible(iter.task))
+			continue;
 		if (!proc_fill_cache(file, ctx, name, len,
 				     proc_pid_instantiate, iter.task, NULL)) {
 			put_task_struct(iter.task);
@@ -2928,7 +3097,7 @@ static const struct pid_entry tid_base_s
 	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
 	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
 	INF("syscall",   S_IRUGO, proc_pid_syscall),
 #endif
 	INF("cmdline",   S_IRUGO, proc_pid_cmdline),
@@ -2955,10 +3124,10 @@ static const struct pid_entry tid_base_s
 #ifdef CONFIG_SECURITY
 	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
 #endif
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 	INF("wchan",     S_IRUGO, proc_pid_wchan),
 #endif
-#ifdef CONFIG_STACKTRACE
+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 	ONE("stack",      S_IRUGO, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -2994,6 +3163,7 @@ static const struct pid_entry tid_base_s
 	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
 	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
 #endif
+	ONE("nsproxy",	S_IRUGO, proc_pid_nsproxy),
 };
 
 static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3060,6 +3230,8 @@ static struct dentry *proc_task_lookup(s
 	tid = name_to_int(dentry);
 	if (tid == ~0U)
 		goto out;
+	if (vx_current_initpid(tid))
+		goto out;
 
 	ns = dentry->d_sb->s_fs_info;
 	rcu_read_lock();
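
Most of the base.c changes above (gr_pid_is_chrooted()/gr_check_hidden_task() in
lookup/readdir plus the GRKERNSEC_PROC_USER/USERGROUP mode and gid overrides) show
up to userspace simply as pid directories that are missing or unstatable. A rough
check from an unprivileged account, illustrative only and not part of the patch:

/* proc_visibility.c - illustrative only, not part of the patch */
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
	DIR *d = opendir("/proc");
	struct dirent *de;
	struct stat st;
	char path[280];
	int listed = 0, statable = 0;

	if (!d) {
		perror("/proc");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		/* only numeric entries are pid directories */
		if (strspn(de->d_name, "0123456789") != strlen(de->d_name))
			continue;
		listed++;
		snprintf(path, sizeof(path), "/proc/%s", de->d_name);
		if (stat(path, &st) == 0)
			statable++;
	}
	closedir(d);
	printf("pid directories listed: %d, statable: %d\n", listed, statable);
	return 0;
}
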
diff -ruNp linux-3.13.11/fs/proc/cmdline.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/cmdline.c
--- linux-3.13.11/fs/proc/cmdline.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/cmdline.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,7 +23,11 @@ static const struct file_operations cmdl
 
 static int __init proc_cmdline_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
+#else
 	proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
+#endif
 	return 0;
 }
 module_init(proc_cmdline_init);
diff -ruNp linux-3.13.11/fs/proc/devices.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/devices.c
--- linux-3.13.11/fs/proc/devices.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/devices.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,11 @@ static const struct file_operations proc
 
 static int __init proc_devices_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
+#else
 	proc_create("devices", 0, NULL, &proc_devinfo_operations);
+#endif
 	return 0;
 }
 module_init(proc_devices_init);
diff -ruNp linux-3.13.11/fs/proc/fd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/fd.c
--- linux-3.13.11/fs/proc/fd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/fd.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m,
 	if (!task)
 		return -ENOENT;
 
-	files = get_files_struct(task);
+	if (!gr_acl_handle_procpidmem(task))
+		files = get_files_struct(task);
 	put_task_struct(task);
 
 	if (files) {
@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(stru
  */
 int proc_fd_permission(struct inode *inode, int mask)
 {
+	struct task_struct *task;
 	int rv = generic_permission(inode, mask);
-	if (rv == 0)
-		return 0;
+
 	if (task_tgid(current) == proc_pid(inode))
 		rv = 0;
+
+	task = get_proc_task(inode);
+	if (task == NULL)
+		return rv;
+
+	if (gr_acl_handle_procpidmem(task))
+		rv = -EACCES;
+
+	put_task_struct(task);
+
 	return rv;
 }
 
diff -ruNp linux-3.13.11/fs/proc/generic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/generic.c
--- linux-3.13.11/fs/proc/generic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/generic.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
+#include <linux/vserver/inode.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -187,6 +188,8 @@ struct dentry *proc_lookup_de(struct pro
 	for (de = de->subdir; de ; de = de->next) {
 		if (de->namelen != dentry->d_name.len)
 			continue;
+		if (!vx_hide_check(0, de->vx_flags))
+			continue;
 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
 			pde_get(de);
 			spin_unlock(&proc_subdir_lock);
@@ -195,6 +198,8 @@ struct dentry *proc_lookup_de(struct pro
 				return ERR_PTR(-ENOMEM);
 			d_set_d_op(dentry, &simple_dentry_operations);
 			d_add(dentry, inode);
+			/* generic proc entries belong to the host */
+			i_tag_write(inode, 0);
 			return NULL;
 		}
 	}
@@ -242,6 +247,9 @@ int proc_readdir_de(struct proc_dir_entr
 	do {
 		struct proc_dir_entry *next;
 		pde_get(de);
+
+		if (!vx_hide_check(0, de->vx_flags))
+			goto skip;
 		spin_unlock(&proc_subdir_lock);
 		if (!dir_emit(ctx, de->name, de->namelen,
 			    de->low_ino, de->mode >> 12)) {
@@ -249,6 +257,7 @@ int proc_readdir_de(struct proc_dir_entr
 			return 0;
 		}
 		spin_lock(&proc_subdir_lock);
+	skip:
 		ctx->pos++;
 		next = de->next;
 		pde_put(de);
@@ -355,6 +364,7 @@ static struct proc_dir_entry *__proc_cre
 	ent->namelen = len;
 	ent->mode = mode;
 	ent->nlink = nlink;
+	ent->vx_flags = IATTR_PROC_DEFAULT;
 	atomic_set(&ent->count, 1);
 	spin_lock_init(&ent->pde_unload_lock);
 	INIT_LIST_HEAD(&ent->pde_openers);
@@ -378,7 +388,8 @@ struct proc_dir_entry *proc_symlink(cons
 				kfree(ent->data);
 				kfree(ent);
 				ent = NULL;
-			}
+			} else
+				ent->vx_flags = IATTR_PROC_SYMLINK;
 		} else {
 			kfree(ent);
 			ent = NULL;
diff -ruNp linux-3.13.11/fs/proc/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/inode.c
--- linux-3.13.11/fs/proc/inode.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,11 +23,17 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/grsecurity.h>
 
 #include <asm/uaccess.h>
 
 #include "internal.h"
 
+#ifdef CONFIG_PROC_SYSCTL
+extern const struct inode_operations proc_sys_inode_operations;
+extern const struct inode_operations proc_sys_dir_operations;
+#endif
+
 static void proc_evict_inode(struct inode *inode)
 {
 	struct proc_dir_entry *de;
@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inod
 	ns = PROC_I(inode)->ns.ns;
 	if (ns_ops && ns)
 		ns_ops->put(ns);
+
+#ifdef CONFIG_PROC_SYSCTL
+	if (inode->i_op == &proc_sys_inode_operations ||
+	    inode->i_op == &proc_sys_dir_operations)
+		gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
+#endif
+
 }
 
 static struct kmem_cache * proc_inode_cachep;
@@ -413,8 +426,14 @@ struct inode *proc_get_inode(struct supe
 		if (de->mode) {
 			inode->i_mode = de->mode;
 			inode->i_uid = de->uid;
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+			inode->i_gid = grsec_proc_gid;
+#else
 			inode->i_gid = de->gid;
+#endif
 		}
+		if (de->vx_flags)
+			PROC_I(inode)->vx_flags = de->vx_flags;
 		if (de->size)
 			inode->i_size = de->size;
 		if (de->nlink)
diff -ruNp linux-3.13.11/fs/proc/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/internal.h
--- linux-3.13.11/fs/proc/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/binfmts.h>
+#include <linux/vs_pid.h>
 
 struct ctl_table_header;
 struct mempolicy;
@@ -35,6 +36,7 @@ struct proc_dir_entry {
 	nlink_t nlink;
 	kuid_t uid;
 	kgid_t gid;
+	int vx_flags;
 	loff_t size;
 	const struct inode_operations *proc_iops;
 	const struct file_operations *proc_fops;
@@ -48,7 +50,10 @@ struct proc_dir_entry {
 	spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
 	u8 namelen;
 	char name[];
-};
+} __randomize_layout;
+
+struct vx_info;
+struct nx_info;
 
 union proc_op {
 	int (*proc_get_link)(struct dentry *, struct path *);
@@ -56,10 +61,14 @@ union proc_op {
 	int (*proc_show)(struct seq_file *m,
 		struct pid_namespace *ns, struct pid *pid,
 		struct task_struct *task);
+	int (*proc_vs_read)(char *page);
+	int (*proc_vxi_read)(struct vx_info *vxi, char *page);
+	int (*proc_nxi_read)(struct nx_info *nxi, char *page);
 };
 
 struct proc_inode {
 	struct pid *pid;
+	int vx_flags;
 	int fd;
 	union proc_op op;
 	struct proc_dir_entry *pde;
@@ -67,7 +76,7 @@ struct proc_inode {
 	struct ctl_table *sysctl_entry;
 	struct proc_ns ns;
 	struct inode vfs_inode;
-};
+} __randomize_layout;
 
 /*
  * General functions
@@ -92,11 +101,16 @@ static inline struct pid *proc_pid(struc
 	return PROC_I(inode)->pid;
 }
 
-static inline struct task_struct *get_proc_task(struct inode *inode)
+static inline struct task_struct *get_proc_task_real(struct inode *inode)
 {
 	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
 }
 
+static inline struct task_struct *get_proc_task(struct inode *inode)
+{
+	return vx_get_proc_task(inode, proc_pid(inode));
+}
+
 static inline int task_dumpable(struct task_struct *task)
 {
 	int dumpable = 0;
@@ -155,6 +169,11 @@ extern int proc_pid_status(struct seq_fi
 			   struct pid *, struct task_struct *);
 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
 			  struct pid *, struct task_struct *);
+extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+			    struct pid *pid, struct task_struct *task);
+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
+#endif
 
 /*
  * base.c
diff -ruNp linux-3.13.11/fs/proc/interrupts.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/interrupts.c
--- linux-3.13.11/fs/proc/interrupts.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/interrupts.c	2014-07-09 12:00:15.000000000 +0200
@@ -47,7 +47,11 @@ static const struct file_operations proc
 
 static int __init proc_interrupts_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
+#else
 	proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
+#endif
 	return 0;
 }
 module_init(proc_interrupts_init);
diff -ruNp linux-3.13.11/fs/proc/kcore.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/kcore.c
--- linux-3.13.11/fs/proc/kcore.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/kcore.c	2014-07-09 12:00:15.000000000 +0200
@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __use
 	 * the addresses in the elf_phdr on our list.
 	 */
 	start = kc_offset_to_vaddr(*fpos - elf_buflen);
-	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
+	tsz = PAGE_SIZE - (start & ~PAGE_MASK);
+	if (tsz > buflen)
 		tsz = buflen;
-		
+
 	while (buflen) {
 		struct kcore_list *m;
 
@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __use
 			kfree(elf_buf);
 		} else {
 			if (kern_addr_valid(start)) {
-				unsigned long n;
+				char *elf_buf;
+				mm_segment_t oldfs;
 
-				n = copy_to_user(buffer, (char *)start, tsz);
-				/*
-				 * We cannot distinguish between fault on source
-				 * and fault on destination. When this happens
-				 * we clear too and hope it will trigger the
-				 * EFAULT again.
-				 */
-				if (n) { 
-					if (clear_user(buffer + tsz - n,
-								n))
+				elf_buf = kmalloc(tsz, GFP_KERNEL);
+				if (!elf_buf)
+					return -ENOMEM;
+				oldfs = get_fs();
+				set_fs(KERNEL_DS);
+				if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
+					set_fs(oldfs);
+					if (copy_to_user(buffer, elf_buf, tsz)) {
+						kfree(elf_buf);
 						return -EFAULT;
+					}
 				}
+				set_fs(oldfs);
+				kfree(elf_buf);
 			} else {
 				if (clear_user(buffer, tsz))
 					return -EFAULT;
@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __use
 
 static int open_kcore(struct inode *inode, struct file *filp)
 {
+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
+	return -EPERM;
+#endif
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 	if (kcore_need_update)
diff -ruNp linux-3.13.11/fs/proc/loadavg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/loadavg.c
--- linux-3.13.11/fs/proc/loadavg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/loadavg.c	2014-07-09 12:00:15.000000000 +0200
@@ -12,15 +12,27 @@
 
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
+	unsigned long running;
+	unsigned int threads;
 	unsigned long avnrun[3];
 
 	get_avenrun(avnrun, FIXED_1/200, 0);
 
+	if (vx_flags(VXF_VIRT_LOAD, 0)) {
+		struct vx_info *vxi = current_vx_info();
+
+		running = atomic_read(&vxi->cvirt.nr_running);
+		threads = atomic_read(&vxi->cvirt.nr_threads);
+	} else {
+		running = nr_running();
+		threads = nr_threads;
+	}
+
 	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
 		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
-		nr_running(), nr_threads,
+		running, threads,
 		task_active_pid_ns(current)->last_pid);
 	return 0;
 }
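
The loadavg hunk only changes where the running/threads pair comes from (the
guest's cvirt counters under VXF_VIRT_LOAD instead of the host-wide
nr_running()/nr_threads); the file format is untouched, so existing parsers keep
working. For reference, a minimal reader of the affected fields (sketch, not part
of the patch):

/* loadavg_read.c - illustrative only, not part of the patch */
#include <stdio.h>

int main(void)
{
	double a1, a5, a15;
	unsigned long running, threads;
	int last_pid;
	FILE *f = fopen("/proc/loadavg", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lf %lf %lf %lu/%lu %d",
		   &a1, &a5, &a15, &running, &threads, &last_pid) != 6) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("load %.2f/%.2f/%.2f, %lu running of %lu threads, last pid %d\n",
	       a1, a5, a15, running, threads, last_pid);
	return 0;
}
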
diff -ruNp linux-3.13.11/fs/proc/meminfo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/meminfo.c
--- linux-3.13.11/fs/proc/meminfo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/meminfo.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,8 @@ static int meminfo_proc_show(struct seq_
 	si_swapinfo(&i);
 	committed = percpu_counter_read_positive(&vm_committed_as);
 
-	cached = global_page_state(NR_FILE_PAGES) -
+	cached = vx_flags(VXF_VIRT_MEM, 0) ?
+		vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) -
 			total_swapcache_pages() - i.bufferram;
 	if (cached < 0)
 		cached = 0;
@@ -150,7 +151,7 @@ static int meminfo_proc_show(struct seq_
 		vmi.used >> 10,
 		vmi.largest_chunk >> 10
 #ifdef CONFIG_MEMORY_FAILURE
-		,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
+		,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
 #endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
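
Likewise for meminfo: with VXF_VIRT_MEM the Cached: value now comes from
vx_vsi_cached() instead of the global page counters, but the layout stays the
same. A quick reader for just that field (sketch, not part of the patch):

/* meminfo_cached.c - illustrative only, not part of the patch */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	unsigned long cached_kb = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "Cached: %lu kB", &cached_kb) == 1)
			break;
	}
	fclose(f);
	printf("Cached: %lu kB\n", cached_kb);
	return 0;
}
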
diff -ruNp linux-3.13.11/fs/proc/nommu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/nommu.c
--- linux-3.13.11/fs/proc/nommu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/nommu.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_
 
 	if (file) {
 		seq_pad(m, ' ');
-		seq_path(m, &file->f_path, "");
+		seq_path(m, &file->f_path, "\n\\");
 	}
 
 	seq_putc(m, '\n');
diff -ruNp linux-3.13.11/fs/proc/proc_net.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/proc_net.c
--- linux-3.13.11/fs/proc/proc_net.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/proc_net.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 #include <linux/nsproxy.h>
 #include <net/net_namespace.h>
 #include <linux/seq_file.h>
+#include <linux/grsecurity.h>
 
 #include "internal.h"
 
@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(str
 	struct task_struct *task;
 	struct nsproxy *ns;
 	struct net *net = NULL;
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	const struct cred *cred = current_cred();
+#endif
+
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
+		return net;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
+		return net;
+#endif
 
 	rcu_read_lock();
 	task = pid_task(proc_pid(dir), PIDTYPE_PID);
diff -ruNp linux-3.13.11/fs/proc/proc_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/proc_sysctl.c
--- linux-3.13.11/fs/proc/proc_sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/proc_sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -11,13 +11,21 @@
 #include <linux/namei.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/nsproxy.h>
+#ifdef CONFIG_GRKERNSEC
+#include <net/net_namespace.h>
+#endif
 #include "internal.h"
 
+extern int gr_handle_chroot_sysctl(const int op);
+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
+				const int op);
+
 static const struct dentry_operations proc_sys_dentry_operations;
 static const struct file_operations proc_sys_file_operations;
-static const struct inode_operations proc_sys_inode_operations;
+const struct inode_operations proc_sys_inode_operations;
 static const struct file_operations proc_sys_dir_file_operations;
-static const struct inode_operations proc_sys_dir_operations;
+const struct inode_operations proc_sys_dir_operations;
 
 void proc_sys_poll_notify(struct ctl_table_poll *poll)
 {
@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(st
 
 	err = NULL;
 	d_set_d_op(dentry, &proc_sys_dentry_operations);
+
+	gr_handle_proc_create(dentry, inode);
+
 	d_add(dentry, inode);
 
 out:
@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(str
 	struct inode *inode = file_inode(filp);
 	struct ctl_table_header *head = grab_header(inode);
 	struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+	int op = write ? MAY_WRITE : MAY_READ;
 	ssize_t error;
 	size_t res;
 
@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(str
 	 * and won't be until we finish.
 	 */
 	error = -EPERM;
-	if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
+	if (sysctl_perm(head, table, op))
 		goto out;
 
 	/* if that can happen at all, it should be -EINVAL, not -EISDIR */
@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(str
 	if (!table->proc_handler)
 		goto out;
 
+#ifdef CONFIG_GRKERNSEC
+	error = -EPERM;
+	if (gr_handle_chroot_sysctl(op))
+		goto out;
+	dget(filp->f_path.dentry);
+	if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
+		dput(filp->f_path.dentry);
+		goto out;
+	}
+	dput(filp->f_path.dentry);
+	if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
+		goto out;
+	if (write) {
+		if (current->nsproxy->net_ns != table->extra2) {
+			if (!capable(CAP_SYS_ADMIN))
+				goto out;
+		} else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
+			goto out;
+	}
+#endif
+
 	/* careful: calling conventions are nasty here */
 	res = count;
 	error = table->proc_handler(table, write, buf, &res, ppos);
@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct f
 				return false;
 			} else {
 				d_set_d_op(child, &proc_sys_dentry_operations);
+
+				gr_handle_proc_create(child, inode);
+
 				d_add(child, inode);
 			}
 		} else {
@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header
 	if ((*pos)++ < ctx->pos)
 		return true;
 
+	if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
+		return 0;
+
 	if (unlikely(S_ISLNK(table->mode)))
 		res = proc_sys_link_fill_cache(file, ctx, head, table);
 	else
@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmo
 	if (IS_ERR(head))
 		return PTR_ERR(head);
 
+	if (table && !gr_acl_handle_hidden_file(dentry, mnt))
+		return -ENOENT;
+
 	generic_fillattr(inode, stat);
 	if (table)
 		stat->mode = (stat->mode & S_IFMT) | table->mode;
@@ -756,13 +798,13 @@ static const struct file_operations proc
 	.llseek		= generic_file_llseek,
 };
 
-static const struct inode_operations proc_sys_inode_operations = {
+const struct inode_operations proc_sys_inode_operations = {
 	.permission	= proc_sys_permission,
 	.setattr	= proc_sys_setattr,
 	.getattr	= proc_sys_getattr,
 };
 
-static const struct inode_operations proc_sys_dir_operations = {
+const struct inode_operations proc_sys_dir_operations = {
 	.lookup		= proc_sys_lookup,
 	.permission	= proc_sys_permission,
 	.setattr	= proc_sys_setattr,
@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struc
 static struct ctl_dir *new_dir(struct ctl_table_set *set,
 			       const char *name, int namelen)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 	struct ctl_dir *new;
 	struct ctl_node *node;
 	char *new_name;
@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ct
 		return NULL;
 
 	node = (struct ctl_node *)(new + 1);
-	table = (struct ctl_table *)(node + 1);
+	table = (ctl_table_no_const *)(node + 1);
 	new_name = (char *)(table + 2);
 	memcpy(new_name, name, namelen);
 	new_name[namelen] = '\0';
@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char
 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
 	struct ctl_table_root *link_root)
 {
-	struct ctl_table *link_table, *entry, *link;
+	ctl_table_no_const *link_table, *link;
+	struct ctl_table *entry;
 	struct ctl_table_header *links;
 	struct ctl_node *node;
 	char *link_name;
@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_link
 		return NULL;
 
 	node = (struct ctl_node *)(links + 1);
-	link_table = (struct ctl_table *)(node + nr_entries);
+	link_table = (ctl_table_no_const *)(node + nr_entries);
 	link_name = (char *)&link_table[nr_entries + 1];
 
 	for (link = link_table, entry = table; entry->procname; link++, entry++) {
@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(c
 	struct ctl_table_header ***subheader, struct ctl_table_set *set,
 	struct ctl_table *table)
 {
-	struct ctl_table *ctl_table_arg = NULL;
-	struct ctl_table *entry, *files;
+	ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
+	struct ctl_table *entry;
 	int nr_files = 0;
 	int nr_dirs = 0;
 	int err = -ENOMEM;
@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(c
 			nr_files++;
 	}
 
-	files = table;
 	/* If there are mixed files and directories we need a new table */
 	if (nr_dirs && nr_files) {
-		struct ctl_table *new;
+		ctl_table_no_const *new;
 		files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
 				GFP_KERNEL);
 		if (!files)
@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(c
 	/* Register everything except a directory full of subdirectories */
 	if (nr_files || !nr_dirs) {
 		struct ctl_table_header *header;
-		header = __register_sysctl_table(set, path, files);
+		header = __register_sysctl_table(set, path, files ? files : table);
 		if (!header) {
 			kfree(ctl_table_arg);
 			goto out;
diff -ruNp linux-3.13.11/fs/proc/root.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/root.c
--- linux-3.13.11/fs/proc/root.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/root.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,9 +20,14 @@
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
 #include <linux/parser.h>
+#include <linux/vserver/inode.h>
 
 #include "internal.h"
 
+struct proc_dir_entry *proc_virtual;
+
+extern void proc_vx_init(void);
+
 static int proc_test_super(struct super_block *sb, void *data)
 {
 	return sb->s_fs_info == data;
@@ -114,7 +119,8 @@ static struct dentry *proc_mount(struct
 			return ERR_PTR(-EPERM);
 
 		/* Does the mounter have privilege over the pid namespace? */
-		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+		if (!vx_ns_capable(ns->user_ns,
+			CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
 			return ERR_PTR(-EPERM);
 	}
 
@@ -186,8 +192,17 @@ void __init proc_root_init(void)
 #ifdef CONFIG_PROC_DEVICETREE
 	proc_device_tree_init();
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
+#endif
+#else
 	proc_mkdir("bus", NULL);
+#endif
 	proc_sys_init();
+	proc_vx_init();
 }
 
 static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
@@ -249,6 +264,7 @@ struct proc_dir_entry proc_root = {
 	.proc_iops	= &proc_root_inode_operations, 
 	.proc_fops	= &proc_root_operations,
 	.parent		= &proc_root,
+	.vx_flags	= IATTR_ADMIN | IATTR_WATCH,
 	.name		= "/proc",
 };
 
diff -ruNp linux-3.13.11/fs/proc/self.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/self.c
--- linux-3.13.11/fs/proc/self.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/self.c	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,7 @@
 #include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/pid_namespace.h>
+#include <linux/vserver/inode.h>
 #include "internal.h"
 
 /*
@@ -54,6 +55,8 @@ int proc_setup_self(struct super_block *
 	self = d_alloc_name(s->s_root, "self");
 	if (self) {
 		struct inode *inode = new_inode_pseudo(s);
+
+		// self->vx_flags = IATTR_PROC_SYMLINK;
 		if (inode) {
 			inode->i_ino = self_inum;
 			inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
diff -ruNp linux-3.13.11/fs/proc/stat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/stat.c
--- linux-3.13.11/fs/proc/stat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/stat.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,8 +9,11 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/irqnr.h>
+#include <linux/vserver/cvirt.h>
 #include <asm/cputime.h>
 #include <linux/tick.h>
+#include <linux/cpuset.h>
+#include <linux/grsecurity.h>
 
 #ifndef arch_irq_stat_cpu
 #define arch_irq_stat_cpu(cpu) 0
@@ -87,14 +90,39 @@ static int show_stat(struct seq_file *p,
 	u64 sum_softirq = 0;
 	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
 	struct timespec boottime;
+	cpumask_var_t cpus_allowed;
+	bool virt_cpu = vx_flags(VXF_VIRT_CPU, 0);
+	int unrestricted = 1;
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+		&& !in_group_p(grsec_proc_gid)
+#endif
+	)
+		unrestricted = 0;
+#endif
+#endif
 
 	user = nice = system = idle = iowait =
 		irq = softirq = steal = 0;
 	guest = guest_nice = 0;
 	getboottime(&boottime);
+
+	if (vx_flags(VXF_VIRT_UPTIME, 0))
+		vx_vsi_boottime(&boottime);
+
+	if (virt_cpu)
+		cpuset_cpus_allowed(current, cpus_allowed);
+
 	jif = boottime.tv_sec;
 
+	if (unrestricted) {
 	for_each_possible_cpu(i) {
+		if (virt_cpu && !cpumask_test_cpu(i, cpus_allowed))
+			continue;
+
 		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
 		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
 		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
@@ -116,6 +144,7 @@ static int show_stat(struct seq_file *p,
 		}
 	}
 	sum += arch_irq_stat();
+	}
 
 	seq_puts(p, "cpu ");
 	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
@@ -131,6 +160,10 @@ static int show_stat(struct seq_file *p,
 	seq_putc(p, '\n');
 
 	for_each_online_cpu(i) {
+		if (unrestricted) {
+		if (virt_cpu && !cpumask_test_cpu(i, cpus_allowed))
+			continue;
+
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
 		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
 		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
@@ -142,6 +175,7 @@ static int show_stat(struct seq_file *p,
 		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
 		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
 		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+		}
 		seq_printf(p, "cpu%d", i);
 		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
 		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
@@ -159,7 +193,7 @@ static int show_stat(struct seq_file *p,
 
 	/* sum again ? it could be updated? */
 	for_each_irq_nr(j)
-		seq_put_decimal_ull(p, ' ', kstat_irqs(j));
+		seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
 
 	seq_printf(p,
 		"\nctxt %llu\n"
@@ -167,11 +201,11 @@ static int show_stat(struct seq_file *p,
 		"processes %lu\n"
 		"procs_running %lu\n"
 		"procs_blocked %lu\n",
-		nr_context_switches(),
+		unrestricted ? nr_context_switches() : 0ULL,
 		(unsigned long)jif,
-		total_forks,
-		nr_running(),
-		nr_iowait());
+		unrestricted ? total_forks : 0UL,
+		unrestricted ? nr_running() : 0UL,
+		unrestricted ? nr_iowait() : 0UL);
 
 	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
 
diff -ruNp linux-3.13.11/fs/proc/task_mmu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/task_mmu.c
--- linux-3.13.11/fs/proc/task_mmu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/task_mmu.c	2014-07-09 12:00:15.000000000 +0200
@@ -12,12 +12,19 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/grsecurity.h>
 
 #include <asm/elf.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include "internal.h"
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
+			     (_mm->pax_flags & MF_PAX_RANDMMAP || \
+			      _mm->pax_flags & MF_PAX_SEGMEXEC))
+#endif
+
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
 	unsigned long data, text, lib, swap;
@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct
 		"VmExe:\t%8lu kB\n"
 		"VmLib:\t%8lu kB\n"
 		"VmPTE:\t%8lu kB\n"
-		"VmSwap:\t%8lu kB\n",
-		hiwater_vm << (PAGE_SHIFT-10),
+		"VmSwap:\t%8lu kB\n"
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+		"CsBase:\t%8lx\nCsLim:\t%8lx\n"
+#endif
+
+		,hiwater_vm << (PAGE_SHIFT-10),
 		total_vm << (PAGE_SHIFT-10),
 		mm->locked_vm << (PAGE_SHIFT-10),
 		mm->pinned_vm << (PAGE_SHIFT-10),
@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
 		(PTRS_PER_PTE * sizeof(pte_t) *
 		 atomic_long_read(&mm->nr_ptes)) >> 10,
-		swap << (PAGE_SHIFT-10));
+		swap << (PAGE_SHIFT-10)
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		, PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
+		, PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
+#else
+		, mm->context.user_cs_base
+		, mm->context.user_cs_limit
+#endif
+#endif
+
+	);
 }
 
 unsigned long task_vsize(struct mm_struct *mm)
@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct
 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 	}
 
-	/* We don't show the stack guard page in /proc/maps */
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
+	end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
+#else
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
+#endif
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+			PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
+#else
 			pgoff,
+#endif
 			MAJOR(dev), MINOR(dev), ino);
 
 	/*
@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct
 	 */
 	if (file) {
 		seq_pad(m, ' ');
-		seq_path(m, &file->f_path, "\n");
+		seq_path(m, &file->f_path, "\n\\");
 		goto done;
 	}
 
@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct
 			 * Thread stack in /proc/PID/task/TID/maps or
 			 * the main process stack.
 			 */
-			if (!is_pid || (vma->vm_start <= mm->start_stack &&
-			    vma->vm_end >= mm->start_stack)) {
+			if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
+			    (vma->vm_start <= mm->start_stack &&
+			     vma->vm_end >= mm->start_stack)) {
 				name = "[stack]";
 			} else {
 				/* Thread stack in /proc/PID/maps */
@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m,
 	struct proc_maps_private *priv = m->private;
 	struct task_struct *task = priv->task;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (current->exec_id != m->exec_id) {
+		gr_log_badprocpid("maps");
+		return 0;
+	}
+#endif
+
 	show_map_vma(m, vma, is_pid);
 
 	if (m->count < m->size)  /* vma is copied successfully */
@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m,
 		.private = &mss,
 	};
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (current->exec_id != m->exec_id) {
+		gr_log_badprocpid("smaps");
+		return 0;
+	}
+#endif
 	memset(&mss, 0, sizeof mss);
-	mss.vma = vma;
-	/* mmap_sem is held in m_start */
-	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
-
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (!PAX_RAND_FLAGS(vma->vm_mm)) {
+#endif
+		mss.vma = vma;
+		/* mmap_sem is held in m_start */
+		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+			walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	}
+#endif
 	show_map_vma(m, vma, is_pid);
 
 	seq_printf(m,
@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m,
 		   "KernelPageSize: %8lu kB\n"
 		   "MMUPageSize:    %8lu kB\n"
 		   "Locked:         %8lu kB\n",
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+		   PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
+#else
 		   (vma->vm_end - vma->vm_start) >> 10,
+#endif
 		   mss.resident >> 10,
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 		   mss.shared_clean  >> 10,
@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file
 	char buffer[64];
 	int nid;
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (current->exec_id != m->exec_id) {
+		gr_log_badprocpid("numa_maps");
+		return 0;
+	}
+#endif
+
 	if (!mm)
 		return 0;
 
@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file
 	mpol_to_str(buffer, sizeof(buffer), pol);
 	mpol_cond_put(pol);
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
+#else
 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
+#endif
 
 	if (file) {
 		seq_printf(m, " file=");
-		seq_path(m, &file->f_path, "\n\t= ");
+		seq_path(m, &file->f_path, "\n\t\\= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
 	} else {
diff -ruNp linux-3.13.11/fs/proc/task_nommu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/task_nommu.c
--- linux-3.13.11/fs/proc/task_nommu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/task_nommu.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
 	else
 		bytes += kobjsize(mm);
 	
-	if (current->fs && current->fs->users > 1)
+	if (current->fs && atomic_read(&current->fs->users) > 1)
 		sbytes += kobjsize(current->fs);
 	else
 		bytes += kobjsize(current->fs);
@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_fil
 
 	if (file) {
 		seq_pad(m, ' ');
-		seq_path(m, &file->f_path, "");
+		seq_path(m, &file->f_path, "\n\\");
 	} else if (mm) {
 		pid_t tid = vm_is_stack(priv->task, vma, is_pid);
 
diff -ruNp linux-3.13.11/fs/proc/uptime.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/uptime.c
--- linux-3.13.11/fs/proc/uptime.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/uptime.c	2014-07-09 12:00:15.000000000 +0200
@@ -5,6 +5,7 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
+#include <linux/vserver/cvirt.h>
 #include <asm/cputime.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
@@ -24,6 +25,10 @@ static int uptime_proc_show(struct seq_f
 	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
 	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 	idle.tv_nsec = rem;
+
+	if (vx_flags(VXF_VIRT_UPTIME, 0))
+		vx_vsi_uptime(&uptime, &idle);
+
 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
 			(unsigned long) uptime.tv_sec,
 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
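
And the matching check for the VXF_VIRT_UPTIME handling above, where
vx_vsi_uptime() rewrites both values so a guest sees its own uptime rather than
the host's (sketch, not part of the patch):

/* uptime_read.c - illustrative only, not part of the patch */
#include <stdio.h>

int main(void)
{
	double up, idle;
	FILE *f = fopen("/proc/uptime", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lf %lf", &up, &idle) != 2) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("uptime %.2fs, idle %.2fs\n", up, idle);
	return 0;
}
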
diff -ruNp linux-3.13.11/fs/proc/vmcore.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/vmcore.c
--- linux-3.13.11/fs/proc/vmcore.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc/vmcore.c	2014-07-09 12:00:15.000000000 +0200
@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *bu
 			nr_bytes = count;
 
 		/* If pfn is not ram, return zeros for sparse dump files */
-		if (pfn_is_ram(pfn) == 0)
-			memset(buf, 0, nr_bytes);
-		else {
+		if (pfn_is_ram(pfn) == 0) {
+			if (userbuf) {
+				if (clear_user((char __force_user *)buf, nr_bytes))
+					return -EFAULT;
+			} else
+				memset(buf, 0, nr_bytes);
+		} else {
 			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 						offset, userbuf);
 			if (tmp < 0)
@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct
 static int copy_to(void *target, void *src, size_t size, int userbuf)
 {
 	if (userbuf) {
-		if (copy_to_user((char __user *) target, src, size))
+		if (copy_to_user((char __force_user *) target, src, size))
 			return -EFAULT;
 	} else {
 		memcpy(target, src, size);
@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffe
 		if (*fpos < m->offset + m->size) {
 			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
 			start = m->paddr + *fpos - m->offset;
-			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
+			tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
 			if (tmp < 0)
 				return tmp;
 			buflen -= tsz;
@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffe
 static ssize_t read_vmcore(struct file *file, char __user *buffer,
 			   size_t buflen, loff_t *fpos)
 {
-	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
+	return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
 }
 
 /*
diff -ruNp linux-3.13.11/fs/proc_namespace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc_namespace.c
--- linux-3.13.11/fs/proc_namespace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/proc_namespace.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,6 +44,8 @@ static int show_sb_opts(struct seq_file
 		{ MS_SYNCHRONOUS, ",sync" },
 		{ MS_DIRSYNC, ",dirsync" },
 		{ MS_MANDLOCK, ",mand" },
+		{ MS_TAGGED, ",tag" },
+		{ MS_NOTAGCHECK, ",notagcheck" },
 		{ 0, NULL }
 	};
 	const struct proc_fs_info *fs_infop;
@@ -80,6 +82,38 @@ static inline void mangle(struct seq_fil
 	seq_escape(m, s, " \t\n\\");
 }
 
+#ifdef	CONFIG_VSERVER_EXTRA_MNT_CHECK
+
+static int mnt_is_reachable(struct vfsmount *vfsmnt)
+{
+	struct path root;
+	struct dentry *point;
+	struct mount *mnt = real_mount(vfsmnt);
+	struct mount *root_mnt;
+	int ret;
+
+	if (mnt == mnt->mnt_ns->root)
+		return 1;
+
+	rcu_read_lock();
+	root = current->fs->root;
+	root_mnt = real_mount(root.mnt);
+	point = root.dentry;
+
+	while ((mnt != mnt->mnt_parent) && (mnt != root_mnt)) {
+		point = mnt->mnt_mountpoint;
+		mnt = mnt->mnt_parent;
+	}
+	rcu_read_unlock();
+
+	ret = (mnt == root_mnt) && is_subdir(point, root.dentry);
+	return ret;
+}
+
+#else
+#define	mnt_is_reachable(v)	(1)
+#endif
+
 static void show_type(struct seq_file *m, struct super_block *sb)
 {
 	mangle(m, sb->s_type->name);
@@ -96,6 +130,17 @@ static int show_vfsmnt(struct seq_file *
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 	struct super_block *sb = mnt_path.dentry->d_sb;
 
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
+
+	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+		mnt == current->fs->root.mnt) {
+		seq_puts(m, "/dev/root / ");
+		goto type;
+	}
+
 	if (sb->s_op->show_devname) {
 		err = sb->s_op->show_devname(m, mnt_path.dentry);
 		if (err)
@@ -106,6 +151,7 @@ static int show_vfsmnt(struct seq_file *
 	seq_putc(m, ' ');
 	seq_path(m, &mnt_path, " \t\n\\");
 	seq_putc(m, ' ');
+type:
 	show_type(m, sb);
 	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
 	err = show_sb_opts(m, sb);
@@ -128,6 +174,11 @@ static int show_mountinfo(struct seq_fil
 	struct path root = p->root;
 	int err = 0;
 
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
+
 	seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
 		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
 	if (sb->s_op->show_path)
@@ -187,6 +238,17 @@ static int show_vfsstat(struct seq_file
 	struct super_block *sb = mnt_path.dentry->d_sb;
 	int err = 0;
 
+	if (vx_flags(VXF_HIDE_MOUNT, 0))
+		return SEQ_SKIP;
+	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+		return SEQ_SKIP;
+
+	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+		mnt == current->fs->root.mnt) {
+		seq_puts(m, "device /dev/root mounted on / ");
+		goto type;
+	}
+
 	/* device */
 	if (sb->s_op->show_devname) {
 		seq_puts(m, "device ");
@@ -203,7 +265,7 @@ static int show_vfsstat(struct seq_file
 	seq_puts(m, " mounted on ");
 	seq_path(m, &mnt_path, " \t\n\\");
 	seq_putc(m, ' ');
-
+type:
 	/* file system type */
 	seq_puts(m, "with fstype ");
 	show_type(m, sb);
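
The mnt_is_reachable() helper added above walks from a mount up through its parents and only treats the mount as visible when the walk reaches the caller's root mount and the crossing point still lies below the caller's root dentry (is_subdir()). A rough standalone sketch of the same walk, with made-up types and a plain path-prefix check standing in for struct mount, RCU and is_subdir():

/* Illustrative userspace sketch only, not the kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mnt {
	struct mnt *parent;      /* parent mount; equals itself at the top */
	struct mnt *ns_root;     /* root mount of the mount namespace */
	const char *mountpoint;  /* where this mount sits in its parent */
};

/* crude stand-in for is_subdir(): path-prefix check */
static bool under(const char *path, const char *root)
{
	return strncmp(path, root, strlen(root)) == 0;
}

static bool mnt_reachable(const struct mnt *m, const struct mnt *root_mnt,
			  const char *root_dir)
{
	const char *point = root_dir;

	if (m == m->ns_root)
		return true;             /* namespace root is always visible */

	while (m->parent != m && m != root_mnt) {
		point = m->mountpoint;   /* remember the upward crossing */
		m = m->parent;
	}
	return m == root_mnt && under(point, root_dir);
}

int main(void)
{
	struct mnt host  = { .mountpoint = "/" };
	struct mnt guest = { .parent = &host,  .mountpoint = "/vservers/g1" };
	struct mnt inner = { .parent = &guest, .mountpoint = "/vservers/g1/proc" };
	struct mnt other = { .parent = &host,  .mountpoint = "/vservers/g2" };

	host.parent = &host;
	host.ns_root = guest.ns_root = inner.ns_root = other.ns_root = &host;

	/* viewed from a guest whose root mount is 'guest' at /vservers/g1 */
	printf("inner reachable: %d\n", mnt_reachable(&inner, &guest, "/vservers/g1"));
	printf("other guest reachable: %d\n", mnt_reachable(&other, &guest, "/vservers/g1"));
	return 0;
}
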
diff -ruNp linux-3.13.11/fs/qnx6/qnx6.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/qnx6/qnx6.h
--- linux-3.13.11/fs/qnx6/qnx6.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/qnx6/qnx6.h	2014-07-09 12:00:15.000000000 +0200
@@ -74,7 +74,7 @@ enum {
 	BYTESEX_BE,
 };
 
-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
 {
 	if (sbi->s_bytesex == BYTESEX_LE)
 		return le64_to_cpu((__force __le64)n);
@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct
 		return (__force __fs64)cpu_to_be64(n);
 }
 
-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
 {
 	if (sbi->s_bytesex == BYTESEX_LE)
 		return le32_to_cpu((__force __le32)n);
diff -ruNp linux-3.13.11/fs/quota/dquot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/dquot.c
--- linux-3.13.11/fs/quota/dquot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/dquot.c	2014-07-09 12:00:15.000000000 +0200
@@ -1602,6 +1602,9 @@ int __dquot_alloc_space(struct inode *in
 	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 
+	if ((ret = dl_alloc_space(inode, number)))
+		return ret;
+
 	/*
 	 * First test before acquiring mutex - solves deadlocks when we
 	 * re-enter the quota code and are already holding the mutex
@@ -1657,6 +1660,9 @@ int dquot_alloc_inode(const struct inode
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;
 
+	if ((ret = dl_alloc_inode(inode)))
+		return ret;
+
 	/* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
@@ -1757,6 +1763,8 @@ void __dquot_free_space(struct inode *in
 	struct dquot **dquots = inode->i_dquot;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 
+	dl_free_space(inode, number);
+
 	/* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode)) {
@@ -1801,6 +1809,8 @@ void dquot_free_inode(const struct inode
 	struct dquot_warn warn[MAXQUOTAS];
 	struct dquot * const *dquots = inode->i_dquot;
 
+	dl_free_inode(inode);
+
 	/* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
 	if (!dquot_active(inode))
diff -ruNp linux-3.13.11/fs/quota/netlink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/netlink.c
--- linux-3.13.11/fs/quota/netlink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/netlink.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,7 @@ static struct genl_family quota_genl_fam
 void quota_send_warning(struct kqid qid, dev_t dev,
 			const char warntype)
 {
-	static atomic_t seq;
+	static atomic_unchecked_t seq;
 	struct sk_buff *skb;
 	void *msg_head;
 	int ret;
@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid,
 		  "VFS: Not enough memory to send quota warning.\n");
 		return;
 	}
-	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+	msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
 			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
 	if (!msg_head) {
 		printk(KERN_ERR
diff -ruNp linux-3.13.11/fs/quota/quota.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/quota.c
--- linux-3.13.11/fs/quota/quota.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/quota/quota.c	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,7 @@
 #include <linux/fs.h>
 #include <linux/namei.h>
 #include <linux/slab.h>
+#include <linux/vs_context.h>
 #include <asm/current.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
@@ -38,7 +39,7 @@ static int check_quotactl_permission(str
 			break;
 		/*FALLTHROUGH*/
 	default:
-		if (!capable(CAP_SYS_ADMIN))
+		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
 			return -EPERM;
 	}
 
@@ -338,6 +339,46 @@ static int do_quotactl(struct super_bloc
 
 #ifdef CONFIG_BLOCK
 
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+
+#include <linux/vroot.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/vserver/debug.h>
+
+static vroot_grb_func *vroot_get_real_bdev = NULL;
+
+static DEFINE_SPINLOCK(vroot_grb_lock);
+
+int register_vroot_grb(vroot_grb_func *func) {
+	int ret = -EBUSY;
+
+	spin_lock(&vroot_grb_lock);
+	if (!vroot_get_real_bdev) {
+		vroot_get_real_bdev = func;
+		ret = 0;
+	}
+	spin_unlock(&vroot_grb_lock);
+	return ret;
+}
+EXPORT_SYMBOL(register_vroot_grb);
+
+int unregister_vroot_grb(vroot_grb_func *func) {
+	int ret = -EINVAL;
+
+	spin_lock(&vroot_grb_lock);
+	if (vroot_get_real_bdev) {
+		vroot_get_real_bdev = NULL;
+		ret = 0;
+	}
+	spin_unlock(&vroot_grb_lock);
+	return ret;
+}
+EXPORT_SYMBOL(unregister_vroot_grb);
+
+#endif
+
 /* Return 1 if 'cmd' will block on frozen filesystem */
 static int quotactl_cmd_write(int cmd)
 {
@@ -373,6 +414,22 @@ static struct super_block *quotactl_bloc
 	putname(tmp);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+	if (bdev && bdev->bd_inode &&
+		imajor(bdev->bd_inode) == VROOT_MAJOR) {
+		struct block_device *bdnew = (void *)-EINVAL;
+
+		if (vroot_get_real_bdev)
+			bdnew = vroot_get_real_bdev(bdev);
+		else
+			vxdprintk(VXD_CBIT(misc, 0),
+					"vroot_get_real_bdev not set");
+		bdput(bdev);
+		if (IS_ERR(bdnew))
+			return ERR_PTR(PTR_ERR(bdnew));
+		bdev = bdnew;
+	}
+#endif
 	if (quotactl_cmd_write(cmd))
 		sb = get_super_thawed(bdev);
 	else
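
The register_vroot_grb()/unregister_vroot_grb() pair added above is a single-slot callback registry: the vroot module installs one translation hook under a spinlock, and quotactl_block() calls through it when it sees a VROOT_MAJOR device. A hedged userspace sketch of the same pattern (a pthread mutex and a fake callback type stand in for the kernel spinlock and block-device types):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

typedef int grb_func(int bdev);             /* stand-in callback type */

static grb_func *grb_slot;                  /* NULL until registered */
static pthread_mutex_t grb_lock = PTHREAD_MUTEX_INITIALIZER;

static int register_grb(grb_func *fn)
{
	int ret = -EBUSY;

	pthread_mutex_lock(&grb_lock);
	if (!grb_slot) {                    /* only one provider allowed */
		grb_slot = fn;
		ret = 0;
	}
	pthread_mutex_unlock(&grb_lock);
	return ret;
}

static int unregister_grb(grb_func *fn)
{
	int ret = -EINVAL;

	(void)fn;                           /* the kernel version also ignores it */
	pthread_mutex_lock(&grb_lock);
	if (grb_slot) {
		grb_slot = NULL;
		ret = 0;
	}
	pthread_mutex_unlock(&grb_lock);
	return ret;
}

static int real_bdev(int bdev) { return bdev + 100; }  /* fake translation */

int main(void)
{
	printf("register: %d\n", register_grb(real_bdev));
	printf("register again: %d\n", register_grb(real_bdev));  /* -EBUSY */
	if (grb_slot)
		printf("translated: %d\n", grb_slot(7));
	printf("unregister: %d\n", unregister_grb(real_bdev));
	return 0;
}
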
diff -ruNp linux-3.13.11/fs/read_write.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/read_write.c
--- linux-3.13.11/fs/read_write.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/read_write.c	2014-07-09 12:00:15.000000000 +0200
@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	p = (__force const char __user *)buf;
+	p = (const char __force_user *)buf;
 	if (count > MAX_RW_COUNT)
 		count =  MAX_RW_COUNT;
 	if (file->f_op->write)
diff -ruNp linux-3.13.11/fs/readdir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/readdir.c
--- linux-3.13.11/fs/readdir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/readdir.c	2014-07-09 12:00:15.000000000 +0200
@@ -17,6 +17,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -69,6 +70,7 @@ struct old_linux_dirent {
 struct readdir_callback {
 	struct dir_context ctx;
 	struct old_linux_dirent __user * dirent;
+	struct file * file;
 	int result;
 };
 
@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, cons
 		buf->result = -EOVERFLOW;
 		return -EOVERFLOW;
 	}
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	buf->result++;
 	dirent = buf->dirent;
 	if (!access_ok(VERIFY_WRITE, dirent,
@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (buf.result)
 		error = buf.result;
@@ -142,6 +149,7 @@ struct getdents_callback {
 	struct dir_context ctx;
 	struct linux_dirent __user * current_dir;
 	struct linux_dirent __user * previous;
+	struct file * file;
 	int count;
 	int error;
 };
@@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
 		buf->error = -EOVERFLOW;
 		return -EOVERFLOW;
 	}
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	dirent = buf->previous;
 	if (dirent) {
 		if (__put_user(offset, &dirent->d_off))
@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
@@ -226,6 +239,7 @@ struct getdents_callback64 {
 	struct dir_context ctx;
 	struct linux_dirent64 __user * current_dir;
 	struct linux_dirent64 __user * previous;
+	struct file *file;
 	int count;
 	int error;
 };
@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const
 	buf->error = -EINVAL;	/* only used if we fail.. */
 	if (reclen > buf->count)
 		return -EINVAL;
+
+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
+		return 0;
+
 	dirent = buf->previous;
 	if (dirent) {
 		if (__put_user(offset, &dirent->d_off))
@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
 	if (!f.file)
 		return -EBADF;
 
+	buf.file = f.file;
 	error = iterate_dir(f.file, &buf.ctx);
 	if (error >= 0)
 		error = buf.error;
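
The readdir.c hunks store the struct file in each callback context so gr_acl_handle_filldir() can veto individual directory entries; a vetoed entry is simply skipped (the callback returns 0 and iteration continues) rather than failing the whole getdents call. A minimal sketch of that filter-in-the-fill-callback shape, with a made-up policy hook in place of the grsecurity one:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dir_ctx {
	const char *who;        /* stands in for the struct file / credentials */
	int emitted;
};

/* placeholder policy hook: hide names starting with "secret" */
static bool policy_allows(const struct dir_ctx *ctx, const char *name)
{
	(void)ctx;
	return strncmp(name, "secret", 6) != 0;
}

/* fill callback: returning 0 skips the entry but keeps iterating */
static int filldir(struct dir_ctx *ctx, const char *name)
{
	if (!policy_allows(ctx, name))
		return 0;                /* entry vetoed, not an error */
	printf("%s\n", name);
	ctx->emitted++;
	return 0;
}

int main(void)
{
	const char *entries[] = { ".", "..", "passwd", "secret.key", "motd" };
	struct dir_ctx ctx = { .who = "guest", .emitted = 0 };

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		filldir(&ctx, entries[i]);
	printf("emitted %d entries\n", ctx.emitted);
	return 0;
}
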
diff -ruNp linux-3.13.11/fs/reiserfs/do_balan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/do_balan.c
--- linux-3.13.11/fs/reiserfs/do_balan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/do_balan.c	2014-07-09 12:00:15.000000000 +0200
@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
 		return;
 	}
 
-	atomic_inc(&(fs_generation(tb->tb_sb)));
+	atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
 	do_balance_starts(tb);
 
 	/* balance leaf returns 0 except if combining L R and S into
diff -ruNp linux-3.13.11/fs/reiserfs/item_ops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/item_ops.c
--- linux-3.13.11/fs/reiserfs/item_ops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/item_ops.c	2014-07-09 12:00:15.000000000 +0200
@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct vir
 }
 
 static struct item_operations errcatch_ops = {
-	errcatch_bytes_number,
-	errcatch_decrement_key,
-	errcatch_is_left_mergeable,
-	errcatch_print_item,
-	errcatch_check_item,
+	.bytes_number = errcatch_bytes_number,
+	.decrement_key = errcatch_decrement_key,
+	.is_left_mergeable = errcatch_is_left_mergeable,
+	.print_item = errcatch_print_item,
+	.check_item = errcatch_check_item,
 
-	errcatch_create_vi,
-	errcatch_check_left,
-	errcatch_check_right,
-	errcatch_part_size,
-	errcatch_unit_num,
-	errcatch_print_vi
+	.create_vi = errcatch_create_vi,
+	.check_left = errcatch_check_left,
+	.check_right = errcatch_check_right,
+	.part_size = errcatch_part_size,
+	.unit_num = errcatch_unit_num,
+	.print_vi = errcatch_print_vi
 };
 
 //////////////////////////////////////////////////////////////////////////////
diff -ruNp linux-3.13.11/fs/reiserfs/procfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/procfs.c
--- linux-3.13.11/fs/reiserfs/procfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/procfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m
 		   "SMALL_TAILS " : "NO_TAILS ",
 		   replay_only(sb) ? "REPLAY_ONLY " : "",
 		   convert_reiserfs(sb) ? "CONV " : "",
-		   atomic_read(&r->s_generation_counter),
+		   atomic_read_unchecked(&r->s_generation_counter),
 		   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
 		   SF(s_do_balance), SF(s_unneeded_left_neighbor),
 		   SF(s_good_search_by_key_reada), SF(s_bmaps),
diff -ruNp linux-3.13.11/fs/reiserfs/reiserfs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/reiserfs.h
--- linux-3.13.11/fs/reiserfs/reiserfs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/reiserfs/reiserfs.h	2014-07-09 12:00:15.000000000 +0200
@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
 	/* Comment? -Hans */
 	wait_queue_head_t s_wait;
 	/* To be obsoleted soon by per buffer seals.. -Hans */
-	atomic_t s_generation_counter;	// increased by one every time the
+	atomic_unchecked_t s_generation_counter;	// increased by one every time the
 	// tree gets re-balanced
 	unsigned long s_properties;	/* File system properties. Currently holds
 					   on-disk FS format */
@@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset
 #define REISERFS_USER_MEM		1	/* reiserfs user memory mode            */
 
 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
-#define get_generation(s) atomic_read (&fs_generation(s))
+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
 #define FILESYSTEM_CHANGED_TB(tb)  (get_generation((tb)->tb_sb) != (tb)->fs_gen)
 #define __fs_changed(gen,s) (gen != get_generation (s))
 #define fs_changed(gen,s)		\
diff -ruNp linux-3.13.11/fs/select.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/select.c
--- linux-3.13.11/fs/select.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/select.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
+#include <linux/security.h>
 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
 #include <linux/file.h>
 #include <linux/fdtable.h>
@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *uf
  	struct poll_list *walk = head;
  	unsigned long todo = nfds;
 
+	gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
 	if (nfds > rlimit(RLIMIT_NOFILE))
 		return -EINVAL;
 
diff -ruNp linux-3.13.11/fs/seq_file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/seq_file.c
--- linux-3.13.11/fs/seq_file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/seq_file.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -60,6 +61,9 @@ int seq_open(struct file *file, const st
 #ifdef CONFIG_USER_NS
 	p->user_ns = file->f_cred->user_ns;
 #endif
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	p->exec_id = current->exec_id;
+#endif
 
 	/*
 	 * Wrappers around seq_open(e.g. swaps_open) need to be
@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m,
 		return 0;
 	}
 	if (!m->buf) {
-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
 		if (!m->buf)
 			return -ENOMEM;
 	}
@@ -137,7 +141,7 @@ Eoverflow:
 	m->op->stop(m, p);
 	kfree(m->buf);
 	m->count = 0;
-	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
 	return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -153,7 +157,7 @@ Eoverflow:
 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 {
 	struct seq_file *m = file->private_data;
-	size_t copied = 0;
+	ssize_t copied = 0;
 	loff_t pos;
 	size_t n;
 	void *p;
@@ -192,7 +196,7 @@ ssize_t seq_read(struct file *file, char
 
 	/* grab buffer if we didn't have one */
 	if (!m->buf) {
-		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
 		if (!m->buf)
 			goto Enomem;
 	}
@@ -234,7 +238,7 @@ ssize_t seq_read(struct file *file, char
 		m->op->stop(m, p);
 		kfree(m->buf);
 		m->count = 0;
-		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
 		if (!m->buf)
 			goto Enomem;
 		m->version = 0;
@@ -584,7 +588,7 @@ static void single_stop(struct seq_file
 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
 		void *data)
 {
-	struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
+	seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
 	int res = -ENOMEM;
 
 	if (op) {
diff -ruNp linux-3.13.11/fs/splice.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/splice.c
--- linux-3.13.11/fs/splice.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/splice.c	2014-07-09 12:00:15.000000000 +0200
@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode
 	pipe_lock(pipe);
 
 	for (;;) {
-		if (!pipe->readers) {
+		if (!atomic_read(&pipe->readers)) {
 			send_sig(SIGPIPE, current, 0);
 			if (!ret)
 				ret = -EPIPE;
@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode
 			page_nr++;
 			ret += buf->len;
 
-			if (pipe->files)
+			if (atomic_read(&pipe->files))
 				do_wakeup = 1;
 
 			if (!--spd->nr_pages)
@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode
 			do_wakeup = 0;
 		}
 
-		pipe->waiting_writers++;
+		atomic_inc(&pipe->waiting_writers);
 		pipe_wait(pipe);
-		pipe->waiting_writers--;
+		atomic_dec(&pipe->waiting_writers);
 	}
 
 	pipe_unlock(pipe);
@@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file
 	old_fs = get_fs();
 	set_fs(get_ds());
 	/* The cast to a user pointer is valid due to the set_fs() */
-	res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
+	res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
 	set_fs(old_fs);
 
 	return res;
@@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file,
 	old_fs = get_fs();
 	set_fs(get_ds());
 	/* The cast to a user pointer is valid due to the set_fs() */
-	res = vfs_write(file, (__force const char __user *)buf, count, &pos);
+	res = vfs_write(file, (const char __force_user *)buf, count, &pos);
 	set_fs(old_fs);
 
 	return res;
@@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct
 			goto err;
 
 		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
-		vec[i].iov_base = (void __user *) page_address(page);
+		vec[i].iov_base = (void __force_user *) page_address(page);
 		vec[i].iov_len = this_len;
 		spd.pages[i] = page;
 		spd.nr_pages++;
@@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_in
 			ops->release(pipe, buf);
 			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
 			pipe->nrbufs--;
-			if (pipe->files)
+			if (atomic_read(&pipe->files))
 				sd->need_wakeup = true;
 		}
 
@@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
 {
 	while (!pipe->nrbufs) {
-		if (!pipe->writers)
+		if (!atomic_read(&pipe->writers))
 			return 0;
 
-		if (!pipe->waiting_writers && sd->num_spliced)
+		if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
 			return 0;
 
 		if (sd->flags & SPLICE_F_NONBLOCK)
@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct fi
 		 * out of the pipe right after the splice_to_pipe(). So set
 		 * PIPE_READERS appropriately.
 		 */
-		pipe->readers = 1;
+		atomic_set(&pipe->readers, 1);
 
 		current->splice_pipe = pipe;
 	}
@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const st
 
 			partial[buffers].offset = off;
 			partial[buffers].len = plen;
+			partial[buffers].private = 0;
 
 			off = 0;
 			len -= plen;
@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_
 			ret = -ERESTARTSYS;
 			break;
 		}
-		if (!pipe->writers)
+		if (!atomic_read(&pipe->writers))
 			break;
-		if (!pipe->waiting_writers) {
+		if (!atomic_read(&pipe->waiting_writers)) {
 			if (flags & SPLICE_F_NONBLOCK) {
 				ret = -EAGAIN;
 				break;
@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_
 	pipe_lock(pipe);
 
 	while (pipe->nrbufs >= pipe->buffers) {
-		if (!pipe->readers) {
+		if (!atomic_read(&pipe->readers)) {
 			send_sig(SIGPIPE, current, 0);
 			ret = -EPIPE;
 			break;
@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_
 			ret = -ERESTARTSYS;
 			break;
 		}
-		pipe->waiting_writers++;
+		atomic_inc(&pipe->waiting_writers);
 		pipe_wait(pipe);
-		pipe->waiting_writers--;
+		atomic_dec(&pipe->waiting_writers);
 	}
 
 	pipe_unlock(pipe);
@@ -1880,14 +1881,14 @@ retry:
 	pipe_double_lock(ipipe, opipe);
 
 	do {
-		if (!opipe->readers) {
+		if (!atomic_read(&opipe->readers)) {
 			send_sig(SIGPIPE, current, 0);
 			if (!ret)
 				ret = -EPIPE;
 			break;
 		}
 
-		if (!ipipe->nrbufs && !ipipe->writers)
+		if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
 			break;
 
 		/*
@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_i
 	pipe_double_lock(ipipe, opipe);
 
 	do {
-		if (!opipe->readers) {
+		if (!atomic_read(&opipe->readers)) {
 			send_sig(SIGPIPE, current, 0);
 			if (!ret)
 				ret = -EPIPE;
@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_i
 	 * return EAGAIN if we have the potential of some data in the
 	 * future, otherwise just return 0
 	 */
-	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
+	if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
 		ret = -EAGAIN;
 
 	pipe_unlock(ipipe);
diff -ruNp linux-3.13.11/fs/stat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/stat.c
--- linux-3.13.11/fs/stat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/stat.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,10 +26,16 @@ void generic_fillattr(struct inode *inod
 	stat->nlink = inode->i_nlink;
 	stat->uid = inode->i_uid;
 	stat->gid = inode->i_gid;
+	stat->tag = inode->i_tag;
 	stat->rdev = inode->i_rdev;
 	stat->size = i_size_read(inode);
-	stat->atime = inode->i_atime;
-	stat->mtime = inode->i_mtime;
+	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
+		stat->atime = inode->i_ctime;
+		stat->mtime = inode->i_ctime;
+	} else {
+		stat->atime = inode->i_atime;
+		stat->mtime = inode->i_mtime;
+	}
 	stat->ctime = inode->i_ctime;
 	stat->blksize = (1 << inode->i_blkbits);
 	stat->blocks = inode->i_blocks;
@@ -52,9 +58,16 @@ EXPORT_SYMBOL(generic_fillattr);
 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
 {
 	struct inode *inode = path->dentry->d_inode;
+	int retval;
 
-	if (inode->i_op->getattr)
-		return inode->i_op->getattr(path->mnt, path->dentry, stat);
+	if (inode->i_op->getattr) {
+		retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
+		if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
+			stat->atime = stat->ctime;
+			stat->mtime = stat->ctime;
+		}
+		return retval;
+	}
 
 	generic_fillattr(inode, stat);
 	return 0;
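
The fs/stat.c changes report ctime in place of atime/mtime on "side-channel" device nodes for callers lacking CAP_MKNOD, so unprivileged stat() polling cannot watch other users' tty/ptmx activity. The masking itself reduces to a small copy, sketched here with stand-in types (is_sidechannel_device() and capable_nolog() become plain flags):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct kstat_lite {
	struct timespec atime, mtime, ctime;
};

/* hide real access/modification times from unprivileged callers */
static void mask_times(struct kstat_lite *st, bool sidechannel, bool privileged)
{
	if (sidechannel && !privileged) {
		st->atime = st->ctime;
		st->mtime = st->ctime;
	}
}

int main(void)
{
	struct kstat_lite st = {
		.atime = { .tv_sec = 1000 },
		.mtime = { .tv_sec = 2000 },
		.ctime = { .tv_sec = 3000 },
	};

	mask_times(&st, true, false);
	printf("atime=%ld mtime=%ld\n", (long)st.atime.tv_sec, (long)st.mtime.tv_sec);
	return 0;
}
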
diff -ruNp linux-3.13.11/fs/statfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/statfs.c
--- linux-3.13.11/fs/statfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/statfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -7,6 +7,8 @@
 #include <linux/statfs.h>
 #include <linux/security.h>
 #include <linux/uaccess.h>
+#include <linux/vs_base.h>
+#include <linux/vs_dlimit.h>
 #include "internal.h"
 
 static int flags_by_mnt(int mnt_flags)
@@ -60,6 +62,8 @@ static int statfs_by_dentry(struct dentr
 	retval = dentry->d_sb->s_op->statfs(dentry, buf);
 	if (retval == 0 && buf->f_frsize == 0)
 		buf->f_frsize = buf->f_bsize;
+	if (!vx_check(0, VS_ADMIN|VS_WATCH))
+		vx_vsi_statfs(dentry->d_sb, buf);
 	return retval;
 }
 
diff -ruNp linux-3.13.11/fs/super.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/super.c
--- linux-3.13.11/fs/super.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/super.c	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,8 @@
 #include <linux/cleancache.h>
 #include <linux/fsnotify.h>
 #include <linux/lockdep.h>
+#include <linux/magic.h>
+#include <linux/vs_context.h>
 #include "internal.h"
 
 
@@ -1098,6 +1100,13 @@ mount_fs(struct file_system_type *type,
 	WARN_ON(sb->s_bdi == &default_backing_dev_info);
 	sb->s_flags |= MS_BORN;
 
+	error = -EPERM;
+	if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) &&
+		!sb->s_bdev &&
+		(sb->s_magic != PROC_SUPER_MAGIC) &&
+		(sb->s_magic != DEVPTS_SUPER_MAGIC))
+		goto out_sb;
+
 	error = security_sb_kern_mount(sb, flags, secdata);
 	if (error)
 		goto out_sb;
diff -ruNp linux-3.13.11/fs/sysfs/dir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/dir.c
--- linux-3.13.11/fs/sysfs/dir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/dir.c	2014-07-09 12:00:15.000000000 +0200
@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
  *
  *	Returns 31 bit hash of ns + name (so it fits in an off_t )
  */
-static unsigned int sysfs_name_hash(const char *name, const void *ns)
+static unsigned int sysfs_name_hash(const unsigned char *name, const void *ns)
 {
 	unsigned long hash = init_name_hash();
 	unsigned int len = strlen(name);
@@ -676,6 +676,18 @@ static int create_dir(struct kobject *ko
 	struct sysfs_dirent *sd;
 	int rc;
 
+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
+	const char *parent_name = parent_sd->s_name;
+
+	mode = S_IFDIR | S_IRWXU;
+
+	if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
+	    (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
+	    (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
+	    (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
+		mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+#endif
+
 	/* allocate */
 	sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
 	if (!sd)
diff -ruNp linux-3.13.11/fs/sysfs/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/file.c
--- linux-3.13.11/fs/sysfs/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/file.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,7 +42,7 @@ static DEFINE_MUTEX(sysfs_open_file_mute
 
 struct sysfs_open_dirent {
 	atomic_t		refcnt;
-	atomic_t		event;
+	atomic_unchecked_t	event;
 	wait_queue_head_t	poll;
 	struct list_head	files; /* goes through sysfs_open_file.list */
 };
@@ -112,7 +112,7 @@ static int sysfs_seq_show(struct seq_fil
 		return -ENODEV;
 	}
 
-	of->event = atomic_read(&of->sd->s_attr.open->event);
+	of->event = atomic_read_unchecked(&of->sd->s_attr.open->event);
 
 	/*
 	 * Lookup @ops and invoke show().  Control may reach here via seq
@@ -365,12 +365,12 @@ static int sysfs_bin_page_mkwrite(struct
 	return ret;
 }
 
-static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
-			    void *buf, int len, int write)
+static ssize_t sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
+			    void *buf, size_t len, int write)
 {
 	struct file *file = vma->vm_file;
 	struct sysfs_open_file *of = sysfs_of(file);
-	int ret;
+	ssize_t ret;
 
 	if (!of->vm_ops)
 		return -EINVAL;
@@ -564,7 +564,7 @@ static int sysfs_get_open_dirent(struct
 		return -ENOMEM;
 
 	atomic_set(&new_od->refcnt, 0);
-	atomic_set(&new_od->event, 1);
+	atomic_set_unchecked(&new_od->event, 1);
 	init_waitqueue_head(&new_od->poll);
 	INIT_LIST_HEAD(&new_od->files);
 	goto retry;
@@ -768,7 +768,7 @@ static unsigned int sysfs_poll(struct fi
 
 	sysfs_put_active(attr_sd);
 
-	if (of->event != atomic_read(&od->event))
+	if (of->event != atomic_read_unchecked(&od->event))
 		goto trigger;
 
 	return DEFAULT_POLLMASK;
@@ -787,7 +787,7 @@ void sysfs_notify_dirent(struct sysfs_di
 	if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
 		od = sd->s_attr.open;
 		if (od) {
-			atomic_inc(&od->event);
+			atomic_inc_unchecked(&od->event);
 			wake_up_interruptible(&od->poll);
 		}
 	}
diff -ruNp linux-3.13.11/fs/sysfs/mount.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/mount.c
--- linux-3.13.11/fs/sysfs/mount.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/mount.c	2014-07-09 12:00:15.000000000 +0200
@@ -48,7 +48,7 @@ static int sysfs_fill_super(struct super
 
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = SYSFS_MAGIC;
+	sb->s_magic = SYSFS_SUPER_MAGIC;
 	sb->s_op = &sysfs_ops;
 	sb->s_time_gran = 1;
 
diff -ruNp linux-3.13.11/fs/sysfs/symlink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/symlink.c
--- linux-3.13.11/fs/sysfs/symlink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysfs/symlink.c	2014-07-09 12:00:15.000000000 +0200
@@ -314,7 +314,7 @@ static void *sysfs_follow_link(struct de
 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
 			   void *cookie)
 {
-	char *page = nd_get_link(nd);
+	const char *page = nd_get_link(nd);
 	if (!IS_ERR(page))
 		free_page((unsigned long)page);
 }
diff -ruNp linux-3.13.11/fs/sysv/sysv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysv/sysv.h
--- linux-3.13.11/fs/sysv/sysv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/sysv/sysv.h	2014-07-09 12:00:15.000000000 +0200
@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
 #endif
 }
 
-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
 {
 	if (sbi->s_bytesex == BYTESEX_PDP)
 		return PDP_swab((__force __u32)n);
diff -ruNp linux-3.13.11/fs/ubifs/io.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ubifs/io.c
--- linux-3.13.11/fs/ubifs/io.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ubifs/io.c	2014-07-09 12:00:15.000000000 +0200
@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *
 	return err;
 }
 
-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
 {
 	int err;
 
diff -ruNp linux-3.13.11/fs/udf/misc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/udf/misc.c
--- linux-3.13.11/fs/udf/misc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/udf/misc.c	2014-07-09 12:00:15.000000000 +0200
@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t id
 
 u8 udf_tag_checksum(const struct tag *t)
 {
-	u8 *data = (u8 *)t;
+	const u8 *data = (const u8 *)t;
 	u8 checksum = 0;
 	int i;
 	for (i = 0; i < sizeof(struct tag); ++i)
diff -ruNp linux-3.13.11/fs/ufs/swab.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ufs/swab.h
--- linux-3.13.11/fs/ufs/swab.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/ufs/swab.h	2014-07-09 12:00:15.000000000 +0200
@@ -22,7 +22,7 @@ enum {
 	BYTESEX_BE
 };
 
-static inline u64
+static inline u64 __intentional_overflow(-1)
 fs64_to_cpu(struct super_block *sbp, __fs64 n)
 {
 	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64
 		return (__force __fs64)cpu_to_be64(n);
 }
 
-static inline u32
+static inline u32 __intentional_overflow(-1)
 fs32_to_cpu(struct super_block *sbp, __fs32 n)
 {
 	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
diff -ruNp linux-3.13.11/fs/utimes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/utimes.c
--- linux-3.13.11/fs/utimes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/utimes.c	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,7 @@
 #include <linux/compiler.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/security.h>
 #include <linux/linkage.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -8,6 +9,8 @@
 #include <linux/stat.h>
 #include <linux/utime.h>
 #include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/vs_cowbl.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
@@ -52,13 +55,19 @@ static int utimes_common(struct path *pa
 {
 	int error;
 	struct iattr newattrs;
-	struct inode *inode = path->dentry->d_inode;
 	struct inode *delegated_inode = NULL;
+	struct inode *inode;
+
+	error = cow_check_and_break(path);
+	if (error)
+		goto out;
 
 	error = mnt_want_write(path->mnt);
 	if (error)
 		goto out;
 
+	inode = path->dentry->d_inode;
+
 	if (times && times[0].tv_nsec == UTIME_NOW &&
 		     times[1].tv_nsec == UTIME_NOW)
 		times = NULL;
@@ -103,6 +112,12 @@ static int utimes_common(struct path *pa
 		}
 	}
 retry_deleg:
+
+	if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
+		error = -EACCES;
+		goto mnt_drop_write_and_out;
+	}
+
 	mutex_lock(&inode->i_mutex);
 	error = notify_change(path->dentry, &newattrs, &delegated_inode);
 	mutex_unlock(&inode->i_mutex);
diff -ruNp linux-3.13.11/fs/xattr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xattr.c
--- linux-3.13.11/fs/xattr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xattr.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,7 @@
 #include <linux/audit.h>
 #include <linux/vmalloc.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/mount.h>
 
 #include <asm/uaccess.h>
 
@@ -52,7 +53,7 @@ xattr_permission(struct inode *inode, co
 	 * The trusted.* namespace can only be accessed by privileged users.
 	 */
 	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
-		if (!capable(CAP_SYS_ADMIN))
+		if (!vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED))
 			return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
 		return 0;
 	}
@@ -227,6 +228,27 @@ int vfs_xattr_cmp(struct dentry *dentry,
 	return rc;
 }
 
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t
+pax_getxattr(struct dentry *dentry, void *value, size_t size)
+{
+	struct inode *inode = dentry->d_inode;
+	ssize_t error;
+
+	error = inode_permission(inode, MAY_EXEC);
+	if (error)
+		return error;
+
+	if (inode->i_op->getxattr)
+		error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
+	else
+		error = -EOPNOTSUPP;
+
+	return error;
+}
+EXPORT_SYMBOL(pax_getxattr);
+#endif
+
 ssize_t
 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 {
@@ -319,7 +341,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
  * Extended attribute SET operations
  */
 static long
-setxattr(struct dentry *d, const char __user *name, const void __user *value,
+setxattr(struct path *path, const char __user *name, const void __user *value,
 	 size_t size, int flags)
 {
 	int error;
@@ -355,7 +377,12 @@ setxattr(struct dentry *d, const char __
 			posix_acl_fix_xattr_from_user(kvalue, size);
 	}
 
-	error = vfs_setxattr(d, kname, kvalue, size, flags);
+	if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
+		error = -EACCES;
+		goto out;
+	}
+
+	error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
 out:
 	if (vvalue)
 		vfree(vvalue);
@@ -377,7 +404,7 @@ retry:
 		return error;
 	error = mnt_want_write(path.mnt);
 	if (!error) {
-		error = setxattr(path.dentry, name, value, size, flags);
+		error = setxattr(&path, name, value, size, flags);
 		mnt_drop_write(path.mnt);
 	}
 	path_put(&path);
@@ -401,7 +428,7 @@ retry:
 		return error;
 	error = mnt_want_write(path.mnt);
 	if (!error) {
-		error = setxattr(path.dentry, name, value, size, flags);
+		error = setxattr(&path, name, value, size, flags);
 		mnt_drop_write(path.mnt);
 	}
 	path_put(&path);
@@ -416,16 +443,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
 		const void __user *,value, size_t, size, int, flags)
 {
 	struct fd f = fdget(fd);
-	struct dentry *dentry;
 	int error = -EBADF;
 
 	if (!f.file)
 		return error;
-	dentry = f.file->f_path.dentry;
-	audit_inode(NULL, dentry, 0);
+	audit_inode(NULL, f.file->f_path.dentry, 0);
 	error = mnt_want_write_file(f.file);
 	if (!error) {
-		error = setxattr(dentry, name, value, size, flags);
+		error = setxattr(&f.file->f_path, name, value, size, flags);
 		mnt_drop_write_file(f.file);
 	}
 	fdput(f);
@@ -626,7 +651,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, cha
  * Extended attribute REMOVE operations
  */
 static long
-removexattr(struct dentry *d, const char __user *name)
+removexattr(struct path *path, const char __user *name)
 {
 	int error;
 	char kname[XATTR_NAME_MAX + 1];
@@ -637,7 +662,10 @@ removexattr(struct dentry *d, const char
 	if (error < 0)
 		return error;
 
-	return vfs_removexattr(d, kname);
+	if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
+		return -EACCES;
+
+	return vfs_removexattr(path->dentry, kname);
 }
 
 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
@@ -652,7 +680,7 @@ retry:
 		return error;
 	error = mnt_want_write(path.mnt);
 	if (!error) {
-		error = removexattr(path.dentry, name);
+		error = removexattr(&path, name);
 		mnt_drop_write(path.mnt);
 	}
 	path_put(&path);
@@ -675,7 +703,7 @@ retry:
 		return error;
 	error = mnt_want_write(path.mnt);
 	if (!error) {
-		error = removexattr(path.dentry, name);
+		error = removexattr(&path, name);
 		mnt_drop_write(path.mnt);
 	}
 	path_put(&path);
@@ -689,16 +717,16 @@ retry:
 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
 {
 	struct fd f = fdget(fd);
-	struct dentry *dentry;
+	struct path *path;
 	int error = -EBADF;
 
 	if (!f.file)
 		return error;
-	dentry = f.file->f_path.dentry;
-	audit_inode(NULL, dentry, 0);
+	path = &f.file->f_path;
+	audit_inode(NULL, path->dentry, 0);
 	error = mnt_want_write_file(f.file);
 	if (!error) {
-		error = removexattr(dentry, name);
+		error = removexattr(path, name);
 		mnt_drop_write_file(f.file);
 	}
 	fdput(f);
diff -ruNp linux-3.13.11/fs/xattr_acl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xattr_acl.c
--- linux-3.13.11/fs/xattr_acl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xattr_acl.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <linux/posix_acl_xattr.h>
 #include <linux/gfp.h>
 #include <linux/user_namespace.h>
+#include <linux/grsecurity.h>
 
 /*
  * Fix up the uids and gids in posix acl extended attributes in place.
@@ -76,11 +77,12 @@ struct posix_acl *
 posix_acl_from_xattr(struct user_namespace *user_ns,
 		     const void *value, size_t size)
 {
-	posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
-	posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
+	const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
+	const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
 	int count;
 	struct posix_acl *acl;
 	struct posix_acl_entry *acl_e;
+	umode_t umask = gr_acl_umask();
 
 	if (!value)
 		return NULL;
@@ -106,12 +108,18 @@ posix_acl_from_xattr(struct user_namespa
 
 		switch(acl_e->e_tag) {
 			case ACL_USER_OBJ:
+				acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
+				break;
 			case ACL_GROUP_OBJ:
 			case ACL_MASK:
+				acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
+				break;
 			case ACL_OTHER:
+				acl_e->e_perm &= ~(umask & S_IRWXO);
 				break;
 
 			case ACL_USER:
+				acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
 				acl_e->e_uid =
 					make_kuid(user_ns,
 						  le32_to_cpu(entry->e_id));
@@ -119,6 +127,7 @@ posix_acl_from_xattr(struct user_namespa
 					goto fail;
 				break;
 			case ACL_GROUP:
+				acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
 				acl_e->e_gid =
 					make_kgid(user_ns,
 						  le32_to_cpu(entry->e_id));
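
The xattr_acl.c hunk clamps ACL entries read from xattrs with a grsecurity-supplied umask: user-class umask bits are shifted down by 6, group-class bits by 3, other-class bits not at all, and the result is cleared from the entry's permissions. For instance, with umask 027 a group entry keeps read/execute but loses write, and an other entry loses everything. A standalone illustration of the bit arithmetic:

#include <stdio.h>
#include <sys/stat.h>

/* clear the umask-forbidden bits from a 3-bit ACL permission value */
static unsigned int clamp_user(unsigned int perm, unsigned int umask)
{
	return perm & ~((umask & S_IRWXU) >> 6);
}

static unsigned int clamp_group(unsigned int perm, unsigned int umask)
{
	return perm & ~((umask & S_IRWXG) >> 3);
}

static unsigned int clamp_other(unsigned int perm, unsigned int umask)
{
	return perm & ~(umask & S_IRWXO);
}

int main(void)
{
	unsigned int umask = 027;      /* example umask, octal */

	/* an ACL entry granting rwx (7) in each class */
	printf("user  7 -> %o\n", clamp_user(07, umask));   /* 7: untouched */
	printf("group 7 -> %o\n", clamp_group(07, umask));  /* 5: write cleared */
	printf("other 7 -> %o\n", clamp_other(07, umask));  /* 0: all cleared */
	return 0;
}
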
diff -ruNp linux-3.13.11/fs/xfs/xfs_bmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_bmap.c
--- linux-3.13.11/fs/xfs/xfs_bmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_bmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
 
 #else
 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
-#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
 #endif /* DEBUG */
 
 /*
diff -ruNp linux-3.13.11/fs/xfs/xfs_dir2_readdir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_dir2_readdir.c
--- linux-3.13.11/fs/xfs/xfs_dir2_readdir.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_dir2_readdir.c	2014-07-09 12:00:15.000000000 +0200
@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
 		ino = dp->d_ops->sf_get_ino(sfp, sfep);
 		filetype = dp->d_ops->sf_get_ftype(sfep);
 		ctx->pos = off & 0x7fffffff;
-		if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
+		if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
+			char name[sfep->namelen];
+			memcpy(name, sfep->name, sfep->namelen);
+			if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
+				return 0;
+		} else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
 			    xfs_dir3_get_dtype(mp, filetype)))
 			return 0;
 		sfep = dp->d_ops->sf_nextentry(sfp, sfep);
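
The xfs_dir2_readdir.c change copies a shortform entry name to a temporary buffer before calling dir_emit() whenever the name would otherwise point into the inode's inline data, which can be reused while the callback runs. The general copy-before-emit shape, with simplified buffer handling (the hunk itself uses a stack VLA sized by namelen):

#include <stdio.h>
#include <string.h>

/* stand-in for dir_emit(): consumes a name of the given length */
static int emit(const char *name, size_t namelen)
{
	printf("%.*s\n", (int)namelen, name);
	return 1;
}

/*
 * If 'name' points into shared/inline storage that may change while the
 * consumer runs, emit from a private copy instead of the original.
 */
static int emit_safely(const char *name, size_t namelen, int aliases_inline)
{
	if (aliases_inline) {
		char copy[256];

		if (namelen > sizeof(copy))
			return 0;
		memcpy(copy, name, namelen);
		return emit(copy, namelen);
	}
	return emit(name, namelen);
}

int main(void)
{
	char inline_data[] = "inlinedentry";

	return !emit_safely(inline_data, strlen(inline_data), 1);
}
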
diff -ruNp linux-3.13.11/fs/xfs/xfs_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_ioctl.c
--- linux-3.13.11/fs/xfs/xfs_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -126,7 +126,7 @@ xfs_find_handle(
 	}
 
 	error = -EFAULT;
-	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
+	if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
 	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
 		goto out_put;
 
diff -ruNp linux-3.13.11/fs/xfs/xfs_iops.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_iops.c
--- linux-3.13.11/fs/xfs/xfs_iops.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/fs/xfs/xfs_iops.c	2014-07-09 12:00:15.000000000 +0200
@@ -397,7 +397,7 @@ xfs_vn_put_link(
 	struct nameidata *nd,
 	void		*p)
 {
-	char		*s = nd_get_link(nd);
+	const char	*s = nd_get_link(nd);
 
 	if (!IS_ERR(s))
 		kfree(s);
diff -ruNp linux-3.13.11/grsecurity/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/Kconfig
--- linux-3.13.11/grsecurity/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,1161 @@
+#
+# grsecurity configuration
+#
+menu "Memory Protections"
+depends on GRKERNSEC
+
+config GRKERNSEC_KMEM
+	bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
+	default y if GRKERNSEC_CONFIG_AUTO
+	select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
+	help
+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to
+	  be written to or read from to modify or leak the contents of the running
+	  kernel.  /dev/port will also not be allowed to be opened, writing to
+	  /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
+	  If you have module support disabled, enabling this will close up several
+	  ways that are currently used to insert malicious code into the running
+	  kernel.
+
+	  Even with this feature enabled, we still highly recommend that
+	  you use the RBAC system, as it is still possible for an attacker to
+	  modify the running kernel through other more obscure methods.
+
+	  It is highly recommended that you say Y here if you meet all the
+	  conditions above.
+
+config GRKERNSEC_VM86
+	bool "Restrict VM86 mode"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
+	depends on X86_32
+
+	help
+	  If you say Y here, only processes with CAP_SYS_RAWIO will be able to
+	  make use of a special execution mode on 32bit x86 processors called
+	  Virtual 8086 (VM86) mode.  XFree86 may need vm86 mode for certain
+	  video cards and will still work with this option enabled.  The purpose
+	  of the option is to prevent exploitation of emulation errors in
+	  virtualization of vm86 mode like the one discovered in VMWare in 2009.
+	  Nearly all users should be able to enable this option.
+
+config GRKERNSEC_IO
+	bool "Disable privileged I/O"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
+	depends on X86
+	select RTC_CLASS
+	select RTC_INTF_DEV
+	select RTC_DRV_CMOS
+
+	help
+	  If you say Y here, all ioperm and iopl calls will return an error.
+	  Ioperm and iopl can be used to modify the running kernel.
+	  Unfortunately, some programs need this access to operate properly,
+	  the most notable of which are XFree86 and hwclock.  hwclock can be
+	  remedied by having RTC support in the kernel, so real-time 
+	  clock support is enabled if this option is enabled, to ensure 
+	  that hwclock operates correctly.  If hwclock still does not work,
+	  either update udev or symlink /dev/rtc to /dev/rtc0.
+
+	  If you're using XFree86 or a version of Xorg from 2012 or earlier,
+	  you may not be able to boot into a graphical environment with this
+	  option enabled.  In this case, you should use the RBAC system instead.
+
+config GRKERNSEC_JIT_HARDEN
+	bool "Harden BPF JIT against spray attacks"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on BPF_JIT && X86
+	help
+	  If you say Y here, the native code generated by the kernel's Berkeley
+	  Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
+	  attacks that attempt to fit attacker-beneficial instructions in
+	  32bit immediate fields of JIT-generated native instructions.  The
+	  attacker will generally aim to cause an unintended instruction sequence
+	  of JIT-generated native code to execute by jumping into the middle of
+	  a generated instruction.  This feature effectively randomizes the 32bit
+	  immediate constants present in the generated code to thwart such attacks.
+
+	  If you're using KERNEXEC, it's recommended that you enable this option
+	  to supplement the hardening of the kernel.
+  
+config GRKERNSEC_PERF_HARDEN
+	bool "Disable unprivileged PERF_EVENTS usage by default"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PERF_EVENTS
+	help
+	  If you say Y here, the range of acceptable values for the
+	  /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
+	  default to a new value: 3.  When the sysctl is set to this value, no
+	  unprivileged use of the PERF_EVENTS syscall interface will be permitted.
+
+	  Though PERF_EVENTS can be used legitimately for performance monitoring
+	  and low-level application profiling, it is forced on regardless of
+	  configuration, has been at fault for several vulnerabilities, and
+	  creates new opportunities for side channels and other information leaks.
+
+	  This feature puts PERF_EVENTS into a secure default state and permits
+	  the administrator to change out of it temporarily if unprivileged
+	  application profiling is needed.
+
+config GRKERNSEC_RAND_THREADSTACK
+	bool "Insert random gaps between thread stacks"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_RANDMMAP && !PPC
+	help
+	  If you say Y here, a random-sized gap will be enforced between allocated
+	  thread stacks.  Glibc's NPTL and other threading libraries that
+	  pass MAP_STACK to the kernel for thread stack allocation are supported.
+	  The implementation currently provides 8 bits of entropy for the gap.
+
+	  Many distributions do not compile threaded remote services with the
+	  -fstack-check argument to GCC, causing the variable-sized stack-based
+	  allocator, alloca(), to not probe the stack on allocation.  This
+	  permits an unbounded alloca() to skip over any guard page and potentially
+	  modify another thread's stack reliably.  An enforced random gap
+	  reduces the reliability of such an attack and increases the chance
+	  that such a read/write to another thread's stack instead lands in
+	  an unmapped area, causing a crash and triggering grsecurity's
+	  anti-bruteforcing logic.
+
+config GRKERNSEC_PROC_MEMMAP
+	bool "Harden ASLR against information leaks and entropy reduction"
+	default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
+	depends on PAX_NOEXEC || PAX_ASLR
+	help
+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
+	  give no information about the addresses of its mappings if
+	  PaX features that rely on random addresses are enabled on the task.
+	  In addition to sanitizing this information and disabling other
+	  dangerous sources of information, this option causes reads of sensitive
+	  /proc/<pid> entries where the file descriptor was opened in a different
+	  task than the one performing the read.  Such attempts are logged.
+	  This option also limits argv/env strings for suid/sgid binaries
+	  to 512KB to prevent a complete exhaustion of the stack entropy provided
+	  by ASLR.  Finally, it places an 8MB stack resource limit on suid/sgid
+	  binaries to prevent alternative mmap layouts from being abused.
+
+	  If you use PaX it is essential that you say Y here as it closes up
+	  several holes that make full ASLR useless locally.
+
+config GRKERNSEC_BRUTE
+	bool "Deter exploit bruteforcing"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, attempts to bruteforce exploits against forking
+	  daemons such as apache or sshd, as well as against suid/sgid binaries
+	  will be deterred.  When a child of a forking daemon is killed by PaX
+	  or crashes due to an illegal instruction or other suspicious signal,
+	  the parent process will be delayed 30 seconds upon every subsequent
+	  fork until the administrator is able to assess the situation and
+	  restart the daemon.
+	  In the suid/sgid case, the attempt is logged, the user has all their
+	  existing instances of the suid/sgid binary terminated and will
+	  be unable to execute any suid/sgid binaries for 15 minutes.
+
+	  It is recommended that you also enable signal logging in the auditing
+	  section so that logs are generated when a process triggers a suspicious
+	  signal.
+	  If the sysctl option is enabled, a sysctl option with name
+	  "deter_bruteforce" is created.
+
+config GRKERNSEC_MODHARDEN
+	bool "Harden module auto-loading"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on MODULES
+	help
+	  If you say Y here, module auto-loading in response to use of some
+	  feature implemented by an unloaded module will be restricted to
+	  root users.  Enabling this option helps defend against attacks 
+	  by unprivileged users who abuse the auto-loading behavior to 
+	  cause a vulnerable module to load that is then exploited.
+
+	  If this option prevents a legitimate use of auto-loading for a 
+	  non-root user, the administrator can execute modprobe manually 
+	  with the exact name of the module mentioned in the alert log.
+	  Alternatively, the administrator can add the module to the list
+	  of modules loaded at boot by modifying init scripts.
+
+	  Modification of init scripts will most likely be needed on 
+	  Ubuntu servers with encrypted home directory support enabled,
+	  as the first non-root user logging in will cause the ecb(aes),
+	  ecb(aes)-all, cbc(aes), and cbc(aes)-all  modules to be loaded.
+
+config GRKERNSEC_HIDESYM
+	bool "Hide kernel symbols"
+	default y if GRKERNSEC_CONFIG_AUTO
+	select PAX_USERCOPY_SLABS
+	help
+	  If you say Y here, getting information on loaded modules, and
+	  displaying all kernel symbols through a syscall will be restricted
+	  to users with CAP_SYS_MODULE.  For software compatibility reasons,
+	  /proc/kallsyms will be restricted to the root user.  The RBAC
+	  system can hide that entry even from root.
+
+	  This option also prevents leaking of kernel addresses through
+	  several /proc entries.
+
+	  Note that this option is only effective provided the following
+	  conditions are met:
+	  1) The kernel using grsecurity is not precompiled by some distribution
+	  2) You have also enabled GRKERNSEC_DMESG
+	  3) You are using the RBAC system and hiding other files such as your
+	     kernel image and System.map.  Alternatively, enabling this option
+	     causes the permissions on /boot, /lib/modules, and the kernel
+	     source directory to change at compile time to prevent 
+	     reading by non-root users.
+	  If the above conditions are met, this option will aid in providing a
+	  useful protection against local kernel exploitation of overflows
+	  and arbitrary read/write vulnerabilities.
+
+	  It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
+	  in addition to this feature.
+
+config GRKERNSEC_RANDSTRUCT
+	bool "Randomize layout of sensitive kernel structures"
+	default y if GRKERNSEC_CONFIG_AUTO
+	select GRKERNSEC_HIDESYM
+	select MODVERSIONS if MODULES
+	help
+	  If you say Y here, the layouts of a number of sensitive kernel
+	  structures (task, fs, cred, etc) and all structures composed entirely
+	  of function pointers (aka "ops" structs) will be randomized at compile-time.
+	  This can introduce the requirement of an additional infoleak
+	  vulnerability for exploits targeting these structure types.
+
+	  Enabling this feature will introduce some performance impact, slightly
+	  increase memory usage, and prevent the use of forensic tools like
+	  Volatility against the system (unless the kernel source tree isn't
+	  cleaned after kernel installation).
+
+	  The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
+	  It remains after a make clean to allow for external modules to be compiled
+	  with the existing seed and will be removed by a make mrproper or
+	  make distclean.
+
+          Note that the implementation requires gcc 4.6.4. or newer.  You may need
+	  to install the supporting headers explicitly in addition to the normal
+	  gcc package.
+
+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
+	bool "Use cacheline-aware structure randomization"
+	depends on GRKERNSEC_RANDSTRUCT
+	default y if GRKERNSEC_CONFIG_PRIORITY_PERF
+	help
+	  If you say Y here, the RANDSTRUCT randomization will make a best effort
+	  at restricting randomization to cacheline-sized groups of elements.  It
+	  will further not randomize bitfields in structures.  This reduces the
+	  performance hit of RANDSTRUCT at the cost of weakened randomization.
+
+config GRKERNSEC_KERN_LOCKOUT
+	bool "Active kernel exploit response"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on X86 || ARM || PPC || SPARC
+	help
+	  If you say Y here, when a PaX alert is triggered due to suspicious
+	  activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
+	  or an OOPS occurs due to bad memory accesses, instead of just
+	  terminating the offending process (and potentially allowing
+	  a subsequent exploit from the same user), we will take one of two
+	  actions:
+	   If the user was root, we will panic the system
+	   If the user was non-root, we will log the attempt, terminate
+	   all processes owned by the user, then prevent them from creating
+	   any new processes until the system is restarted
+	  This deters repeated kernel exploitation/bruteforcing attempts
+	  and is useful for later forensics.
+
+config GRKERNSEC_OLD_ARM_USERLAND
+	bool "Old ARM userland compatibility"
+	depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
+	help
+	  If you say Y here, stubs of executable code to perform such operations
+	  as "compare-exchange" will be placed at fixed locations in the ARM vector
+	  table.  This is unfortunately needed for old ARM userland meant to run
+	  across a wide range of processors.  Without this option enabled,
+	  the get_tls and data memory barrier stubs will be emulated by the kernel,
+	  which is enough for Linaro userlands or other userlands designed for v6
+	  and newer ARM CPUs.  It's recommended that you try without this option enabled
+	  first, and only enable it if your userland does not boot (it will likely fail
+	  at init time).
+
+endmenu
+menu "Role Based Access Control Options"
+depends on GRKERNSEC
+
+config GRKERNSEC_RBAC_DEBUG
+	bool
+
+config GRKERNSEC_NO_RBAC
+	bool "Disable RBAC system"
+	help
+	  If you say Y here, the /dev/grsec device will be removed from the kernel,
+	  preventing the RBAC system from being enabled.  You should only say Y
+	  here if you have no intention of using the RBAC system, so as to prevent
+	  an attacker with root access from misusing the RBAC system to hide files
+	  and processes when loadable module support and /dev/[k]mem have been
+	  locked down.
+
+config GRKERNSEC_ACL_HIDEKERN
+	bool "Hide kernel processes"
+	help
+	  If you say Y here, all kernel threads will be hidden to all
+	  processes but those whose subject has the "view hidden processes"
+	  flag.
+
+config GRKERNSEC_ACL_MAXTRIES
+	int "Maximum tries before password lockout"
+	default 3
+	help
+	  This option enforces the maximum number of times a user can attempt
+	  to authorize themselves with the grsecurity RBAC system before being
+	  denied the ability to attempt authorization again for a specified time.
+	  The lower the number, the harder it will be to brute-force a password.
+
+config GRKERNSEC_ACL_TIMEOUT
+	int "Time to wait after max password tries, in seconds"
+	default 30
+	help
+	  This option specifies the time the user must wait after attempting to
+	  authorize to the RBAC system with the maximum number of invalid
+	  passwords.  The higher the number, the harder it will be to brute-force
+	  a password.
+
+endmenu
+menu "Filesystem Protections"
+depends on GRKERNSEC
+
+config GRKERNSEC_PROC
+	bool "Proc restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, the permissions of the /proc filesystem
+	  will be altered to enhance system security and privacy.  You MUST
+	  choose either a user only restriction or a user and group restriction.
+	  Depending upon the option you choose, you can either restrict users to
+	  seeing only the processes they themselves run, or choose a group whose
+	  members can view all processes and files normally restricted to root.
+	  NOTE: If you're running identd or
+	  ntpd as a non-root user, you will have to run it as the group you
+	  specify here.
+
+config GRKERNSEC_PROC_USER
+	bool "Restrict /proc to user only"
+	depends on GRKERNSEC_PROC
+	help
+	  If you say Y here, non-root users will only be able to view their own
+	  processes, and will be restricted from viewing network-related
+	  information and from viewing kernel symbol and module information.
+
+config GRKERNSEC_PROC_USERGROUP
+	bool "Allow special group"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
+	help
+	  If you say Y here, you will be able to select a group that will be
+	  able to view all processes and network-related information.  If you've
+	  enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
+	  remain hidden.  This option is useful if you want to run identd as
+	  a non-root user.  The group you select may also be chosen at boot time
+	  via "grsec_proc_gid=" on the kernel commandline.
+
+config GRKERNSEC_PROC_GID
+	int "GID for special group"
+	depends on GRKERNSEC_PROC_USERGROUP
+	default 1001
+
+config GRKERNSEC_PROC_ADD
+	bool "Additional restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
+	help
+	  If you say Y here, additional restrictions will be placed on
+	  /proc that keep normal users from viewing device information and 
+	  slabinfo information that could be useful for exploits.
+
+config GRKERNSEC_LINK
+	bool "Linking restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, /tmp race exploits will be prevented, since users
+	  will no longer be able to follow symlinks owned by other users in
+	  world-writable +t directories (e.g. /tmp), unless the owner of the
+	  symlink is the owner of the directory.  Users will also not be
+	  able to hardlink to files they do not own.  If the sysctl option is
+	  enabled, a sysctl option with name "linking_restrictions" is created.
+
+config GRKERNSEC_SYMLINKOWN
+	bool "Kernel-enforced SymlinksIfOwnerMatch"
+	default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
+	help
+	  Apache's SymlinksIfOwnerMatch option has an inherent race condition
+	  that prevents it from being used as a security feature.  As Apache
+	  verifies the symlink by performing a stat() against the target of
+	  the symlink before it is followed, an attacker can setup a symlink
+	  to point to a same-owned file, then replace the symlink with one
+	  that targets another user's file just after Apache "validates" the
+	  symlink -- a classic TOCTOU race.  If you say Y here, a complete,
+	  race-free replacement for Apache's "SymlinksIfOwnerMatch" option
+	  will be in place for the group you specify. If the sysctl option
+	  is enabled, a sysctl option with name "enforce_symlinksifowner" is
+	  created.
+
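
For illustration only (not part of the patch), a minimal userspace sketch of the stat-then-open race described above; the path is hypothetical:

/* Sketch of the TOCTOU window in a userspace SymlinksIfOwnerMatch-style
 * check: the symlink can be swapped between the "validation" and the
 * open() that actually follows it. */
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

static int checked_open(const char *link_path)
{
	struct stat lst, st;

	if (lstat(link_path, &lst) < 0 || stat(link_path, &st) < 0)
		return -1;
	if (lst.st_uid != st.st_uid)	/* "SymlinksIfOwnerMatch" test */
		return -1;
	/* race window: an attacker can replace the symlink right here */
	return open(link_path, O_RDONLY);
}

int main(void)
{
	int fd = checked_open("/var/www/html/link");

	if (fd >= 0)
		close(fd);
	return 0;
}
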
+config GRKERNSEC_SYMLINKOWN_GID
+	int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
+	depends on GRKERNSEC_SYMLINKOWN
+	default 1006
+	help
+	  Setting this GID determines what group kernel-enforced
+	  SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
+	  is enabled, a sysctl option with name "symlinkown_gid" is created.
+
+config GRKERNSEC_FIFO
+	bool "FIFO restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, users will not be able to write to FIFOs they don't
+	  own in world-writable +t directories (e.g. /tmp), unless the owner of
+	  the FIFO is the same owner of the directory it's held in.  If the sysctl
+	  option is enabled, a sysctl option with name "fifo_restrictions" is
+	  created.
+
+config GRKERNSEC_SYSFS_RESTRICT
+	bool "Sysfs/debugfs restriction"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
+	depends on SYSFS
+	help
+	  If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
+	  any filesystem normally mounted under it (e.g. debugfs) will be
+	  mostly accessible only by root.  These filesystems generally provide access
+	  to hardware and debug information that isn't appropriate for unprivileged
+	  users of the system.  Sysfs and debugfs have also become a large source
+	  of new vulnerabilities, ranging from infoleaks to local compromise.
+	  There has been very little oversight with an eye toward security involved
+	  in adding new exporters of information to these filesystems, so their
+	  use is discouraged.
+	  For reasons of compatibility, a few directories have been whitelisted
+	  for access by non-root users:
+	  /sys/fs/selinux
+	  /sys/fs/fuse
+	  /sys/devices/system/cpu
+
+config GRKERNSEC_ROFS
+	bool "Runtime read-only mount protection"
+	depends on SYSCTL
+	help
+	  If you say Y here, a sysctl option with name "romount_protect" will
+	  be created.  By setting this option to 1 at runtime, filesystems
+	  will be protected in the following ways:
+	  * No new writable mounts will be allowed
+	  * Existing read-only mounts won't be able to be remounted read/write
+	  * Write operations will be denied on all block devices
+	  This option acts independently of grsec_lock: once it is set to 1,
+	  it cannot be turned off.  Therefore, please be mindful of the resulting
+	  behavior if this option is enabled in an init script on a read-only
+	  filesystem.
+	  Also be aware that as with other root-focused features, GRKERNSEC_KMEM
+	  and GRKERNSEC_IO should be enabled and module loading disabled via
+	  config or at runtime.
+	  This feature is mainly intended for secure embedded systems.
+	  
+
+config GRKERNSEC_DEVICE_SIDECHANNEL
+	bool "Eliminate stat/notify-based device sidechannels"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, timing analyses on block or character
+	  devices like /dev/ptmx using stat or inotify/dnotify/fanotify
+	  will be thwarted for unprivileged users.  If a process without
+	  CAP_MKNOD stats such a device, the last access and last modify times
+	  will match the device's create time.  No access or modify events
+	  will be triggered through inotify/dnotify/fanotify for such devices.
+	  This feature will prevent attacks that may at a minimum
+	  allow an attacker to determine the administrator's password length.
+
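
A minimal sketch (not part of the patch) of the kind of stat()-based timing probe this option blunts; with the option enabled, an unprivileged caller sees timestamps pinned to the device's create time, so the loop observes nothing:

/* Sketch: poll the access time of /dev/ptmx to infer terminal activity.
 * Under GRKERNSEC_DEVICE_SIDECHANNEL this learns nothing for callers
 * lacking CAP_MKNOD, since the reported times never change. */
#include <stdio.h>
#include <time.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	time_t last = 0;

	while (stat("/dev/ptmx", &st) == 0) {
		if (st.st_atime != last) {
			last = st.st_atime;
			printf("activity at %ld\n", (long)last);
		}
		usleep(100 * 1000);	/* poll every 100 ms */
	}
	return 0;
}
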
+config GRKERNSEC_CHROOT
+	bool "Chroot jail restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, you will be able to choose several options that will
+	  make breaking out of a chrooted jail much more difficult.  If you
+	  encounter no software incompatibilities with the following options, it
+	  is recommended that you enable each one.
+
+	  Note that the chroot restrictions are not intended to apply to "chroots"
+	  to directories that are simple bind mounts of the global root filesystem.
+	  For several other reasons, a user shouldn't expect any significant
+	  security by performing such a chroot.
+
+config GRKERNSEC_CHROOT_MOUNT
+	bool "Deny mounts"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to
+	  mount or remount filesystems.  If the sysctl option is enabled, a
+	  sysctl option with name "chroot_deny_mount" is created.
+
+config GRKERNSEC_CHROOT_DOUBLE
+	bool "Deny double-chroots"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to chroot
+	  again outside the chroot.  This is a widely used method of breaking
+	  out of a chroot jail and should not be allowed.  If the sysctl 
+	  option is enabled, a sysctl option with name 
+	  "chroot_deny_chroot" is created.
+
+config GRKERNSEC_CHROOT_PIVOT
+	bool "Deny pivot_root in chroot"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to use
+	  a function called pivot_root() that was introduced in Linux 2.3.41.  It
+	  works similarly to chroot in that it changes the root filesystem.  This
+	  function could be misused in a chrooted process to attempt to break out
+	  of the chroot, and therefore should not be allowed.  If the sysctl
+	  option is enabled, a sysctl option with name "chroot_deny_pivot" is
+	  created.
+
+config GRKERNSEC_CHROOT_CHDIR
+	bool "Enforce chdir(\"/\") on all chroots"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, the current working directory of all newly-chrooted
+	  applications will be set to the root directory of the chroot.
+	  The man page on chroot(2) states:
+	  Note that this call does not change  the  current  working
+	  directory,  so  that `.' can be outside the tree rooted at
+	  `/'.  In particular, the  super-user  can  escape  from  a
+	  `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
+
+	  It is recommended that you say Y here, since it's not known to break
+	  any software.  If the sysctl option is enabled, a sysctl option with
+	  name "chroot_enforce_chdir" is created.
+
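
The chroot(2) excerpt above translates directly into code; a sketch (not part of the patch) of the escape that enforcing chdir("/") closes, assuming a root process already running inside a chroot:

/* Sketch of the classic "mkdir foo; chroot foo; cd .." break-out that
 * GRKERNSEC_CHROOT_CHDIR defeats.  Assumes root inside an existing
 * chroot; the directory name is arbitrary. */
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	int i;

	mkdir("foo", 0700);
	chroot("foo");		/* cwd is now *outside* the new root */
	for (i = 0; i < 64; i++)
		chdir("..");	/* climb up to the real root */
	chroot(".");		/* re-root at the real filesystem root */
	execl("/bin/sh", "sh", (char *)NULL);
	return 1;
}
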
+config GRKERNSEC_CHROOT_CHMOD
+	bool "Deny (f)chmod +s"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to chmod
+	  or fchmod files to make them have suid or sgid bits.  This protects
+	  against another published method of breaking a chroot.  If the sysctl
+	  option is enabled, a sysctl option with name "chroot_deny_chmod" is
+	  created.
+
+config GRKERNSEC_CHROOT_FCHDIR
+	bool "Deny fchdir out of chroot"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, a well-known method of breaking chroots by fchdir'ing
+	  to a file descriptor of the chrooting process that points to a directory
+	  outside the filesystem will be stopped.  If the sysctl option
+	  is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
+
+config GRKERNSEC_CHROOT_MKNOD
+	bool "Deny mknod"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be allowed to
+	  mknod.  The problem with using mknod inside a chroot is that it
+	  would allow an attacker to create a device entry that is the same
+	  as one on the physical root of your system, which could range from
+	  as one on the physical root of your system, which could be anything
+	  from the console device to a device for your hard drive (which
+	  that you say Y here, unless you run into software incompatibilities.
+	  If the sysctl option is enabled, a sysctl option with name
+	  "chroot_deny_mknod" is created.
+
+config GRKERNSEC_CHROOT_SHMAT
+	bool "Deny shmat() out of chroot"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to attach
+	  to shared memory segments that were created outside of the chroot jail.
+	  It is recommended that you say Y here.  If the sysctl option is enabled,
+	  a sysctl option with name "chroot_deny_shmat" is created.
+
+config GRKERNSEC_CHROOT_UNIX
+	bool "Deny access to abstract AF_UNIX sockets out of chroot"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to
+	  connect to abstract (meaning not belonging to a filesystem) Unix
+	  domain sockets that were bound outside of a chroot.  It is recommended
+	  that you say Y here.  If the sysctl option is enabled, a sysctl option
+	  with name "chroot_deny_unix" is created.
+
+config GRKERNSEC_CHROOT_FINDTASK
+	bool "Protect outside processes"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to
+	  kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, 
+	  getsid, or view any process outside of the chroot.  If the sysctl
+	  option is enabled, a sysctl option with name "chroot_findtask" is
+	  created.
+
+config GRKERNSEC_CHROOT_NICE
+	bool "Restrict priority changes"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, processes inside a chroot will not be able to raise
+	  the priority of processes in the chroot, or alter the priority of
+	  processes outside the chroot.  This provides more security than simply
+	  removing CAP_SYS_NICE from the process' capability set.  If the
+	  sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
+	  is created.
+
+config GRKERNSEC_CHROOT_SYSCTL
+	bool "Deny sysctl writes"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, an attacker in a chroot will not be able to
+	  write to sysctl entries, either by sysctl(2) or through a /proc
+	  interface.  It is strongly recommended that you say Y here. If the
+	  sysctl option is enabled, a sysctl option with name
+	  "chroot_deny_sysctl" is created.
+
+config GRKERNSEC_CHROOT_CAPS
+	bool "Capability restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT
+	help
+	  If you say Y here, the capabilities on all processes within a
+	  chroot jail will be lowered to stop module insertion, raw i/o,
+	  system and net admin tasks, rebooting the system, modifying immutable
+	  files, modifying IPC owned by another, and changing the system time.
+	  This is left an option because it can break some apps.  Disable this
+	  if your chrooted apps are having problems performing those kinds of
+	  tasks.  If the sysctl option is enabled, a sysctl option with
+	  name "chroot_caps" is created.
+
+config GRKERNSEC_CHROOT_INITRD
+	bool "Exempt initrd tasks from restrictions"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
+	help
+	  If you say Y here, tasks started prior to init will be exempted from
+	  grsecurity's chroot restrictions.  This option is mainly meant to
+	  resolve Plymouth's performing privileged operations unnecessarily
+	  in a chroot.
+
+endmenu
+menu "Kernel Auditing"
+depends on GRKERNSEC
+
+config GRKERNSEC_AUDIT_GROUP
+	bool "Single group for auditing"
+	help
+	  If you say Y here, the exec and chdir logging features will only operate
+	  on a group you specify.  This option is recommended if you only want to
+	  watch certain users instead of having a large amount of logs from the
+	  entire system.  If the sysctl option is enabled, a sysctl option with
+	  name "audit_group" is created.
+
+config GRKERNSEC_AUDIT_GID
+	int "GID for auditing"
+	depends on GRKERNSEC_AUDIT_GROUP
+	default 1007
+
+config GRKERNSEC_EXECLOG
+	bool "Exec logging"
+	help
+	  If you say Y here, all execve() calls will be logged (since the
+	  other exec*() calls are frontends to execve(), all execution
+	  will be logged).  Useful for shell-servers that like to keep track
+	  of their users.  If the sysctl option is enabled, a sysctl option with
+	  name "exec_logging" is created.
+	  WARNING: This option when enabled will produce a LOT of logs, especially
+	  on an active system.
+
+config GRKERNSEC_RESLOG
+	bool "Resource logging"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, all attempts to overstep resource limits will
+	  be logged with the resource name, the requested size, and the current
+	  limit.  It is highly recommended that you say Y here.  If the sysctl
+	  option is enabled, a sysctl option with name "resource_logging" is
+	  created.  If the RBAC system is enabled, the sysctl value is ignored.
+
+config GRKERNSEC_CHROOT_EXECLOG
+	bool "Log execs within chroot"
+	help
+	  If you say Y here, all executions inside a chroot jail will be logged
+	  to syslog.  This can cause a large amount of logs if certain
+	  applications (eg. djb's daemontools) are installed on the system, and
+	  is therefore left as an option.  If the sysctl option is enabled, a
+	  sysctl option with name "chroot_execlog" is created.
+
+config GRKERNSEC_AUDIT_PTRACE
+	bool "Ptrace logging"
+	help
+	  If you say Y here, all attempts to attach to a process via ptrace
+	  will be logged.  If the sysctl option is enabled, a sysctl option
+	  with name "audit_ptrace" is created.
+
+config GRKERNSEC_AUDIT_CHDIR
+	bool "Chdir logging"
+	help
+	  If you say Y here, all chdir() calls will be logged.  If the sysctl
+ 	  option is enabled, a sysctl option with name "audit_chdir" is created.
+
+config GRKERNSEC_AUDIT_MOUNT
+	bool "(Un)Mount logging"
+	help
+	  If you say Y here, all mounts and unmounts will be logged.  If the
+	  sysctl option is enabled, a sysctl option with name "audit_mount" is
+	  created.
+
+config GRKERNSEC_SIGNAL
+	bool "Signal logging"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, certain important signals will be logged, such as
+	  SIGSEGV, which will as a result inform you when an error in a program
+	  has occurred, which in some cases could mean a possible exploit attempt.
+	  If the sysctl option is enabled, a sysctl option with name
+	  "signal_logging" is created.
+
+config GRKERNSEC_FORKFAIL
+	bool "Fork failure logging"
+	help
+	  If you say Y here, all failed fork() attempts will be logged.
+	  This could suggest a fork bomb, or someone attempting to overstep
+	  their process limit.  If the sysctl option is enabled, a sysctl option
+	  with name "forkfail_logging" is created.
+
+config GRKERNSEC_TIME
+	bool "Time change logging"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, any changes of the system clock will be logged.
+	  If the sysctl option is enabled, a sysctl option with name
+	  "timechange_logging" is created.
+
+config GRKERNSEC_PROC_IPADDR
+	bool "/proc/<pid>/ipaddr support"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, a new entry will be added to each /proc/<pid>
+	  directory that contains the IP address of the person using the task.
+	  The IP is carried across local TCP and AF_UNIX stream sockets.
+	  This information can be useful for IDS/IPSes to perform remote response
+	  to a local attack.  The entry is readable by only the owner of the
+	  process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
+	  the RBAC system), and thus does not create privacy concerns.
+
+config GRKERNSEC_RWXMAP_LOG
+	bool 'Denied RWX mmap/mprotect logging'
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
+	help
+	  If you say Y here, calls to mmap() and mprotect() with explicit
+	  usage of PROT_WRITE and PROT_EXEC together will be logged when
+	  denied by the PAX_MPROTECT feature.  This feature will also
+	  log other problematic scenarios that can occur when PAX_MPROTECT
+	  is enabled on a binary, like textrels and PT_GNU_STACK.  If the 
+          sysctl option is enabled, a sysctl option with name "rwxmap_logging"
+	  is created.
+
+endmenu
+
+menu "Executable Protections"
+depends on GRKERNSEC
+
+config GRKERNSEC_DMESG
+	bool "Dmesg(8) restriction"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, non-root users will not be able to use dmesg(8)
+	  to view the contents of the kernel's circular log buffer.
+	  The kernel's log buffer often contains kernel addresses and other
+	  identifying information useful to an attacker in fingerprinting a
+	  system for a targeted exploit.
+	  If the sysctl option is enabled, a sysctl option with name "dmesg" is
+	  created.
+
+config GRKERNSEC_HARDEN_PTRACE
+	bool "Deter ptrace-based process snooping"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, TTY sniffers and other malicious monitoring
+	  programs implemented through ptrace will be defeated.  If you
+	  have been using the RBAC system, this option has already been
+	  enabled for several years for all users, with the ability to make
+	  fine-grained exceptions.
+
+	  This option only affects the ability of non-root users to ptrace
+	  processes that are not a descendant of the ptracing process.
+	  This means that strace ./binary and gdb ./binary will still work,
+	  but attaching to arbitrary processes will not.  If the sysctl
+	  option is enabled, a sysctl option with name "harden_ptrace" is
+	  created.
+
+config GRKERNSEC_PTRACE_READEXEC
+	bool "Require read access to ptrace sensitive binaries"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, unprivileged users will not be able to ptrace unreadable
+	  binaries.  This option is useful in environments that
+	  remove the read bits (e.g. file mode 4711) from suid binaries to
+	  prevent infoleaking of their contents.  This option adds
+	  consistency to the use of that file mode, as the binary could otherwise
+	  be read out by an unprivileged user simply by ptracing it as it runs.
+
+	  If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
+	  is created.
+
+config GRKERNSEC_SETXID
+	bool "Enforce consistent multithreaded privileges"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on (X86 || SPARC64 || PPC || ARM || MIPS)
+	help
+	  If you say Y here, a change from a root uid to a non-root uid
+	  in a multithreaded application will cause the resulting uids,
+	  gids, supplementary groups, and capabilities in that thread
+	  to be propagated to the other threads of the process.  In most
+	  cases this is unnecessary, as glibc will emulate this behavior
+	  on behalf of the application.  Other libcs do not act in the
+	  same way, allowing the other threads of the process to continue
+	  running with root privileges.  If the sysctl option is enabled,
+	  a sysctl option with name "consistent_setxid" is created.
+
+config GRKERNSEC_HARDEN_IPC
+	bool "Disallow access to overly-permissive IPC objects"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on SYSVIPC
+	help
+	  If you say Y here, access to overly-permissive IPC objects (shared
+	  memory, message queues, and semaphores) will be denied for processes
+	  given the following criteria beyond normal permission checks:
+	  1) If the IPC object is world-accessible and the euid doesn't match
+	     that of the creator or current uid for the IPC object
+	  2) If the IPC object is group-accessible and the egid doesn't
+	     match that of the creator or current gid for the IPC object
+	  It's a common error to grant too much permission to these objects,
+	  with impact ranging from denial of service and information leaking to
+	  privilege escalation.  This feature was developed in response to
+	  research by Tim Brown:
+	  http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
+	  who found hundreds of such insecure usages.  Processes with
+	  CAP_IPC_OWNER are still permitted to access these IPC objects.
+	  If the sysctl option is enabled, a sysctl option with name
+	  "harden_ipc" is created.
+
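
A userspace sketch (not part of the patch) of criterion 1 above, applied to a SysV shared memory segment whose id is passed on the command line:

/* Sketch: flag a shared memory segment that GRKERNSEC_HARDEN_IPC's first
 * criterion would deny: world-accessible, and the caller's euid matches
 * neither the creator nor the current owner. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(int argc, char **argv)
{
	struct shmid_ds ds;
	int shmid;

	if (argc < 2)
		return 1;
	shmid = atoi(argv[1]);
	if (shmctl(shmid, IPC_STAT, &ds) < 0)
		return 1;
	if ((ds.shm_perm.mode & 0006) &&	/* world read or write */
	    ds.shm_perm.cuid != geteuid() &&
	    ds.shm_perm.uid != geteuid())
		printf("shmid %d would be denied by harden_ipc\n", shmid);
	return 0;
}
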
+config GRKERNSEC_TPE
+	bool "Trusted Path Execution (TPE)"
+	default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
+	help
+	  If you say Y here, you will be able to choose a gid to add to the
+	  supplementary groups of users you want to mark as "untrusted."
+	  These users will not be able to execute any files that are not in
+	  root-owned directories writable only by root.  If the sysctl option
+	  is enabled, a sysctl option with name "tpe" is created.
+
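
A rough userspace approximation (not part of the patch, and not the in-kernel check itself) of the directory-trust test described above: execution is only considered trusted if the containing directory is root-owned and writable only by root:

/* Sketch: the directory-trust check that TPE applies before allowing an
 * "untrusted" user to execute a file.  Userspace approximation only. */
#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <sys/stat.h>

static int dir_is_trusted(const char *exe_path)
{
	char buf[4096];
	struct stat st;

	strncpy(buf, exe_path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	if (stat(dirname(buf), &st) < 0)
		return 0;
	/* trusted: owned by root and writable only by root */
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	printf("%s: %s\n", argv[1],
	       dir_is_trusted(argv[1]) ? "trusted" : "untrusted");
	return 0;
}
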
+config GRKERNSEC_TPE_ALL
+	bool "Partially restrict all non-root users"
+	depends on GRKERNSEC_TPE
+	help
+	  If you say Y here, all non-root users will be covered under
+	  a weaker TPE restriction.  This is separate from, and in addition to,
+	  the main TPE options that you have selected elsewhere.  Thus, if a
+	  "trusted" GID is chosen, this restriction applies even to that GID.
+	  Under this restriction, all non-root users will only be allowed to
+	  execute files in directories they own that are not group or
+	  world-writable, or in directories owned by root and writable only by
+	  root.  If the sysctl option is enabled, a sysctl option with name
+	  "tpe_restrict_all" is created.
+
+config GRKERNSEC_TPE_INVERT
+	bool "Invert GID option"
+	depends on GRKERNSEC_TPE
+	help
+	  If you say Y here, the group you specify in the TPE configuration will
+	  decide what group TPE restrictions will be *disabled* for.  This
+	  option is useful if you want TPE restrictions to be applied to most
+	  users on the system.  If the sysctl option is enabled, a sysctl option
+	  with name "tpe_invert" is created.  Unlike other sysctl options, this
+	  entry will default to on for backward-compatibility.
+
+config GRKERNSEC_TPE_GID
+	int
+	default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
+	default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
+	
+config GRKERNSEC_TPE_UNTRUSTED_GID
+	int "GID for TPE-untrusted users"
+	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+	default 1005
+	help
+	  Setting this GID determines what group TPE restrictions will be
+	  *enabled* for.  If the sysctl option is enabled, a sysctl option
+	  with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_TRUSTED_GID
+	int "GID for TPE-trusted users"
+	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+	default 1005
+	help
+	  Setting this GID determines what group TPE restrictions will be
+	  *disabled* for.  If the sysctl option is enabled, a sysctl option
+	  with name "tpe_gid" is created.
+
+endmenu
+menu "Network Protections"
+depends on GRKERNSEC
+
+config GRKERNSEC_RANDNET
+	bool "Larger entropy pools"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, the entropy pools used for many features of Linux
+	  and grsecurity will be doubled in size.  Since several grsecurity
+	  features use additional randomness, it is recommended that you say Y
+	  here.  Saying Y here has a similar effect as modifying
+	  /proc/sys/kernel/random/poolsize.
+
+config GRKERNSEC_BLACKHOLE
+	bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on NET
+	help
+	  If you say Y here, neither TCP resets nor ICMP
+	  destination-unreachable packets will be sent in response to packets
+	  sent to ports for which no associated listening process exists.
+	  This feature supports both IPV4 and IPV6 and exempts the 
+	  loopback interface from blackholing.  Enabling this feature 
+	  makes a host more resilient to DoS attacks and reduces network
+	  visibility against scanners.
+
+	  The blackhole feature as-implemented is equivalent to the FreeBSD
+	  blackhole feature, as it prevents RST responses to all packets, not
+	  just SYNs.  Under most application behavior this causes no
+	  problems, but applications (like haproxy) may not close certain
+	  connections in a way that cleanly terminates them on the remote
+	  end, leaving the remote host in LAST_ACK state.  Because of this
+	  side-effect and to prevent intentional LAST_ACK DoSes, this
+	  feature also adds automatic mitigation against such attacks.
+	  The mitigation drastically reduces the amount of time a socket
+	  can spend in LAST_ACK state.  If you're using haproxy and not
+	  all servers it connects to have this option enabled, consider
+	  disabling this feature on the haproxy host.
+
+	  If the sysctl option is enabled, two sysctl options with names
+	  "ip_blackhole" and "lastack_retries" will be created.
+	  While "ip_blackhole" takes the standard zero/non-zero on/off
+	  toggle, "lastack_retries" uses the same kinds of values as
+	  "tcp_retries1" and "tcp_retries2".  The default value of 4
+	  prevents a socket from lasting more than 45 seconds in LAST_ACK
+	  state.
+
+config GRKERNSEC_NO_SIMULT_CONNECT
+	bool "Disable TCP Simultaneous Connect"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on NET
+	help
+	  If you say Y here, a feature by Willy Tarreau will be enabled that
+	  removes a weakness in Linux's strict implementation of TCP that
+	  allows two clients to connect to each other without either entering
+	  a listening state.  The weakness allows an attacker to easily prevent
+	  a client from connecting to a known server provided the source port
+	  for the connection is guessed correctly.
+
+	  As the weakness could be used to prevent an antivirus or IPS from
+	  fetching updates, or prevent an SSL gateway from fetching a CRL,
+	  it should be eliminated by enabling this option.  Though Linux is
+	  one of few operating systems supporting simultaneous connect, it
+	  has no legitimate use in practice and is rarely supported by firewalls.
+	
+config GRKERNSEC_SOCKET
+	bool "Socket restrictions"
+	depends on NET
+	help
+	  If you say Y here, you will be able to choose from several options.
+	  If you assign a GID on your system and add it to the supplementary
+	  groups of users you want to restrict socket access to, this patch
+	  will perform up to three things, based on the option(s) you choose.
+
+config GRKERNSEC_SOCKET_ALL
+	bool "Deny any sockets to group"
+	depends on GRKERNSEC_SOCKET
+	help
+	  If you say Y here, you will be able to choose a GID whose users will
+	  be unable to connect to other hosts from your machine or run server
+	  applications from your machine.  If the sysctl option is enabled, a
+	  sysctl option with name "socket_all" is created.
+
+config GRKERNSEC_SOCKET_ALL_GID
+	int "GID to deny all sockets for"
+	depends on GRKERNSEC_SOCKET_ALL
+	default 1004
+	help
+	  Here you can choose the GID to disable socket access for. Remember to
+	  add the users you want socket access disabled for to the GID
+	  specified here.  If the sysctl option is enabled, a sysctl option
+	  with name "socket_all_gid" is created.
+
+config GRKERNSEC_SOCKET_CLIENT
+	bool "Deny client sockets to group"
+	depends on GRKERNSEC_SOCKET
+	help
+	  If you say Y here, you will be able to choose a GID whose users will
+	  be unable to connect to other hosts from your machine, but will be
+	  able to run servers.  If this option is enabled, all users in the group
+	  you specify will have to use passive mode when initiating ftp transfers
+	  from the shell on your machine.  If the sysctl option is enabled, a
+	  sysctl option with name "socket_client" is created.
+
+config GRKERNSEC_SOCKET_CLIENT_GID
+	int "GID to deny client sockets for"
+	depends on GRKERNSEC_SOCKET_CLIENT
+	default 1003
+	help
+	  Here you can choose the GID to disable client socket access for.
+	  Remember to add the users you want client socket access disabled for to
+	  the GID specified here.  If the sysctl option is enabled, a sysctl
+	  option with name "socket_client_gid" is created.
+
+config GRKERNSEC_SOCKET_SERVER
+	bool "Deny server sockets to group"
+	depends on GRKERNSEC_SOCKET
+	help
+	  If you say Y here, you will be able to choose a GID whose users will
+	  be unable to run server applications from your machine.  If the sysctl
+	  option is enabled, a sysctl option with name "socket_server" is created.
+
+config GRKERNSEC_SOCKET_SERVER_GID
+	int "GID to deny server sockets for"
+	depends on GRKERNSEC_SOCKET_SERVER
+	default 1002
+	help
+	  Here you can choose the GID to disable server socket access for.
+	  Remember to add the users you want server socket access disabled for to
+	  the GID specified here.  If the sysctl option is enabled, a sysctl
+	  option with name "socket_server_gid" is created.
+
+endmenu
+
+menu "Physical Protections"
+depends on GRKERNSEC
+
+config GRKERNSEC_DENYUSB
+	bool "Deny new USB connections after toggle"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on SYSCTL && USB_SUPPORT
+	help
+	  If you say Y here, a new sysctl option with name "deny_new_usb"
+	  will be created.  Setting its value to 1 will prevent any new
+	  USB devices from being recognized by the OS.  Any attempted USB
+	  device insertion will be logged.  This option is intended to be
+	  used against custom USB devices designed to exploit vulnerabilities
+	  in various USB device drivers.
+
+	  For greatest effectiveness, this sysctl should be set after any
+	  relevant init scripts.  This option is safe to enable in distros
+	  as each user can choose whether or not to toggle the sysctl.
+
+config GRKERNSEC_DENYUSB_FORCE
+	bool "Reject all USB devices not connected at boot"
+	select USB
+	depends on GRKERNSEC_DENYUSB
+	help
+	  If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
+	  that doesn't involve a sysctl entry.  This option should only be
+	  enabled if you're sure you want to deny all new USB connections
+	  at runtime and don't want to modify init scripts.  This should not
+	  be enabled by distros.  It forces the core USB code to be built
+	  into the kernel image so that all devices connected at boot time
+	  can be recognized and new USB device connections can be prevented
+	  prior to init running.
+
+endmenu
+
+menu "Sysctl Support"
+depends on GRKERNSEC && SYSCTL
+
+config GRKERNSEC_SYSCTL
+	bool "Sysctl support"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  If you say Y here, you will be able to change the options that
+	  grsecurity runs with at bootup, without having to recompile your
+	  kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
+	  to enable (1) or disable (0) various features.  All the sysctl entries
+	  are mutable until the "grsec_lock" entry is set to a non-zero value.
+	  All features enabled in the kernel configuration are disabled at boot
+	  if you do not say Y to the "Turn on features by default" option.
+	  All options should be set at startup, and the grsec_lock entry should
+	  be set to a non-zero value after all the options are set.
+	  *THIS IS EXTREMELY IMPORTANT*
+
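
To make the sequence above concrete, a sketch (not part of the patch) of a late-boot helper that sets a couple of the tunables named elsewhere in this file under /proc/sys/kernel/grsecurity and then sets grsec_lock last; the entry names are examples and depend on which features were built in:

/* Sketch: set grsecurity sysctl tunables at the end of boot, then lock
 * them with grsec_lock as the help text above insists. */
#include <stdio.h>

static int grsec_set(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	grsec_set("deny_new_usb", "1");		/* see GRKERNSEC_DENYUSB */
	grsec_set("romount_protect", "1");	/* see GRKERNSEC_ROFS */
	grsec_set("grsec_lock", "1");		/* must be last: makes entries immutable */
	return 0;
}
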
+config GRKERNSEC_SYSCTL_DISTRO
+	bool "Extra sysctl support for distro makers (READ HELP)"
+	depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
+	help
+	  If you say Y here, additional sysctl options will be created
+	  for features that affect processes running as root.  Therefore,
+	  it is critical when using this option that the grsec_lock entry be
+	  enabled after boot.  Only distros with prebuilt kernel packages
+	  with this option enabled that can ensure grsec_lock is enabled
+	  after boot should use this option.
+	  *Failure to set grsec_lock after boot makes all grsec features
+	  this option covers useless*
+
+	  Currently this option creates the following sysctl entries:
+	  "Disable Privileged I/O": "disable_priv_io"	
+
+config GRKERNSEC_SYSCTL_ON
+	bool "Turn on features by default"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC_SYSCTL
+	help
+	  If you say Y here, instead of having all features enabled in the
+	  kernel configuration disabled at boot time, the features will be
+	  enabled at boot time.  It is recommended you say Y here unless
+	  there is some reason you would want all sysctl-tunable features to
+	  be disabled by default.  As mentioned elsewhere, it is important
+	  to enable the grsec_lock entry once you have finished modifying
+	  the sysctl entries.
+
+endmenu
+menu "Logging Options"
+depends on GRKERNSEC
+
+config GRKERNSEC_FLOODTIME
+	int "Seconds in between log messages (minimum)"
+	default 10
+	help
+	  This option allows you to enforce the number of seconds between
+	  grsecurity log messages.  The default should be suitable for most
+	  people; however, if you choose to change it, choose a value small enough
+	  to allow informative logs to be produced, but large enough to
+	  prevent flooding.
+
+	  Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
+	  any rate limiting on grsecurity log messages.
+
+config GRKERNSEC_FLOODBURST
+	int "Number of messages in a burst (maximum)"
+	default 6
+	help
+	  This option allows you to choose the maximum number of messages allowed
+	  within the flood time interval you chose in a separate option.  The
+	  default should be suitable for most people; however, if you find that
+	  many of your logs are being interpreted as flooding, you may want to
+	  raise this value.
+
+	  Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
+	  any rate limiting on grsecurity log messages.
+
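
The two options above describe an interval-plus-burst rate limit; a compact userspace sketch (not part of the patch) of the same idea, using the defaults of 10 seconds and 6 messages:

/* Sketch: interval/burst rate limiting in the spirit of GRKERNSEC_FLOODTIME
 * (seconds per window) and GRKERNSEC_FLOODBURST (messages per window). */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds, default above */
#define FLOODBURST 6	/* messages per window, default above */

static int log_allowed(void)
{
	static time_t window_start;
	static int count;
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;
		count = 0;
	}
	return count++ < FLOODBURST;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		if (log_allowed())
			printf("log message %d\n", i);	/* only the first 6 print */
	return 0;
}
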
+endmenu
diff -ruNp linux-3.13.11/grsecurity/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/Makefile
--- linux-3.13.11/grsecurity/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,54 @@
+# grsecurity – access control and security hardening for Linux
+# All code in this directory and various hooks located throughout the Linux kernel are
+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
+# http://www.grsecurity.net spender@grsecurity.net
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+KBUILD_CFLAGS += -Werror
+
+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
+	grsec_mount.o grsec_sig.o grsec_sysctl.o \
+	grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
+	grsec_usb.o grsec_ipc.o
+
+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
+	gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
+	gracl_learn.o grsec_log.o gracl_policy.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
+endif
+
+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
+
+ifdef CONFIG_NET
+obj-y += grsec_sock.o
+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
+endif
+
+ifndef CONFIG_GRKERNSEC
+obj-y += grsec_disabled.o
+endif
+
+ifdef CONFIG_GRKERNSEC_HIDESYM
+extra-y := grsec_hidesym.o
+$(obj)/grsec_hidesym.o:
+	@-chmod -f 500 /boot
+	@-chmod -f 500 /lib/modules
+	@-chmod -f 500 /lib64/modules
+	@-chmod -f 500 /lib32/modules
+	@-chmod -f 700 .
+	@-chmod -f 700 $(objtree)
+	@echo '  grsec: protected kernel image paths'
+endif
diff -ruNp linux-3.13.11/grsecurity/gracl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl.c
--- linux-3.13.11/grsecurity/gracl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,2679 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/lglock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/sysctl.h>
+#include <linux/netdevice.h>
+#include <linux/ptrace.h>
+#include <linux/gracl.h>
+#include <linux/gralloc.h>
+#include <linux/security.h>
+#include <linux/grinternal.h>
+#include <linux/pid_namespace.h>
+#include <linux/stop_machine.h>
+#include <linux/fdtable.h>
+#include <linux/percpu.h>
+#include <linux/lglock.h>
+#include <linux/hugetlb.h>
+#include <linux/posix-timers.h>
+#include <linux/prefetch.h>
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+#include <linux/magic.h>
+#include <linux/pagemap.h>
+#include "../fs/btrfs/async-thread.h"
+#include "../fs/btrfs/ctree.h"
+#include "../fs/btrfs/btrfs_inode.h"
+#endif
+#include "../fs/mount.h"
+
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/mman.h>
+
+#define FOR_EACH_ROLE_START(role) \
+	role = running_polstate.role_list; \
+	while (role) {
+
+#define FOR_EACH_ROLE_END(role) \
+		role = role->prev; \
+	}
+
+extern struct path gr_real_root;
+
+static struct gr_policy_state running_polstate;
+struct gr_policy_state *polstate = &running_polstate;
+extern struct gr_alloc_state *current_alloc_state;
+
+extern char *gr_shared_page[4];
+DEFINE_RWLOCK(gr_inode_lock);
+
+static unsigned int gr_status __read_only = GR_STATUS_INIT;
+
+#ifdef CONFIG_NET
+extern struct vfsmount *sock_mnt;
+#endif
+
+extern struct vfsmount *pipe_mnt;
+extern struct vfsmount *shm_mnt;
+
+#ifdef CONFIG_HUGETLBFS
+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
+#endif
+
+extern u16 acl_sp_role_value;
+extern struct acl_object_label *fakefs_obj_rw;
+extern struct acl_object_label *fakefs_obj_rwx;
+
+int gr_acl_is_enabled(void)
+{
+	return (gr_status & GR_READY);
+}
+
+void gr_enable_rbac_system(void)
+{
+	pax_open_kernel();
+	gr_status |= GR_READY;
+	pax_close_kernel();
+}
+
+int gr_rbac_disable(void *unused)
+{
+	pax_open_kernel();
+	gr_status &= ~GR_READY;
+	pax_close_kernel();
+
+	return 0;
+}
+
+static inline dev_t __get_dev(const struct dentry *dentry)
+{
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+	if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
+		return BTRFS_I(dentry->d_inode)->root->anon_dev;
+	else
+#endif
+		return dentry->d_sb->s_dev;
+}
+
+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
+{
+	return __get_dev(dentry);
+}
+
+static char gr_task_roletype_to_char(struct task_struct *task)
+{
+	switch (task->role->roletype &
+		(GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
+		 GR_ROLE_SPECIAL)) {
+	case GR_ROLE_DEFAULT:
+		return 'D';
+	case GR_ROLE_USER:
+		return 'U';
+	case GR_ROLE_GROUP:
+		return 'G';
+	case GR_ROLE_SPECIAL:
+		return 'S';
+	}
+
+	return 'X';
+}
+
+char gr_roletype_to_char(void)
+{
+	return gr_task_roletype_to_char(current);
+}
+
+__inline__ int
+gr_acl_tpe_check(void)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+	if (current->role->roletype & GR_ROLE_TPE)
+		return 1;
+	else
+		return 0;
+}
+
+int
+gr_handle_rawio(const struct inode *inode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
+	    grsec_enable_chroot_caps && proc_is_chrooted(current) &&
+	    !capable(CAP_SYS_RAWIO))
+		return 1;
+#endif
+	return 0;
+}
+
+int
+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
+{
+	if (likely(lena != lenb))
+		return 0;
+
+	return !memcmp(a, b, lena);
+}
+
+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+{
+	*buflen -= namelen;
+	if (*buflen < 0)
+		return -ENAMETOOLONG;
+	*buffer -= namelen;
+	memcpy(*buffer, str, namelen);
+	return 0;
+}
+
+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+{
+	return prepend(buffer, buflen, name->name, name->len);
+}
+
+static int prepend_path(const struct path *path, struct path *root,
+			char **buffer, int *buflen)
+{
+	struct dentry *dentry = path->dentry;
+	struct vfsmount *vfsmnt = path->mnt;
+	struct mount *mnt = real_mount(vfsmnt);
+	bool slash = false;
+	int error = 0;
+
+	while (dentry != root->dentry || vfsmnt != root->mnt) {
+		struct dentry * parent;
+
+		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+			/* Global root? */
+			if (!mnt_has_parent(mnt)) {
+				goto out;
+			}
+			dentry = mnt->mnt_mountpoint;
+			mnt = mnt->mnt_parent;
+			vfsmnt = &mnt->mnt;
+			continue;
+		}
+		parent = dentry->d_parent;
+		prefetch(parent);
+		spin_lock(&dentry->d_lock);
+		error = prepend_name(buffer, buflen, &dentry->d_name);
+		spin_unlock(&dentry->d_lock);
+		if (!error)
+			error = prepend(buffer, buflen, "/", 1);
+		if (error)
+			break;
+
+		slash = true;
+		dentry = parent;
+	}
+
+out:
+	if (!error && !slash)
+		error = prepend(buffer, buflen, "/", 1);
+
+	return error;
+}
+
+/* this must be called with mount_lock and rename_lock held */
+
+static char *__our_d_path(const struct path *path, struct path *root,
+			char *buf, int buflen)
+{
+	char *res = buf + buflen;
+	int error;
+
+	prepend(&res, &buflen, "\0", 1);
+	error = prepend_path(path, root, &res, &buflen);
+	if (error)
+		return ERR_PTR(error);
+
+	return res;
+}
+
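
prepend() and __our_d_path() assemble the name right-to-left, starting at the end of the buffer; a self-contained userspace sketch (not part of the patch) of the same technique:

/* Sketch: building a path right-to-left the way prepend()/__our_d_path()
 * do, starting from the end of the buffer and walking toward the front. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

int main(void)
{
	char buf[64];
	char *res = buf + sizeof(buf);
	int buflen = sizeof(buf);

	/* terminate first, then push components leaf-first toward the root */
	prepend(&res, &buflen, "\0", 1);
	prepend(&res, &buflen, "passwd", 6);
	prepend(&res, &buflen, "/", 1);
	prepend(&res, &buflen, "etc", 3);
	prepend(&res, &buflen, "/", 1);
	printf("%s\n", res);	/* prints /etc/passwd */
	return 0;
}
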
+static char *
+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
+{
+	char *retval;
+
+	retval = __our_d_path(path, root, buf, buflen);
+	if (unlikely(IS_ERR(retval)))
+		retval = strcpy(buf, "<path too long>");
+	else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
+		retval[1] = '\0';
+
+	return retval;
+}
+
+static char *
+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
+		char *buf, int buflen)
+{
+	struct path path;
+	char *res;
+
+	path.dentry = (struct dentry *)dentry;
+	path.mnt = (struct vfsmount *)vfsmnt;
+
+	/* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
+	   by the RBAC system */
+	res = gen_full_path(&path, &gr_real_root, buf, buflen);
+
+	return res;
+}
+
+static char *
+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
+	    char *buf, int buflen)
+{
+	char *res;
+	struct path path;
+	struct path root;
+	struct task_struct *reaper = init_pid_ns.child_reaper;
+
+	path.dentry = (struct dentry *)dentry;
+	path.mnt = (struct vfsmount *)vfsmnt;
+
+	/* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
+	get_fs_root(reaper->fs, &root);
+
+	read_seqlock_excl(&mount_lock);
+	write_seqlock(&rename_lock);
+	res = gen_full_path(&path, &root, buf, buflen);
+	write_sequnlock(&rename_lock);
+	read_sequnlock_excl(&mount_lock);
+
+	path_put(&root);
+	return res;
+}
+
+char *
+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	char *ret;
+	read_seqlock_excl(&mount_lock);
+	write_seqlock(&rename_lock);
+	ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
+			     PAGE_SIZE);
+	write_sequnlock(&rename_lock);
+	read_sequnlock_excl(&mount_lock);
+	return ret;
+}
+
+static char *
+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	char *ret;
+	char *buf;
+	int buflen;
+
+	read_seqlock_excl(&mount_lock);
+	write_seqlock(&rename_lock);
+	buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
+	ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
+	buflen = (int)(ret - buf);
+	if (buflen >= 5)
+		prepend(&ret, &buflen, "/proc", 5);
+	else
+		ret = strcpy(buf, "<path too long>");
+	write_sequnlock(&rename_lock);
+	read_sequnlock_excl(&mount_lock);
+	return ret;
+}
+
+char *
+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
+			     PAGE_SIZE);
+}
+
+char *
+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
+			   PAGE_SIZE);
+}
+
+char *
+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
+			   PAGE_SIZE);
+}
+
+__inline__ __u32
+to_gr_audit(const __u32 reqmode)
+{
+	/* masks off auditable permission flags, then shifts them to create
+	   auditing flags, and adds the special case of append auditing if
+	   we're requesting write */
+	return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
+}
+
+struct acl_role_label *
+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
+		      const gid_t gid)
+{
+	unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
+	struct acl_role_label *match;
+	struct role_allowed_ip *ipp;
+	unsigned int x;
+	u32 curr_ip = task->signal->saved_ip;
+
+	match = state->acl_role_set.r_hash[index];
+
+	while (match) {
+		if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
+			for (x = 0; x < match->domain_child_num; x++) {
+				if (match->domain_children[x] == uid)
+					goto found;
+			}
+		} else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
+			break;
+		match = match->next;
+	}
+found:
+	if (match == NULL) {
+	      try_group:
+		index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
+		match = state->acl_role_set.r_hash[index];
+
+		while (match) {
+			if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
+				for (x = 0; x < match->domain_child_num; x++) {
+					if (match->domain_children[x] == gid)
+						goto found2;
+				}
+			} else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
+				break;
+			match = match->next;
+		}
+found2:
+		if (match == NULL)
+			match = state->default_role;
+		if (match->allowed_ips == NULL)
+			return match;
+		else {
+			for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
+				if (likely
+				    ((ntohl(curr_ip) & ipp->netmask) ==
+				     (ntohl(ipp->addr) & ipp->netmask)))
+					return match;
+			}
+			match = state->default_role;
+		}
+	} else if (match->allowed_ips == NULL) {
+		return match;
+	} else {
+		for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
+			if (likely
+			    ((ntohl(curr_ip) & ipp->netmask) ==
+			     (ntohl(ipp->addr) & ipp->netmask)))
+				return match;
+		}
+		goto try_group;
+	}
+
+	return match;
+}
+
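
The allowed_ips test in __lookup_acl_role_label() is a plain masked comparison in host byte order; a small userspace sketch (not part of the patch) with example addresses:

/* Sketch: the "is the task's source IP inside this role's allowed
 * network" comparison used above.  Addresses and mask are examples. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int ip_allowed(uint32_t curr_ip_be, uint32_t addr_be, uint32_t netmask)
{
	/* both addresses converted to host order, then masked, as in
	 * __lookup_acl_role_label() */
	return (ntohl(curr_ip_be) & netmask) == (ntohl(addr_be) & netmask);
}

int main(void)
{
	struct in_addr ip, net;

	inet_aton("192.168.1.42", &ip);
	inet_aton("192.168.1.0", &net);
	printf("%d\n", ip_allowed(ip.s_addr, net.s_addr, 0xffffff00));	/* prints 1 */
	return 0;
}
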
+static struct acl_role_label *
+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
+		      const gid_t gid)
+{
+	return __lookup_acl_role_label(&running_polstate, task, uid, gid);
+}
+
+struct acl_subject_label *
+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
+		      const struct acl_role_label *role)
+{
+	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
+	struct acl_subject_label *match;
+
+	match = role->subj_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		match = match->next;
+	}
+
+	if (match && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+struct acl_subject_label *
+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
+			  const struct acl_role_label *role)
+{
+	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
+	struct acl_subject_label *match;
+
+	match = role->subj_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       !(match->mode & GR_DELETED))) {
+		match = match->next;
+	}
+
+	if (match && (match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+static struct acl_object_label *
+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
+		     const struct acl_subject_label *subj)
+{
+	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
+	struct acl_object_label *match;
+
+	match = subj->obj_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		match = match->next;
+	}
+
+	if (match && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+static struct acl_object_label *
+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
+		     const struct acl_subject_label *subj)
+{
+	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
+	struct acl_object_label *match;
+
+	match = subj->obj_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       !(match->mode & GR_DELETED))) {
+		match = match->next;
+	}
+
+	if (match && (match->mode & GR_DELETED))
+		return match;
+
+	match = subj->obj_hash[index];
+
+	while (match && (match->inode != ino || match->device != dev ||
+	       (match->mode & GR_DELETED))) {
+		match = match->next;
+	}
+
+	if (match && !(match->mode & GR_DELETED))
+		return match;
+	else
+		return NULL;
+}
+
+struct name_entry *
+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
+{
+	unsigned int len = strlen(name);
+	unsigned int key = full_name_hash(name, len);
+	unsigned int index = key % state->name_set.n_size;
+	struct name_entry *match;
+
+	match = state->name_set.n_hash[index];
+
+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
+		match = match->next;
+
+	return match;
+}
+
+static struct name_entry *
+lookup_name_entry(const char *name)
+{
+	return __lookup_name_entry(&running_polstate, name);
+}
+
+static struct name_entry *
+lookup_name_entry_create(const char *name)
+{
+	unsigned int len = strlen(name);
+	unsigned int key = full_name_hash(name, len);
+	unsigned int index = key % running_polstate.name_set.n_size;
+	struct name_entry *match;
+
+	match = running_polstate.name_set.n_hash[index];
+
+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
+			 !match->deleted))
+		match = match->next;
+
+	if (match && match->deleted)
+		return match;
+
+	match = running_polstate.name_set.n_hash[index];
+
+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
+			 match->deleted))
+		match = match->next;
+
+	if (match && !match->deleted)
+		return match;
+	else
+		return NULL;
+}
+
+static struct inodev_entry *
+lookup_inodev_entry(const ino_t ino, const dev_t dev)
+{
+	unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
+	struct inodev_entry *match;
+
+	match = running_polstate.inodev_set.i_hash[index];
+
+	while (match && (match->nentry->inode != ino || match->nentry->device != dev))
+		match = match->next;
+
+	return match;
+}
+
+void
+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
+{
+	unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
+				    state->inodev_set.i_size);
+	struct inodev_entry **curr;
+
+	entry->prev = NULL;
+
+	curr = &state->inodev_set.i_hash[index];
+	if (*curr != NULL)
+		(*curr)->prev = entry;
+	
+	entry->next = *curr;
+	*curr = entry;
+
+	return;
+}
+
+static void
+insert_inodev_entry(struct inodev_entry *entry)
+{
+	__insert_inodev_entry(&running_polstate, entry);
+}
+
+void
+insert_acl_obj_label(struct acl_object_label *obj,
+		     struct acl_subject_label *subj)
+{
+	unsigned int index =
+	    gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
+	struct acl_object_label **curr;
+
+	obj->prev = NULL;
+
+	curr = &subj->obj_hash[index];
+	if (*curr != NULL)
+		(*curr)->prev = obj;
+
+	obj->next = *curr;
+	*curr = obj;
+
+	return;
+}
+
+void
+insert_acl_subj_label(struct acl_subject_label *obj,
+		      struct acl_role_label *role)
+{
+	unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
+	struct acl_subject_label **curr;
+
+	obj->prev = NULL;
+
+	curr = &role->subj_hash[index];
+	if (*curr != NULL)
+		(*curr)->prev = obj;
+
+	obj->next = *curr;
+	*curr = obj;
+
+	return;
+}
+
+/* derived from glibc fnmatch(): 0: match, 1: no match */
+
+static int
+glob_match(const char *p, const char *n)
+{
+	char c;
+
+	while ((c = *p++) != '\0') {
+	switch (c) {
+		case '?':
+			if (*n == '\0')
+				return 1;
+			else if (*n == '/')
+				return 1;
+			break;
+		case '\\':
+			if (*n != c)
+				return 1;
+			break;
+		case '*':
+			for (c = *p++; c == '?' || c == '*'; c = *p++) {
+				if (*n == '/')
+					return 1;
+				else if (c == '?') {
+					if (*n == '\0')
+						return 1;
+					else
+						++n;
+				}
+			}
+			if (c == '\0') {
+				return 0;
+			} else {
+				const char *endp;
+
+				if ((endp = strchr(n, '/')) == NULL)
+					endp = n + strlen(n);
+
+				if (c == '[') {
+					for (--p; n < endp; ++n)
+						if (!glob_match(p, n))
+							return 0;
+				} else if (c == '/') {
+					while (*n != '\0' && *n != '/')
+						++n;
+					if (*n == '/' && !glob_match(p, n + 1))
+						return 0;
+				} else {
+					for (--p; n < endp; ++n)
+						if (*n == c && !glob_match(p, n))
+							return 0;
+				}
+
+				return 1;
+			}
+		case '[':
+			{
+			int not;
+			char cold;
+
+			if (*n == '\0' || *n == '/')
+				return 1;
+
+			not = (*p == '!' || *p == '^');
+			if (not)
+				++p;
+
+			c = *p++;
+			for (;;) {
+				unsigned char fn = (unsigned char)*n;
+
+				if (c == '\0')
+					return 1;
+				else {
+					if (c == fn)
+						goto matched;
+					cold = c;
+					c = *p++;
+
+					if (c == '-' && *p != ']') {
+						unsigned char cend = *p++;
+
+						if (cend == '\0')
+							return 1;
+
+						if (cold <= fn && fn <= cend)
+							goto matched;
+
+						c = *p++;
+					}
+				}
+
+				if (c == ']')
+					break;
+			}
+			if (!not)
+				return 1;
+			break;
+		matched:
+			while (c != ']') {
+				if (c == '\0')
+					return 1;
+
+				c = *p++;
+			}
+			if (not)
+				return 1;
+		}
+		break;
+	default:
+		if (c != *n)
+			return 1;
+	}
+
+	++n;
+	}
+
+	if (*n == '\0')
+		return 0;
+
+	if (*n == '/')
+		return 0;
+
+	return 1;
+}
+
+static struct acl_object_label *
+chk_glob_label(struct acl_object_label *globbed,
+	const struct dentry *dentry, const struct vfsmount *mnt, char **path)
+{
+	struct acl_object_label *tmp;
+
+	if (*path == NULL)
+		*path = gr_to_filename_nolock(dentry, mnt);
+
+	tmp = globbed;
+
+	while (tmp) {
+		if (!glob_match(tmp->filename, *path))
+			return tmp;
+		tmp = tmp->next;
+	}
+
+	return NULL;
+}
+
+static struct acl_object_label *
+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
+	    const ino_t curr_ino, const dev_t curr_dev,
+	    const struct acl_subject_label *subj, char **path, const int checkglob)
+{
+	struct acl_subject_label *tmpsubj;
+	struct acl_object_label *retval;
+	struct acl_object_label *retval2;
+
+	tmpsubj = (struct acl_subject_label *) subj;
+	read_lock(&gr_inode_lock);
+	do {
+		retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
+		if (retval) {
+			if (checkglob && retval->globbed) {
+				retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
+				if (retval2)
+					retval = retval2;
+			}
+			break;
+		}
+	} while ((tmpsubj = tmpsubj->parent_subject));
+	read_unlock(&gr_inode_lock);
+
+	return retval;
+}
+
+static __inline__ struct acl_object_label *
+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
+	    struct dentry *curr_dentry,
+	    const struct acl_subject_label *subj, char **path, const int checkglob)
+{
+	int newglob = checkglob;
+	ino_t inode;
+	dev_t device;
+
+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking
+	   as we don't want a / * rule to match instead of the / object
+	   don't do this for create lookups that call this function though, since they're looking up
+	   on the parent and thus need globbing checks on all paths
+	*/
+	if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
+		newglob = GR_NO_GLOB;
+
+	spin_lock(&curr_dentry->d_lock);
+	inode = curr_dentry->d_inode->i_ino;
+	device = __get_dev(curr_dentry);
+	spin_unlock(&curr_dentry->d_lock);
+
+	return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
+}
+
+#ifdef CONFIG_HUGETLBFS
+static inline bool
+is_hugetlbfs_mnt(const struct vfsmount *mnt)
+{
+	int i;
+	for (i = 0; i < HUGE_MAX_HSTATE; i++) {
+		if (unlikely(hugetlbfs_vfsmount[i] == mnt))
+			return true;
+	}
+
+	return false;
+}
+#endif
+
+static struct acl_object_label *
+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	      const struct acl_subject_label *subj, char *path, const int checkglob)
+{
+	struct dentry *dentry = (struct dentry *) l_dentry;
+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
+	struct mount *real_mnt = real_mount(mnt);
+	struct acl_object_label *retval;
+	struct dentry *parent;
+
+	read_seqlock_excl(&mount_lock);
+	write_seqlock(&rename_lock);
+
+	if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
+#ifdef CONFIG_NET
+	    mnt == sock_mnt ||
+#endif
+#ifdef CONFIG_HUGETLBFS
+	    (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
+#endif
+		/* ignore Eric Biederman */
+	    IS_PRIVATE(l_dentry->d_inode))) {
+		retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
+		goto out;
+	}
+
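+	/* walk upward from the dentry toward the RBAC root, crossing mount points via
+	   mnt_mountpoint/mnt_parent, and return the first object label that matches */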
+	for (;;) {
+		if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
+			break;
+
+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
+			if (!mnt_has_parent(real_mnt))
+				break;
+
+			retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
+			if (retval != NULL)
+				goto out;
+
+			dentry = real_mnt->mnt_mountpoint;
+			real_mnt = real_mnt->mnt_parent;
+			mnt = &real_mnt->mnt;
+			continue;
+		}
+
+		parent = dentry->d_parent;
+		retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
+		if (retval != NULL)
+			goto out;
+
+		dentry = parent;
+	}
+
+	retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
+
+	/* gr_real_root is pinned so we don't have to hold a reference */
+	if (retval == NULL)
+		retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
+out:
+	write_sequnlock(&rename_lock);
+	read_sequnlock_excl(&mount_lock);
+
+	BUG_ON(retval == NULL);
+
+	return retval;
+}
+
+static __inline__ struct acl_object_label *
+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	      const struct acl_subject_label *subj)
+{
+	char *path = NULL;
+	return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
+}
+
+static __inline__ struct acl_object_label *
+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	      const struct acl_subject_label *subj)
+{
+	char *path = NULL;
+	return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
+}
+
+static __inline__ struct acl_object_label *
+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+		     const struct acl_subject_label *subj, char *path)
+{
+	return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
+}
+
+struct acl_subject_label *
+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
+	       const struct acl_role_label *role)
+{
+	struct dentry *dentry = (struct dentry *) l_dentry;
+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
+	struct mount *real_mnt = real_mount(mnt);
+	struct acl_subject_label *retval;
+	struct dentry *parent;
+
+	read_seqlock_excl(&mount_lock);
+	write_seqlock(&rename_lock);
+
+	for (;;) {
+		if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
+			break;
+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
+			if (!mnt_has_parent(real_mnt))
+				break;
+
+			spin_lock(&dentry->d_lock);
+			read_lock(&gr_inode_lock);
+			retval =
+				lookup_acl_subj_label(dentry->d_inode->i_ino,
+						__get_dev(dentry), role);
+			read_unlock(&gr_inode_lock);
+			spin_unlock(&dentry->d_lock);
+			if (retval != NULL)
+				goto out;
+
+			dentry = real_mnt->mnt_mountpoint;
+			real_mnt = real_mnt->mnt_parent;
+			mnt = &real_mnt->mnt;
+			continue;
+		}
+
+		spin_lock(&dentry->d_lock);
+		read_lock(&gr_inode_lock);
+		retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
+					  __get_dev(dentry), role);
+		read_unlock(&gr_inode_lock);
+		parent = dentry->d_parent;
+		spin_unlock(&dentry->d_lock);
+
+		if (retval != NULL)
+			goto out;
+
+		dentry = parent;
+	}
+
+	spin_lock(&dentry->d_lock);
+	read_lock(&gr_inode_lock);
+	retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
+				  __get_dev(dentry), role);
+	read_unlock(&gr_inode_lock);
+	spin_unlock(&dentry->d_lock);
+
+	if (unlikely(retval == NULL)) {
+		/* gr_real_root is pinned, we don't need to hold a reference */
+		read_lock(&gr_inode_lock);
+		retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
+					  __get_dev(gr_real_root.dentry), role);
+		read_unlock(&gr_inode_lock);
+	}
+out:
+	write_sequnlock(&rename_lock);
+	read_sequnlock_excl(&mount_lock);
+
+	BUG_ON(retval == NULL);
+
+	return retval;
+}
+
+void
+assign_special_role(const char *rolename)
+{
+	struct acl_object_label *obj;
+	struct acl_role_label *r;
+	struct acl_role_label *assigned = NULL;
+	struct task_struct *tsk;
+	struct file *filp;
+
+	FOR_EACH_ROLE_START(r)
+		if (!strcmp(rolename, r->rolename) &&
+		    (r->roletype & GR_ROLE_SPECIAL)) {
+			assigned = r;
+			break;
+		}
+	FOR_EACH_ROLE_END(r)
+
+	if (!assigned)
+		return;
+
+	read_lock(&tasklist_lock);
+	read_lock(&grsec_exec_file_lock);
+
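+	/* the special role is applied to the parent of the authenticating process
+	   (typically the shell that invoked gradm), not to current itself */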
+	tsk = current->real_parent;
+	if (tsk == NULL)
+		goto out_unlock;
+
+	filp = tsk->exec_file;
+	if (filp == NULL)
+		goto out_unlock;
+
+	tsk->is_writable = 0;
+	tsk->inherited = 0;
+
+	tsk->acl_sp_role = 1;
+	tsk->acl_role_id = ++acl_sp_role_value;
+	tsk->role = assigned;
+	tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
+
+	/* ignore additional mmap checks for processes that are writable
+	   by the default ACL */
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		tsk->is_writable = 1;
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		tsk->is_writable = 1;
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+	printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
+			tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
+#endif
+
+out_unlock:
+	read_unlock(&grsec_exec_file_lock);
+	read_unlock(&tasklist_lock);
+	return;
+}
+
+
+static void
+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
+{
+	struct task_struct *task = current;
+	const struct cred *cred = current_cred();
+
+	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
+		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
+		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
+		       1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
+
+	return;
+}
+
+static void
+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
+{
+	struct task_struct *task = current;
+	const struct cred *cred = current_cred();
+
+	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
+		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
+		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
+		       'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
+
+	return;
+}
+
+static void
+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
+{
+	struct task_struct *task = current;
+	const struct cred *cred = current_cred();
+
+	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
+		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
+		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
+		       'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
+
+	return;
+}
+
+static void
+gr_set_proc_res(struct task_struct *task)
+{
+	struct acl_subject_label *proc;
+	unsigned short i;
+
+	proc = task->acl;
+
+	if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
+		return;
+
+	for (i = 0; i < RLIM_NLIMITS; i++) {
+		if (!(proc->resmask & (1U << i)))
+			continue;
+
+		task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
+		task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
+
+		if (i == RLIMIT_CPU)
+			update_rlimit_cpu(task, proc->res[i].rlim_cur);
+	}
+
+	return;
+}
+
+/* both of the below must be called with
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	read_lock(&grsec_exec_file_lock);
+*/
+
+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
+{
+	char *tmpname;
+	struct acl_subject_label *tmpsubj;
+	struct file *filp;
+	struct name_entry *nmatch;
+
+	filp = task->exec_file;
+	if (filp == NULL)
+		return NULL;
+
+	/* the following is to apply the correct subject
+	   on binaries running when the RBAC system
+	   is enabled, when the binaries have been
+	   replaced or deleted since their execution
+	   -----
+	   when the RBAC system starts, the inode/dev
+	   from exec_file will be one the RBAC system
+	   is unaware of.  It only knows the inode/dev
+	   of the present file on disk, or the absence
+	   of it.
+	*/
+
+	if (filename)
+		nmatch = __lookup_name_entry(state, filename);
+	else {
+		preempt_disable();
+		tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
+
+		nmatch = __lookup_name_entry(state, tmpname);
+		preempt_enable();
+	}
+	tmpsubj = NULL;
+	if (nmatch) {
+		if (nmatch->deleted)
+			tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
+		else
+			tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
+	}
+	/* this also works for the reload case -- if we don't match a potentially inherited subject
+	   then we fall back to a normal lookup based on the binary's ino/dev
+	*/
+	if (tmpsubj == NULL)
+		tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
+
+	return tmpsubj;
+}
+
+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
+{
+	return __gr_get_subject_for_task(&running_polstate, task, filename);
+}
+
+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
+{
+	struct acl_object_label *obj;
+	struct file *filp;
+
+	filp = task->exec_file;
+
+	task->acl = subj;
+	task->is_writable = 0;
+	/* ignore additional mmap checks for processes that are writable 
+	   by the default ACL */
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+
+	gr_set_proc_res(task);
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+	printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task),
task->role->rolename, task->acl->filename);
+#endif
+}
+
+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
+{
+	__gr_apply_subject_to_task(&running_polstate, task, subj);
+}
+
+__u32
+gr_search_file(const struct dentry * dentry, const __u32 mode,
+	       const struct vfsmount * mnt)
+{
+	__u32 retval = mode;
+	struct acl_subject_label *curracl;
+	struct acl_object_label *currobj;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (mode & ~GR_AUDITS);
+
+	curracl = current->acl;
+
+	currobj = chk_obj_label(dentry, mnt, curracl);
+	retval = currobj->mode & mode;
+
+	/* if we're opening a specified transfer file for writing
+	   (e.g. /dev/initctl), then transfer our role to init
+	*/
+	if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
+		     current->role->roletype & GR_ROLE_PERSIST)) {
+		struct task_struct *task = init_pid_ns.child_reaper;
+
+		if (task->role != current->role) {
+			struct acl_subject_label *subj;
+
+			task->acl_sp_role = 0;
+			task->acl_role_id = current->acl_role_id;
+			task->role = current->role;
+			rcu_read_lock();
+			read_lock(&grsec_exec_file_lock);
+			subj = gr_get_subject_for_task(task, NULL);
+			gr_apply_subject_to_task(task, subj);
+			read_unlock(&grsec_exec_file_lock);
+			rcu_read_unlock();
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
+		}
+	}
+
+	if (unlikely
+	    ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
+	     && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
+		__u32 new_mode = mode;
+
+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+		retval = new_mode;
+
+		if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
+			new_mode |= GR_INHERIT;
+
+		if (!(mode & GR_NOLEARN))
+			gr_log_learn(dentry, mnt, new_mode);
+	}
+
+	return retval;
+}
+
+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
+					      const struct dentry *parent,
+					      const struct vfsmount *mnt)
+{
+	struct name_entry *match;
+	struct acl_object_label *matchpo;
+	struct acl_subject_label *curracl;
+	char *path;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return NULL;
+
+	preempt_disable();
+	path = gr_to_filename_rbac(new_dentry, mnt);
+	match = lookup_name_entry_create(path);
+
+	curracl = current->acl;
+
+	if (match) {
+		read_lock(&gr_inode_lock);
+		matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
+		read_unlock(&gr_inode_lock);
+
+		if (matchpo) {
+			preempt_enable();
+			return matchpo;
+		}
+	}
+
+	// lookup parent
+
+	matchpo = chk_obj_create_label(parent, mnt, curracl, path);
+
+	preempt_enable();
+	return matchpo;
+}
+
+__u32
+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
+		const struct vfsmount * mnt, const __u32 mode)
+{
+	struct acl_object_label *matchpo;
+	__u32 retval;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (mode & ~GR_AUDITS);
+
+	matchpo = gr_get_create_object(new_dentry, parent, mnt);
+
+	retval = matchpo->mode & mode;
+
+	if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
+	    && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
+		__u32 new_mode = mode;
+
+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
+
+		gr_log_learn(new_dentry, mnt, new_mode);
+		return new_mode;
+	}
+
+	return retval;
+}
+
+__u32
+gr_check_link(const struct dentry * new_dentry,
+	      const struct dentry * parent_dentry,
+	      const struct vfsmount * parent_mnt,
+	      const struct dentry * old_dentry, const struct vfsmount * old_mnt)
+{
+	struct acl_object_label *obj;
+	__u32 oldmode, newmode;
+	__u32 needmode;
+	__u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
+			   GR_DELETE | GR_INHERIT;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return (GR_CREATE | GR_LINK);
+
+	obj = chk_obj_label(old_dentry, old_mnt, current->acl);
+	oldmode = obj->mode;
+
+	obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
+	newmode = obj->mode;
+
+	needmode = newmode & checkmodes;
+
+	// old name for hardlink must have at least the permissions of the new name
+	if ((oldmode & needmode) != needmode)
+		goto bad;
+
+	// if old name had restrictions/auditing, make sure the new name does as well
+	needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
+
+	// don't allow hardlinking of suid/sgid/fcapped files without permission
+	if (is_privileged_binary(old_dentry))
+		needmode |= GR_SETID;
+
+	if ((newmode & needmode) != needmode)
+		goto bad;
+
+	// enforce minimum permissions
+	if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
+		return newmode;
+bad:
+	needmode = oldmode;
+	if (is_privileged_binary(old_dentry))
+		needmode |= GR_SETID;
+	
+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
+		gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
+		return (GR_CREATE | GR_LINK);
+	} else if (newmode & GR_SUPPRESS)
+		return GR_SUPPRESS;
+	else
+		return 0;
+}
+
+int
+gr_check_hidden_task(const struct task_struct *task)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
+		return 1;
+
+	return 0;
+}
+
+int
+gr_check_protected_task(const struct task_struct *task)
+{
+	if (unlikely(!(gr_status & GR_READY) || !task))
+		return 0;
+
+	if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
+	    task->acl != current->acl)
+		return 1;
+
+	return 0;
+}
+
+int
+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
+{
+	struct task_struct *p;
+	int ret = 0;
+
+	if (unlikely(!(gr_status & GR_READY) || !pid))
+		return ret;
+
+	read_lock(&tasklist_lock);
+	do_each_pid_task(pid, type, p) {
+		if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
+		    p->acl != current->acl) {
+			ret = 1;
+			goto out;
+		}
+	} while_each_pid_task(pid, type, p);
+out:
+	read_unlock(&tasklist_lock);
+
+	return ret;
+}
+
+void
+gr_copy_label(struct task_struct *tsk)
+{
+	struct task_struct *p = current;
+
+	tsk->inherited = p->inherited;
+	tsk->acl_sp_role = 0;
+	tsk->acl_role_id = p->acl_role_id;
+	tsk->acl = p->acl;
+	tsk->role = p->role;
+	tsk->signal->used_accept = 0;
+	tsk->signal->curr_ip = p->signal->curr_ip;
+	tsk->signal->saved_ip = p->signal->saved_ip;
+	if (p->exec_file)
+		get_file(p->exec_file);
+	tsk->exec_file = p->exec_file;
+	tsk->is_writable = p->is_writable;
+	if (unlikely(p->signal->used_accept)) {
+		p->signal->curr_ip = 0;
+		p->signal->saved_ip = 0;
+	}
+
+	return;
+}
+
+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
+
+int
+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
+{
+	unsigned int i;
+	__u16 num;
+	uid_t *uidlist;
+	uid_t curuid;
+	int realok = 0;
+	int effectiveok = 0;
+	int fsok = 0;
+	uid_t globalreal, globaleffective, globalfs;
+
+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
+	struct user_struct *user;
+
+	if (!uid_valid(real))
+		goto skipit;
+
+	/* find user based on global namespace */
+
+	globalreal = GR_GLOBAL_UID(real);
+
+	user = find_user(make_kuid(&init_user_ns, globalreal));
+	if (user == NULL)
+		goto skipit;
+
+	if (gr_process_kernel_setuid_ban(user)) {
+		/* for find_user */
+		free_uid(user);
+		return 1;
+	}
+
+	/* for find_user */
+	free_uid(user);
+
+skipit:
+#endif
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
+		gr_log_learn_uid_change(real, effective, fs);
+
+	num = current->acl->user_trans_num;
+	uidlist = current->acl->user_transitions;
+
+	if (uidlist == NULL)
+		return 0;
+
+	if (!uid_valid(real)) {
+		realok = 1;
+		globalreal = (uid_t)-1;		
+	} else {
+		globalreal = GR_GLOBAL_UID(real);		
+	}
+	if (!uid_valid(effective)) {
+		effectiveok = 1;
+		globaleffective = (uid_t)-1;
+	} else {
+		globaleffective = GR_GLOBAL_UID(effective);
+	}
+	if (!uid_valid(fs)) {
+		fsok = 1;
+		globalfs = (uid_t)-1;
+	} else {
+		globalfs = GR_GLOBAL_UID(fs);
+	}
+
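+	/* GR_ID_ALLOW: every requested uid must appear in the transition list;
+	   GR_ID_DENY: the change is refused if any requested uid appears in it */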
+	if (current->acl->user_trans_type & GR_ID_ALLOW) {
+		for (i = 0; i < num; i++) {
+			curuid = uidlist[i];
+			if (globalreal == curuid)
+				realok = 1;
+			if (globaleffective == curuid)
+				effectiveok = 1;
+			if (globalfs == curuid)
+				fsok = 1;
+		}
+	} else if (current->acl->user_trans_type & GR_ID_DENY) {
+		for (i = 0; i < num; i++) {
+			curuid = uidlist[i];
+			if (globalreal == curuid)
+				break;
+			if (globaleffective == curuid)
+				break;
+			if (globalfs == curuid)
+				break;
+		}
+		/* not in deny list */
+		if (i == num) {
+			realok = 1;
+			effectiveok = 1;
+			fsok = 1;
+		}
+	}
+
+	if (realok && effectiveok && fsok)
+		return 0;
+	else {
+		gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
+		return 1;
+	}
+}
+
+int
+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
+{
+	unsigned int i;
+	__u16 num;
+	gid_t *gidlist;
+	gid_t curgid;
+	int realok = 0;
+	int effectiveok = 0;
+	int fsok = 0;
+	gid_t globalreal, globaleffective, globalfs;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
+		gr_log_learn_gid_change(real, effective, fs);
+
+	num = current->acl->group_trans_num;
+	gidlist = current->acl->group_transitions;
+
+	if (gidlist == NULL)
+		return 0;
+
+	if (!gid_valid(real)) {
+		realok = 1;
+		globalreal = (gid_t)-1;		
+	} else {
+		globalreal = GR_GLOBAL_GID(real);
+	}
+	if (!gid_valid(effective)) {
+		effectiveok = 1;
+		globaleffective = (gid_t)-1;		
+	} else {
+		globaleffective = GR_GLOBAL_GID(effective);
+	}
+	if (!gid_valid(fs)) {
+		fsok = 1;
+		globalfs = (gid_t)-1;		
+	} else {
+		globalfs = GR_GLOBAL_GID(fs);
+	}
+
+	if (current->acl->group_trans_type & GR_ID_ALLOW) {
+		for (i = 0; i < num; i++) {
+			curgid = gidlist[i];
+			if (globalreal == curgid)
+				realok = 1;
+			if (globaleffective == curgid)
+				effectiveok = 1;
+			if (globalfs == curgid)
+				fsok = 1;
+		}
+	} else if (current->acl->group_trans_type & GR_ID_DENY) {
+		for (i = 0; i < num; i++) {
+			curgid = gidlist[i];
+			if (globalreal == curgid)
+				break;
+			if (globaleffective == curgid)
+				break;
+			if (globalfs == curgid)
+				break;
+		}
+		/* not in deny list */
+		if (i == num) {
+			realok = 1;
+			effectiveok = 1;
+			fsok = 1;
+		}
+	}
+
+	if (realok && effectiveok && fsok)
+		return 0;
+	else {
+		gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
+		return 1;
+	}
+}
+
+extern int gr_acl_is_capable(const int cap);
+
+void
+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
+{
+	struct acl_role_label *role = task->role;
+	struct acl_subject_label *subj = NULL;
+	struct acl_object_label *obj;
+	struct file *filp;
+	uid_t uid;
+	gid_t gid;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	uid = GR_GLOBAL_UID(kuid);
+	gid = GR_GLOBAL_GID(kgid);
+
+	filp = task->exec_file;
+
+	/* kernel process, we'll give them the kernel role */
+	if (unlikely(!filp)) {
+		task->role = running_polstate.kernel_role;
+		task->acl = running_polstate.kernel_role->root_label;
+		return;
+	} else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
+		/* save the current ip at time of role lookup so that the proper
+		   IP will be learned for role_allowed_ip */
+		task->signal->saved_ip = task->signal->curr_ip;
+		role = lookup_acl_role_label(task, uid, gid);
+	}
+
+	/* don't change the role if we're not a privileged process */
+	if (role && task->role != role &&
+	    (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
+	     ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
+		return;
+
+	/* perform subject lookup in possibly new role
+	   we can use this result below in the case where role == task->role
+	*/
+	subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
+
+	/* if we changed uid/gid, but result in the same role
+	   and are using inheritance, don't lose the inherited subject
+	   if current subject is other than what normal lookup
+	   would result in, we arrived via inheritance, don't
+	   lose subject
+	*/
+	if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
+				   (subj == task->acl)))
+		task->acl = subj;
+
+	/* leave task->inherited unaffected */
+
+	task->role = role;
+
+	task->is_writable = 0;
+
+	/* ignore additional mmap checks for processes that are writable 
+	   by the default ACL */
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+	obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+	printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm,
task_pid_nr(task), task->role->rolename, task->acl->filename);
+#endif
+
+	gr_set_proc_res(task);
+
+	return;
+}
+
+int
+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
+		  const int unsafe_flags)
+{
+	struct task_struct *task = current;
+	struct acl_subject_label *newacl;
+	struct acl_object_label *obj;
+	__u32 retmode;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	newacl = chk_subj_label(dentry, mnt, task->role);
+
+	/* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
+	   did an exec
+	*/
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
+	    (task->parent->acl->mode & GR_POVERRIDE))) {
+		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
+		goto skip_check;
+	}
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+
+	if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
+	     !(task->role->roletype & GR_ROLE_GOD) &&
+	     !gr_search_file(dentry, GR_PTRACERD, mnt) &&
+	     !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
+		if (unsafe_flags & LSM_UNSAFE_SHARE)
+			gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
+		else
+			gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
+		return -EACCES;
+	}
+
+skip_check:
+
+	obj = chk_obj_label(dentry, mnt, task->acl);
+	retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
+
+	if (!(task->acl->mode & GR_INHERITLEARN) &&
+	    ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
+		if (obj->nested)
+			task->acl = obj->nested;
+		else
+			task->acl = newacl;
+		task->inherited = 0;
+	} else {
+		task->inherited = 1;
+		if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
+			gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
+	}
+
+	task->is_writable = 0;
+
+	/* ignore additional mmap checks for processes that are writable 
+	   by the default ACL */
+	obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+	obj = chk_obj_label(dentry, mnt, task->role->root_label);
+	if (unlikely(obj->mode & GR_WRITE))
+		task->is_writable = 1;
+
+	gr_set_proc_res(task);
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+	printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm,
task_pid_nr(task), task->role->rolename, task->acl->filename);
+#endif
+	return 0;
+}
+
+/* always called with valid inodev ptr */
+static void
+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
+{
+	struct acl_object_label *matchpo;
+	struct acl_subject_label *matchps;
+	struct acl_subject_label *subj;
+	struct acl_role_label *role;
+	unsigned int x;
+
+	FOR_EACH_ROLE_START(role)
+		FOR_EACH_SUBJECT_START(role, subj, x)
+			if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
+				matchpo->mode |= GR_DELETED;
+		FOR_EACH_SUBJECT_END(subj,x)
+		FOR_EACH_NESTED_SUBJECT_START(role, subj)
+			/* nested subjects aren't in the role's subj_hash table */
+			if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
+				matchpo->mode |= GR_DELETED;
+		FOR_EACH_NESTED_SUBJECT_END(subj)
+		if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
+			matchps->mode |= GR_DELETED;
+	FOR_EACH_ROLE_END(role)
+
+	inodev->nentry->deleted = 1;
+
+	return;
+}
+
+void
+gr_handle_delete(const ino_t ino, const dev_t dev)
+{
+	struct inodev_entry *inodev;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	write_lock(&gr_inode_lock);
+	inodev = lookup_inodev_entry(ino, dev);
+	if (inodev != NULL)
+		do_handle_delete(inodev, ino, dev);
+	write_unlock(&gr_inode_lock);
+
+	return;
+}
+
+static void
+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
+		     const ino_t newinode, const dev_t newdevice,
+		     struct acl_subject_label *subj)
+{
+	unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
+	struct acl_object_label *match;
+
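+	/* locate the object entry left flagged GR_DELETED for the old inode/dev,
+	   unhash it, and re-insert it under the inode/dev of the newly created file */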
+	match = subj->obj_hash[index];
+
+	while (match && (match->inode != oldinode ||
+	       match->device != olddevice ||
+	       !(match->mode & GR_DELETED)))
+		match = match->next;
+
+	if (match && (match->inode == oldinode)
+	    && (match->device == olddevice)
+	    && (match->mode & GR_DELETED)) {
+		if (match->prev == NULL) {
+			subj->obj_hash[index] = match->next;
+			if (match->next != NULL)
+				match->next->prev = NULL;
+		} else {
+			match->prev->next = match->next;
+			if (match->next != NULL)
+				match->next->prev = match->prev;
+		}
+		match->prev = NULL;
+		match->next = NULL;
+		match->inode = newinode;
+		match->device = newdevice;
+		match->mode &= ~GR_DELETED;
+
+		insert_acl_obj_label(match, subj);
+	}
+
+	return;
+}
+
+static void
+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
+		      const ino_t newinode, const dev_t newdevice,
+		      struct acl_role_label *role)
+{
+	unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
+	struct acl_subject_label *match;
+
+	match = role->subj_hash[index];
+
+	while (match && (match->inode != oldinode ||
+	       match->device != olddevice ||
+	       !(match->mode & GR_DELETED)))
+		match = match->next;
+
+	if (match && (match->inode == oldinode)
+	    && (match->device == olddevice)
+	    && (match->mode & GR_DELETED)) {
+		if (match->prev == NULL) {
+			role->subj_hash[index] = match->next;
+			if (match->next != NULL)
+				match->next->prev = NULL;
+		} else {
+			match->prev->next = match->next;
+			if (match->next != NULL)
+				match->next->prev = match->prev;
+		}
+		match->prev = NULL;
+		match->next = NULL;
+		match->inode = newinode;
+		match->device = newdevice;
+		match->mode &= ~GR_DELETED;
+
+		insert_acl_subj_label(match, role);
+	}
+
+	return;
+}
+
+static void
+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
+		    const ino_t newinode, const dev_t newdevice)
+{
+	unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
+	struct inodev_entry *match;
+
+	match = running_polstate.inodev_set.i_hash[index];
+
+	while (match && (match->nentry->inode != oldinode ||
+	       match->nentry->device != olddevice || !match->nentry->deleted))
+		match = match->next;
+
+	if (match && (match->nentry->inode == oldinode)
+	    && (match->nentry->device == olddevice) &&
+	    match->nentry->deleted) {
+		if (match->prev == NULL) {
+			running_polstate.inodev_set.i_hash[index] = match->next;
+			if (match->next != NULL)
+				match->next->prev = NULL;
+		} else {
+			match->prev->next = match->next;
+			if (match->next != NULL)
+				match->next->prev = match->prev;
+		}
+		match->prev = NULL;
+		match->next = NULL;
+		match->nentry->inode = newinode;
+		match->nentry->device = newdevice;
+		match->nentry->deleted = 0;
+
+		insert_inodev_entry(match);
+	}
+
+	return;
+}
+
+static void
+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
+{
+	struct acl_subject_label *subj;
+	struct acl_role_label *role;
+	unsigned int x;
+
+	FOR_EACH_ROLE_START(role)
+		update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
+
+		FOR_EACH_NESTED_SUBJECT_START(role, subj)
+			if ((subj->inode == ino) && (subj->device == dev)) {
+				subj->inode = ino;
+				subj->device = dev;
+			}
+			/* nested subjects aren't in the role's subj_hash table */
+			update_acl_obj_label(matchn->inode, matchn->device,
+					     ino, dev, subj);
+		FOR_EACH_NESTED_SUBJECT_END(subj)
+		FOR_EACH_SUBJECT_START(role, subj, x)
+			update_acl_obj_label(matchn->inode, matchn->device,
+					     ino, dev, subj);
+		FOR_EACH_SUBJECT_END(subj,x)
+	FOR_EACH_ROLE_END(role)
+
+	update_inodev_entry(matchn->inode, matchn->device, ino, dev);
+
+	return;
+}
+
+static void
+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
+		 const struct vfsmount *mnt)
+{
+	ino_t ino = dentry->d_inode->i_ino;
+	dev_t dev = __get_dev(dentry);
+
+	__do_handle_create(matchn, ino, dev);	
+
+	return;
+}
+
+void
+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	struct name_entry *matchn;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	preempt_disable();
+	matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
+
+	if (unlikely((unsigned long)matchn)) {
+		write_lock(&gr_inode_lock);
+		do_handle_create(matchn, dentry, mnt);
+		write_unlock(&gr_inode_lock);
+	}
+	preempt_enable();
+
+	return;
+}
+
+void
+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
+{
+	struct name_entry *matchn;
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	preempt_disable();
+	matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
+
+	if (unlikely((unsigned long)matchn)) {
+		write_lock(&gr_inode_lock);
+		__do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
+		write_unlock(&gr_inode_lock);
+	}
+	preempt_enable();
+
+	return;
+}
+
+void
+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+		 struct dentry *old_dentry,
+		 struct dentry *new_dentry,
+		 struct vfsmount *mnt, const __u8 replace)
+{
+	struct name_entry *matchn;
+	struct inodev_entry *inodev;
+	struct inode *inode = new_dentry->d_inode;
+	ino_t old_ino = old_dentry->d_inode->i_ino;
+	dev_t old_dev = __get_dev(old_dentry);
+
+	/* vfs_rename swaps the name and parent link for old_dentry and
+	   new_dentry
+	   at this point, old_dentry has the new name, parent link, and inode
+	   for the renamed file
+	   if a file is being replaced by a rename, new_dentry has the inode
+	   and name for the replaced file
+	*/
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	preempt_disable();
+	matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
+
+	/* we wouldn't have to check d_inode if it weren't for
+	   NFS silly-renaming
+	 */
+
+	write_lock(&gr_inode_lock);
+	if (unlikely(replace && inode)) {
+		ino_t new_ino = inode->i_ino;
+		dev_t new_dev = __get_dev(new_dentry);
+
+		inodev = lookup_inodev_entry(new_ino, new_dev);
+		if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
+			do_handle_delete(inodev, new_ino, new_dev);
+	}
+
+	inodev = lookup_inodev_entry(old_ino, old_dev);
+	if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
+		do_handle_delete(inodev, old_ino, old_dev);
+
+	if (unlikely((unsigned long)matchn))
+		do_handle_create(matchn, old_dentry, mnt);
+
+	write_unlock(&gr_inode_lock);
+	preempt_enable();
+
+	return;
+}
+
+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
+	[RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
+	[RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
+	[RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
+	[RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
+	[RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
+	[RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
+	[RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
+	[RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
+	[RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
+	[RLIMIT_AS] = GR_RLIM_AS_BUMP,
+	[RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
+	[RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
+	[RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
+	[RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
+	[RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
+	[RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
+};
+
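+/* when a learning subject exceeds a tracked limit, rlim_cur is raised to the
+   wanted value plus the per-resource slack from res_learn_bumps, so that small
+   repeated increases do not each generate a new learning entry */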
+void
+gr_learn_resource(const struct task_struct *task,
+		  const int res, const unsigned long wanted, const int gt)
+{
+	struct acl_subject_label *acl;
+	const struct cred *cred;
+
+	if (unlikely((gr_status & GR_READY) &&
+		     task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
+		goto skip_reslog;
+
+	gr_log_resource(task, res, wanted, gt);
+skip_reslog:
+
+	if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
+		return;
+
+	acl = task->acl;
+
+	if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
+		   !(acl->resmask & (1U << (unsigned short) res))))
+		return;
+
+	if (wanted >= acl->res[res].rlim_cur) {
+		unsigned long res_add;
+
+		res_add = wanted + res_learn_bumps[res];
+
+		acl->res[res].rlim_cur = res_add;
+
+		if (wanted > acl->res[res].rlim_max)
+			acl->res[res].rlim_max = res_add;
+
+		/* only log the subject filename, since resource logging is supported for
+		   single-subject learning only */
+		rcu_read_lock();
+		cred = __task_cred(task);
+		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
+			       task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
+			       acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
+			       "", (unsigned long) res, &task->signal->saved_ip);
+		rcu_read_unlock();
+	}
+
+	return;
+}
+EXPORT_SYMBOL_GPL(gr_learn_resource);
+#endif
+
+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
+void
+pax_set_initial_flags(struct linux_binprm *bprm)
+{
+	struct task_struct *task = current;
+        struct acl_subject_label *proc;
+	unsigned long flags;
+
+        if (unlikely(!(gr_status & GR_READY)))
+                return;
+
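+	/* apply the subject's PaX overrides: clear any flags the policy disables,
+	   set any it enables, then write the result back to the task */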
+	flags = pax_get_flags(task);
+
+        proc = task->acl;
+
+	if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
+		flags &= ~MF_PAX_PAGEEXEC;
+	if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
+		flags &= ~MF_PAX_SEGMEXEC;
+	if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
+		flags &= ~MF_PAX_RANDMMAP;
+	if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
+		flags &= ~MF_PAX_EMUTRAMP;
+	if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
+		flags &= ~MF_PAX_MPROTECT;
+
+	if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
+		flags |= MF_PAX_PAGEEXEC;
+	if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
+		flags |= MF_PAX_SEGMEXEC;
+	if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
+		flags |= MF_PAX_RANDMMAP;
+	if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
+		flags |= MF_PAX_EMUTRAMP;
+	if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
+		flags |= MF_PAX_MPROTECT;
+
+	pax_set_flags(task, flags);
+
+        return;
+}
+#endif
+
+int
+gr_handle_proc_ptrace(struct task_struct *task)
+{
+	struct file *filp;
+	struct task_struct *tmp = task;
+	struct task_struct *curtemp = current;
+	__u32 retmode;
+
+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+#endif
+
+	read_lock(&tasklist_lock);
+	read_lock(&grsec_exec_file_lock);
+	filp = task->exec_file;
+
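+	/* walk up the target's ancestry; if we reach pid 0 without passing through
+	   current, the task being traced is not a descendant of current */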
+	while (task_pid_nr(tmp) > 0) {
+		if (tmp == curtemp)
+			break;
+		tmp = tmp->real_parent;
+	}
+
+	if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
+				((gr_status & GR_READY)	&& !(current->acl->mode & GR_RELAXPTRACE))))) {
+		read_unlock(&grsec_exec_file_lock);
+		read_unlock(&tasklist_lock);
+		return 1;
+	}
+
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	if (!(gr_status & GR_READY)) {
+		read_unlock(&grsec_exec_file_lock);
+		read_unlock(&tasklist_lock);
+		return 0;
+	}
+#endif
+
+	retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
+	read_unlock(&grsec_exec_file_lock);
+	read_unlock(&tasklist_lock);
+
+	if (retmode & GR_NOPTRACE)
+		return 1;
+
+	if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
+	    && (current->acl != task->acl || (current->acl != current->role->root_label
+	    && task_pid_nr(current) != task_pid_nr(task))))
+		return 1;
+
+	return 0;
+}
+
+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return;
+
+	if (!(current->role->roletype & GR_ROLE_GOD))
+		return;
+
+	seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
+			p->role->rolename, gr_task_roletype_to_char(p),
+			p->acl->filename);
+}
+
+int
+gr_handle_ptrace(struct task_struct *task, const long request)
+{
+	struct task_struct *tmp = task;
+	struct task_struct *curtemp = current;
+	__u32 retmode;
+
+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+#endif
+	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+		read_lock(&tasklist_lock);
+		while (task_pid_nr(tmp) > 0) {
+			if (tmp == curtemp)
+				break;
+			tmp = tmp->real_parent;
+		}
+
+		if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
+					((gr_status & GR_READY)	&& !(current->acl->mode & GR_RELAXPTRACE)))) {
+			read_unlock(&tasklist_lock);
+			gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
+			return 1;
+		}
+		read_unlock(&tasklist_lock);
+	}
+
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	if (!(gr_status & GR_READY))
+		return 0;
+#endif
+
+	read_lock(&grsec_exec_file_lock);
+	if (unlikely(!task->exec_file)) {
+		read_unlock(&grsec_exec_file_lock);
+		return 0;
+	}
+
+	retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
+	read_unlock(&grsec_exec_file_lock);
+
+	if (retmode & GR_NOPTRACE) {
+		gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
+		return 1;
+	}
+		
+	if (retmode & GR_PTRACERD) {
+		switch (request) {
+		case PTRACE_SEIZE:
+		case PTRACE_POKETEXT:
+		case PTRACE_POKEDATA:
+		case PTRACE_POKEUSR:
+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
+		case PTRACE_SETREGS:
+		case PTRACE_SETFPREGS:
+#endif
+#ifdef CONFIG_X86
+		case PTRACE_SETFPXREGS:
+#endif
+#ifdef CONFIG_ALTIVEC
+		case PTRACE_SETVRREGS:
+#endif
+			return 1;
+		default:
+			return 0;
+		}
+	} else if (!(current->acl->mode & GR_POVERRIDE) &&
+		   !(current->role->roletype & GR_ROLE_GOD) &&
+		   (current->acl != task->acl)) {
+		gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int is_writable_mmap(const struct file *filp)
+{
+	struct task_struct *task = current;
+	struct acl_object_label *obj, *obj2;
+
+	if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
+	    !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
+		obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
+		obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
+				     task->role->root_label);
+		if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
+			gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+int
+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
+{
+	__u32 mode;
+
+	if (unlikely(!file || !(prot & PROT_EXEC)))
+		return 1;
+
+	if (is_writable_mmap(file))
+		return 0;
+
+	mode =
+	    gr_search_file(file->f_path.dentry,
+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
+			   file->f_path.mnt);
+
+	if (!gr_tpe_allow(file))
+		return 0;
+
+	if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
+		return 0;
+	} else if (unlikely(!(mode & GR_EXEC))) {
+		return 0;
+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
+		return 1;
+	}
+
+	return 1;
+}
+
+int
+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
+{
+	__u32 mode;
+
+	if (unlikely(!file || !(prot & PROT_EXEC)))
+		return 1;
+
+	if (is_writable_mmap(file))
+		return 0;
+
+	mode =
+	    gr_search_file(file->f_path.dentry,
+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
+			   file->f_path.mnt);
+
+	if (!gr_tpe_allow(file))
+		return 0;
+
+	if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
+		return 0;
+	} else if (unlikely(!(mode & GR_EXEC))) {
+		return 0;
+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
+		return 1;
+	}
+
+	return 1;
+}
+
+void
+gr_acl_handle_psacct(struct task_struct *task, const long code)
+{
+	unsigned long runtime, cputime;
+	cputime_t utime, stime;
+	unsigned int wday, cday;
+	__u8 whr, chr;
+	__u8 wmin, cmin;
+	__u8 wsec, csec;
+	struct timespec timeval;
+
+	if (unlikely(!(gr_status & GR_READY) || !task->acl ||
+		     !(task->acl->mode & GR_PROCACCT)))
+		return;
+
+	do_posix_clock_monotonic_gettime(&timeval);
+	runtime = timeval.tv_sec - task->start_time.tv_sec;
+	wday = runtime / (60 * 60 * 24);
+	runtime -= wday * (60 * 60 * 24);
+	whr = runtime / (60 * 60);
+	runtime -= whr * (60 * 60);
+	wmin = runtime / 60;
+	runtime -= wmin * 60;
+	wsec = runtime;
+
+	task_cputime(task, &utime, &stime);
+	cputime = cputime_to_secs(utime + stime);
+	cday = cputime / (60 * 60 * 24);
+	cputime -= cday * (60 * 60 * 24);
+	chr = cputime / (60 * 60);
+	cputime -= chr * (60 * 60);
+	cmin = cputime / 60;
+	cputime -= cmin * 60;
+	csec = cputime;
+
+	gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
+
+	return;
+}
+
+#ifdef CONFIG_TASKSTATS
+int gr_is_taskstats_denied(int pid)
+{
+	struct task_struct *task;
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	const struct cred *cred;
+#endif
+	int ret = 0;
+
+	/* restrict taskstats viewing to un-chrooted root users
+	   who have the 'view' subject flag if the RBAC system is enabled
+	*/
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	task = find_task_by_vpid(pid);
+	if (task) {
+#ifdef CONFIG_GRKERNSEC_CHROOT
+		if (proc_is_chrooted(task))
+			ret = -EACCES;
+#endif
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		cred = __task_cred(task);
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+		if (gr_is_global_nonroot(cred->uid))
+			ret = -EACCES;
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+		if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
+			ret = -EACCES;
+#endif
+#endif
+		if (gr_status & GR_READY) {
+			if (!(task->acl->mode & GR_VIEW))
+				ret = -EACCES;
+		}
+	} else
+		ret = -ENOENT;
+
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+
+	return ret;
+}
+#endif
+
+/* AUXV entries are filled via a descendant of search_binary_handler
+   after we've already applied the subject for the target
+*/
+int gr_acl_enable_at_secure(void)
+{
+	if (unlikely(!(gr_status & GR_READY)))
+		return 0;
+
+	if (current->acl->mode & GR_ATSECURE)
+		return 1;
+
+	return 0;
+}
+	
+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
+{
+	struct task_struct *task = current;
+	struct dentry *dentry = file->f_path.dentry;
+	struct vfsmount *mnt = file->f_path.mnt;
+	struct acl_object_label *obj, *tmp;
+	struct acl_subject_label *subj;
+	unsigned int bufsize;
+	int is_not_root;
+	char *path;
+	dev_t dev = __get_dev(dentry);
+
+	if (unlikely(!(gr_status & GR_READY)))
+		return 1;
+
+	if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
+		return 1;
+
+	/* ignore Eric Biederman */
+	if (IS_PRIVATE(dentry->d_inode))
+		return 1;
+
+	subj = task->acl;
+	read_lock(&gr_inode_lock);
+	do {
+		obj = lookup_acl_obj_label(ino, dev, subj);
+		if (obj != NULL) {
+			read_unlock(&gr_inode_lock);
+			return (obj->mode & GR_FIND) ? 1 : 0;
+		}
+	} while ((subj = subj->parent_subject));
+	read_unlock(&gr_inode_lock);
+	
+	/* this is purely an optimization since we're looking for an object
+	   for the directory we're doing a readdir on
+	   if it's possible for any globbed object to match the entry we're
+	   filling into the directory, then the object we find here will be
+	   an anchor point with attached globbed objects
+	*/
+	obj = chk_obj_label_noglob(dentry, mnt, task->acl);
+	if (obj->globbed == NULL)
+		return (obj->mode & GR_FIND) ? 1 : 0;
+
+	is_not_root = ((obj->filename[0] == '/') &&
+		   (obj->filename[1] == '\0')) ? 0 : 1;
+	bufsize = PAGE_SIZE - namelen - is_not_root;
+
+	/* check bufsize > PAGE_SIZE || bufsize == 0 */
+	if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
+		return 1;
+
+	preempt_disable();
+	path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
+			   bufsize);
+
+	bufsize = strlen(path);
+
+	/* if base is "/", don't append an additional slash */
+	if (is_not_root)
+		*(path + bufsize) = '/';
+	memcpy(path + bufsize + is_not_root, name, namelen);
+	*(path + bufsize + namelen + is_not_root) = '\0';
+
+	tmp = obj->globbed;
+	while (tmp) {
+		if (!glob_match(tmp->filename, path)) {
+			preempt_enable();
+			return (tmp->mode & GR_FIND) ? 1 : 0;
+		}
+		tmp = tmp->next;
+	}
+	preempt_enable();
+	return (obj->mode & GR_FIND) ? 1 : 0;
+}
+
+void gr_put_exec_file(struct task_struct *task)
+{
+	struct file *filp;  
+
+	write_lock(&grsec_exec_file_lock);
+	filp = task->exec_file;   
+	task->exec_file = NULL;
+	write_unlock(&grsec_exec_file_lock);
+
+	if (filp)
+		fput(filp);
+
+	return;
+}
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
+#endif
+#ifdef CONFIG_SECURITY
+EXPORT_SYMBOL_GPL(gr_check_user_change);
+EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
+
diff -ruNp linux-3.13.11/grsecurity/gracl_alloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_alloc.c
--- linux-3.13.11/grsecurity/gracl_alloc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_alloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,105 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+
+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
+
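+/* every acl_alloc() pushes its pointer onto alloc_stack so that acl_free_all()
+   can release an entire policy in one pass; alloc_stack_next is one past the
+   number of tracked pointers and therefore starts at 1 for an empty stack */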
+static __inline__ int
+alloc_pop(void)
+{
+	if (current_alloc_state->alloc_stack_next == 1)
+		return 0;
+
+	kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
+
+	current_alloc_state->alloc_stack_next--;
+
+	return 1;
+}
+
+static __inline__ int
+alloc_push(void *buf)
+{
+	if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
+		return 1;
+
+	current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
+
+	current_alloc_state->alloc_stack_next++;
+
+	return 0;
+}
+
+void *
+acl_alloc(unsigned long len)
+{
+	void *ret = NULL;
+
+	if (!len || len > PAGE_SIZE)
+		goto out;
+
+	ret = kmalloc(len, GFP_KERNEL);
+
+	if (ret) {
+		if (alloc_push(ret)) {
+			kfree(ret);
+			ret = NULL;
+		}
+	}
+
+out:
+	return ret;
+}
+
+void *
+acl_alloc_num(unsigned long num, unsigned long len)
+{
+	if (!len || (num > (PAGE_SIZE / len)))
+		return NULL;
+
+	return acl_alloc(num * len);
+}
+
+void
+acl_free_all(void)
+{
+	if (!current_alloc_state->alloc_stack)
+		return;
+
+	while (alloc_pop()) ;
+
+	if (current_alloc_state->alloc_stack) {
+		if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
+			kfree(current_alloc_state->alloc_stack);
+		else
+			vfree(current_alloc_state->alloc_stack);
+	}
+
+	current_alloc_state->alloc_stack = NULL;
+	current_alloc_state->alloc_stack_size = 1;
+	current_alloc_state->alloc_stack_next = 1;
+
+	return;
+}
+
+int
+acl_alloc_stack_init(unsigned long size)
+{
+	if ((size * sizeof (void *)) <= PAGE_SIZE)
+		current_alloc_state->alloc_stack =
+		    (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
+	else
+		current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
+
+	current_alloc_state->alloc_stack_size = size;
+	current_alloc_state->alloc_stack_next = 1;
+
+	if (!current_alloc_state->alloc_stack)
+		return 0;
+	else
+		return 1;
+}
diff -ruNp linux-3.13.11/grsecurity/gracl_cap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_cap.c
--- linux-3.13.11/grsecurity/gracl_cap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_cap.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,110 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+extern const char *captab_log[];
+extern int captab_log_entries;
+
+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
+{
+	struct acl_subject_label *curracl;
+	kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
+	kernel_cap_t cap_audit = __cap_empty_set;
+
+	if (!gr_acl_is_enabled())
+		return 1;
+
+	curracl = task->acl;
+
+	cap_drop = curracl->cap_lower;
+	cap_mask = curracl->cap_mask;
+	cap_audit = curracl->cap_invert_audit;
+
+	while ((curracl = curracl->parent_subject)) {
+		/* if the cap isn't specified in the current computed mask but is specified in the
+		   current level subject, and is lowered in the current level subject, then add
+		   it to the set of dropped capabilities
+		   otherwise, add the current level subject's mask to the current computed mask
+		 */
+		if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
+			cap_raise(cap_mask, cap);
+			if (cap_raised(curracl->cap_lower, cap))
+				cap_raise(cap_drop, cap);
+			if (cap_raised(curracl->cap_invert_audit, cap))
+				cap_raise(cap_audit, cap);
+		}
+	}
+
+	if (!cap_raised(cap_drop, cap)) {
+		if (cap_raised(cap_audit, cap))
+			gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
+		return 1;
+	}
+
+	curracl = task->acl;
+
+	if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
+	    && cap_raised(cred->cap_effective, cap)) {
+		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
+			       task->role->roletype, GR_GLOBAL_UID(cred->uid),
+			       GR_GLOBAL_GID(cred->gid), task->exec_file ?
+			       gr_to_filename(task->exec_file->f_path.dentry,
+			       task->exec_file->f_path.mnt) : curracl->filename,
+			       curracl->filename, 0UL,
+			       0UL, "", (unsigned long) cap, &task->signal->saved_ip);
+		return 1;
+	}
+
+	if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
+		gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
+
+	return 0;
+}
+
+int
+gr_acl_is_capable(const int cap)
+{
+	return gr_task_acl_is_capable(current, current_cred(), cap);
+}
+
+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
+{
+	struct acl_subject_label *curracl;
+	kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
+
+	if (!gr_acl_is_enabled())
+		return 1;
+
+	curracl = task->acl;
+
+	cap_drop = curracl->cap_lower;
+	cap_mask = curracl->cap_mask;
+
+	while ((curracl = curracl->parent_subject)) {
+		/* if the cap isn't specified in the current computed mask but is specified in the
+		   current level subject, and is lowered in the current level subject, then add
+		   it to the set of dropped capabilities
+		   otherwise, add the current level subject's mask to the current computed mask
+		 */
+		if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
+			cap_raise(cap_mask, cap);
+			if (cap_raised(curracl->cap_lower, cap))
+				cap_raise(cap_drop, cap);
+		}
+	}
+
+	if (!cap_raised(cap_drop, cap))
+		return 1;
+
+	return 0;
+}
+
+int
+gr_acl_is_capable_nolog(const int cap)
+{
+	return gr_task_acl_is_capable_nolog(current, cap);
+}
+
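
gr_task_acl_is_capable() resolves a capability by walking the subject's parent chain: the nearest subject whose cap_mask mentions the capability decides whether it is dropped, and ancestors further up cannot override that decision. Below is a hedged, self-contained sketch of that nearest-ancestor rule using plain bitmasks; struct demo_subject and the helper names are hypothetical stand-ins for acl_subject_label, not code from the patch.

#include <stdio.h>

struct demo_subject {
	unsigned long cap_mask;   /* caps this subject expresses an opinion on */
	unsigned long cap_lower;  /* caps this subject drops */
	struct demo_subject *parent;
};

static int demo_cap_allowed(const struct demo_subject *subj, int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = subj->cap_mask;
	unsigned long drop = subj->cap_lower;

	for (subj = subj->parent; subj; subj = subj->parent) {
		/* only consult an ancestor for caps nobody closer decided on */
		if (!(mask & bit) && (subj->cap_mask & bit)) {
			mask |= bit;
			if (subj->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct demo_subject parent = { .cap_mask = 1UL << 3, .cap_lower = 1UL << 3 };
	struct demo_subject child  = { .cap_mask = 0, .cap_lower = 0, .parent = &parent };

	/* cap 3 is dropped by the parent and inherited by the child: prints 0 */
	printf("cap 3 allowed: %d\n", demo_cap_allowed(&child, 3));
	return 0;
}
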
diff -ruNp linux-3.13.11/grsecurity/gracl_compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_compat.c
--- linux-3.13.11/grsecurity/gracl_compat.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,270 @@
+#include <linux/kernel.h>
+#include <linux/gracl.h>
+#include <linux/compat.h>
+#include <linux/gracl_compat.h>
+
+#include <asm/uaccess.h>
+
+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
+{
+	struct gr_arg_wrapper_compat uwrapcompat;
+
+        if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
+                return -EFAULT;
+
+        if (((uwrapcompat.version != GRSECURITY_VERSION) &&
+	     (uwrapcompat.version != 0x2901)) ||
+	    (uwrapcompat.size != sizeof(struct gr_arg_compat)))  
+                return -EINVAL;
+
+	uwrap->arg = compat_ptr(uwrapcompat.arg);
+	uwrap->version = uwrapcompat.version;
+	uwrap->size = sizeof(struct gr_arg);
+
+        return 0;
+}
+
+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
+{
+	struct gr_arg_compat argcompat;
+
+        if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
+                return -EFAULT;
+
+	arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
+	arg->role_db.num_pointers = argcompat.role_db.num_pointers;
+	arg->role_db.num_roles = argcompat.role_db.num_roles;
+	arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
+	arg->role_db.num_subjects = argcompat.role_db.num_subjects;
+	arg->role_db.num_objects = argcompat.role_db.num_objects;
+
+	memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
+	memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
+	memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
+	memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
+	arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
+	arg->segv_device = argcompat.segv_device;
+	arg->segv_inode = argcompat.segv_inode;
+	arg->segv_uid = argcompat.segv_uid;
+	arg->num_sprole_pws = argcompat.num_sprole_pws;
+	arg->mode = argcompat.mode;
+
+	return 0;
+}
+
+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
+{
+	struct acl_object_label_compat objcompat;
+
+	if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
+                return -EFAULT;
+
+	obj->filename = compat_ptr(objcompat.filename);
+	obj->inode = objcompat.inode;
+	obj->device = objcompat.device;
+	obj->mode = objcompat.mode;
+
+	obj->nested = compat_ptr(objcompat.nested);
+	obj->globbed = compat_ptr(objcompat.globbed);
+
+	obj->prev = compat_ptr(objcompat.prev);
+	obj->next = compat_ptr(objcompat.next);
+
+	return 0;
+}
+
+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
+{
+	unsigned int i;
+	struct acl_subject_label_compat subjcompat;
+
+	if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
+                return -EFAULT;
+
+	subj->filename = compat_ptr(subjcompat.filename);
+	subj->inode = subjcompat.inode;
+	subj->device = subjcompat.device;
+	subj->mode = subjcompat.mode;
+	subj->cap_mask = subjcompat.cap_mask;
+	subj->cap_lower = subjcompat.cap_lower;
+	subj->cap_invert_audit = subjcompat.cap_invert_audit;
+
+	for (i = 0; i < GR_NLIMITS; i++) {
+		if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
+			subj->res[i].rlim_cur = RLIM_INFINITY;
+		else
+			subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
+		if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
+			subj->res[i].rlim_max = RLIM_INFINITY;
+		else
+			subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
+	}
+	subj->resmask = subjcompat.resmask;
+
+	subj->user_trans_type = subjcompat.user_trans_type;
+	subj->group_trans_type = subjcompat.group_trans_type;
+	subj->user_transitions = compat_ptr(subjcompat.user_transitions);
+	subj->group_transitions = compat_ptr(subjcompat.group_transitions);
+	subj->user_trans_num = subjcompat.user_trans_num;
+	subj->group_trans_num = subjcompat.group_trans_num;
+
+	memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
+	memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
+	subj->ip_type = subjcompat.ip_type;
+	subj->ips = compat_ptr(subjcompat.ips);
+	subj->ip_num = subjcompat.ip_num;
+	subj->inaddr_any_override = subjcompat.inaddr_any_override;
+
+	subj->crashes = subjcompat.crashes;
+	subj->expires = subjcompat.expires;
+
+	subj->parent_subject = compat_ptr(subjcompat.parent_subject);
+	subj->hash = compat_ptr(subjcompat.hash);
+	subj->prev = compat_ptr(subjcompat.prev);
+	subj->next = compat_ptr(subjcompat.next);
+
+	subj->obj_hash = compat_ptr(subjcompat.obj_hash);
+	subj->obj_hash_size = subjcompat.obj_hash_size;
+	subj->pax_flags = subjcompat.pax_flags;
+
+	return 0;
+}
+
+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
+{
+	struct acl_role_label_compat rolecompat;
+
+	if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
+                return -EFAULT;
+
+	role->rolename = compat_ptr(rolecompat.rolename);
+	role->uidgid = rolecompat.uidgid;
+	role->roletype = rolecompat.roletype;
+
+	role->auth_attempts = rolecompat.auth_attempts;
+	role->expires = rolecompat.expires;
+
+	role->root_label = compat_ptr(rolecompat.root_label);
+	role->hash = compat_ptr(rolecompat.hash);
+
+	role->prev = compat_ptr(rolecompat.prev);
+	role->next = compat_ptr(rolecompat.next);
+
+	role->transitions = compat_ptr(rolecompat.transitions);
+	role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
+	role->domain_children = compat_ptr(rolecompat.domain_children);
+	role->domain_child_num = rolecompat.domain_child_num;
+
+	role->umask = rolecompat.umask;
+
+	role->subj_hash = compat_ptr(rolecompat.subj_hash);
+	role->subj_hash_size = rolecompat.subj_hash_size;
+
+	return 0;
+}
+
+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
+{
+	struct role_allowed_ip_compat roleip_compat;
+
+	if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
+                return -EFAULT;
+
+	roleip->addr = roleip_compat.addr;
+	roleip->netmask = roleip_compat.netmask;
+
+	roleip->prev = compat_ptr(roleip_compat.prev);
+	roleip->next = compat_ptr(roleip_compat.next);
+
+	return 0;
+}
+
+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
+{
+	struct role_transition_compat trans_compat;
+
+	if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
+                return -EFAULT;
+
+	trans->rolename = compat_ptr(trans_compat.rolename);
+
+	trans->prev = compat_ptr(trans_compat.prev);
+	trans->next = compat_ptr(trans_compat.next);
+
+	return 0;
+
+}
+
+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
+{
+	struct gr_hash_struct_compat hash_compat;
+
+	if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
+                return -EFAULT;
+
+	hash->table = compat_ptr(hash_compat.table);
+	hash->nametable = compat_ptr(hash_compat.nametable);
+	hash->first = compat_ptr(hash_compat.first);
+
+	hash->table_size = hash_compat.table_size;
+	hash->used_size = hash_compat.used_size;
+
+	hash->type = hash_compat.type;
+
+	return 0;
+}
+
+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
+{
+	compat_uptr_t ptrcompat;
+
+	if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
+                return -EFAULT;
+
+	*(void **)ptr = compat_ptr(ptrcompat);
+
+	return 0;
+}
+
+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
+{
+	struct acl_ip_label_compat ip_compat;
+
+	if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
+                return -EFAULT;
+
+	ip->iface = compat_ptr(ip_compat.iface);
+	ip->addr = ip_compat.addr;
+	ip->netmask = ip_compat.netmask;
+	ip->low = ip_compat.low;
+	ip->high = ip_compat.high;
+	ip->mode = ip_compat.mode;
+	ip->type = ip_compat.type;
+
+	memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
+
+	ip->prev = compat_ptr(ip_compat.prev);
+	ip->next = compat_ptr(ip_compat.next);
+
+	return 0;
+}
+
+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
+{
+	struct sprole_pw_compat pw_compat;
+
+	if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
+                return -EFAULT;
+
+	pw->rolename = compat_ptr(pw_compat.rolename);
+	memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
+	memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
+
+	return 0;
+}
+
+size_t get_gr_arg_wrapper_size_compat(void)
+{
+	return sizeof(struct gr_arg_wrapper_compat);
+}
+
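
Each copy_*_compat() helper above follows the same shape: copy a fixed-width 32-bit layout from userspace, then widen it field by field, turning compat_uptr_t handles back into native pointers via compat_ptr(). Below is a rough userspace analogue of that conversion; demo_compat_ptr() merely stands in for compat_ptr(), and every name here is hypothetical rather than taken from the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_label_compat {            /* layout as seen by 32-bit userspace */
	uint32_t filename;            /* compat_uptr_t */
	uint32_t mode;
};

struct demo_label {                   /* native layout */
	const char *filename;
	unsigned long mode;
};

static const char *demo_compat_ptr(uint32_t handle)
{
	/* in the kernel this is compat_ptr(); here we fake a lookup table */
	static const char *table[] = { NULL, "/bin/demo" };
	return handle < 2 ? table[handle] : NULL;
}

static int demo_copy_label_compat(struct demo_label *out, const void *user_buf)
{
	struct demo_label_compat in;

	memcpy(&in, user_buf, sizeof(in));   /* copy_from_user() in the kernel */
	out->filename = demo_compat_ptr(in.filename);
	out->mode = in.mode;
	return 0;
}

int main(void)
{
	struct demo_label_compat user = { .filename = 1, .mode = 0644 };
	struct demo_label k;

	demo_copy_label_compat(&k, &user);
	printf("%s mode %lo\n", k.filename, k.mode);
	return 0;
}
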
diff -ruNp linux-3.13.11/grsecurity/gracl_fs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_fs.c
--- linux-3.13.11/grsecurity/gracl_fs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_fs.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,437 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/gracl.h>
+
+umode_t
+gr_acl_umask(void)
+{
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	return current->role->umask;
+}
+
+__u32
+gr_acl_handle_hidden_file(const struct dentry * dentry,
+			  const struct vfsmount * mnt)
+{
+	__u32 mode;
+
+	if (unlikely(d_is_negative(dentry)))
+		return GR_FIND;
+
+	mode =
+	    gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
+
+	if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
+		return mode;
+	} else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
+		return 0;
+	} else if (unlikely(!(mode & GR_FIND)))
+		return 0;
+
+	return GR_FIND;
+}
+
+__u32
+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
+		   int acc_mode)
+{
+	__u32 reqmode = GR_FIND;
+	__u32 mode;
+
+	if (unlikely(d_is_negative(dentry)))
+		return reqmode;
+
+	if (acc_mode & MAY_APPEND)
+		reqmode |= GR_APPEND;
+	else if (acc_mode & MAY_WRITE)
+		reqmode |= GR_WRITE;
+	if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
+		reqmode |= GR_READ;
+
+	mode =
+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
+			   mnt);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "");
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "");
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+__u32
+gr_acl_handle_creat(const struct dentry * dentry,
+		    const struct dentry * p_dentry,
+		    const struct vfsmount * p_mnt, int open_flags, int acc_mode,
+		    const int imode)
+{
+	__u32 reqmode = GR_WRITE | GR_CREATE;
+	__u32 mode;
+
+	if (acc_mode & MAY_APPEND)
+		reqmode |= GR_APPEND;
+	// if a directory was required or the directory already exists, then
+	// don't count this open as a read
+	if ((acc_mode & MAY_READ) &&
+	    !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
+		reqmode |= GR_READ;
+	if ((open_flags & O_CREAT) &&
+	    ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
+		reqmode |= GR_SETID;
+
+	mode =
+	    gr_check_create(dentry, p_dentry, p_mnt,
+			    reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "");
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : reqmode &
+			       GR_APPEND ? " appending" : "");
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+__u32
+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
+		     const int fmode)
+{
+	__u32 mode, reqmode = GR_FIND;
+
+	if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
+		reqmode |= GR_EXEC;
+	if (fmode & S_IWOTH)
+		reqmode |= GR_WRITE;
+	if (fmode & S_IROTH)
+		reqmode |= GR_READ;
+
+	mode =
+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
+			   mnt);
+
+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
+		gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : "",
+			       reqmode & GR_EXEC ? " executing" : "");
+		return reqmode;
+	} else
+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
+	{
+		gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
+			       reqmode & GR_READ ? " reading" : "",
+			       reqmode & GR_WRITE ? " writing" : "",
+			       reqmode & GR_EXEC ? " executing" : "");
+		return 0;
+	} else if (unlikely((mode & reqmode) != reqmode))
+		return 0;
+
+	return reqmode;
+}
+
+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
+{
+	__u32 mode;
+
+	mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
+
+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
+		gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
+		return mode;
+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
+		return 0;
+	} else if (unlikely((mode & (reqmode)) != (reqmode)))
+		return 0;
+
+	return (reqmode);
+}
+
+__u32
+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
+		     umode_t *modeptr)
+{
+	umode_t mode;
+
+	*modeptr &= ~gr_acl_umask();
+	mode = *modeptr;
+
+	if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
+		return 1;
+
+	if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
+		     ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
+		return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
+				   GR_CHMOD_ACL_MSG);
+	} else {
+		return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
+	}
+}
+
+__u32
+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
+			   GR_UNIXCONNECT_ACL_MSG);
+}
+
+/* hardlinks require at minimum create and link permission,
+   any additional privilege required is based on the
+   privilege of the file being linked to
+*/
+__u32
+gr_acl_handle_link(const struct dentry * new_dentry,
+		   const struct dentry * parent_dentry,
+		   const struct vfsmount * parent_mnt,
+		   const struct dentry * old_dentry,
+		   const struct vfsmount * old_mnt, const struct filename *to)
+{
+	__u32 mode;
+	__u32 needmode = GR_CREATE | GR_LINK;
+	__u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
+
+	mode =
+	    gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
+			  old_mnt);
+
+	if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
+		gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
+		return mode;
+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
+		return 0;
+	} else if (unlikely((mode & needmode) != needmode))
+		return 0;
+
+	return 1;
+}
+
+__u32
+gr_acl_handle_symlink(const struct dentry * new_dentry,
+		      const struct dentry * parent_dentry,
+		      const struct vfsmount * parent_mnt, const struct filename *from)
+{
+	__u32 needmode = GR_WRITE | GR_CREATE;
+	__u32 mode;
+
+	mode =
+	    gr_check_create(new_dentry, parent_dentry, parent_mnt,
+			    GR_CREATE | GR_AUDIT_CREATE |
+			    GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
+
+	if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
+		gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
+		return mode;
+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
+		return 0;
+	} else if (unlikely((mode & needmode) != needmode))
+		return 0;
+
+	return (GR_WRITE | GR_CREATE);
+}
+
+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
+{
+	__u32 mode;
+
+	mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
+
+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
+		gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
+		return mode;
+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
+		return 0;
+	} else if (unlikely((mode & (reqmode)) != (reqmode)))
+		return 0;
+
+	return (reqmode);
+}
+
+__u32
+gr_acl_handle_mknod(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt,
+		    const int mode)
+{
+	__u32 reqmode = GR_WRITE | GR_CREATE;
+	if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
+		reqmode |= GR_SETID;
+
+	return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
+				  reqmode, GR_MKNOD_ACL_MSG);
+}
+
+__u32
+gr_acl_handle_mkdir(const struct dentry *new_dentry,
+		    const struct dentry *parent_dentry,
+		    const struct vfsmount *parent_mnt)
+{
+	return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
+				  GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
+}
+
+#define RENAME_CHECK_SUCCESS(old, new) \
+	(((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
+	 ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
+
+int
+gr_acl_handle_rename(struct dentry *new_dentry,
+		     struct dentry *parent_dentry,
+		     const struct vfsmount *parent_mnt,
+		     struct dentry *old_dentry,
+		     struct inode *old_parent_inode,
+		     struct vfsmount *old_mnt, const struct filename *newname)
+{
+	__u32 comp1, comp2;
+	int error = 0;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	if (d_is_negative(new_dentry)) {
+		comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
+					GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
+					GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
+		comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
+				       GR_DELETE | GR_AUDIT_DELETE |
+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
+				       GR_SUPPRESS, old_mnt);
+	} else {
+		comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
+				       GR_CREATE | GR_DELETE |
+				       GR_AUDIT_CREATE | GR_AUDIT_DELETE |
+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
+				       GR_SUPPRESS, parent_mnt);
+		comp2 =
+		    gr_search_file(old_dentry,
+				   GR_READ | GR_WRITE | GR_AUDIT_READ |
+				   GR_DELETE | GR_AUDIT_DELETE |
+				   GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
+	}
+
+	if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
+	    ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
+		gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
+	else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
+		 && !(comp2 & GR_SUPPRESS)) {
+		gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
+		error = -EACCES;
+	} else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
+		error = -EACCES;
+
+	return error;
+}
+
+void
+gr_acl_handle_exit(void)
+{
+	u16 id;
+	char *rolename;
+
+	if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
+	    !(current->role->roletype & GR_ROLE_PERSIST))) {
+		id = current->acl_role_id;
+		rolename = current->role->rolename;
+		gr_set_acls(1);
+		gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
+	}
+
+	gr_put_exec_file(current);
+	return;
+}
+
+int
+gr_acl_handle_procpidmem(const struct task_struct *task)
+{
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	if (task != current && task->acl->mode & GR_PROTPROCFD)
+		return -EACCES;
+
+	return 0;
+}
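
Most handlers in this file funnel into generic_fs_handler(), which reduces to a three-way decision on the returned mode bits: grant (with an optional audit log), deny with a log, or deny silently when the matching object carries GR_SUPPRESS. The following is a minimal sketch of that decision using made-up bit values and demo_* names; it is an illustration, not code from the patch.

#include <stdio.h>

#define DEMO_READ      0x01
#define DEMO_WRITE     0x02
#define DEMO_AUDIT     0x10
#define DEMO_SUPPRESS  0x20

static unsigned int demo_fs_handler(unsigned int granted, unsigned int reqmode)
{
	if ((granted & reqmode) == reqmode) {
		if (granted & DEMO_AUDIT)
			printf("audit: access granted\n");
		return reqmode;                 /* allowed */
	}
	if (!(granted & DEMO_SUPPRESS))
		printf("denied: missing mode bits\n");
	return 0;                               /* denied */
}

int main(void)
{
	demo_fs_handler(DEMO_READ | DEMO_AUDIT, DEMO_READ);      /* audited grant */
	demo_fs_handler(DEMO_READ, DEMO_READ | DEMO_WRITE);      /* logged denial */
	demo_fs_handler(DEMO_READ | DEMO_SUPPRESS, DEMO_WRITE);  /* silent denial */
	return 0;
}
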
diff -ruNp linux-3.13.11/grsecurity/gracl_ip.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_ip.c
--- linux-3.13.11/grsecurity/gracl_ip.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_ip.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,386 @@
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <net/sock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+#define GR_BIND			0x01
+#define GR_CONNECT		0x02
+#define GR_INVERT		0x04
+#define GR_BINDOVERRIDE		0x08
+#define GR_CONNECTOVERRIDE	0x10
+#define GR_SOCK_FAMILY		0x20
+
+static const char * gr_protocols[IPPROTO_MAX] = {
+	"ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
+	"egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
+	"chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
+	"trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
+	"merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
+	"il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
+	"mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
+	"tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
+	"sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
+	"cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
+	"iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
+	"eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
+	"scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
+	"aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
+	"vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
+	"uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
+	"unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141",
"unknown:142", "unknown:143",
+	"unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149",
"unknown:150", "unknown:151",
+	"unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157",
"unknown:158", "unknown:159",
+	"unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165",
"unknown:166", "unknown:167",
+	"unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173",
"unknown:174", "unknown:175",
+	"unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181",
"unknown:182", "unknown:183",
+	"unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189",
"unknown:190", "unknown:191",
+	"unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197",
"unknown:198", "unknown:199",
+	"unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205",
"unknown:206", "unknown:207",
+	"unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213",
"unknown:214", "unknown:215",
+	"unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221",
"unknown:222", "unknown:223",
+	"unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229",
"unknown:230", "unknown:231",
+	"unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237",
"unknown:238", "unknown:239",
+	"unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245",
"unknown:246", "unknown:247",
+	"unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253",
"unknown:254", "unknown:255",
+	};
+
+static const char * gr_socktypes[SOCK_MAX] = {
+	"unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
+	"unknown:7", "unknown:8", "unknown:9", "packet"
+	};
+
+static const char * gr_sockfamilies[AF_MAX+1] = {
+	"unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc",
"x25",
+	"inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
+	"econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
+	};
+
+const char *
+gr_proto_to_name(unsigned char proto)
+{
+	return gr_protocols[proto];
+}
+
+const char *
+gr_socktype_to_name(unsigned char type)
+{
+	return gr_socktypes[type];
+}
+
+const char *
+gr_sockfamily_to_name(unsigned char family)
+{
+	return gr_sockfamilies[family];
+}
+
+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
+
+int
+gr_search_socket(const int domain, const int type, const int protocol)
+{
+	struct acl_subject_label *curr;
+	const struct cred *cred = current_cred();
+
+	if (unlikely(!gr_acl_is_enabled()))
+		goto exit;
+
+	if ((domain < 0) || (type < 0) || (protocol < 0) ||
+	    (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
+		goto exit;	// let the kernel handle it
+
+	curr = current->acl;
+
+	if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
+		/* the family is allowed, if this is PF_INET allow it only if
+		   the extra sock type/protocol checks pass */
+		if (domain == PF_INET)
+			goto inet_check;
+		goto exit;
+	} else {
+		if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
+			__u32 fakeip = 0;
+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
+				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
+				       gr_to_filename(current->exec_file->f_path.dentry,
+				       current->exec_file->f_path.mnt) :
+				       curr->filename, curr->filename,
+				       &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
+				       &current->signal->saved_ip);
+			goto exit;
+		}
+		goto exit_fail;
+	}
+
+inet_check:
+	/* the rest of this checking is for IPv4 only */
+	if (!curr->ips)
+		goto exit;
+
+	if ((curr->ip_type & (1U << type)) &&
+	    (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
+		goto exit;
+
+	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
+		/* we don't place acls on raw sockets , and sometimes
+		   dgram/ip sockets are opened for ioctl and not
+		   bind/connect, so we'll fake a bind learn log */
+		if (type == SOCK_RAW || type == SOCK_PACKET) {
+			__u32 fakeip = 0;
+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
+				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
+				       gr_to_filename(current->exec_file->f_path.dentry,
+				       current->exec_file->f_path.mnt) :
+				       curr->filename, curr->filename,
+				       &fakeip, 0, type,
+				       protocol, GR_CONNECT, &current->signal->saved_ip);
+		} else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
+			__u32 fakeip = 0;
+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
+				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
+				       gr_to_filename(current->exec_file->f_path.dentry,
+				       current->exec_file->f_path.mnt) :
+				       curr->filename, curr->filename,
+				       &fakeip, 0, type,
+				       protocol, GR_BIND, &current->signal->saved_ip);
+		}
+		/* we'll log when they use connect or bind */
+		goto exit;
+	}
+
+exit_fail:
+	if (domain == PF_INET)
+		gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain), 
+			    gr_socktype_to_name(type), gr_proto_to_name(protocol));
+	else if (rcu_access_pointer(net_families[domain]) != NULL)
+		gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
+			    gr_socktype_to_name(type), protocol);
+
+	return 0;
+exit:
+	return 1;
+}
+
+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
+{
+	if ((ip->mode & mode) &&
+	    (ip_port >= ip->low) &&
+	    (ip_port <= ip->high) &&
+	    ((ntohl(ip_addr) & our_netmask) ==
+	     (ntohl(our_addr) & our_netmask))
+	    && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
+	    && (ip->type & (1U << type))) {
+		if (ip->mode & GR_INVERT)
+			return 2; // specifically denied
+		else
+			return 1; // allowed
+	}
+
+	return 0; // not specifically allowed, may continue parsing
+}
+
+static int
+gr_search_connectbind(const int full_mode, struct sock *sk,
+		      struct sockaddr_in *addr, const int type)
+{
+	char iface[IFNAMSIZ] = {0};
+	struct acl_subject_label *curr;
+	struct acl_ip_label *ip;
+	struct inet_sock *isk;
+	struct net_device *dev;
+	struct in_device *idev;
+	unsigned long i;
+	int ret;
+	int mode = full_mode & (GR_BIND | GR_CONNECT);
+	__u32 ip_addr = 0;
+	__u32 our_addr;
+	__u32 our_netmask;
+	char *p;
+	__u16 ip_port = 0;
+	const struct cred *cred = current_cred();
+
+	if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
+		return 0;
+
+	curr = current->acl;
+	isk = inet_sk(sk);
+
+	/* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
+	if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
+		addr->sin_addr.s_addr = curr->inaddr_any_override;
+	if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
+		struct sockaddr_in saddr;
+		int err;
+
+		saddr.sin_family = AF_INET;
+		saddr.sin_addr.s_addr = curr->inaddr_any_override;
+		saddr.sin_port = isk->inet_sport;
+
+		err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
+		if (err)
+			return err;
+
+		err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
+		if (err)
+			return err;
+	}
+
+	if (!curr->ips)
+		return 0;
+
+	ip_addr = addr->sin_addr.s_addr;
+	ip_port = ntohs(addr->sin_port);
+
+	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
+		security_learn(GR_IP_LEARN_MSG, current->role->rolename,
+			       current->role->roletype, GR_GLOBAL_UID(cred->uid),
+			       GR_GLOBAL_GID(cred->gid), current->exec_file ?
+			       gr_to_filename(current->exec_file->f_path.dentry,
+			       current->exec_file->f_path.mnt) :
+			       curr->filename, curr->filename,
+			       &ip_addr, ip_port, type,
+			       sk->sk_protocol, mode, &current->signal->saved_ip);
+		return 0;
+	}
+
+	for (i = 0; i < curr->ip_num; i++) {
+		ip = *(curr->ips + i);
+		if (ip->iface != NULL) {
+			strncpy(iface, ip->iface, IFNAMSIZ - 1);
+			p = strchr(iface, ':');
+			if (p != NULL)
+				*p = '\0';
+			dev = dev_get_by_name(sock_net(sk), iface);
+			if (dev == NULL)
+				continue;
+			idev = in_dev_get(dev);
+			if (idev == NULL) {
+				dev_put(dev);
+				continue;
+			}
+			rcu_read_lock();
+			for_ifa(idev) {
+				if (!strcmp(ip->iface, ifa->ifa_label)) {
+					our_addr = ifa->ifa_address;
+					our_netmask = 0xffffffff;
+					ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
+					if (ret == 1) {
+						rcu_read_unlock();
+						in_dev_put(idev);
+						dev_put(dev);
+						return 0;
+					} else if (ret == 2) {
+						rcu_read_unlock();
+						in_dev_put(idev);
+						dev_put(dev);
+						goto denied;
+					}
+				}
+			} endfor_ifa(idev);
+			rcu_read_unlock();
+			in_dev_put(idev);
+			dev_put(dev);
+		} else {
+			our_addr = ip->addr;
+			our_netmask = ip->netmask;
+			ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
+			if (ret == 1)
+				return 0;
+			else if (ret == 2)
+				goto denied;
+		}
+	}
+
+denied:
+	if (mode == GR_BIND)
+		gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
+	else if (mode == GR_CONNECT)
+		gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
+
+	return -EACCES;
+}
+
+int
+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
+{
+	/* always allow disconnection of dgram sockets with connect */
+	if (addr->sin_family == AF_UNSPEC)
+		return 0;
+	return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
+}
+
+int
+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
+{
+	return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
+}
+
+int gr_search_listen(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_in addr;
+
+	addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
+	addr.sin_port = inet_sk(sk)->inet_sport;
+
+	return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
+}
+
+int gr_search_accept(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_in addr;
+
+	addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
+	addr.sin_port = inet_sk(sk)->inet_sport;
+
+	return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
+}
+
+int
+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
+{
+	if (addr)
+		return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
+	else {
+		struct sockaddr_in sin;
+		const struct inet_sock *inet = inet_sk(sk);
+
+		sin.sin_addr.s_addr = inet->inet_daddr;
+		sin.sin_port = inet->inet_dport;
+
+		return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
+	}
+}
+
+int
+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
+{
+	struct sockaddr_in sin;
+
+	if (unlikely(skb->len < sizeof (struct udphdr)))
+		return 0;	// skip this packet
+
+	sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+	sin.sin_port = udp_hdr(skb)->source;
+
+	return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
+}
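
The per-rule test in check_ip_policy() boils down to a port-range check plus a masked address comparison done in host byte order. Here is a small, hedged userspace sketch of just that comparison; demo_rule and demo_ip_match are hypothetical names, not code from the patch.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct demo_rule {
	uint32_t addr;       /* network byte order, like acl_ip_label */
	uint32_t netmask;    /* host byte order mask */
	uint16_t low, high;  /* allowed port range */
};

static int demo_ip_match(const struct demo_rule *r, uint32_t ip, uint16_t port)
{
	return port >= r->low && port <= r->high &&
	       (ntohl(ip) & r->netmask) == (ntohl(r->addr) & r->netmask);
}

int main(void)
{
	struct demo_rule r = {
		.addr = inet_addr("192.168.1.0"),
		.netmask = 0xffffff00,          /* /24 */
		.low = 1024, .high = 65535,
	};

	printf("%d\n", demo_ip_match(&r, inet_addr("192.168.1.42"), 8080)); /* 1 */
	printf("%d\n", demo_ip_match(&r, inet_addr("10.0.0.1"), 8080));     /* 0 */
	return 0;
}
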
diff -ruNp linux-3.13.11/grsecurity/gracl_learn.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_learn.c
--- linux-3.13.11/grsecurity/gracl_learn.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_learn.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,207 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/string.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/grinternal.h>
+
+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
+				   size_t count, loff_t *ppos);
+extern int gr_acl_is_enabled(void);
+
+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
+static int gr_learn_attached;
+
+/* use a 512k buffer */
+#define LEARN_BUFFER_SIZE (512 * 1024)
+
+static DEFINE_SPINLOCK(gr_learn_lock);
+static DEFINE_MUTEX(gr_learn_user_mutex);
+
+/* we need to maintain two buffers, so that the kernel context of grlearn
+   uses a semaphore around the userspace copying, and the other kernel contexts
+   use a spinlock when copying into the buffer, since they cannot sleep
+*/
+static char *learn_buffer;
+static char *learn_buffer_user;
+static int learn_buffer_len;
+static int learn_buffer_user_len;
+
+static ssize_t
+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	ssize_t retval = 0;
+
+	add_wait_queue(&learn_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	do {
+		mutex_lock(&gr_learn_user_mutex);
+		spin_lock(&gr_learn_lock);
+		if (learn_buffer_len)
+			break;
+		spin_unlock(&gr_learn_lock);
+		mutex_unlock(&gr_learn_user_mutex);
+		if (file->f_flags & O_NONBLOCK) {
+			retval = -EAGAIN;
+			goto out;
+		}
+		if (signal_pending(current)) {
+			retval = -ERESTARTSYS;
+			goto out;
+		}
+
+		schedule();
+	} while (1);
+
+	memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
+	learn_buffer_user_len = learn_buffer_len;
+	retval = learn_buffer_len;
+	learn_buffer_len = 0;
+
+	spin_unlock(&gr_learn_lock);
+
+	if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
+		retval = -EFAULT;
+
+	mutex_unlock(&gr_learn_user_mutex);
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&learn_wait, &wait);
+	return retval;
+}
+
+static unsigned int
+poll_learn(struct file * file, poll_table * wait)
+{
+	poll_wait(file, &learn_wait, wait);
+
+	if (learn_buffer_len)
+		return (POLLIN | POLLRDNORM);
+
+	return 0;
+}
+
+void
+gr_clear_learn_entries(void)
+{
+	char *tmp;
+
+	mutex_lock(&gr_learn_user_mutex);
+	spin_lock(&gr_learn_lock);
+	tmp = learn_buffer;
+	learn_buffer = NULL;
+	spin_unlock(&gr_learn_lock);
+	if (tmp)
+		vfree(tmp);
+	if (learn_buffer_user != NULL) {
+		vfree(learn_buffer_user);
+		learn_buffer_user = NULL;
+	}
+	learn_buffer_len = 0;
+	mutex_unlock(&gr_learn_user_mutex);
+
+	return;
+}
+
+void
+gr_add_learn_entry(const char *fmt, ...)
+{
+	va_list args;
+	unsigned int len;
+
+	if (!gr_learn_attached)
+		return;
+
+	spin_lock(&gr_learn_lock);
+
+	/* leave a gap at the end so we know when it's "full" but don't have to
+	   compute the exact length of the string we're trying to append
+	*/
+	if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
+		spin_unlock(&gr_learn_lock);
+		wake_up_interruptible(&learn_wait);
+		return;
+	}
+	if (learn_buffer == NULL) {
+		spin_unlock(&gr_learn_lock);
+		return;
+	}
+
+	va_start(args, fmt);
+	len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
+	va_end(args);
+
+	learn_buffer_len += len + 1;
+
+	spin_unlock(&gr_learn_lock);
+	wake_up_interruptible(&learn_wait);
+
+	return;
+}
+
+static int
+open_learn(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ && gr_learn_attached)
+		return -EBUSY;
+	if (file->f_mode & FMODE_READ) {
+		int retval = 0;
+		mutex_lock(&gr_learn_user_mutex);
+		if (learn_buffer == NULL)
+			learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
+		if (learn_buffer_user == NULL)
+			learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
+		if (learn_buffer == NULL) {
+			retval = -ENOMEM;
+			goto out_error;
+		}
+		if (learn_buffer_user == NULL) {
+			retval = -ENOMEM;
+			goto out_error;
+		}
+		learn_buffer_len = 0;
+		learn_buffer_user_len = 0;
+		gr_learn_attached = 1;
+out_error:
+		mutex_unlock(&gr_learn_user_mutex);
+		return retval;
+	}
+	return 0;
+}
+
+static int
+close_learn(struct inode *inode, struct file *file)
+{
+	if (file->f_mode & FMODE_READ) {
+		char *tmp = NULL;
+		mutex_lock(&gr_learn_user_mutex);
+		spin_lock(&gr_learn_lock);
+		tmp = learn_buffer;
+		learn_buffer = NULL;
+		spin_unlock(&gr_learn_lock);
+		if (tmp)
+			vfree(tmp);
+		if (learn_buffer_user != NULL) {
+			vfree(learn_buffer_user);
+			learn_buffer_user = NULL;
+		}
+		learn_buffer_len = 0;
+		learn_buffer_user_len = 0;
+		gr_learn_attached = 0;
+		mutex_unlock(&gr_learn_user_mutex);
+	}
+
+	return 0;
+}
+		
+const struct file_operations grsec_fops = {
+	.read		= read_learn,
+	.write		= write_grsec_handler,
+	.open		= open_learn,
+	.release	= close_learn,
+	.poll		= poll_learn,
+};
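
gracl_learn.c keeps two buffers so that in-kernel producers, which cannot sleep, only ever touch the staging buffer under a spinlock, while the reader drains it into a private buffer and does the slow copy to userspace with that lock released. Below is a hedged userspace analogue of the double-buffering idea; a single pthread mutex stands in for both kernel locks, and the demo_* names are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BUF_SIZE 4096

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static char demo_staging[DEMO_BUF_SIZE];
static size_t demo_len;

static void demo_add_entry(const char *msg)       /* producer, must not block long */
{
	pthread_mutex_lock(&demo_lock);
	if (demo_len + strlen(msg) + 1 < DEMO_BUF_SIZE)
		demo_len += snprintf(demo_staging + demo_len,
				     DEMO_BUF_SIZE - demo_len, "%s", msg) + 1;
	pthread_mutex_unlock(&demo_lock);
}

static size_t demo_read_entries(char *out, size_t outsize)  /* consumer */
{
	size_t n;

	pthread_mutex_lock(&demo_lock);
	n = demo_len < outsize ? demo_len : outsize;
	memcpy(out, demo_staging, n);             /* quick move while lock is held */
	demo_len = 0;
	pthread_mutex_unlock(&demo_lock);
	/* the slow "copy to userspace" step would happen here, lock released */
	return n;
}

int main(void)
{
	char out[DEMO_BUF_SIZE];
	size_t n;

	demo_add_entry("subject /usr/bin/demo");
	demo_add_entry("object /etc/demo.conf r");
	n = demo_read_entries(out, sizeof(out));
	printf("drained %zu bytes\n", n);
	return 0;
}
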
diff -ruNp linux-3.13.11/grsecurity/gracl_policy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_policy.c
--- linux-3.13.11/grsecurity/gracl_policy.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_policy.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,1782 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/lglock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/sysctl.h>
+#include <linux/netdevice.h>
+#include <linux/ptrace.h>
+#include <linux/gracl.h>
+#include <linux/gralloc.h>
+#include <linux/security.h>
+#include <linux/grinternal.h>
+#include <linux/pid_namespace.h>
+#include <linux/stop_machine.h>
+#include <linux/fdtable.h>
+#include <linux/percpu.h>
+#include <linux/lglock.h>
+#include <linux/hugetlb.h>
+#include <linux/posix-timers.h>
+#include "../fs/mount.h"
+
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/mman.h>
+
+extern struct gr_policy_state *polstate;
+
+#define FOR_EACH_ROLE_START(role) \
+	role = polstate->role_list; \
+	while (role) {
+
+#define FOR_EACH_ROLE_END(role) \
+		role = role->prev; \
+	}
+
+struct path gr_real_root;
+
+extern struct gr_alloc_state *current_alloc_state;
+
+u16 acl_sp_role_value;
+
+static DEFINE_MUTEX(gr_dev_mutex);
+
+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
+extern void gr_clear_learn_entries(void);
+
+static struct gr_arg gr_usermode;
+static unsigned char gr_system_salt[GR_SALT_LEN];
+static unsigned char gr_system_sum[GR_SHA_LEN];
+
+static unsigned int gr_auth_attempts = 0;
+static unsigned long gr_auth_expires = 0UL;
+
+struct acl_object_label *fakefs_obj_rw;
+struct acl_object_label *fakefs_obj_rwx;
+
+extern int gr_init_uidset(void);
+extern void gr_free_uidset(void);
+extern void gr_remove_uid(uid_t uid);
+extern int gr_find_uid(uid_t uid);
+
+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
+extern void assign_special_role(const char *rolename);
+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
+extern int gr_rbac_disable(void *unused);
+extern void gr_enable_rbac_system(void);
+
+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
+{
+	if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
+{
+	if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
+{
+	if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
+{
+	if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
+{
+	if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
+{
+	if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
+{
+	if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
+{
+	if (copy_from_user(trans, userp, sizeof(struct role_transition)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
+{
+	if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
+{
+	if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
+		return -EFAULT;
+
+	if (((uwrap->version != GRSECURITY_VERSION) &&
+	     (uwrap->version != 0x2901)) ||
+	    (uwrap->size != sizeof(struct gr_arg)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
+{
+	if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static size_t get_gr_arg_wrapper_size_normal(void)
+{
+	return sizeof(struct gr_arg_wrapper);
+}
+
+#ifdef CONFIG_COMPAT
+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
+extern size_t get_gr_arg_wrapper_size_compat(void);
+
+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
+
+#else
+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
+#define copy_gr_arg copy_gr_arg_normal
+#define copy_gr_hash_struct copy_gr_hash_struct_normal
+#define copy_acl_object_label copy_acl_object_label_normal
+#define copy_acl_subject_label copy_acl_subject_label_normal
+#define copy_acl_role_label copy_acl_role_label_normal
+#define copy_acl_ip_label copy_acl_ip_label_normal
+#define copy_pointer_from_array copy_pointer_from_array_normal
+#define copy_sprole_pw copy_sprole_pw_normal
+#define copy_role_transition copy_role_transition_normal
+#define copy_role_allowed_ip copy_role_allowed_ip_normal
+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
+#endif
+
+static struct acl_subject_label *
+lookup_subject_map(const struct acl_subject_label *userp)
+{
+	unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
+	struct subject_map *match;
+
+	match = polstate->subj_map_set.s_hash[index];
+
+	while (match && match->user != userp)
+		match = match->next;
+
+	if (match != NULL)
+		return match->kernel;
+	else
+		return NULL;
+}
+
+static void
+insert_subj_map_entry(struct subject_map *subjmap)
+{
+	unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
+	struct subject_map **curr;
+
+	subjmap->prev = NULL;
+
+	curr = &polstate->subj_map_set.s_hash[index];
+	if (*curr != NULL)
+		(*curr)->prev = subjmap;
+
+	subjmap->next = *curr;
+	*curr = subjmap;
+
+	return;
+}
+
+static void
+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
+{
+	unsigned int index =
+	    gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
+	struct acl_role_label **curr;
+	struct acl_role_label *tmp, *tmp2;
+
+	curr = &polstate->acl_role_set.r_hash[index];
+
+	/* simple case, slot is empty, just set it to our role */
+	if (*curr == NULL) {
+		*curr = role;
+	} else {
+		/* example:
+		   1 -> 2 -> 3 (adding 2 -> 3 to here)
+		   2 -> 3
+		*/
+		/* first check to see if we can already be reached via this slot */
+		tmp = *curr;
+		while (tmp && tmp != role)
+			tmp = tmp->next;
+		if (tmp == role) {
+			/* we don't need to add ourselves to this slot's chain */
+			return;
+		}
+		/* we need to add ourselves to this chain, two cases */
+		if (role->next == NULL) {
+			/* simple case, append the current chain to our role */
+			role->next = *curr;
+			*curr = role;
+		} else {
+			/* 1 -> 2 -> 3 -> 4
+			   2 -> 3 -> 4
+			   3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
+			*/			   
+			/* trickier case: walk our role's chain until we find
+			   the role for the start of the current slot's chain */
+			tmp = role;
+			tmp2 = *curr;
+			while (tmp->next && tmp->next != tmp2)
+				tmp = tmp->next;
+			if (tmp->next == tmp2) {
+				/* from example above, we found 3, so just
+				   replace this slot's chain with ours */
+				*curr = role;
+			} else {
+				/* we didn't find a subset of our role's chain
+				   in the current slot's chain, so append their
+				   chain to ours, and set us as the first role in
+				   the slot's chain
+
+				   we could fold this case with the case above,
+				   but making it explicit for clarity
+				*/
+				tmp->next = tmp2;
+				*curr = role;
+			}
+		}
+	}
+
+	return;
+}
+
+static void
+insert_acl_role_label(struct acl_role_label *role)
+{
+	int i;
+
+	if (polstate->role_list == NULL) {
+		polstate->role_list = role;
+		role->prev = NULL;
+	} else {
+		role->prev = polstate->role_list;
+		polstate->role_list = role;
+	}
+	
+	/* used for hash chains */
+	role->next = NULL;
+
+	if (role->roletype & GR_ROLE_DOMAIN) {
+		for (i = 0; i < role->domain_child_num; i++)
+			__insert_acl_role_label(role, role->domain_children[i]);
+	} else
+		__insert_acl_role_label(role, role->uidgid);
+}
+					
+static int
+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
+{
+	struct name_entry **curr, *nentry;
+	struct inodev_entry *ientry;
+	unsigned int len = strlen(name);
+	unsigned int key = full_name_hash(name, len);
+	unsigned int index = key % polstate->name_set.n_size;
+
+	curr = &polstate->name_set.n_hash[index];
+
+	while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
+		curr = &((*curr)->next);
+
+	if (*curr != NULL)
+		return 1;
+
+	nentry = acl_alloc(sizeof (struct name_entry));
+	if (nentry == NULL)
+		return 0;
+	ientry = acl_alloc(sizeof (struct inodev_entry));
+	if (ientry == NULL)
+		return 0;
+	ientry->nentry = nentry;
+
+	nentry->key = key;
+	nentry->name = name;
+	nentry->inode = inode;
+	nentry->device = device;
+	nentry->len = len;
+	nentry->deleted = deleted;
+
+	nentry->prev = NULL;
+	curr = &polstate->name_set.n_hash[index];
+	if (*curr != NULL)
+		(*curr)->prev = nentry;
+	nentry->next = *curr;
+	*curr = nentry;
+
+	/* insert us into the table searchable by inode/dev */
+	__insert_inodev_entry(polstate, ientry);
+
+	return 1;
+}
+
+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
+
+static void *
+create_table(__u32 * len, int elementsize)
+{
+	unsigned int table_sizes[] = {
+		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
+		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
+		4194301, 8388593, 16777213, 33554393, 67108859
+	};
+	void *newtable = NULL;
+	unsigned int pwr = 0;
+
+	while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
+	       table_sizes[pwr] <= *len)
+		pwr++;
+
+	if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
+		return newtable;
+
+	if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
+		newtable =
+		    kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
+	else
+		newtable = vmalloc(table_sizes[pwr] * elementsize);
+
+	*len = table_sizes[pwr];
+
+	return newtable;
+}
+
+static int
+init_variables(const struct gr_arg *arg, bool reload)
+{
+	struct task_struct *reaper = init_pid_ns.child_reaper;
+	unsigned int stacksize;
+
+	polstate->subj_map_set.s_size = arg->role_db.num_subjects;
+	polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
+	polstate->name_set.n_size = arg->role_db.num_objects;
+	polstate->inodev_set.i_size = arg->role_db.num_objects;
+
+	if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
+	    !polstate->name_set.n_size || !polstate->inodev_set.i_size)
+		return 1;
+
+	if (!reload) {
+		if (!gr_init_uidset())
+			return 1;
+	}
+
+	/* set up the stack that holds allocation info */
+
+	stacksize = arg->role_db.num_pointers + 5;
+
+	if (!acl_alloc_stack_init(stacksize))
+		return 1;
+
+	if (!reload) {
+		/* grab reference for the real root dentry and vfsmount */
+		get_fs_root(reaper->fs, &gr_real_root);
+	
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+	printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry),
gr_real_root.dentry->d_inode->i_ino);
+#endif
+
+		fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
+		if (fakefs_obj_rw == NULL)
+			return 1;
+		fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
+	
+		fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
+		if (fakefs_obj_rwx == NULL)
+			return 1;
+		fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
+	}
+
+	polstate->subj_map_set.s_hash =
+	    (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
+	polstate->acl_role_set.r_hash =
+	    (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
+	polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
+	polstate->inodev_set.i_hash =
+	    (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
+
+	if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
+	    !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
+		return 1;
+
+	memset(polstate->subj_map_set.s_hash, 0,
+	       sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
+	memset(polstate->acl_role_set.r_hash, 0,
+	       sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
+	memset(polstate->name_set.n_hash, 0,
+	       sizeof (struct name_entry *) * polstate->name_set.n_size);
+	memset(polstate->inodev_set.i_hash, 0,
+	       sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
+
+	return 0;
+}
+
+/* free information not needed after startup
+   currently contains user->kernel pointer mappings for subjects
+*/
+
+static void
+free_init_variables(void)
+{
+	__u32 i;
+
+	if (polstate->subj_map_set.s_hash) {
+		for (i = 0; i < polstate->subj_map_set.s_size; i++) {
+			if (polstate->subj_map_set.s_hash[i]) {
+				kfree(polstate->subj_map_set.s_hash[i]);
+				polstate->subj_map_set.s_hash[i] = NULL;
+			}
+		}
+
+		if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
+		    PAGE_SIZE)
+			kfree(polstate->subj_map_set.s_hash);
+		else
+			vfree(polstate->subj_map_set.s_hash);
+	}
+
+	return;
+}
+
+static void
+free_variables(bool reload)
+{
+	struct acl_subject_label *s;
+	struct acl_role_label *r;
+	struct task_struct *task, *task2;
+	unsigned int x;
+
+	if (!reload) {
+		gr_clear_learn_entries();
+
+		read_lock(&tasklist_lock);
+		do_each_thread(task2, task) {
+			task->acl_sp_role = 0;
+			task->acl_role_id = 0;
+			task->inherited = 0;
+			task->acl = NULL;
+			task->role = NULL;
+		} while_each_thread(task2, task);
+		read_unlock(&tasklist_lock);
+
+		kfree(fakefs_obj_rw);
+		fakefs_obj_rw = NULL;
+		kfree(fakefs_obj_rwx);
+		fakefs_obj_rwx = NULL;
+
+		/* release the reference to the real root dentry and vfsmount */
+		path_put(&gr_real_root);
+		memset(&gr_real_root, 0, sizeof(gr_real_root));
+	}
+
+	/* free all object hash tables */
+
+	FOR_EACH_ROLE_START(r)
+		if (r->subj_hash == NULL)
+			goto next_role;
+		FOR_EACH_SUBJECT_START(r, s, x)
+			if (s->obj_hash == NULL)
+				break;
+			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
+				kfree(s->obj_hash);
+			else
+				vfree(s->obj_hash);
+		FOR_EACH_SUBJECT_END(s, x)
+		FOR_EACH_NESTED_SUBJECT_START(r, s)
+			if (s->obj_hash == NULL)
+				break;
+			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
+				kfree(s->obj_hash);
+			else
+				vfree(s->obj_hash);
+		FOR_EACH_NESTED_SUBJECT_END(s)
+		if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
+			kfree(r->subj_hash);
+		else
+			vfree(r->subj_hash);
+		r->subj_hash = NULL;
+next_role:
+	FOR_EACH_ROLE_END(r)
+
+	acl_free_all();
+
+	if (polstate->acl_role_set.r_hash) {
+		if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
+		    PAGE_SIZE)
+			kfree(polstate->acl_role_set.r_hash);
+		else
+			vfree(polstate->acl_role_set.r_hash);
+	}
+	if (polstate->name_set.n_hash) {
+		if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
+		    PAGE_SIZE)
+			kfree(polstate->name_set.n_hash);
+		else
+			vfree(polstate->name_set.n_hash);
+	}
+
+	if (polstate->inodev_set.i_hash) {
+		if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
+		    PAGE_SIZE)
+			kfree(polstate->inodev_set.i_hash);
+		else
+			vfree(polstate->inodev_set.i_hash);
+	}
+
+	if (!reload)
+		gr_free_uidset();
+
+	memset(&polstate->name_set, 0, sizeof (struct name_db));
+	memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
+	memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
+	memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
+
+	polstate->default_role = NULL;
+	polstate->kernel_role = NULL;
+	polstate->role_list = NULL;
+
+	return;
+}
+
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
+
+static int alloc_and_copy_string(char **name, unsigned int maxlen)
+{
+	unsigned int len = strnlen_user(*name, maxlen);
+	char *tmp;
+
+	if (!len || len >= maxlen)
+		return -EINVAL;
+
+	if ((tmp = (char *) acl_alloc(len)) == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(tmp, *name, len))
+		return -EFAULT;
+
+	tmp[len-1] = '\0';
+	*name = tmp;
+
+	return 0;
+}
+
+static int
+copy_user_glob(struct acl_object_label *obj)
+{
+	struct acl_object_label *g_tmp, **guser;
+	int error;
+
+	if (obj->globbed == NULL)
+		return 0;
+
+	guser = &obj->globbed;
+	while (*guser) {
+		g_tmp = (struct acl_object_label *)
+			acl_alloc(sizeof (struct acl_object_label));
+		if (g_tmp == NULL)
+			return -ENOMEM;
+
+		if (copy_acl_object_label(g_tmp, *guser))
+			return -EFAULT;
+
+		error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
+		if (error)
+			return error;
+
+		*guser = g_tmp;
+		guser = &(g_tmp->next);
+	}
+
+	return 0;
+}
+
+static int
+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
+	       struct acl_role_label *role)
+{
+	struct acl_object_label *o_tmp;
+	int ret;
+
+	while (userp) {
+		if ((o_tmp = (struct acl_object_label *)
+		     acl_alloc(sizeof (struct acl_object_label))) == NULL)
+			return -ENOMEM;
+
+		if (copy_acl_object_label(o_tmp, userp))
+			return -EFAULT;
+
+		userp = o_tmp->prev;
+
+		ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
+		if (ret)
+			return ret;
+
+		insert_acl_obj_label(o_tmp, subj);
+		if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
+				       o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
+			return -ENOMEM;
+
+		ret = copy_user_glob(o_tmp);
+		if (ret)
+			return ret;
+
+		if (o_tmp->nested) {
+			int already_copied;
+
+			o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
+			if (IS_ERR(o_tmp->nested))
+				return PTR_ERR(o_tmp->nested);
+
+			/* insert into nested subject list if we haven't copied this one yet
+			   to prevent duplicate entries */
+			if (!already_copied) {
+				o_tmp->nested->next = role->hash->first;
+				role->hash->first = o_tmp->nested;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static __u32
+count_user_subjs(struct acl_subject_label *userp)
+{
+	struct acl_subject_label s_tmp;
+	__u32 num = 0;
+
+	while (userp) {
+		if (copy_acl_subject_label(&s_tmp, userp))
+			break;
+
+		userp = s_tmp.prev;
+	}
+
+	return num;
+}
+
+static int
+copy_user_allowedips(struct acl_role_label *rolep)
+{
+	struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
+
+	ruserip = rolep->allowed_ips;
+
+	while (ruserip) {
+		rlast = rtmp;
+
+		if ((rtmp = (struct role_allowed_ip *)
+		     acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
+			return -ENOMEM;
+
+		if (copy_role_allowed_ip(rtmp, ruserip))
+			return -EFAULT;
+
+		ruserip = rtmp->prev;
+
+		if (!rlast) {
+			rtmp->prev = NULL;
+			rolep->allowed_ips = rtmp;
+		} else {
+			rlast->next = rtmp;
+			rtmp->prev = rlast;
+		}
+
+		if (!ruserip)
+			rtmp->next = NULL;
+	}
+
+	return 0;
+}
+
+static int
+copy_user_transitions(struct acl_role_label *rolep)
+{
+	struct role_transition *rusertp, *rtmp = NULL, *rlast;
+	int error;
+
+	rusertp = rolep->transitions;
+
+	while (rusertp) {
+		rlast = rtmp;
+
+		if ((rtmp = (struct role_transition *)
+		     acl_alloc(sizeof (struct role_transition))) == NULL)
+			return -ENOMEM;
+
+		if (copy_role_transition(rtmp, rusertp))
+			return -EFAULT;
+
+		rusertp = rtmp->prev;
+
+		error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
+		if (error)
+			return error;
+
+		if (!rlast) {
+			rtmp->prev = NULL;
+			rolep->transitions = rtmp;
+		} else {
+			rlast->next = rtmp;
+			rtmp->prev = rlast;
+		}
+
+		if (!rusertp)
+			rtmp->next = NULL;
+	}
+
+	return 0;
+}
+
+static __u32 count_user_objs(const struct acl_object_label __user *userp)
+{
+	struct acl_object_label o_tmp;
+	__u32 num = 0;
+
+	while (userp) {
+		if (copy_acl_object_label(&o_tmp, userp))
+			break;
+
+		userp = o_tmp.prev;
+		num++;
+	}
+
+	return num;
+}
+
+static struct acl_subject_label *
+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
+{
+	struct acl_subject_label *s_tmp = NULL, *s_tmp2;
+	__u32 num_objs;
+	struct acl_ip_label **i_tmp, *i_utmp2;
+	struct gr_hash_struct ghash;
+	struct subject_map *subjmap;
+	unsigned int i_num;
+	int err;
+
+	if (already_copied != NULL)
+		*already_copied = 0;
+
+	s_tmp = lookup_subject_map(userp);
+
+	/* we've already copied this subject into the kernel, just return
+	   the reference to it, and don't copy it over again
+	*/
+	if (s_tmp) {
+		if (already_copied != NULL)
+			*already_copied = 1;
+		return(s_tmp);
+	}
+
+	if ((s_tmp = (struct acl_subject_label *)
+	    acl_alloc(sizeof (struct acl_subject_label))) == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
+	if (subjmap == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	subjmap->user = userp;
+	subjmap->kernel = s_tmp;
+	insert_subj_map_entry(subjmap);
+
+	if (copy_acl_subject_label(s_tmp, userp))
+		return ERR_PTR(-EFAULT);
+
+	err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
+	if (err)
+		return ERR_PTR(err);
+
+	if (!strcmp(s_tmp->filename, "/"))
+		role->root_label = s_tmp;
+
+	if (copy_gr_hash_struct(&ghash, s_tmp->hash))
+		return ERR_PTR(-EFAULT);
+
+	/* copy user and group transition tables */
+
+	if (s_tmp->user_trans_num) {
+		uid_t *uidlist;
+
+		uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
+		if (uidlist == NULL)
+			return ERR_PTR(-ENOMEM);
+		if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
+			return ERR_PTR(-EFAULT);
+
+		s_tmp->user_transitions = uidlist;
+	}
+
+	if (s_tmp->group_trans_num) {
+		gid_t *gidlist;
+
+		gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
+		if (gidlist == NULL)
+			return ERR_PTR(-ENOMEM);
+		if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
+			return ERR_PTR(-EFAULT);
+
+		s_tmp->group_transitions = gidlist;
+	}
+
+	/* set up object hash table */
+	num_objs = count_user_objs(ghash.first);
+
+	s_tmp->obj_hash_size = num_objs;
+	s_tmp->obj_hash =
+	    (struct acl_object_label **)
+	    create_table(&(s_tmp->obj_hash_size), sizeof(void *));
+
+	if (!s_tmp->obj_hash)
+		return ERR_PTR(-ENOMEM);
+
+	memset(s_tmp->obj_hash, 0,
+	       s_tmp->obj_hash_size *
+	       sizeof (struct acl_object_label *));
+
+	/* add in objects */
+	err = copy_user_objs(ghash.first, s_tmp, role);
+
+	if (err)
+		return ERR_PTR(err);
+
+	/* set pointer for parent subject */
+	if (s_tmp->parent_subject) {
+		s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
+
+		if (IS_ERR(s_tmp2))
+			return s_tmp2;
+
+		s_tmp->parent_subject = s_tmp2;
+	}
+
+	/* add in ip acls */
+
+	if (!s_tmp->ip_num) {
+		s_tmp->ips = NULL;
+		goto insert;
+	}
+
+	i_tmp =
+	    (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
+					       sizeof (struct acl_ip_label *));
+
+	if (!i_tmp)
+		return ERR_PTR(-ENOMEM);
+
+	for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
+		*(i_tmp + i_num) =
+		    (struct acl_ip_label *)
+		    acl_alloc(sizeof (struct acl_ip_label));
+		if (!*(i_tmp + i_num))
+			return ERR_PTR(-ENOMEM);
+
+		if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
+			return ERR_PTR(-EFAULT);
+
+		if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
+			return ERR_PTR(-EFAULT);
+		
+		if ((*(i_tmp + i_num))->iface == NULL)
+			continue;
+
+		err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
+		if (err)
+			return ERR_PTR(err);
+	}
+
+	s_tmp->ips = i_tmp;
+
+insert:
+	if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
+			       s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
+		return ERR_PTR(-ENOMEM);
+
+	return s_tmp;
+}
+
+static int
+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
+{
+	struct acl_subject_label s_pre;
+	struct acl_subject_label * ret;
+	int err;
+
+	while (userp) {
+		if (copy_acl_subject_label(&s_pre, userp))
+			return -EFAULT;
+		
+		ret = do_copy_user_subj(userp, role, NULL);
+
+		err = PTR_ERR(ret);
+		if (IS_ERR(ret))
+			return err;
+
+		insert_acl_subj_label(ret, role);
+
+		userp = s_pre.prev;
+	}
+
+	return 0;
+}
+
+static int
+copy_user_acl(struct gr_arg *arg)
+{
+	struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
+	struct acl_subject_label *subj_list;
+	struct sprole_pw *sptmp;
+	struct gr_hash_struct *ghash;
+	uid_t *domainlist;
+	unsigned int r_num;
+	int err = 0;
+	__u16 i;
+	__u32 num_subjs;
+
+	/* we need a default and kernel role */
+	if (arg->role_db.num_roles < 2)
+		return -EINVAL;
+
+	/* copy special role authentication info from userspace */
+
+	polstate->num_sprole_pws = arg->num_sprole_pws;
+	polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
+
+	if (!polstate->acl_special_roles && polstate->num_sprole_pws)
+		return -ENOMEM;
+
+	for (i = 0; i < polstate->num_sprole_pws; i++) {
+		sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
+		if (!sptmp)
+			return -ENOMEM;
+		if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
+			return -EFAULT;
+
+		err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
+		if (err)
+			return err;
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
+		printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
+#endif
+
+		polstate->acl_special_roles[i] = sptmp;
+	}
+
+	r_utmp = (struct acl_role_label **) arg->role_db.r_table;
+
+	for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
+		r_tmp = acl_alloc(sizeof (struct acl_role_label));
+
+		if (!r_tmp)
+			return -ENOMEM;
+
+		if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
+			return -EFAULT;
+
+		if (copy_acl_role_label(r_tmp, r_utmp2))
+			return -EFAULT;
+
+		err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
+		if (err)
+			return err;
+
+		if (!strcmp(r_tmp->rolename, "default")
+		    && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
+			polstate->default_role = r_tmp;
+		} else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
+			polstate->kernel_role = r_tmp;
+		}
+
+		if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
+			return -ENOMEM;
+
+		if (copy_gr_hash_struct(ghash, r_tmp->hash))
+			return -EFAULT;
+
+		r_tmp->hash = ghash;
+
+		num_subjs = count_user_subjs(r_tmp->hash->first);
+
+		r_tmp->subj_hash_size = num_subjs;
+		r_tmp->subj_hash =
+		    (struct acl_subject_label **)
+		    create_table(&(r_tmp->subj_hash_size), sizeof(void *));
+
+		if (!r_tmp->subj_hash)
+			return -ENOMEM;
+
+		err = copy_user_allowedips(r_tmp);
+		if (err)
+			return err;
+
+		/* copy domain info */
+		if (r_tmp->domain_children != NULL) {
+			domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
+			if (domainlist == NULL)
+				return -ENOMEM;
+
+			if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
+				return -EFAULT;
+
+			r_tmp->domain_children = domainlist;
+		}
+
+		err = copy_user_transitions(r_tmp);
+		if (err)
+			return err;
+
+		memset(r_tmp->subj_hash, 0,
+		       r_tmp->subj_hash_size *
+		       sizeof (struct acl_subject_label *));
+
+		/* acquire the list of subjects, then NULL out
+		   the list prior to parsing the subjects for this role,
+		   as during this parsing the list is replaced with a list
+		   of *nested* subjects for the role
+		*/
+		subj_list = r_tmp->hash->first;
+
+		/* set nested subject list to null */
+		r_tmp->hash->first = NULL;
+
+		err = copy_user_subjs(subj_list, r_tmp);
+
+		if (err)
+			return err;
+
+		insert_acl_role_label(r_tmp);
+	}
+
+	if (polstate->default_role == NULL || polstate->kernel_role == NULL)
+		return -EINVAL;
+
+	return err;
+}
+
+static int gracl_reload_apply_policies(void *reload)
+{
+	struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
+	struct task_struct *task, *task2;
+	struct acl_role_label *role, *rtmp;
+	struct acl_subject_label *subj;
+	const struct cred *cred;
+	int role_applied;
+	int ret = 0;
+
+	memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
+	memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
+
+	/* first make sure we'll be able to apply the new policy cleanly */
+	do_each_thread(task2, task) {
+		if (task->exec_file == NULL)
+			continue;
+		role_applied = 0;
+		if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
+			/* preserve special roles */
+			FOR_EACH_ROLE_START(role)
+				if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
+					rtmp = task->role;
+					task->role = role;
+					role_applied = 1;
+					break;
+				}
+			FOR_EACH_ROLE_END(role)
+		}
+		if (!role_applied) {
+			cred = __task_cred(task);
+			rtmp = task->role;
+			task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
+		}
+		/* this handles non-nested inherited subjects, nested subjects will still
+		   be dropped currently */
+		subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
+		task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
+		/* change the role back so that we've made no modifications to the policy */
+		task->role = rtmp;
+
+		if (subj == NULL || task->tmpacl == NULL) {
+			ret = -EINVAL;
+			goto out;
+		}
+	} while_each_thread(task2, task);
+
+	/* now actually apply the policy */
+
+	do_each_thread(task2, task) {
+		if (task->exec_file) {
+			role_applied = 0;
+			if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
+				/* preserve special roles */
+				FOR_EACH_ROLE_START(role)
+					if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
+						task->role = role;
+						role_applied = 1;
+						break;
+					}
+				FOR_EACH_ROLE_END(role)
+			}
+			if (!role_applied) {
+				cred = __task_cred(task);
+				task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
+			}
+			/* this handles non-nested inherited subjects, nested subjects will still
+			   be dropped currently */
+			if (!reload_state->oldmode && task->inherited)
+				subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
+			else {
+				/* looked up and tagged to the task previously */
+				subj = task->tmpacl;
+			}
+			/* subj will be non-null */
+			__gr_apply_subject_to_task(polstate, task, subj);
+			if (reload_state->oldmode) {
+				task->acl_role_id = 0;
+				task->acl_sp_role = 0;
+				task->inherited = 0;
+			}
+		} else {
+			// it's a kernel process
+			task->role = polstate->kernel_role;
+			task->acl = polstate->kernel_role->root_label;
+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
+			task->acl->mode &= ~GR_PROCFIND;
+#endif
+		}
+	} while_each_thread(task2, task);
+
+	memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
+	memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
+
+out:
+
+	return ret;
+}
+
+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
+{
+	struct gr_reload_state new_reload_state = { };
+	int err;
+
+	new_reload_state.oldpolicy_ptr = polstate;
+	new_reload_state.oldalloc_ptr = current_alloc_state;
+	new_reload_state.oldmode = oldmode;
+
+	current_alloc_state = &new_reload_state.newalloc;
+	polstate = &new_reload_state.newpolicy;
+
+	/* everything relevant is now saved off, copy in the new policy */
+	if (init_variables(args, true)) {
+		gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
+		err = -ENOMEM;
+		goto error;
+	}
+
+	err = copy_user_acl(args);
+	free_init_variables();
+	if (err)
+		goto error;
+	/* the new policy is copied in, with the old policy available via saved_state
+	   first go through applying roles, making sure to preserve special roles
+	   then apply new subjects, making sure to preserve inherited and nested subjects,
+	   though currently only inherited subjects will be preserved
+	*/
+	err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
+	if (err)
+		goto error;
+
+	/* we've now applied the new policy, so restore the old policy state to free it */
+	polstate = &new_reload_state.oldpolicy;
+	current_alloc_state = &new_reload_state.oldalloc;
+	free_variables(true);
+
+	/* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
+	   to running_polstate/current_alloc_state inside stop_machine
+	*/
+	err = 0;
+	goto out;
+error:
+	/* on error of loading the new policy, we'll just keep the previous
+	   policy set around
+	*/
+	free_variables(true);
+
+	/* doesn't affect runtime, but maintains consistent state */
+out:
+	polstate = new_reload_state.oldpolicy_ptr;
+	current_alloc_state = new_reload_state.oldalloc_ptr;
+
+	return err;
+}
+
+static int
+gracl_init(struct gr_arg *args)
+{
+	int error = 0;
+
+	memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
+	memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
+
+	if (init_variables(args, false)) {
+		gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
+		error = -ENOMEM;
+		goto out;
+	}
+
+	error = copy_user_acl(args);
+	free_init_variables();
+	if (error)
+		goto out;
+
+	error = gr_set_acls(0);
+	if (error)
+		goto out;
+
+	gr_enable_rbac_system();
+
+	return 0;
+
+out:
+	free_variables(false);
+	return error;
+}
+
+static int
+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
+			 unsigned char **sum)
+{
+	struct acl_role_label *r;
+	struct role_allowed_ip *ipp;
+	struct role_transition *trans;
+	unsigned int i;
+	int found = 0;
+	u32 curr_ip = current->signal->curr_ip;
+
+	current->signal->saved_ip = curr_ip;
+
+	/* check transition table */
+
+	for (trans = current->role->transitions; trans; trans = trans->next) {
+		if (!strcmp(rolename, trans->rolename)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+
+	/* handle special roles that do not require authentication
+	   and check ip */
+
+	FOR_EACH_ROLE_START(r)
+		if (!strcmp(rolename, r->rolename) &&
+		    (r->roletype & GR_ROLE_SPECIAL)) {
+			found = 0;
+			if (r->allowed_ips != NULL) {
+				for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
+					if ((ntohl(curr_ip) & ipp->netmask) ==
+					     (ntohl(ipp->addr) & ipp->netmask))
+						found = 1;
+				}
+			} else
+				found = 2;
+			if (!found)
+				return 0;
+
+			if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
+			    ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
+				*salt = NULL;
+				*sum = NULL;
+				return 1;
+			}
+		}
+	FOR_EACH_ROLE_END(r)
+
+	for (i = 0; i < polstate->num_sprole_pws; i++) {
+		if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
+			*salt = polstate->acl_special_roles[i]->salt;
+			*sum = polstate->acl_special_roles[i]->sum;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+int gr_check_secure_terminal(struct task_struct *task)
+{
+	struct task_struct *p, *p2, *p3;
+	struct files_struct *files;
+	struct fdtable *fdt;
+	struct file *our_file = NULL, *file;
+	int i;
+
+	if (task->signal->tty == NULL)
+		return 1;
+
+	files = get_files_struct(task);
+	if (files != NULL) {
+		rcu_read_lock();
+		fdt = files_fdtable(files);
+		for (i=0; i < fdt->max_fds; i++) {
+			file = fcheck_files(files, i);
+			if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
+				get_file(file);
+				our_file = file;
+			}
+		}
+		rcu_read_unlock();
+		put_files_struct(files);
+	}
+
+	if (our_file == NULL)
+		return 1;
+
+	read_lock(&tasklist_lock);
+	do_each_thread(p2, p) {
+		files = get_files_struct(p);
+		if (files == NULL ||
+		    (p->signal && p->signal->tty == task->signal->tty)) {
+			if (files != NULL)
+				put_files_struct(files);
+			continue;
+		}
+		rcu_read_lock();
+		fdt = files_fdtable(files);
+		for (i=0; i < fdt->max_fds; i++) {
+			file = fcheck_files(files, i);
+			if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
+			    file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
+				p3 = task;
+				while (task_pid_nr(p3) > 0) {
+					if (p3 == p)
+						break;
+					p3 = p3->real_parent;
+				}
+				if (p3 == p)
+					break;
+				gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
+				gr_handle_alertkill(p);
+				rcu_read_unlock();
+				put_files_struct(files);
+				read_unlock(&tasklist_lock);
+				fput(our_file);
+				return 0;
+			}
+		}
+		rcu_read_unlock();
+		put_files_struct(files);
+	} while_each_thread(p2, p);
+	read_unlock(&tasklist_lock);
+
+	fput(our_file);
+	return 1;
+}
+
+ssize_t
+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
+{
+	struct gr_arg_wrapper uwrap;
+	unsigned char *sprole_salt = NULL;
+	unsigned char *sprole_sum = NULL;
+	int error = 0;
+	int error2 = 0;
+	size_t req_count = 0;
+	unsigned char oldmode = 0;
+
+	mutex_lock(&gr_dev_mutex);
+
+	if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
+		error = -EPERM;
+		goto out;
+	}
+
+#ifdef CONFIG_COMPAT
+	pax_open_kernel();
+	if (is_compat_task()) {
+		copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
+		copy_gr_arg = &copy_gr_arg_compat;
+		copy_acl_object_label = &copy_acl_object_label_compat;
+		copy_acl_subject_label = &copy_acl_subject_label_compat;
+		copy_acl_role_label = &copy_acl_role_label_compat;
+		copy_acl_ip_label = &copy_acl_ip_label_compat;
+		copy_role_allowed_ip = &copy_role_allowed_ip_compat;
+		copy_role_transition = &copy_role_transition_compat;
+		copy_sprole_pw = &copy_sprole_pw_compat;
+		copy_gr_hash_struct = &copy_gr_hash_struct_compat;
+		copy_pointer_from_array = &copy_pointer_from_array_compat;
+		get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
+	} else {
+		copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
+		copy_gr_arg = &copy_gr_arg_normal;
+		copy_acl_object_label = &copy_acl_object_label_normal;
+		copy_acl_subject_label = &copy_acl_subject_label_normal;
+		copy_acl_role_label = &copy_acl_role_label_normal;
+		copy_acl_ip_label = &copy_acl_ip_label_normal;
+		copy_role_allowed_ip = &copy_role_allowed_ip_normal;
+		copy_role_transition = &copy_role_transition_normal;
+		copy_sprole_pw = &copy_sprole_pw_normal;
+		copy_gr_hash_struct = &copy_gr_hash_struct_normal;
+		copy_pointer_from_array = &copy_pointer_from_array_normal;
+		get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
+	}
+	pax_close_kernel();
+#endif
+
+	req_count = get_gr_arg_wrapper_size();
+
+	if (count != req_count) {
+		gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
+		error = -EINVAL;
+		goto out;
+	}
+
+	
+	if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
+		gr_auth_expires = 0;
+		gr_auth_attempts = 0;
+	}
+
+	error = copy_gr_arg_wrapper(buf, &uwrap);
+	if (error)
+		goto out;
+
+	error = copy_gr_arg(uwrap.arg, &gr_usermode);
+	if (error)
+		goto out;
+
+	if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
+	    gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
+	    time_after(gr_auth_expires, get_seconds())) {
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* if non-root trying to do anything other than use a special role,
+	   do not attempt authentication, do not count towards authentication
+	   locking
+	 */
+
+	if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
+	    gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
+	    gr_is_global_nonroot(current_uid())) {
+		error = -EPERM;
+		goto out;
+	}
+
+	/* ensure pw and special role name are null terminated */
+
+	gr_usermode.pw[GR_PW_LEN - 1] = '\0';
+	gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
+
+	/* Okay.
+	 * We have enough of the argument structure (we have yet
+	 * to copy_from_user the tables themselves). Copy the tables
+	 * only if we need them, i.e. for loading operations. */
+
+	switch (gr_usermode.mode) {
+	case GR_STATUS:
+			if (gr_acl_is_enabled()) {
+				error = 1;
+				if (!gr_check_secure_terminal(current))
+					error = 3;
+			} else
+				error = 2;
+			goto out;
+	case GR_SHUTDOWN:
+		if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
+			stop_machine(gr_rbac_disable, NULL, NULL);
+			free_variables(false);
+			memset(&gr_usermode, 0, sizeof(gr_usermode));
+			memset(&gr_system_salt, 0, sizeof(gr_system_salt));
+			memset(&gr_system_sum, 0, sizeof(gr_system_sum));
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
+		} else if (gr_acl_is_enabled()) {
+			gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
+			error = -EPERM;
+		} else {
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
+			error = -EAGAIN;
+		}
+		break;
+	case GR_ENABLE:
+		if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
+			gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
+		else {
+			if (gr_acl_is_enabled())
+				error = -EAGAIN;
+			else
+				error = error2;
+			gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
+		}
+		break;
+	case GR_OLDRELOAD:
+		oldmode = 1;
+	case GR_RELOAD:
+		if (!gr_acl_is_enabled()) {
+			gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
+			error = -EAGAIN;
+		} else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
+			error2 = gracl_reload(&gr_usermode, oldmode);
+			if (!error2)
+				gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
+			else {
+				gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
+				error = error2;
+			}
+		} else {
+			gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
+			error = -EPERM;
+		}
+		break;
+	case GR_SEGVMOD:
+		if (unlikely(!gr_acl_is_enabled())) {
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
+			error = -EAGAIN;
+			break;
+		}
+
+		if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
+			if (gr_usermode.segv_device && gr_usermode.segv_inode) {
+				struct acl_subject_label *segvacl;
+				segvacl =
+				    lookup_acl_subj_label(gr_usermode.segv_inode,
+							  gr_usermode.segv_device,
+							  current->role);
+				if (segvacl) {
+					segvacl->crashes = 0;
+					segvacl->expires = 0;
+				}
+			} else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
+				gr_remove_uid(gr_usermode.segv_uid);
+			}
+		} else {
+			gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
+			error = -EPERM;
+		}
+		break;
+	case GR_SPROLE:
+	case GR_SPROLEPAM:
+		if (unlikely(!gr_acl_is_enabled())) {
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
+			error = -EAGAIN;
+			break;
+		}
+
+		if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
+			current->role->expires = 0;
+			current->role->auth_attempts = 0;
+		}
+
+		if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
+		    time_after(current->role->expires, get_seconds())) {
+			error = -EBUSY;
+			goto out;
+		}
+
+		if (lookup_special_role_auth
+		    (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
+		    && ((!sprole_salt && !sprole_sum)
+			|| !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
+			char *p = "";
+			assign_special_role(gr_usermode.sp_role);
+			read_lock(&tasklist_lock);
+			if (current->real_parent)
+				p = current->real_parent->role->rolename;
+			read_unlock(&tasklist_lock);
+			gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
+					p, acl_sp_role_value);
+		} else {
+			gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
+			error = -EPERM;
+			if(!(current->role->auth_attempts++))
+				current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
+
+			goto out;
+		}
+		break;
+	case GR_UNSPROLE:
+		if (unlikely(!gr_acl_is_enabled())) {
+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
+			error = -EAGAIN;
+			break;
+		}
+
+		if (current->role->roletype & GR_ROLE_SPECIAL) {
+			char *p = "";
+			int i = 0;
+
+			read_lock(&tasklist_lock);
+			if (current->real_parent) {
+				p = current->real_parent->role->rolename;
+				i = current->real_parent->acl_role_id;
+			}
+			read_unlock(&tasklist_lock);
+
+			gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
+			gr_set_acls(1);
+		} else {
+			error = -EPERM;
+			goto out;
+		}
+		break;
+	default:
+		gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
+		error = -EINVAL;
+		break;
+	}
+
+	if (error != -EPERM)
+		goto out;
+
+	if(!(gr_auth_attempts++))
+		gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
+
+      out:
+	mutex_unlock(&gr_dev_mutex);
+
+	if (!error)
+		error = req_count;
+
+	return error;
+}
+
+int
+gr_set_acls(const int type)
+{
+	struct task_struct *task, *task2;
+	struct acl_role_label *role = current->role;
+	struct acl_subject_label *subj;
+	__u16 acl_role_id = current->acl_role_id;
+	const struct cred *cred;
+	int ret;
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	read_lock(&grsec_exec_file_lock);
+	do_each_thread(task2, task) {
+		/* check to see if we're called from the exit handler,
+		   if so, only replace ACLs that have inherited the admin
+		   ACL */
+
+		if (type && (task->role != role ||
+			     task->acl_role_id != acl_role_id))
+			continue;
+
+		task->acl_role_id = 0;
+		task->acl_sp_role = 0;
+		task->inherited = 0;
+
+		if (task->exec_file) {
+			cred = __task_cred(task);
+			task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
+			subj = __gr_get_subject_for_task(polstate, task, NULL);
+			if (subj == NULL) {
+				ret = -EINVAL;
+				read_unlock(&grsec_exec_file_lock);
+				read_unlock(&tasklist_lock);
+				rcu_read_unlock();
+				gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
+				return ret;
+			}
+			__gr_apply_subject_to_task(polstate, task, subj);
+		} else {
+			// it's a kernel process
+			task->role = polstate->kernel_role;
+			task->acl = polstate->kernel_role->root_label;
+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
+			task->acl->mode &= ~GR_PROCFIND;
+#endif
+		}
+	} while_each_thread(task2, task);
+	read_unlock(&grsec_exec_file_lock);
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+
+	return 0;
+}
diff -ruNp linux-3.13.11/grsecurity/gracl_res.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_res.c
--- linux-3.13.11/grsecurity/gracl_res.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_res.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,68 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/gracl.h>
+#include <linux/grinternal.h>
+
+static const char *restab_log[] = {
+	[RLIMIT_CPU] = "RLIMIT_CPU",
+	[RLIMIT_FSIZE] = "RLIMIT_FSIZE",
+	[RLIMIT_DATA] = "RLIMIT_DATA",
+	[RLIMIT_STACK] = "RLIMIT_STACK",
+	[RLIMIT_CORE] = "RLIMIT_CORE",
+	[RLIMIT_RSS] = "RLIMIT_RSS",
+	[RLIMIT_NPROC] = "RLIMIT_NPROC",
+	[RLIMIT_NOFILE] = "RLIMIT_NOFILE",
+	[RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
+	[RLIMIT_AS] = "RLIMIT_AS",
+	[RLIMIT_LOCKS] = "RLIMIT_LOCKS",
+	[RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
+	[RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
+	[RLIMIT_NICE] = "RLIMIT_NICE",
+	[RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
+	[RLIMIT_RTTIME] = "RLIMIT_RTTIME",
+	[GR_CRASH_RES] = "RLIMIT_CRASH"
+};
+
+void
+gr_log_resource(const struct task_struct *task,
+		const int res, const unsigned long wanted, const int gt)
+{
+	const struct cred *cred;
+	unsigned long rlim;
+
+	if (!gr_acl_is_enabled() && !grsec_resource_logging)
+		return;
+
+	// not yet supported resource
+	if (unlikely(!restab_log[res]))
+		return;
+
+	if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
+		rlim = task_rlimit_max(task, res);
+	else
+		rlim = task_rlimit(task, res);
+
+	if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
+		return;
+
+	rcu_read_lock();
+	cred = __task_cred(task);
+
+	if (res == RLIMIT_NPROC && 
+	    (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || 
+	     cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
+		goto out_rcu_unlock;
+	else if (res == RLIMIT_MEMLOCK &&
+		 cap_raised(cred->cap_effective, CAP_IPC_LOCK))
+		goto out_rcu_unlock;
+	else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
+		goto out_rcu_unlock;
+	rcu_read_unlock();
+
+	gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
+
+	return;
+out_rcu_unlock:
+	rcu_read_unlock();
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/gracl_segv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_segv.c
--- linux-3.13.11/grsecurity/gracl_segv.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_segv.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,313 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/mman.h>
+#include <net/sock.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+#include <linux/magic.h>
+#include <linux/pagemap.h>
+#include "../fs/btrfs/async-thread.h"
+#include "../fs/btrfs/ctree.h"
+#include "../fs/btrfs/btrfs_inode.h"
+#endif
+
+static struct crash_uid *uid_set;
+static unsigned short uid_used;
+static DEFINE_SPINLOCK(gr_uid_lock);
+extern rwlock_t gr_inode_lock;
+extern struct acl_subject_label *
+	lookup_acl_subj_label(const ino_t inode, const dev_t dev,
+			      struct acl_role_label *role);
+
+static inline dev_t __get_dev(const struct dentry *dentry)
+{
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+	if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
+		return BTRFS_I(dentry->d_inode)->root->anon_dev;
+	else
+#endif
+		return dentry->d_sb->s_dev;
+}
+
+int
+gr_init_uidset(void)
+{
+	uid_set =
+	    kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
+	uid_used = 0;
+
+	return uid_set ? 1 : 0;
+}
+
+void
+gr_free_uidset(void)
+{
+	if (uid_set) {
+		struct crash_uid *tmpset;
+		spin_lock(&gr_uid_lock);
+		tmpset = uid_set;
+		uid_set = NULL;
+		uid_used = 0;
+		spin_unlock(&gr_uid_lock);
+		if (tmpset)
+			kfree(tmpset);
+	}
+
+	return;
+}
+
+int
+gr_find_uid(const uid_t uid)
+{
+	struct crash_uid *tmp = uid_set;
+	uid_t buid;
+	int low = 0, high = uid_used - 1, mid;
+
+	while (high >= low) {
+		mid = (low + high) >> 1;
+		buid = tmp[mid].uid;
+		if (buid == uid)
+			return mid;
+		if (buid > uid)
+			high = mid - 1;
+		if (buid < uid)
+			low = mid + 1;
+	}
+
+	return -1;
+}
+
+static __inline__ void
+gr_insertsort(void)
+{
+	unsigned short i, j;
+	struct crash_uid index;
+
+	for (i = 1; i < uid_used; i++) {
+		index = uid_set[i];
+		j = i;
+		while ((j > 0) && uid_set[j - 1].uid > index.uid) {
+			uid_set[j] = uid_set[j - 1];
+			j--;
+		}
+		uid_set[j] = index;
+	}
+
+	return;
+}
+
+static __inline__ void
+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
+{
+	int loc;
+	uid_t uid = GR_GLOBAL_UID(kuid);
+
+	if (uid_used == GR_UIDTABLE_MAX)
+		return;
+
+	loc = gr_find_uid(uid);
+
+	if (loc >= 0) {
+		uid_set[loc].expires = expires;
+		return;
+	}
+
+	uid_set[uid_used].uid = uid;
+	uid_set[uid_used].expires = expires;
+	uid_used++;
+
+	gr_insertsort();
+
+	return;
+}
+
+void
+gr_remove_uid(const unsigned short loc)
+{
+	unsigned short i;
+
+	for (i = loc + 1; i < uid_used; i++)
+		uid_set[i - 1] = uid_set[i];
+
+	uid_used--;
+
+	return;
+}
+
+int
+gr_check_crash_uid(const kuid_t kuid)
+{
+	int loc;
+	int ret = 0;
+	uid_t uid;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	uid = GR_GLOBAL_UID(kuid);
+
+	spin_lock(&gr_uid_lock);
+	loc = gr_find_uid(uid);
+
+	if (loc < 0)
+		goto out_unlock;
+
+	if (time_before_eq(uid_set[loc].expires, get_seconds()))
+		gr_remove_uid(loc);
+	else
+		ret = 1;
+
+out_unlock:
+	spin_unlock(&gr_uid_lock);
+	return ret;
+}
+
+static __inline__ int
+proc_is_setxid(const struct cred *cred)
+{
+	if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
+	    !uid_eq(cred->uid, cred->fsuid))
+		return 1;
+	if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
+	    !gid_eq(cred->gid, cred->fsgid))
+		return 1;
+
+	return 0;
+}
+
+extern int gr_fake_force_sig(int sig, struct task_struct *t);
+
+void
+gr_handle_crash(struct task_struct *task, const int sig)
+{
+	struct acl_subject_label *curr;
+	struct task_struct *tsk, *tsk2;
+	const struct cred *cred;
+	const struct cred *cred2;
+
+	if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
+		return;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return;
+
+	curr = task->acl;
+
+	if (!(curr->resmask & (1U << GR_CRASH_RES)))
+		return;
+
+	if (time_before_eq(curr->expires, get_seconds())) {
+		curr->expires = 0;
+		curr->crashes = 0;
+	}
+
+	curr->crashes++;
+
+	if (!curr->expires)
+		curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
+
+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
+	    time_after(curr->expires, get_seconds())) {
+		rcu_read_lock();
+		cred = __task_cred(task);
+		if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
+			gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
+			spin_lock(&gr_uid_lock);
+			gr_insert_uid(cred->uid, curr->expires);
+			spin_unlock(&gr_uid_lock);
+			curr->expires = 0;
+			curr->crashes = 0;
+			read_lock(&tasklist_lock);
+			do_each_thread(tsk2, tsk) {
+				cred2 = __task_cred(tsk);
+				if (tsk != task && uid_eq(cred2->uid, cred->uid))
+					gr_fake_force_sig(SIGKILL, tsk);
+			} while_each_thread(tsk2, tsk);
+			read_unlock(&tasklist_lock);
+		} else {
+			gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
+			read_lock(&tasklist_lock);
+			read_lock(&grsec_exec_file_lock);
+			do_each_thread(tsk2, tsk) {
+				if (likely(tsk != task)) {
+					// if this thread has the same subject as the one that triggered
+					// RES_CRASH and it's the same binary, kill it
+					if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
+						gr_fake_force_sig(SIGKILL, tsk);
+				}
+			} while_each_thread(tsk2, tsk);
+			read_unlock(&grsec_exec_file_lock);
+			read_unlock(&tasklist_lock);
+		}
+		rcu_read_unlock();
+	}
+
+	return;
+}
+
+int
+gr_check_crash_exec(const struct file *filp)
+{
+	struct acl_subject_label *curr;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return 0;
+
+	read_lock(&gr_inode_lock);
+	curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
+				     __get_dev(filp->f_path.dentry),
+				     current->role);
+	read_unlock(&gr_inode_lock);
+
+	if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
+	    (!curr->crashes && !curr->expires))
+		return 0;
+
+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
+	    time_after(curr->expires, get_seconds()))
+		return 1;
+	else if (time_before_eq(curr->expires, get_seconds())) {
+		curr->crashes = 0;
+		curr->expires = 0;
+	}
+
+	return 0;
+}
+
+void
+gr_handle_alertkill(struct task_struct *task)
+{
+	struct acl_subject_label *curracl;
+	__u32 curr_ip;
+	struct task_struct *p, *p2;
+
+	if (unlikely(!gr_acl_is_enabled()))
+		return;
+
+	curracl = task->acl;
+	curr_ip = task->signal->curr_ip;
+
+	if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
+		read_lock(&tasklist_lock);
+		do_each_thread(p2, p) {
+			if (p->signal->curr_ip == curr_ip)
+				gr_fake_force_sig(SIGKILL, p);
+		} while_each_thread(p2, p);
+		read_unlock(&tasklist_lock);
+	} else if (curracl->mode & GR_KILLPROC)
+		gr_fake_force_sig(SIGKILL, task);
+
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/gracl_shm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_shm.c
--- linux-3.13.11/grsecurity/gracl_shm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/gracl_shm.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,40 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/gracl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime, const kuid_t cuid, const int shmid)
+{
+	struct task_struct *task;
+
+	if (!gr_acl_is_enabled())
+		return 1;
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+
+	task = find_task_by_vpid(shm_cprid);
+
+	if (unlikely(!task))
+		task = find_task_by_vpid(shm_lapid);
+
+	if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
+			      (task_pid_nr(task) == shm_lapid)) &&
+		     (task->acl->mode & GR_PROTSHM) &&
+		     (task->acl != current->acl))) {
+		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
+		gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
+		return 0;
+	}
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+
+	return 1;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_chdir.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_chdir.c
--- linux-3.13.11/grsecurity/grsec_chdir.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_chdir.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,19 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	if ((grsec_enable_chdir && grsec_enable_group &&
+	     in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
+					      !grsec_enable_group)) {
+		gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
+	}
+#endif
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_chroot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_chroot.c
--- linux-3.13.11/grsecurity/grsec_chroot.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_chroot.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,370 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/types.h>
+#include "../fs/mount.h"
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+int gr_init_ran;
+#endif
+
+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
+	    		     path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+			     && gr_init_ran
+#endif
+	   )
+		task->gr_is_chrooted = 1;
+	else {
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+		if (task_pid_nr(task) == 1 && !gr_init_ran)
+			gr_init_ran = 1;
+#endif
+		task->gr_is_chrooted = 0;
+	}
+
+	task->gr_chroot_dentry = path->dentry;
+#endif
+	return;
+}
+
+void gr_clear_chroot_entries(struct task_struct *task)
+{
+#ifdef CONFIG_GRKERNSEC
+	task->gr_is_chrooted = 0;
+	task->gr_chroot_dentry = NULL;
+#endif
+	return;
+}	
+
+int
+gr_handle_chroot_unix(const pid_t pid)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	struct task_struct *p;
+
+	if (unlikely(!grsec_enable_chroot_unix))
+		return 1;
+
+	if (likely(!proc_is_chrooted(current)))
+		return 1;
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	p = find_task_by_vpid_unrestricted(pid);
+	if (unlikely(p && !have_same_root(current, p))) {
+		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
+		gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
+		return 0;
+	}
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+#endif
+	return 1;
+}
+
+int
+gr_handle_chroot_nice(void)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
+		gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	if (grsec_enable_chroot_nice && (niceval < task_nice(p))
+			&& proc_is_chrooted(current)) {
+		gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	struct task_struct *p;
+	int ret = 0;
+	if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
+		return ret;
+
+	read_lock(&tasklist_lock);
+	do_each_pid_task(pid, type, p) {
+		if (!have_same_root(current, p)) {
+			ret = 1;
+			goto out;
+		}
+	} while_each_pid_task(pid, type, p);
+out:
+	read_unlock(&tasklist_lock);
+	return ret;
+#endif
+	return 0;
+}
+
+int
+gr_pid_is_chrooted(struct task_struct *p)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
+		return 0;
+
+	if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
+	    !have_same_root(current, p)) {
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
+
+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
+{
+	struct path path, currentroot;
+	int ret = 0;
+
+	path.dentry = (struct dentry *)u_dentry;
+	path.mnt = (struct vfsmount *)u_mnt;
+	get_fs_root(current->fs, &currentroot);
+	if (path_is_under(&path, &currentroot))
+		ret = 1;
+	path_put(&currentroot);
+
+	return ret;
+}
+#endif
+
+int
+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	if (!grsec_enable_chroot_fchdir)
+		return 1;
+
+	if (!proc_is_chrooted(current))
+		return 1;
+	else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
+		return 0;
+	}
+#endif
+	return 1;
+}
+
+int
+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	struct task_struct *p;
+	time_t starttime;
+
+	if (unlikely(!grsec_enable_chroot_shmat))
+		return 1;
+
+	if (likely(!proc_is_chrooted(current)))
+		return 1;
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+
+	if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
+		starttime = p->start_time.tv_sec;
+		if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
+			if (have_same_root(current, p)) {
+				goto allow;
+			} else {
+				read_unlock(&tasklist_lock);
+				rcu_read_unlock();
+				gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
+				return 0;
+			}
+		}
+		/* creator exited, pid reuse, fall through to next check */
+	}
+	if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
+		if (unlikely(!have_same_root(current, p))) {
+			read_unlock(&tasklist_lock);
+			rcu_read_unlock();
+			gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
+			return 0;
+		}
+	}
+
+allow:
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+#endif
+	return 1;
+}
+
+void
+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
+		gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
+#endif
+	return;
+}
+
+int
+gr_handle_chroot_mknod(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const int mode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && 
+	    proc_is_chrooted(current)) {
+		gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_mount(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const char *dev_name)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
+		gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_pivot(void)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
+		gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
+	    !gr_is_outside_chroot(dentry, mnt)) {
+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+extern const char *captab_log[];
+extern int captab_log_entries;
+
+int
+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
+		kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
+		if (cap_raised(chroot_caps, cap)) {
+			if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
+				gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
+			}
+			return 0;
+		}
+	}
+#endif
+	return 1;
+}
+
+int
+gr_chroot_is_capable(const int cap)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	return gr_task_chroot_is_capable(current, current_cred(), cap);
+#endif
+	return 1;
+}
+
+int
+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
+		kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
+		if (cap_raised(chroot_caps, cap)) {
+			return 0;
+		}
+	}
+#endif
+	return 1;
+}
+
+int
+gr_chroot_is_capable_nolog(const int cap)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	return gr_task_chroot_is_capable_nolog(current, cap);
+#endif
+	return 1;
+}
+
+int
+gr_handle_chroot_sysctl(const int op)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
+	    proc_is_chrooted(current))
+		return -EACCES;
+#endif
+	return 0;
+}
+
+void
+gr_handle_chroot_chdir(const struct path *path)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	if (grsec_enable_chroot_chdir)
+		set_fs_pwd(current->fs, path);
+#endif
+	return;
+}
+
+int
+gr_handle_chroot_chmod(const struct dentry *dentry,
+		       const struct vfsmount *mnt, const int mode)
+{
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	/* allow chmod +s on directories, but not files */
+	if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
+	    ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
+	    proc_is_chrooted(current)) {
+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
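
(Aside on the chroot capability handling above: gr_task_chroot_is_capable() boils down to masking a chrooted task's capabilities against a fixed set, GR_CHROOT_CAPS, before the normal capability logic gets a say. The stand-alone sketch below models only that core test; the deny mask, helper names and the omitted sysctl/logging details are illustrative, not taken from the patch.)

#include <stdio.h>

/* illustrative only: a made-up mask of capabilities denied inside a chroot,
 * standing in for GR_CHROOT_CAPS (bits 21/16/12 ~ SYS_ADMIN/SYS_MODULE/NET_ADMIN) */
#define EXAMPLE_CHROOT_DENY_MASK ((1ULL << 21) | (1ULL << 16) | (1ULL << 12))

static int cap_in_mask(unsigned long long mask, int cap)
{
	return (int)((mask >> cap) & 1ULL);
}

/* core of gr_task_chroot_is_capable(): 0 = masked off for chrooted tasks, 1 = allowed */
static int chroot_is_capable(int proc_is_chrooted, int cap)
{
	if (proc_is_chrooted && cap_in_mask(EXAMPLE_CHROOT_DENY_MASK, cap))
		return 0;
	return 1;
}

int main(void)
{
	printf("cap 21 in chroot:  %d\n", chroot_is_capable(1, 21));
	printf("cap 0  in chroot:  %d\n", chroot_is_capable(1, 0));
	printf("cap 21 no chroot:  %d\n", chroot_is_capable(0, 21));
	return 0;
}

An unchrooted caller keeps everything; a chrooted caller loses whatever the mask names. The _nolog variants above perform the same test without emitting an alert.
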
diff -ruNp linux-3.13.11/grsecurity/grsec_disabled.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_disabled.c
--- linux-3.13.11/grsecurity/grsec_disabled.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_disabled.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,433 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/sysctl.h>
+
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+void
+pax_set_initial_flags(struct linux_binprm *bprm)
+{
+	return;
+}
+#endif
+
+#ifdef CONFIG_SYSCTL
+__u32
+gr_handle_sysctl(const struct ctl_table * table, const int op)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_TASKSTATS
+int gr_is_taskstats_denied(int pid)
+{
+	return 0;
+}
+#endif
+
+int
+gr_acl_is_enabled(void)
+{
+	return 0;
+}
+
+void
+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode) 
+{
+	return;
+}
+
+int
+gr_handle_rawio(const struct inode *inode)
+{
+	return 0;
+}
+
+void
+gr_acl_handle_psacct(struct task_struct *task, const long code)
+{
+	return;
+}
+
+int
+gr_handle_ptrace(struct task_struct *task, const long request)
+{
+	return 0;
+}
+
+int
+gr_handle_proc_ptrace(struct task_struct *task)
+{
+	return 0;
+}
+
+int
+gr_set_acls(const int type)
+{
+	return 0;
+}
+
+int
+gr_check_hidden_task(const struct task_struct *tsk)
+{
+	return 0;
+}
+
+int
+gr_check_protected_task(const struct task_struct *task)
+{
+	return 0;
+}
+
+int
+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
+{
+	return 0;
+}
+
+void
+gr_copy_label(struct task_struct *tsk)
+{
+	return;
+}
+
+void
+gr_set_pax_flags(struct task_struct *task)
+{
+	return;
+}
+
+int
+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
+		  const int unsafe_share)
+{
+	return 0;
+}
+
+void
+gr_handle_delete(const ino_t ino, const dev_t dev)
+{
+	return;
+}
+
+void
+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
+{
+	return;
+}
+
+void
+gr_handle_crash(struct task_struct *task, const int sig)
+{
+	return;
+}
+
+int
+gr_check_crash_exec(const struct file *filp)
+{
+	return 0;
+}
+
+int
+gr_check_crash_uid(const kuid_t uid)
+{
+	return 0;
+}
+
+void
+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+		 struct dentry *old_dentry,
+		 struct dentry *new_dentry,
+		 struct vfsmount *mnt, const __u8 replace)
+{
+	return;
+}
+
+int
+gr_search_socket(const int family, const int type, const int protocol)
+{
+	return 1;
+}
+
+int
+gr_search_connectbind(const int mode, const struct socket *sock,
+		      const struct sockaddr_in *addr)
+{
+	return 0;
+}
+
+void
+gr_handle_alertkill(struct task_struct *task)
+{
+	return;
+}
+
+__u32
+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_hidden_file(const struct dentry * dentry,
+			  const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
+		   int acc_mode)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+int
+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
+		   unsigned int *vm_flags)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_truncate(const struct dentry * dentry,
+		       const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_access(const struct dentry * dentry,
+		     const struct vfsmount * mnt, const int fmode)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
+		    umode_t *mode)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+void
+grsecurity_init(void)
+{
+	return;
+}
+
+umode_t gr_acl_umask(void)
+{
+	return 0;
+}
+
+__u32
+gr_acl_handle_mknod(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt,
+		    const int mode)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_mkdir(const struct dentry * new_dentry,
+		    const struct dentry * parent_dentry,
+		    const struct vfsmount * parent_mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_symlink(const struct dentry * new_dentry,
+		      const struct dentry * parent_dentry,
+		      const struct vfsmount * parent_mnt, const struct filename *from)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_link(const struct dentry * new_dentry,
+		   const struct dentry * parent_dentry,
+		   const struct vfsmount * parent_mnt,
+		   const struct dentry * old_dentry,
+		   const struct vfsmount * old_mnt, const struct filename *to)
+{
+	return 1;
+}
+
+int
+gr_acl_handle_rename(const struct dentry *new_dentry,
+		     const struct dentry *parent_dentry,
+		     const struct vfsmount *parent_mnt,
+		     const struct dentry *old_dentry,
+		     const struct inode *old_parent_inode,
+		     const struct vfsmount *old_mnt, const struct filename *newname)
+{
+	return 0;
+}
+
+int
+gr_acl_handle_filldir(const struct file *file, const char *name,
+		      const int namelen, const ino_t ino)
+{
+	return 1;
+}
+
+int
+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+		const time_t shm_createtime, const kuid_t cuid, const int shmid)
+{
+	return 1;
+}
+
+int
+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return 0;
+}
+
+int
+gr_search_accept(const struct socket *sock)
+{
+	return 0;
+}
+
+int
+gr_search_listen(const struct socket *sock)
+{
+	return 0;
+}
+
+int
+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
+{
+	return 0;
+}
+
+__u32
+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
+{
+	return 1;
+}
+
+__u32
+gr_acl_handle_creat(const struct dentry * dentry,
+		    const struct dentry * p_dentry,
+		    const struct vfsmount * p_mnt, int open_flags, int acc_mode,
+		    const int imode)
+{
+	return 1;
+}
+
+void
+gr_acl_handle_exit(void)
+{
+	return;
+}
+
+int
+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
+{
+	return 1;
+}
+
+void
+gr_set_role_label(const kuid_t uid, const kgid_t gid)
+{
+	return;
+}
+
+int
+gr_acl_handle_procpidmem(const struct task_struct *task)
+{
+	return 0;
+}
+
+int
+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
+{
+	return 0;
+}
+
+int
+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
+{
+	return 0;
+}
+
+int
+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
+{
+	return 0;
+}
+
+int
+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
+{
+	return 0;
+}
+
+int gr_acl_enable_at_secure(void)
+{
+	return 0;
+}
+
+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
+{
+	return dentry->d_sb->s_dev;
+}
+
+void gr_put_exec_file(struct task_struct *task)
+{
+	return;
+}
+
+#ifdef CONFIG_SECURITY
+EXPORT_SYMBOL_GPL(gr_check_user_change);
+EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
diff -ruNp linux-3.13.11/grsecurity/grsec_exec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_exec.c
--- linux-3.13.11/grsecurity/grsec_exec.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_exec.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,187 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/binfmts.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/grdefs.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+static char gr_exec_arg_buf[132];
+static DEFINE_MUTEX(gr_exec_arg_mutex);
+#endif
+
+struct user_arg_ptr {
+#ifdef CONFIG_COMPAT
+	bool is_compat;
+#endif
+	union {
+		const char __user *const __user *native;
+#ifdef CONFIG_COMPAT
+		const compat_uptr_t __user *compat;
+#endif
+	} ptr;
+};
+
+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
+
+void
+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
+{
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	char *grarg = gr_exec_arg_buf;
+	unsigned int i, x, execlen = 0;
+	char c;
+
+	if (!((grsec_enable_execlog && grsec_enable_group &&
+	       in_group_p(grsec_audit_gid))
+	      || (grsec_enable_execlog && !grsec_enable_group)))
+		return;
+
+	mutex_lock(&gr_exec_arg_mutex);
+	memset(grarg, 0, sizeof(gr_exec_arg_buf));
+
+	for (i = 0; i < bprm->argc && execlen < 128; i++) {
+		const char __user *p;
+		unsigned int len;
+
+		p = get_user_arg_ptr(argv, i);
+		if (IS_ERR(p))
+			goto log;
+
+		len = strnlen_user(p, 128 - execlen);
+		if (len > 128 - execlen)
+			len = 128 - execlen;
+		else if (len > 0)
+			len--;
+		if (copy_from_user(grarg + execlen, p, len))
+			goto log;
+
+		/* rewrite unprintable characters */
+		for (x = 0; x < len; x++) {
+			c = *(grarg + execlen + x);
+			if (c < 32 || c > 126)
+				*(grarg + execlen + x) = ' ';
+		}
+
+		execlen += len;
+		*(grarg + execlen) = ' ';
+		*(grarg + execlen + 1) = '\0';
+		execlen++;
+	}
+
+      log:
+	gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
+			bprm->file->f_path.mnt, grarg);
+	mutex_unlock(&gr_exec_arg_mutex);
+#endif
+	return;
+}
+
+#ifdef CONFIG_GRKERNSEC
+extern int gr_acl_is_capable(const int cap);
+extern int gr_acl_is_capable_nolog(const int cap);
+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
+extern int gr_chroot_is_capable(const int cap);
+extern int gr_chroot_is_capable_nolog(const int cap);
+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
+#endif
+
+const char *captab_log[] = {
+	"CAP_CHOWN",
+	"CAP_DAC_OVERRIDE",
+	"CAP_DAC_READ_SEARCH",
+	"CAP_FOWNER",
+	"CAP_FSETID",
+	"CAP_KILL",
+	"CAP_SETGID",
+	"CAP_SETUID",
+	"CAP_SETPCAP",
+	"CAP_LINUX_IMMUTABLE",
+	"CAP_NET_BIND_SERVICE",
+	"CAP_NET_BROADCAST",
+	"CAP_NET_ADMIN",
+	"CAP_NET_RAW",
+	"CAP_IPC_LOCK",
+	"CAP_IPC_OWNER",
+	"CAP_SYS_MODULE",
+	"CAP_SYS_RAWIO",
+	"CAP_SYS_CHROOT",
+	"CAP_SYS_PTRACE",
+	"CAP_SYS_PACCT",
+	"CAP_SYS_ADMIN",
+	"CAP_SYS_BOOT",
+	"CAP_SYS_NICE",
+	"CAP_SYS_RESOURCE",
+	"CAP_SYS_TIME",
+	"CAP_SYS_TTY_CONFIG",
+	"CAP_MKNOD",
+	"CAP_LEASE",
+	"CAP_AUDIT_WRITE",
+	"CAP_AUDIT_CONTROL",
+	"CAP_SETFCAP",
+	"CAP_MAC_OVERRIDE",
+	"CAP_MAC_ADMIN",
+	"CAP_SYSLOG",
+	"CAP_WAKE_ALARM"
+};
+
+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
+
+int gr_is_capable(const int cap)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
+		return 1;
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
+		return 1;
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+int gr_is_capable_nolog(const int cap)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
+		return 1;
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
+{
+#ifdef CONFIG_GRKERNSEC
+	if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
+		return 1;
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+EXPORT_SYMBOL_GPL(gr_is_capable);
+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
+EXPORT_SYMBOL_GPL(gr_task_is_capable);
+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
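
(Aside on gr_handle_exec_args() above: it flattens the userspace argv into a fixed 132-byte buffer, copying at most ~128 bytes and rewriting unprintable bytes to spaces before logging. A minimal user-space sketch of the same flattening, with invented names and a plain argv[] in place of get_user_arg_ptr()/copy_from_user():)

#include <stdio.h>
#include <string.h>

/* illustrative only: bounded, printable rendering of an argument vector */
static void flatten_args(char *out, size_t outsz, int argc, char **argv)
{
	size_t used = 0;

	memset(out, 0, outsz);
	for (int i = 0; i < argc && used + 2 < outsz; i++) {
		size_t len = strlen(argv[i]);

		if (len > outsz - used - 2)
			len = outsz - used - 2;
		for (size_t x = 0; x < len; x++) {
			char c = argv[i][x];
			out[used + x] = (c < 32 || c > 126) ? ' ' : c;	/* same rewrite as above */
		}
		used += len;
		out[used++] = ' ';	/* separate arguments, as the patch does */
	}
}

int main(int argc, char **argv)
{
	char buf[132];

	flatten_args(buf, sizeof(buf), argc, argv);
	printf("exec: %s\n", buf);
	return 0;
}

Running it with arguments containing control characters shows them replaced by spaces, so the resulting log line stays single-line and printable.
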
diff -ruNp linux-3.13.11/grsecurity/grsec_fifo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_fifo.c
--- linux-3.13.11/grsecurity/grsec_fifo.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_fifo.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,24 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
+	       const struct dentry *dir, const int flag, const int acc_mode)
+{
+#ifdef CONFIG_GRKERNSEC_FIFO
+	const struct cred *cred = current_cred();
+
+	if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
+	    !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
+	    !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
+	    !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
+		if (!inode_permission(dentry->d_inode, acc_mode))
+			gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_fork.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_fork.c
--- linux-3.13.11/grsecurity/grsec_fork.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_fork.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,23 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/errno.h>
+
+void
+gr_log_forkfail(const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
+		switch (retval) {
+			case -EAGAIN:
+				gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
+				break;
+			case -ENOMEM:
+				gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
+				break;
+		}
+	}
+#endif
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_init.c
--- linux-3.13.11/grsecurity/grsec_init.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_init.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,272 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/gracl.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+
+int grsec_enable_ptrace_readexec;
+int grsec_enable_setxid;
+int grsec_enable_symlinkown;
+kgid_t grsec_symlinkown_gid;
+int grsec_enable_brute;
+int grsec_enable_link;
+int grsec_enable_dmesg;
+int grsec_enable_harden_ptrace;
+int grsec_enable_harden_ipc;
+int grsec_enable_fifo;
+int grsec_enable_execlog;
+int grsec_enable_signal;
+int grsec_enable_forkfail;
+int grsec_enable_audit_ptrace;
+int grsec_enable_time;
+int grsec_enable_group;
+kgid_t grsec_audit_gid;
+int grsec_enable_chdir;
+int grsec_enable_mount;
+int grsec_enable_rofs;
+int grsec_deny_new_usb;
+int grsec_enable_chroot_findtask;
+int grsec_enable_chroot_mount;
+int grsec_enable_chroot_shmat;
+int grsec_enable_chroot_fchdir;
+int grsec_enable_chroot_double;
+int grsec_enable_chroot_pivot;
+int grsec_enable_chroot_chdir;
+int grsec_enable_chroot_chmod;
+int grsec_enable_chroot_mknod;
+int grsec_enable_chroot_nice;
+int grsec_enable_chroot_execlog;
+int grsec_enable_chroot_caps;
+int grsec_enable_chroot_sysctl;
+int grsec_enable_chroot_unix;
+int grsec_enable_tpe;
+kgid_t grsec_tpe_gid;
+int grsec_enable_blackhole;
+#ifdef CONFIG_IPV6_MODULE
+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
+int grsec_enable_tpe_invert;
+int grsec_enable_socket_all;
+kgid_t grsec_socket_all_gid;
+int grsec_enable_socket_client;
+kgid_t grsec_socket_client_gid;
+int grsec_enable_socket_server;
+kgid_t grsec_socket_server_gid;
+int grsec_resource_logging;
+int grsec_disable_privio;
+int grsec_enable_log_rwxmaps;
+int grsec_lock;
+
+DEFINE_SPINLOCK(grsec_alert_lock);
+unsigned long grsec_alert_wtime = 0;
+unsigned long grsec_alert_fyet = 0;
+
+DEFINE_SPINLOCK(grsec_audit_lock);
+
+DEFINE_RWLOCK(grsec_exec_file_lock);
+
+char *gr_shared_page[4];
+
+char *gr_alert_log_fmt;
+char *gr_audit_log_fmt;
+char *gr_alert_log_buf;
+char *gr_audit_log_buf;
+
+void __init
+grsecurity_init(void)
+{
+	int j;
+	/* create the per-cpu shared pages */
+
+#ifdef CONFIG_X86
+	memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
+#endif
+
+	for (j = 0; j < 4; j++) {
+		gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
+		if (gr_shared_page[j] == NULL) {
+			panic("Unable to allocate grsecurity shared page");
+			return;
+		}
+	}
+
+	/* allocate log buffers */
+	gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
+	if (!gr_alert_log_fmt) {
+		panic("Unable to allocate grsecurity alert log format buffer");
+		return;
+	}
+	gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
+	if (!gr_audit_log_fmt) {
+		panic("Unable to allocate grsecurity audit log format buffer");
+		return;
+	}
+	gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!gr_alert_log_buf) {
+		panic("Unable to allocate grsecurity alert log buffer");
+		return;
+	}
+	gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
+	if (!gr_audit_log_buf) {
+		panic("Unable to allocate grsecurity audit log buffer");
+		return;
+	}
+
+#ifdef CONFIG_GRKERNSEC_IO
+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
+	grsec_disable_privio = 1;
+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
+	grsec_disable_privio = 1;
+#else
+	grsec_disable_privio = 0;
+#endif
+#endif
+
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
+	/* for backward compatibility, tpe_invert always defaults to on if
+	   enabled in the kernel
+	*/
+	grsec_enable_tpe_invert = 1;
+#endif
+
+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
+#ifndef CONFIG_GRKERNSEC_SYSCTL
+	grsec_lock = 1;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	grsec_enable_log_rwxmaps = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
+	grsec_enable_group = 1;
+	grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
+#endif
+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
+	grsec_enable_ptrace_readexec = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	grsec_enable_chdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	grsec_enable_harden_ptrace = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
+	grsec_enable_harden_ipc = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	grsec_enable_mount = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_LINK
+	grsec_enable_link = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	grsec_enable_brute = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
+	grsec_enable_dmesg = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	grsec_enable_blackhole = 1;
+	grsec_lastack_retries = 4;
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
+	grsec_enable_fifo = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	grsec_enable_execlog = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+	grsec_enable_setxid = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	grsec_enable_signal = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	grsec_enable_forkfail = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
+	grsec_enable_time = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_RESLOG
+	grsec_resource_logging = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	grsec_enable_chroot_findtask = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	grsec_enable_chroot_unix = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	grsec_enable_chroot_mount = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	grsec_enable_chroot_fchdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	grsec_enable_chroot_shmat = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
+	grsec_enable_audit_ptrace = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	grsec_enable_chroot_double = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	grsec_enable_chroot_pivot = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	grsec_enable_chroot_chdir = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	grsec_enable_chroot_chmod = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	grsec_enable_chroot_mknod = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	grsec_enable_chroot_nice = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	grsec_enable_chroot_execlog = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	grsec_enable_chroot_caps = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	grsec_enable_chroot_sysctl = 1;
+#endif
+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
+	grsec_enable_symlinkown = 1;
+	grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
+	grsec_enable_tpe = 1;
+	grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	grsec_enable_tpe_all = 1;
+#endif
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	grsec_enable_socket_all = 1;
+	grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	grsec_enable_socket_client = 1;
+	grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	grsec_enable_socket_server = 1;
+	grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
+#endif
+#endif
+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
+	grsec_deny_new_usb = 1;
+#endif
+
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_ipc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_ipc.c
--- linux-3.13.11/grsecurity/grsec_ipc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_ipc.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/ipc_namespace.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+int
+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
+{
+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
+	int write;
+	int orig_granted_mode;
+	kuid_t euid;
+	kgid_t egid;
+
+	if (!grsec_enable_harden_ipc)
+		return 1;
+
+	euid = current_euid();
+	egid = current_egid();
+
+	write = requested_mode & 00002;
+	orig_granted_mode = ipcp->mode;
+
+	if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
+		orig_granted_mode >>= 6;
+	else {
+		/* if likely wrong permissions, lock to user */
+		if (orig_granted_mode & 0007)
+			orig_granted_mode = 0;
+		/* otherwise do a egid-only check */
+		else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
+			orig_granted_mode >>= 3;
+		/* otherwise, no access */
+		else
+			orig_granted_mode = 0;
+	}
+	if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
+	    !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
+		gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
+		return 0;
+	}
+#endif
+	return 1;
+}
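
(Aside on gr_ipc_permitted() above: the hardening first picks which permission class - owner, group or other - should apply to the caller, and deliberately treats world-accessible objects as misconfigured by granting non-owners nothing. A small sketch of that class selection, with flags standing in for the euid/egid comparisons:)

#include <stdio.h>

/* illustrative only: choose the permission bits that apply to a caller */
static int applicable_bits(int mode, int is_owner, int is_group)
{
	if (is_owner)
		return (mode >> 6) & 07;
	if (mode & 0007)	/* world bits set: treated as likely wrong, lock to owner */
		return 0;
	if (is_group)
		return (mode >> 3) & 07;
	return 0;
}

int main(void)
{
	/* a 0666 segment accessed by a group member who is not the owner */
	printf("granted bits: %o\n", applicable_bits(0666, 0, 1));
	return 0;
}

The caller is then denied only when the request passes the normal DAC check but not this stricter one and CAP_IPC_OWNER is absent, which is what the final if() in the patch expresses.
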
diff -ruNp linux-3.13.11/grsecurity/grsec_link.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_link.c
--- linux-3.13.11/grsecurity/grsec_link.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_link.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,58 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+
+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
+{
+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
+	const struct inode *link_inode = link->dentry->d_inode;
+
+	if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
+	   /* ignore root-owned links, e.g. /proc/self */
+	    gr_is_global_nonroot(link_inode->i_uid) && target &&
+	    !uid_eq(link_inode->i_uid, target->i_uid)) {
+		gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_follow_link(const struct inode *parent,
+		      const struct inode *inode,
+		      const struct dentry *dentry, const struct vfsmount *mnt)
+{
+#ifdef CONFIG_GRKERNSEC_LINK
+	const struct cred *cred = current_cred();
+
+	if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
+	    (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
+	    (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
+		gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_hardlink(const struct dentry *dentry,
+		   const struct vfsmount *mnt,
+		   struct inode *inode, const int mode, const struct filename *to)
+{
+#ifdef CONFIG_GRKERNSEC_LINK
+	const struct cred *cred = current_cred();
+
+	if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
+	    (!S_ISREG(mode) || is_privileged_binary(dentry) || 
+	     (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
+	    !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
+		gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
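
(Aside on gr_handle_hardlink() above: a non-owner may hardlink only a regular, non-privileged file it could already open read/write; owners, CAP_FOWNER holders and root are exempt. A sketch of that decision with plain flags in place of the inode and credential lookups:)

#include <stdio.h>

/* illustrative only: the hardlink policy as a pure predicate */
static int hardlink_allowed(int caller_owns_target, int caller_can_rw_target,
			    int target_is_regular, int target_is_privileged,
			    int caller_has_cap_fowner, int caller_is_root)
{
	if (caller_owns_target || caller_has_cap_fowner || caller_is_root)
		return 1;
	/* non-owners: ordinary files they could open read/write only */
	if (target_is_regular && !target_is_privileged && caller_can_rw_target)
		return 1;
	return 0;
}

int main(void)
{
	printf("link to a setuid binary you can read: %d\n",
	       hardlink_allowed(0, 1, 1, 1, 0, 0));
	printf("link to your own file:                %d\n",
	       hardlink_allowed(1, 1, 1, 0, 0, 0));
	return 0;
}
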
diff -ruNp linux-3.13.11/grsecurity/grsec_log.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_log.c
--- linux-3.13.11/grsecurity/grsec_log.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_log.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,341 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/tty.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/grinternal.h>
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define DISABLE_PREEMPT() preempt_disable()
+#define ENABLE_PREEMPT() preempt_enable()
+#else
+#define DISABLE_PREEMPT()
+#define ENABLE_PREEMPT()
+#endif
+
+#define BEGIN_LOCKS(x) \
+	DISABLE_PREEMPT(); \
+	rcu_read_lock(); \
+	read_lock(&tasklist_lock); \
+	read_lock(&grsec_exec_file_lock); \
+	if (x != GR_DO_AUDIT) \
+		spin_lock(&grsec_alert_lock); \
+	else \
+		spin_lock(&grsec_audit_lock)
+
+#define END_LOCKS(x) \
+	if (x != GR_DO_AUDIT) \
+		spin_unlock(&grsec_alert_lock); \
+	else \
+		spin_unlock(&grsec_audit_lock); \
+	read_unlock(&grsec_exec_file_lock); \
+	read_unlock(&tasklist_lock); \
+	rcu_read_unlock(); \
+	ENABLE_PREEMPT(); \
+	if (x == GR_DONT_AUDIT) \
+		gr_handle_alertkill(current)
+
+enum {
+	FLOODING,
+	NO_FLOODING
+};
+
+extern char *gr_alert_log_fmt;
+extern char *gr_audit_log_fmt;
+extern char *gr_alert_log_buf;
+extern char *gr_audit_log_buf;
+
+static int gr_log_start(int audit)
+{
+	char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
+	char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
+	unsigned long curr_secs = get_seconds();
+
+	if (audit == GR_DO_AUDIT)
+		goto set_fmt;
+
+	if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
+		grsec_alert_wtime = curr_secs;
+		grsec_alert_fyet = 0;
+	} else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
+		    && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
+		grsec_alert_fyet++;
+	} else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
+		grsec_alert_wtime = curr_secs;
+		grsec_alert_fyet++;
+		printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
+		return FLOODING;
+	}
+	else return FLOODING;
+
+set_fmt:
+#endif
+	memset(buf, 0, PAGE_SIZE);
+	if (current->signal->curr_ip && gr_acl_is_enabled()) {
+		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
+		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
+	} else if (current->signal->curr_ip) {
+		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
+		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
+	} else if (gr_acl_is_enabled()) {
+		sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
+		snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
+	} else {
+		sprintf(fmt, "%s%s", loglevel, "grsec: ");
+		strcpy(buf, fmt);
+	}
+
+	return NO_FLOODING;
+}
+
+static void gr_log_middle(int audit, const char *msg, va_list ap)
+	__attribute__ ((format (printf, 2, 0)));
+
+static void gr_log_middle(int audit, const char *msg, va_list ap)
+{
+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
+	unsigned int len = strlen(buf);
+
+	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
+
+	return;
+}
+
+static void gr_log_middle_varargs(int audit, const char *msg, ...)
+	__attribute__ ((format (printf, 2, 3)));
+
+static void gr_log_middle_varargs(int audit, const char *msg, ...)
+{
+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
+	unsigned int len = strlen(buf);
+	va_list ap;
+
+	va_start(ap, msg);
+	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
+	va_end(ap);
+
+	return;
+}
+
+static void gr_log_end(int audit, int append_default)
+{
+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
+	if (append_default) {
+		struct task_struct *task = current;
+		struct task_struct *parent = task->real_parent;
+		const struct cred *cred = __task_cred(task);
+		const struct cred *pcred = __task_cred(parent);
+		unsigned int len = strlen(buf);
+
+		snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
+	}
+
+	printk("%s\n", buf);
+
+	return;
+}
+
+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
+{
+	int logtype;
+	char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
+	char *str1 = NULL, *str2 = NULL, *str3 = NULL;
+	void *voidptr = NULL;
+	int num1 = 0, num2 = 0;
+	unsigned long ulong1 = 0, ulong2 = 0;
+	struct dentry *dentry = NULL;
+	struct vfsmount *mnt = NULL;
+	struct file *file = NULL;
+	struct task_struct *task = NULL;
+	struct vm_area_struct *vma = NULL;
+	const struct cred *cred, *pcred;
+	va_list ap;
+
+	BEGIN_LOCKS(audit);
+	logtype = gr_log_start(audit);
+	if (logtype == FLOODING) {
+		END_LOCKS(audit);
+		return;
+	}
+	va_start(ap, argtypes);
+	switch (argtypes) {
+	case GR_TTYSNIFF:
+		task = va_arg(ap, struct task_struct *);
+		gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
+		break;
+	case GR_SYSCTL_HIDDEN:
+		str1 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, result, str1);
+		break;
+	case GR_RBAC:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
+		break;
+	case GR_RBAC_STR:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		str1 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
+		break;
+	case GR_STR_RBAC:
+		str1 = va_arg(ap, char *);
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
+		break;
+	case GR_RBAC_MODE2:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		str1 = va_arg(ap, char *);
+		str2 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
+		break;
+	case GR_RBAC_MODE3:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		str1 = va_arg(ap, char *);
+		str2 = va_arg(ap, char *);
+		str3 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
+		break;
+	case GR_FILENAME:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
+		break;
+	case GR_STR_FILENAME:
+		str1 = va_arg(ap, char *);
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
+		break;
+	case GR_FILENAME_STR:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		str1 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
+		break;
+	case GR_FILENAME_TWO_INT:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		num1 = va_arg(ap, int);
+		num2 = va_arg(ap, int);
+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
+		break;
+	case GR_FILENAME_TWO_INT_STR:
+		dentry = va_arg(ap, struct dentry *);
+		mnt = va_arg(ap, struct vfsmount *);
+		num1 = va_arg(ap, int);
+		num2 = va_arg(ap, int);
+		str1 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
+		break;
+	case GR_TEXTREL:
+		file = va_arg(ap, struct file *);
+		ulong1 = va_arg(ap, unsigned long);
+		ulong2 = va_arg(ap, unsigned long);
+		gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
+		break;
+	case GR_PTRACE:
+		task = va_arg(ap, struct task_struct *);
+		gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
+		break;
+	case GR_RESOURCE:
+		task = va_arg(ap, struct task_struct *);
+		cred = __task_cred(task);
+		pcred = __task_cred(task->real_parent);
+		ulong1 = va_arg(ap, unsigned long);
+		str1 = va_arg(ap, char *);
+		ulong2 = va_arg(ap, unsigned long);
+		gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
+		break;
+	case GR_CAP:
+		task = va_arg(ap, struct task_struct *);
+		cred = __task_cred(task);
+		pcred = __task_cred(task->real_parent);
+		str1 = va_arg(ap, char *);
+		gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
+		break;
+	case GR_SIG:
+		str1 = va_arg(ap, char *);
+		voidptr = va_arg(ap, void *);
+		gr_log_middle_varargs(audit, msg, str1, voidptr);
+		break;
+	case GR_SIG2:
+		task = va_arg(ap, struct task_struct *);
+		cred = __task_cred(task);
+		pcred = __task_cred(task->real_parent);
+		num1 = va_arg(ap, int);
+		gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
+		break;
+	case GR_CRASH1:
+		task = va_arg(ap, struct task_struct *);
+		cred = __task_cred(task);
+		pcred = __task_cred(task->real_parent);
+		ulong1 = va_arg(ap, unsigned long);
+		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
+		break;
+	case GR_CRASH2:
+		task = va_arg(ap, struct task_struct *);
+		cred = __task_cred(task);
+		pcred = __task_cred(task->real_parent);
+		ulong1 = va_arg(ap, unsigned long);
+		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
+		break;
+	case GR_RWXMAP:
+		file = va_arg(ap, struct file *);
+		gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
+		break;
+	case GR_RWXMAPVMA:
+		vma = va_arg(ap, struct vm_area_struct *);
+		if (vma->vm_file)
+			str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
+		else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
+			str1 = "<stack>";
+		else if (vma->vm_start <= current->mm->brk &&
+			 vma->vm_end >= current->mm->start_brk)
+			str1 = "<heap>";
+		else
+			str1 = "<anonymous mapping>";
+		gr_log_middle_varargs(audit, msg, str1);
+		break;
+	case GR_PSACCT:
+		{
+			unsigned int wday, cday;
+			__u8 whr, chr;
+			__u8 wmin, cmin;
+			__u8 wsec, csec;
+			char cur_tty[64] = { 0 };
+			char parent_tty[64] = { 0 };
+
+			task = va_arg(ap, struct task_struct *);
+			wday = va_arg(ap, unsigned int);
+			cday = va_arg(ap, unsigned int);
+			whr = va_arg(ap, int);
+			chr = va_arg(ap, int);
+			wmin = va_arg(ap, int);
+			cmin = va_arg(ap, int);
+			wsec = va_arg(ap, int);
+			csec = va_arg(ap, int);
+			ulong1 = va_arg(ap, unsigned long);
+			cred = __task_cred(task);
+			pcred = __task_cred(task->real_parent);
+
+			gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
+		}
+		break;
+	default:
+		gr_log_middle(audit, msg, ap);
+	}
+	va_end(ap);
+	// these don't need DEFAULTSECARGS printed on the end
+	if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
+		gr_log_end(audit, 0);
+	else
+		gr_log_end(audit, 1);
+	END_LOCKS(audit);
+}
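
(Aside on the flood control in gr_log_start() above: alerts are allowed in bursts of CONFIG_GRKERNSEC_FLOODBURST per CONFIG_GRKERNSEC_FLOODTIME-second window, then suppressed until a fresh window opens, with a single notice printed when suppression starts. A user-space sketch of that rate limiter, using made-up window and burst values in place of the Kconfig constants:)

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10UL		/* seconds per window (illustrative) */
#define FLOODBURST 6UL		/* messages allowed per window (illustrative) */

static unsigned long wtime, fyet;	/* mirror grsec_alert_wtime / grsec_alert_fyet */

static int log_allowed(unsigned long now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;			/* a new window starts, counter resets */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {		/* still inside the burst budget */
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {		/* first suppressed message: say so once */
		wtime = now;
		fyet++;
		printf("more alerts, logging disabled for %lu seconds\n", FLOODTIME);
	}
	return 0;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);

	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, log_allowed(now) ? "logged" : "dropped");
	return 0;
}

Audit messages (GR_DO_AUDIT) bypass this entirely in the patch; only alerts are rate limited.
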
diff -ruNp linux-3.13.11/grsecurity/grsec_mem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_mem.c
--- linux-3.13.11/grsecurity/grsec_mem.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_mem.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/grinternal.h>
+
+void gr_handle_msr_write(void)
+{
+	gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
+	return;
+}
+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
+
+void
+gr_handle_ioperm(void)
+{
+	gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
+	return;
+}
+
+void
+gr_handle_iopl(void)
+{
+	gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
+	return;
+}
+
+void
+gr_handle_mem_readwrite(u64 from, u64 to)
+{
+	gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
+	return;
+}
+
+void
+gr_handle_vm86(void)
+{
+	gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
+	return;
+}
+
+void
+gr_log_badprocpid(const char *entry)
+{
+	gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_mount.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_mount.c
--- linux-3.13.11/grsecurity/grsec_mount.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_mount.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,65 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mount.h>
+#include <linux/major.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+void
+gr_log_remount(const char *devname, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
+#endif
+	return;
+}
+
+void
+gr_log_unmount(const char *devname, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
+#endif
+	return;
+}
+
+void
+gr_log_mount(const char *from, const char *to, const int retval)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	if (grsec_enable_mount && (retval >= 0))
+		gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
+#endif
+	return;
+}
+
+int
+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
+{
+#ifdef CONFIG_GRKERNSEC_ROFS
+	if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
+		gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
+		return -EPERM;
+	} else
+		return 0;
+#endif
+	return 0;
+}
+
+int
+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
+{
+#ifdef CONFIG_GRKERNSEC_ROFS
+	struct inode *inode = dentry->d_inode;
+
+	if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
+	    inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
+		gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
+		return -EPERM;
+	} else
+		return 0;
+#endif
+	return 0;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_pax.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_pax.c
--- linux-3.13.11/grsecurity/grsec_pax.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_pax.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,45 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/grinternal.h>
+#include <linux/grsecurity.h>
+
+void
+gr_log_textrel(struct vm_area_struct * vma)
+{
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	if (grsec_enable_log_rwxmaps)
+		gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
+#endif
+	return;
+}
+
+void gr_log_ptgnustack(struct file *file)
+{
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	if (grsec_enable_log_rwxmaps)
+		gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
+#endif
+	return;
+}
+
+void
+gr_log_rwxmmap(struct file *file)
+{
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	if (grsec_enable_log_rwxmaps)
+		gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
+#endif
+	return;
+}
+
+void
+gr_log_rwxmprotect(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	if (grsec_enable_log_rwxmaps)
+		gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
+#endif
+	return;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_ptrace.c
--- linux-3.13.11/grsecurity/grsec_ptrace.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,30 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grinternal.h>
+#include <linux/security.h>
+
+void
+gr_audit_ptrace(struct task_struct *task)
+{
+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
+	if (grsec_enable_audit_ptrace)
+		gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
+#endif
+	return;
+}
+
+int
+gr_ptrace_readexec(struct file *file, int unsafe_flags)
+{
+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
+	const struct dentry *dentry = file->f_path.dentry;
+	const struct vfsmount *mnt = file->f_path.mnt;
+
+	if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) && 
+	    (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
+		gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_sig.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sig.c
--- linux-3.13.11/grsecurity/grsec_sig.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sig.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,236 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/hardirq.h>
+
+char *signames[] = {
+	[SIGSEGV] = "Segmentation fault",
+	[SIGILL] = "Illegal instruction",
+	[SIGABRT] = "Abort",
+	[SIGBUS] = "Invalid alignment/Bus error"
+};
+
+void
+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
+{
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
+				    (sig == SIGABRT) || (sig == SIGBUS))) {
+		if (task_pid_nr(t) == task_pid_nr(current)) {
+			gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
+		} else {
+			gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
+		}
+	}
+#endif
+	return;
+}
+
+int
+gr_handle_signal(const struct task_struct *p, const int sig)
+{
+#ifdef CONFIG_GRKERNSEC
+	/* ignore the 0 signal for protected task checks */
+	if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
+		gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
+		return -EPERM;
+	} else if (gr_pid_is_chrooted((struct task_struct *)p)) {
+		return -EPERM;
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_GRKERNSEC
+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
+
+int gr_fake_force_sig(int sig, struct task_struct *t)
+{
+	unsigned long int flags;
+	int ret, blocked, ignored;
+	struct k_sigaction *action;
+
+	spin_lock_irqsave(&t->sighand->siglock, flags);
+	action = &t->sighand->action[sig-1];
+	ignored = action->sa.sa_handler == SIG_IGN;
+	blocked = sigismember(&t->blocked, sig);
+	if (blocked || ignored) {
+		action->sa.sa_handler = SIG_DFL;
+		if (blocked) {
+			sigdelset(&t->blocked, sig);
+			recalc_sigpending_and_wake(t);
+		}
+	}
+	if (action->sa.sa_handler == SIG_DFL)
+		t->signal->flags &= ~SIGNAL_UNKILLABLE;
+	ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
+
+	spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
+	return ret;
+}
+#endif
+
+#define GR_USER_BAN_TIME (15 * 60)
+#define GR_DAEMON_BRUTE_TIME (30 * 60)
+
+void gr_handle_brute_attach(int dumpable)
+{
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	struct task_struct *p = current;
+	kuid_t uid = GLOBAL_ROOT_UID;
+	int daemon = 0;
+
+	if (!grsec_enable_brute)
+		return;
+
+	rcu_read_lock();
+	read_lock(&tasklist_lock);
+	read_lock(&grsec_exec_file_lock);
+	if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
+		p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
+		p->real_parent->brute = 1;
+		daemon = 1;
+	} else {
+		const struct cred *cred = __task_cred(p), *cred2;
+		struct task_struct *tsk, *tsk2;
+
+		if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
+			struct user_struct *user;
+
+			uid = cred->uid;
+
+			/* this is put upon execution past expiration */
+			user = find_user(uid);
+			if (user == NULL)
+				goto unlock;
+			user->suid_banned = 1;
+			user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
+			if (user->suid_ban_expires == ~0UL)
+				user->suid_ban_expires--;
+
+			/* only kill other threads of the same binary, from the same user */
+			do_each_thread(tsk2, tsk) {
+				cred2 = __task_cred(tsk);
+				if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
+					gr_fake_force_sig(SIGKILL, tsk);
+			} while_each_thread(tsk2, tsk);
+		}
+	}
+unlock:
+	read_unlock(&grsec_exec_file_lock);
+	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
+
+	if (gr_is_global_nonroot(uid))
+		gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
+	else if (daemon)
+		gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
+
+#endif
+	return;
+}
+
+void gr_handle_brute_check(void)
+{
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	struct task_struct *p = current;
+
+	if (unlikely(p->brute)) {
+		if (!grsec_enable_brute)
+			p->brute = 0;
+		else if (time_before(get_seconds(), p->brute_expires))
+			msleep(30 * 1000);
+	}
+#endif
+	return;
+}
+
+void gr_handle_kernel_exploit(void)
+{
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+	const struct cred *cred;
+	struct task_struct *tsk, *tsk2;
+	struct user_struct *user;
+	kuid_t uid;
+
+	if (in_irq() || in_serving_softirq() || in_nmi())
+		panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
+
+	uid = current_uid();
+
+	if (gr_is_global_root(uid))
+		panic("grsec: halting the system due to suspicious kernel crash caused by root");
+	else {
+		/* kill all the processes of this user, hold a reference
+		   to their creds struct, and prevent them from creating
+		   another process until system reset
+		*/
+		printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
+			GR_GLOBAL_UID(uid));
+		/* we intentionally leak this ref */
+		user = get_uid(current->cred->user);
+		if (user)
+			user->kernel_banned = 1;
+
+		/* kill all processes of this user */
+		read_lock(&tasklist_lock);
+		do_each_thread(tsk2, tsk) {
+			cred = __task_cred(tsk);
+			if (uid_eq(cred->uid, uid))
+				gr_fake_force_sig(SIGKILL, tsk);
+		} while_each_thread(tsk2, tsk);
+		read_unlock(&tasklist_lock); 
+	}
+#endif
+}
+
+#ifdef CONFIG_GRKERNSEC_BRUTE
+static bool suid_ban_expired(struct user_struct *user)
+{
+	if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
+		user->suid_banned = 0;
+		user->suid_ban_expires = 0;
+		free_uid(user);
+		return true;
+	}
+
+	return false;
+}
+#endif
+
+int gr_process_kernel_exec_ban(void)
+{
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+	if (unlikely(current->cred->user->kernel_banned))
+		return -EPERM;
+#endif
+	return 0;
+}
+
+int gr_process_kernel_setuid_ban(struct user_struct *user)
+{
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+	if (unlikely(user->kernel_banned))
+		gr_fake_force_sig(SIGKILL, current);
+#endif
+	return 0;
+}
+
+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
+{
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	struct user_struct *user = current->cred->user;
+	if (unlikely(user->suid_banned)) {
+		if (suid_ban_expired(user))
+			return 0;
+		/* disallow execution of suid binaries only */
+		else if (!uid_eq(bprm->cred->euid, current->cred->uid))
+			return -EPERM;
+	}
+#endif
+	return 0;
+}
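
(Aside on the suid brute-force ban in grsec_sig.c above: a banned user is refused suid execs for GR_USER_BAN_TIME seconds, ~0UL is reserved to mean "never expires" so the setter steps around it, and the expiry check clears the ban lazily on the next attempt. A sketch of that timed-ban bookkeeping, with an invented struct in place of user_struct:)

#include <stdio.h>
#include <time.h>

#define BAN_SECONDS (15 * 60)	/* GR_USER_BAN_TIME in the patch */

struct fake_user {		/* illustrative stand-in for user_struct */
	int banned;
	unsigned long ban_expires;
};

static void ban(struct fake_user *u, unsigned long now)
{
	u->banned = 1;
	u->ban_expires = now + BAN_SECONDS;
	if (u->ban_expires == ~0UL)	/* ~0UL means permanent; avoid creating one by accident */
		u->ban_expires--;
}

static int ban_expired(struct fake_user *u, unsigned long now)
{
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = 0;
		u->ban_expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fake_user u = { 0, 0 };
	unsigned long now = (unsigned long)time(NULL);

	ban(&u, now);
	printf("just banned, expired?    %d\n", ban_expired(&u, now));
	printf("16 minutes on, expired?  %d\n", ban_expired(&u, now + 16 * 60));
	return 0;
}

The kernel-side version additionally drops a uid reference (free_uid) when the ban lapses, because gr_handle_brute_attach() took one via find_user() when the ban was imposed.
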
diff -ruNp linux-3.13.11/grsecurity/grsec_sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sock.c
--- linux-3.13.11/grsecurity/grsec_sock.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,244 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+#include <linux/gracl.h>
+
+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
+
+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
+
+#ifdef CONFIG_UNIX_MODULE
+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
+EXPORT_SYMBOL_GPL(gr_handle_create);
+#endif
+
+#ifdef CONFIG_GRKERNSEC
+#define gr_conn_table_size 32749
+struct conn_table_entry {
+	struct conn_table_entry *next;
+	struct signal_struct *sig;
+};
+
+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
+DEFINE_SPINLOCK(gr_conn_table_lock);
+
+extern const char * gr_socktype_to_name(unsigned char type);
+extern const char * gr_proto_to_name(unsigned char proto);
+extern const char * gr_sockfamily_to_name(unsigned char family);
+
+static __inline__ int 
+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
+{
+	return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
+}
+
+static __inline__ int
+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, 
+	   __u16 sport, __u16 dport)
+{
+	if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
+		     sig->gr_sport == sport && sig->gr_dport == dport))
+		return 1;
+	else
+		return 0;
+}
+
+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
+{
+	struct conn_table_entry **match;
+	unsigned int index;
+
+	index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
+			  sig->gr_sport, sig->gr_dport, 
+			  gr_conn_table_size);
+
+	newent->sig = sig;
+	
+	match = &gr_conn_table[index];
+	newent->next = *match;
+	*match = newent;
+
+	return;
+}
+
+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
+{
+	struct conn_table_entry *match, *last = NULL;
+	unsigned int index;
+
+	index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
+			  sig->gr_sport, sig->gr_dport, 
+			  gr_conn_table_size);
+
+	match = gr_conn_table[index];
+	while (match && !conn_match(match->sig, 
+		sig->gr_saddr, sig->gr_daddr, sig->gr_sport, 
+		sig->gr_dport)) {
+		last = match;
+		match = match->next;
+	}
+
+	if (match) {
+		if (last)
+			last->next = match->next;
+		else
+			gr_conn_table[index] = NULL;
+		kfree(match);
+	}
+
+	return;
+}
+
+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
+					     __u16 sport, __u16 dport)
+{
+	struct conn_table_entry *match;
+	unsigned int index;
+
+	index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
+
+	match = gr_conn_table[index];
+	while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
+		match = match->next;
+
+	if (match)
+		return match->sig;
+	else
+		return NULL;
+}
+
+#endif
+
+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
+{
+#ifdef CONFIG_GRKERNSEC
+	struct signal_struct *sig = task->signal;
+	struct conn_table_entry *newent;
+
+	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
+	if (newent == NULL)
+		return;
+	/* no bh lock needed since we are called with bh disabled */
+	spin_lock(&gr_conn_table_lock);
+	gr_del_task_from_ip_table_nolock(sig);
+	sig->gr_saddr = inet->inet_rcv_saddr;
+	sig->gr_daddr = inet->inet_daddr;
+	sig->gr_sport = inet->inet_sport;
+	sig->gr_dport = inet->inet_dport;
+	gr_add_to_task_ip_table_nolock(sig, newent);
+	spin_unlock(&gr_conn_table_lock);
+#endif
+	return;
+}
+
+void gr_del_task_from_ip_table(struct task_struct *task)
+{
+#ifdef CONFIG_GRKERNSEC
+	spin_lock_bh(&gr_conn_table_lock);
+	gr_del_task_from_ip_table_nolock(task->signal);
+	spin_unlock_bh(&gr_conn_table_lock);
+#endif
+	return;
+}
+
+void
+gr_attach_curr_ip(const struct sock *sk)
+{
+#ifdef CONFIG_GRKERNSEC
+	struct signal_struct *p, *set;
+	const struct inet_sock *inet = inet_sk(sk);	
+
+	if (unlikely(sk->sk_protocol != IPPROTO_TCP))
+		return;
+
+	set = current->signal;
+
+	spin_lock_bh(&gr_conn_table_lock);
+	p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
+				    inet->inet_dport, inet->inet_sport);
+	if (unlikely(p != NULL)) {
+		set->curr_ip = p->curr_ip;
+		set->used_accept = 1;
+		gr_del_task_from_ip_table_nolock(p);
+		spin_unlock_bh(&gr_conn_table_lock);
+		return;
+	}
+	spin_unlock_bh(&gr_conn_table_lock);
+
+	set->curr_ip = inet->inet_daddr;
+	set->used_accept = 1;
+#endif
+	return;
+}
+
+int
+gr_handle_sock_all(const int family, const int type, const int protocol)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
+	    (family != AF_UNIX)) {
+		if (family == AF_INET)
+			gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
+		else
+			gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_sock_server(const struct sockaddr *sck)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	if (grsec_enable_socket_server &&
+	    in_group_p(grsec_socket_server_gid) &&
+	    sck && (sck->sa_family != AF_UNIX) &&
+	    (sck->sa_family != AF_LOCAL)) {
+		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_sock_server_other(const struct sock *sck)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	if (grsec_enable_socket_server &&
+	    in_group_p(grsec_socket_server_gid) &&
+	    sck && (sck->sk_family != AF_UNIX) &&
+	    (sck->sk_family != AF_LOCAL)) {
+		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+int
+gr_handle_sock_client(const struct sockaddr *sck)
+{
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
+	    sck && (sck->sa_family != AF_UNIX) &&
+	    (sck->sa_family != AF_LOCAL)) {
+		gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sysctl.c
--- linux-3.13.11/grsecurity/grsec_sysctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,479 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/grsecurity.h>
+#include <linux/grinternal.h>
+
+int
+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
+{
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+	if (dirname == NULL || name == NULL)
+		return 0;
+	if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
+		gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
+		return -EACCES;
+	}
+#endif
+	return 0;
+}
+
+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
+static int __maybe_unused __read_only one = 1;
+#endif
+
+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
+	defined(CONFIG_GRKERNSEC_DENYUSB)
+struct ctl_table grsecurity_table[] = {
+#ifdef CONFIG_GRKERNSEC_SYSCTL
+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
+#ifdef CONFIG_GRKERNSEC_IO
+	{
+		.procname	= "disable_priv_io",
+		.data		= &grsec_disable_privio,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#endif
+#ifdef CONFIG_GRKERNSEC_LINK
+	{
+		.procname	= "linking_restrictions",
+		.data		= &grsec_enable_link,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
+	{
+		.procname	= "enforce_symlinksifowner",
+		.data		= &grsec_enable_symlinkown,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "symlinkown_gid",
+		.data		= &grsec_symlinkown_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	{
+		.procname	= "deter_bruteforce",
+		.data		= &grsec_enable_brute,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
+	{
+		.procname	= "fifo_restrictions",
+		.data		= &grsec_enable_fifo,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
+	{
+		.procname	= "ptrace_readexec",
+		.data		= &grsec_enable_ptrace_readexec,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+	{
+		.procname	= "consistent_setxid",
+		.data		= &grsec_enable_setxid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	{
+		.procname	= "ip_blackhole",
+		.data		= &grsec_enable_blackhole,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "lastack_retries",
+		.data		= &grsec_lastack_retries,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
+	{
+		.procname	= "exec_logging",
+		.data		= &grsec_enable_execlog,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+	{
+		.procname	= "rwxmap_logging",
+		.data		= &grsec_enable_log_rwxmaps,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
+	{
+		.procname	= "signal_logging",
+		.data		= &grsec_enable_signal,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
+	{
+		.procname	= "forkfail_logging",
+		.data		= &grsec_enable_forkfail,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
+	{
+		.procname	= "timechange_logging",
+		.data		= &grsec_enable_time,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
+	{
+		.procname	= "chroot_deny_shmat",
+		.data		= &grsec_enable_chroot_shmat,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+	{
+		.procname	= "chroot_deny_unix",
+		.data		= &grsec_enable_chroot_unix,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
+	{
+		.procname	= "chroot_deny_mount",
+		.data		= &grsec_enable_chroot_mount,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
+	{
+		.procname	= "chroot_deny_fchdir",
+		.data		= &grsec_enable_chroot_fchdir,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
+	{
+		.procname	= "chroot_deny_chroot",
+		.data		= &grsec_enable_chroot_double,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
+	{
+		.procname	= "chroot_deny_pivot",
+		.data		= &grsec_enable_chroot_pivot,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
+	{
+		.procname	= "chroot_enforce_chdir",
+		.data		= &grsec_enable_chroot_chdir,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
+	{
+		.procname	= "chroot_deny_chmod",
+		.data		= &grsec_enable_chroot_chmod,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
+	{
+		.procname	= "chroot_deny_mknod",
+		.data		= &grsec_enable_chroot_mknod,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
+	{
+		.procname	= "chroot_restrict_nice",
+		.data		= &grsec_enable_chroot_nice,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
+	{
+		.procname	= "chroot_execlog",
+		.data		= &grsec_enable_chroot_execlog,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
+	{
+		.procname	= "chroot_caps",
+		.data		= &grsec_enable_chroot_caps,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
+	{
+		.procname	= "chroot_deny_sysctl",
+		.data		= &grsec_enable_chroot_sysctl,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
+	{
+		.procname	= "tpe",
+		.data		= &grsec_enable_tpe,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "tpe_gid",
+		.data		= &grsec_tpe_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
+	{
+		.procname	= "tpe_invert",
+		.data		= &grsec_enable_tpe_invert,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	{
+		.procname	= "tpe_restrict_all",
+		.data		= &grsec_enable_tpe_all,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
+	{
+		.procname	= "socket_all",
+		.data		= &grsec_enable_socket_all,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "socket_all_gid",
+		.data		= &grsec_socket_all_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
+	{
+		.procname	= "socket_client",
+		.data		= &grsec_enable_socket_client,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "socket_client_gid",
+		.data		= &grsec_socket_client_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
+	{
+		.procname	= "socket_server",
+		.data		= &grsec_enable_socket_server,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "socket_server_gid",
+		.data		= &grsec_socket_server_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
+	{
+		.procname	= "audit_group",
+		.data		= &grsec_enable_group,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "audit_gid",
+		.data		= &grsec_audit_gid,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
+	{
+		.procname	= "audit_chdir",
+		.data		= &grsec_enable_chdir,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
+	{
+		.procname	= "audit_mount",
+		.data		= &grsec_enable_mount,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
+	{
+		.procname	= "dmesg",
+		.data		= &grsec_enable_dmesg,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	{
+		.procname	= "chroot_findtask",
+		.data		= &grsec_enable_chroot_findtask,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_RESLOG
+	{
+		.procname	= "resource_logging",
+		.data		= &grsec_resource_logging,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
+	{
+		.procname	= "audit_ptrace",
+		.data		= &grsec_enable_audit_ptrace,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
+	{
+		.procname	= "harden_ptrace",
+		.data		= &grsec_enable_harden_ptrace,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
+	{
+		.procname	= "harden_ipc",
+		.data		= &grsec_enable_harden_ipc,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+	{
+		.procname	= "grsec_lock",
+		.data		= &grsec_lock,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+#ifdef CONFIG_GRKERNSEC_ROFS
+	{
+		.procname	= "romount_protect",
+		.data		= &grsec_enable_rofs,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &one,
+	},
+#endif
+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
+	{
+		.procname	= "deny_new_usb",
+		.data		= &grsec_deny_new_usb,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+	{ }
+};
+#endif
diff -ruNp linux-3.13.11/grsecurity/grsec_time.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_time.c
--- linux-3.13.11/grsecurity/grsec_time.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_time.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,16 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/grinternal.h>
+#include <linux/module.h>
+
+void
+gr_log_timechange(void)
+{
+#ifdef CONFIG_GRKERNSEC_TIME
+	if (grsec_enable_time)
+		gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
+#endif
+	return;
+}
+
+EXPORT_SYMBOL_GPL(gr_log_timechange);
diff -ruNp linux-3.13.11/grsecurity/grsec_tpe.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_tpe.c
--- linux-3.13.11/grsecurity/grsec_tpe.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_tpe.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,73 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/grinternal.h>
+
+extern int gr_acl_tpe_check(void);
+
+int
+gr_tpe_allow(const struct file *file)
+{
+#ifdef CONFIG_GRKERNSEC
+	struct inode *inode = file->f_path.dentry->d_parent->d_inode;
+	const struct cred *cred = current_cred();
+	char *msg = NULL;
+	char *msg2 = NULL;
+
+	// never restrict root
+	if (gr_is_global_root(cred->uid))
+		return 1;
+
+	if (grsec_enable_tpe) {
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
+		if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
+			msg = "not being in trusted group";
+		else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
+			msg = "being in untrusted group";
+#else
+		if (in_group_p(grsec_tpe_gid))
+			msg = "being in untrusted group";
+#endif
+	}
+	if (!msg && gr_acl_tpe_check())
+		msg = "being in untrusted role";
+
+	// not in any affected group/role
+	if (!msg)
+		goto next_check;
+
+	if (gr_is_global_nonroot(inode->i_uid))
+		msg2 = "file in non-root-owned directory";
+	else if (inode->i_mode & S_IWOTH)
+		msg2 = "file in world-writable directory";
+	else if (inode->i_mode & S_IWGRP)
+		msg2 = "file in group-writable directory";
+
+	if (msg && msg2) {
+		char fullmsg[70] = {0};
+		snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
+		gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
+		return 0;
+	}
+	msg = NULL;
+next_check:
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+	if (!grsec_enable_tpe || !grsec_enable_tpe_all)
+		return 1;
+
+	if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
+		msg = "directory not owned by user";
+	else if (inode->i_mode & S_IWOTH)
+		msg = "file in world-writable directory";
+	else if (inode->i_mode & S_IWGRP)
+		msg = "file in group-writable directory";
+
+	if (msg) {
+		gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
+		return 0;
+	}
+#endif
+#endif
+	return 1;
+}
diff -ruNp linux-3.13.11/grsecurity/grsec_usb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_usb.c
--- linux-3.13.11/grsecurity/grsec_usb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsec_usb.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,15 @@
+#include <linux/kernel.h>
+#include <linux/grinternal.h>
+#include <linux/module.h>
+
+int gr_handle_new_usb(void)
+{
+#ifdef CONFIG_GRKERNSEC_DENYUSB
+	if (grsec_deny_new_usb) {
+		printk(KERN_ALERT "grsec: denied insert of new USB device\n");
+		return 1;
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
diff -ruNp linux-3.13.11/grsecurity/grsum.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsum.c
--- linux-3.13.11/grsecurity/grsum.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/grsecurity/grsum.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,61 @@
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/gracl.h>
+
+
+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
+#error "crypto and sha256 must be built into the kernel"
+#endif
+
+int
+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
+{
+	char *p;
+	struct crypto_hash *tfm;
+	struct hash_desc desc;
+	struct scatterlist sg;
+	unsigned char temp_sum[GR_SHA_LEN];
+	volatile int retval = 0;
+	volatile int dummy = 0;
+	unsigned int i;
+
+	sg_init_table(&sg, 1);
+
+	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		/* should never happen, since sha256 should be built in */
+		return 1;
+	}
+
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	crypto_hash_init(&desc);
+
+	p = salt;
+	sg_set_buf(&sg, p, GR_SALT_LEN);
+	crypto_hash_update(&desc, &sg, sg.length);
+
+	p = entry->pw;
+	sg_set_buf(&sg, p, strlen(p));
+	
+	crypto_hash_update(&desc, &sg, sg.length);
+
+	crypto_hash_final(&desc, temp_sum);
+
+	memset(entry->pw, 0, GR_PW_LEN);
+
+	for (i = 0; i < GR_SHA_LEN; i++)
+		if (sum[i] != temp_sum[i])
+			retval = 1;
+		else
+			dummy = 1;	// waste a cycle
+
+	crypto_free_hash(tfm);
+
+	return retval;
+}
diff -ruNp linux-3.13.11/include/asm-generic/4level-fixup.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/4level-fixup.h
--- linux-3.13.11/include/asm-generic/4level-fixup.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/4level-fixup.h	2014-07-09 12:00:15.000000000 +0200
@@ -13,8 +13,10 @@
 #define pmd_alloc(mm, pud, address) \
 	((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
  		NULL: pmd_offset(pud, address))
+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
 
 #define pud_alloc(mm, pgd, address)	(pgd)
+#define pud_alloc_kernel(mm, pgd, address)	pud_alloc((mm), (pgd), (address))
 #define pud_offset(pgd, start)		(pgd)
 #define pud_none(pud)			0
 #define pud_bad(pud)			0
diff -ruNp linux-3.13.11/include/asm-generic/atomic-long.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic-long.h
--- linux-3.13.11/include/asm-generic/atomic-long.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic-long.h	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,12 @@
 
 typedef atomic64_t atomic_long_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef atomic64_unchecked_t atomic_long_unchecked_t;
+#else
+typedef atomic64_t atomic_long_unchecked_t;
+#endif
+
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
 
 static inline long atomic_long_read(atomic_long_t *l)
@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
 	return (long)atomic64_read(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	return (long)atomic64_read_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_set(atomic_long_t *l, long i)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
 	atomic64_set(v, i);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	atomic64_set_unchecked(v, i);
+}
+#endif
+
 static inline void atomic_long_inc(atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
 	atomic64_inc(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	atomic64_inc_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_dec(atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
 	atomic64_dec(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	atomic64_dec_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
 	atomic64_add(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	atomic64_add_unchecked(i, v);
+}
+#endif
+
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
 	atomic64_sub(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	atomic64_sub_unchecked(i, v);
+}
+#endif
+
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -94,13 +154,22 @@ static inline int atomic_long_add_negati
 	return atomic64_add_negative(i, v);
 }
 
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
 
 	return (long)atomic64_add_return(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	return (long)atomic64_add_return_unchecked(i, v);
+}
+#endif
+
 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -115,6 +184,15 @@ static inline long atomic_long_inc_retur
 	return (long)atomic64_inc_return(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
+
+	return (long)atomic64_inc_return_unchecked(v);
+}
+#endif
+
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
 	atomic64_t *v = (atomic64_t *)l;
@@ -140,6 +218,12 @@ static inline long atomic_long_add_unles
 
 typedef atomic_t atomic_long_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef atomic_unchecked_t atomic_long_unchecked_t;
+#else
+typedef atomic_t atomic_long_unchecked_t;
+#endif
+
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
 static inline long atomic_long_read(atomic_long_t *l)
 {
@@ -148,6 +232,15 @@ static inline long atomic_long_read(atom
 	return (long)atomic_read(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	return (long)atomic_read_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_set(atomic_long_t *l, long i)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomi
 	atomic_set(v, i);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	atomic_set_unchecked(v, i);
+}
+#endif
+
 static inline void atomic_long_inc(atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomi
 	atomic_inc(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	atomic_inc_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_dec(atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomi
 	atomic_dec(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	atomic_dec_unchecked(v);
+}
+#endif
+
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -176,6 +296,15 @@ static inline void atomic_long_add(long
 	atomic_add(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	atomic_add_unchecked(i, v);
+}
+#endif
+
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long
 	atomic_sub(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	atomic_sub_unchecked(i, v);
+}
+#endif
+
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -218,6 +356,16 @@ static inline long atomic_long_add_retur
 	return (long)atomic_add_return(i, v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	return (long)atomic_add_return_unchecked(i, v);
+}
+
+#endif
+
 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -232,6 +380,15 @@ static inline long atomic_long_inc_retur
 	return (long)atomic_inc_return(v);
 }
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
+{
+	atomic_unchecked_t *v = (atomic_unchecked_t *)l;
+
+	return (long)atomic_inc_return_unchecked(v);
+}
+#endif
+
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
 	atomic_t *v = (atomic_t *)l;
@@ -255,4 +412,57 @@ static inline long atomic_long_add_unles
 
 #endif  /*  BITS_PER_LONG == 64  */
 
+#ifdef CONFIG_PAX_REFCOUNT
+static inline void pax_refcount_needs_these_functions(void)
+{
+	atomic_read_unchecked((atomic_unchecked_t *)NULL);
+	atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
+	atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
+	atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
+	atomic_inc_unchecked((atomic_unchecked_t *)NULL);
+	(void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
+	atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
+	atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
+	atomic_dec_unchecked((atomic_unchecked_t *)NULL);
+	atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
+	(void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
+#ifdef CONFIG_X86
+	atomic_clear_mask_unchecked(0, NULL);
+	atomic_set_mask_unchecked(0, NULL);
+#endif
+
+	atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
+	atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
+	atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
+	atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
+	atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
+	atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
+	atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
+	atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
+}
+#else
+#define atomic_read_unchecked(v) atomic_read(v)
+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
+#define atomic_inc_unchecked(v) atomic_inc(v)
+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
+#define atomic_dec_unchecked(v) atomic_dec(v)
+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
+
+#define atomic_long_read_unchecked(v) atomic_long_read(v)
+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
+#endif
+
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
diff -ruNp linux-3.13.11/include/asm-generic/atomic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic.h
--- linux-3.13.11/include/asm-generic/atomic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic.h	2014-07-09 12:00:15.000000000 +0200
@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(at
  * Atomically clears the bits set in @mask from @v
  */
 #ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
 
diff -ruNp linux-3.13.11/include/asm-generic/atomic64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic64.h
--- linux-3.13.11/include/asm-generic/atomic64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/atomic64.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,8 @@ typedef struct {
 	long long counter;
 } atomic64_t;
 
+typedef atomic64_t atomic64_unchecked_t;
+
 #define ATOMIC64_INIT(i)	{ (i) }
 
 extern long long atomic64_read(const atomic64_t *v);
@@ -39,4 +41,14 @@ extern int	 atomic64_add_unless(atomic64
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) 	atomic64_add_unless((v), 1LL, 0LL)
 
+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
diff -ruNp linux-3.13.11/include/asm-generic/bitops/__fls.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/__fls.h
--- linux-3.13.11/include/asm-generic/bitops/__fls.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/__fls.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,7 +9,7 @@
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
 {
 	int num = BITS_PER_LONG - 1;
 
diff -ruNp linux-3.13.11/include/asm-generic/bitops/fls.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/fls.h
--- linux-3.13.11/include/asm-generic/bitops/fls.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/fls.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,7 +9,7 @@
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 
-static __always_inline int fls(int x)
+static __always_inline int __intentional_overflow(-1) fls(int x)
 {
 	int r = 32;
 
diff -ruNp linux-3.13.11/include/asm-generic/bitops/fls64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/fls64.h
--- linux-3.13.11/include/asm-generic/bitops/fls64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/bitops/fls64.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,7 +15,7 @@
  * at position 64.
  */
 #if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
 {
 	__u32 h = x >> 32;
 	if (h)
@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x
 	return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
 {
 	if (x == 0)
 		return 0;
diff -ruNp linux-3.13.11/include/asm-generic/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/cache.h
--- linux-3.13.11/include/asm-generic/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -6,7 +6,7 @@
  * cache lines need to provide their own cache.h.
  */
 
-#define L1_CACHE_SHIFT		5
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT		5UL
+#define L1_CACHE_BYTES		(1UL << L1_CACHE_SHIFT)
 
 #endif /* __ASM_GENERIC_CACHE_H */
diff -ruNp linux-3.13.11/include/asm-generic/emergency-restart.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/emergency-restart.h
--- linux-3.13.11/include/asm-generic/emergency-restart.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/emergency-restart.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,7 +1,7 @@
 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
 #define _ASM_GENERIC_EMERGENCY_RESTART_H
 
-static inline void machine_emergency_restart(void)
+static inline __noreturn void machine_emergency_restart(void)
 {
 	machine_restart(NULL);
 }
diff -ruNp linux-3.13.11/include/asm-generic/kmap_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/kmap_types.h
--- linux-3.13.11/include/asm-generic/kmap_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/kmap_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -2,9 +2,9 @@
 #define _ASM_GENERIC_KMAP_TYPES_H
 
 #ifdef __WITH_KM_FENCE
-# define KM_TYPE_NR 41
+# define KM_TYPE_NR 42
 #else
-# define KM_TYPE_NR 20
+# define KM_TYPE_NR 21
 #endif
 
 #endif
diff -ruNp linux-3.13.11/include/asm-generic/local.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/local.h
--- linux-3.13.11/include/asm-generic/local.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/local.h	2014-07-09 12:00:15.000000000 +0200
@@ -23,24 +23,37 @@ typedef struct
 	atomic_long_t a;
 } local_t;
 
+typedef struct {
+	atomic_long_unchecked_t a;
+} local_unchecked_t;
+
 #define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
 #define local_read(l)	atomic_long_read(&(l)->a)
+#define local_read_unchecked(l)	atomic_long_read_unchecked(&(l)->a)
 #define local_set(l,i)	atomic_long_set((&(l)->a),(i))
+#define local_set_unchecked(l,i)	atomic_long_set_unchecked((&(l)->a),(i))
 #define local_inc(l)	atomic_long_inc(&(l)->a)
+#define local_inc_unchecked(l)	atomic_long_inc_unchecked(&(l)->a)
 #define local_dec(l)	atomic_long_dec(&(l)->a)
+#define local_dec_unchecked(l)	atomic_long_dec_unchecked(&(l)->a)
 #define local_add(i,l)	atomic_long_add((i),(&(l)->a))
+#define local_add_unchecked(i,l)	atomic_long_add_unchecked((i),(&(l)->a))
 #define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
+#define local_sub_unchecked(i,l)	atomic_long_sub_unchecked((i),(&(l)->a))
 
 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
diff -ruNp linux-3.13.11/include/asm-generic/pgtable-nopmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable-nopmd.h
--- linux-3.13.11/include/asm-generic/pgtable-nopmd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable-nopmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,14 +1,19 @@
 #ifndef _PGTABLE_NOPMD_H
 #define _PGTABLE_NOPMD_H
 
-#ifndef __ASSEMBLY__
-
 #include <asm-generic/pgtable-nopud.h>
 
-struct mm_struct;
-
 #define __PAGETABLE_PMD_FOLDED
 
+#define PMD_SHIFT	PUD_SHIFT
+#define PTRS_PER_PMD	1
+#define PMD_SIZE  	(_AC(1,UL) << PMD_SHIFT)
+#define PMD_MASK  	(~(PMD_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
  * us to conceptually access the pud entry that this pmd is folded into
@@ -16,11 +21,6 @@ struct mm_struct;
  */
 typedef struct { pud_t pud; } pmd_t;
 
-#define PMD_SHIFT	PUD_SHIFT
-#define PTRS_PER_PMD	1
-#define PMD_SIZE  	(1UL << PMD_SHIFT)
-#define PMD_MASK  	(~(PMD_SIZE-1))
-
 /*
  * The "pud_xxx()" functions here are trivial for a folded two-level
  * setup: the pmd is never bad, and a pmd always exists (as it's folded
diff -ruNp linux-3.13.11/include/asm-generic/pgtable-nopud.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable-nopud.h
--- linux-3.13.11/include/asm-generic/pgtable-nopud.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable-nopud.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,10 +1,15 @@
 #ifndef _PGTABLE_NOPUD_H
 #define _PGTABLE_NOPUD_H
 
-#ifndef __ASSEMBLY__
-
 #define __PAGETABLE_PUD_FOLDED
 
+#define PUD_SHIFT	PGDIR_SHIFT
+#define PTRS_PER_PUD	1
+#define PUD_SIZE  	(_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK  	(~(PUD_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
  * us to conceptually access the pgd entry that this pud is folded into
@@ -12,11 +17,6 @@
  */
 typedef struct { pgd_t pgd; } pud_t;
 
-#define PUD_SHIFT	PGDIR_SHIFT
-#define PTRS_PER_PUD	1
-#define PUD_SIZE  	(1UL << PUD_SHIFT)
-#define PUD_MASK  	(~(PUD_SIZE-1))
-
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
  * setup: the pud is never bad, and a pud always exists (as it's folded
@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd)
 #define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
 
 #define pgd_populate(mm, pgd, pud)		do { } while (0)
+#define pgd_populate_kernel(mm, pgd, pud)	do { } while (0)
 /*
  * (puds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
diff -ruNp linux-3.13.11/include/asm-generic/pgtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable.h
--- linux-3.13.11/include/asm-generic/pgtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/pgtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -748,6 +748,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires pax_open_kernel
+#else
+static inline unsigned long pax_open_kernel(void) { return 0; }
+#endif
+#endif
+
+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires pax_close_kernel
+#else
+static inline unsigned long pax_close_kernel(void) { return 0; }
+#endif
+#endif
+
 #endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
diff -ruNp linux-3.13.11/include/asm-generic/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/uaccess.h
--- linux-3.13.11/include/asm-generic/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned lon
 	return __clear_user(to, n);
 }
 
+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#error UDEREF requires pax_open_userland
+#else
+static inline unsigned long pax_open_userland(void) { return 0; }
+#endif
+#endif
+
+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#error UDEREF requires pax_close_userland
+#else
+static inline unsigned long pax_close_userland(void) { return 0; }
+#endif
+#endif
+
 #endif /* __ASM_GENERIC_UACCESS_H */
diff -ruNp linux-3.13.11/include/asm-generic/vmlinux.lds.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/vmlinux.lds.h
--- linux-3.13.11/include/asm-generic/vmlinux.lds.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/asm-generic/vmlinux.lds.h	2014-07-09 12:00:15.000000000 +0200
@@ -232,6 +232,7 @@
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
+		*(.data..read_only)					\
 		*(__vermagic)		/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
@@ -716,17 +717,18 @@
  * section in the linker script will go there too.  @phdr should have
  * a leading colon.
  *
- * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * Note that this macros defines per_cpu_load as an absolute symbol.
  * If there is no need to put the percpu section at a predetermined
  * address, use PERCPU_SECTION.
  */
 #define PERCPU_VADDR(cacheline, vaddr, phdr)				\
-	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
-	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
+	per_cpu_load = .;						\
+	.data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load)		\
 				- LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load;	\
 		PERCPU_INPUT(cacheline)					\
 	} phdr								\
-	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
+	. = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
  * PERCPU_SECTION - define output section for percpu area, simple version
diff -ruNp linux-3.13.11/include/crypto/algapi.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/crypto/algapi.h
--- linux-3.13.11/include/crypto/algapi.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/crypto/algapi.h	2014-07-09 12:00:15.000000000 +0200
@@ -34,7 +34,7 @@ struct crypto_type {
 	unsigned int maskclear;
 	unsigned int maskset;
 	unsigned int tfmsize;
-};
+} __do_const;
 
 struct crypto_instance {
 	struct crypto_alg alg;
diff -ruNp linux-3.13.11/include/drm/drmP.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/drmP.h
--- linux-3.13.11/include/drm/drmP.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/drmP.h	2014-07-09 12:00:15.000000000 +0200
@@ -66,6 +66,7 @@
 #include <linux/workqueue.h>
 #include <linux/poll.h>
 #include <asm/pgalloc.h>
+#include <asm/local.h>
 #include <drm/drm.h>
 #include <drm/drm_sarea.h>
 #include <drm/drm_vma_manager.h>
@@ -278,10 +279,12 @@ do {										\
  * \param cmd command.
  * \param arg argument.
  */
-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 
-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
 			       unsigned long arg);
 
 #define DRM_IOCTL_NR(n)                _IOC_NR(n)
@@ -297,10 +300,10 @@ typedef int drm_ioctl_compat_t(struct fi
 struct drm_ioctl_desc {
 	unsigned int cmd;
 	int flags;
-	drm_ioctl_t *func;
+	drm_ioctl_t func;
 	unsigned int cmd_drv;
 	const char *name;
-};
+} __do_const;
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
@@ -1013,7 +1016,8 @@ struct drm_info_list {
 	int (*show)(struct seq_file*, void*); /** show callback */
 	u32 driver_features; /**< Required driver features for this entry */
 	void *data;
-};
+} __do_const;
+typedef struct drm_info_list __no_const drm_info_list_no_const;
 
 /**
  * debugfs node structure. This structure represents a debugfs file.
@@ -1097,7 +1101,7 @@ struct drm_device {
 
 	/** \name Usage Counters */
 	/*@{ */
-	int open_count;			/**< Outstanding files open */
+	local_t open_count;		/**< Outstanding files open */
 	atomic_t ioctl_count;		/**< Outstanding IOCTLs pending */
 	atomic_t vma_count;		/**< Outstanding vma areas open */
 	int buf_use;			/**< Buffers in use -- cannot alloc */
diff -ruNp linux-3.13.11/include/drm/drm_crtc_helper.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/drm_crtc_helper.h
--- linux-3.13.11/include/drm/drm_crtc_helper.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/drm_crtc_helper.h	2014-07-09 12:00:15.000000000 +0200
@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
 					    struct drm_connector *connector);
 	/* disable encoder when not in use - more explicit than dpms off */
 	void (*disable)(struct drm_encoder *encoder);
-};
+} __no_const;
 
 /**
  * drm_connector_helper_funcs - helper operations for connectors
diff -ruNp linux-3.13.11/include/drm/i915_pciids.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/i915_pciids.h
--- linux-3.13.11/include/drm/i915_pciids.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/i915_pciids.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@
  */
 #define INTEL_VGA_DEVICE(id, info) {		\
 	0x8086,	id,				\
-	~0, ~0,					\
+	PCI_ANY_ID, PCI_ANY_ID,			\
 	0x030000, 0xff0000,			\
 	(unsigned long) info }
 
diff -ruNp linux-3.13.11/include/drm/ttm/ttm_memory.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/ttm/ttm_memory.h
--- linux-3.13.11/include/drm/ttm/ttm_memory.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/ttm/ttm_memory.h	2014-07-09 12:00:15.000000000 +0200
@@ -48,7 +48,7 @@
 
 struct ttm_mem_shrink {
 	int (*do_shrink) (struct ttm_mem_shrink *);
-};
+} __no_const;
 
 /**
  * struct ttm_mem_global - Global memory accounting structure.
diff -ruNp linux-3.13.11/include/drm/ttm/ttm_page_alloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/ttm/ttm_page_alloc.h
--- linux-3.13.11/include/drm/ttm/ttm_page_alloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/drm/ttm/ttm_page_alloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -78,6 +78,7 @@ void ttm_dma_page_alloc_fini(void);
  */
 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
+struct device;
 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
diff -ruNp linux-3.13.11/include/keys/asymmetric-subtype.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/keys/asymmetric-subtype.h
--- linux-3.13.11/include/keys/asymmetric-subtype.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/keys/asymmetric-subtype.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
 	/* Verify the signature on a key of this subtype (optional) */
 	int (*verify_signature)(const struct key *key,
 				const struct public_key_signature *sig);
-};
+} __do_const;
 
 /**
  * asymmetric_key_subtype - Get the subtype from an asymmetric key
diff -ruNp linux-3.13.11/include/linux/atmdev.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/atmdev.h
--- linux-3.13.11/include/linux/atmdev.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/atmdev.h	2014-07-09 12:00:15.000000000 +0200
@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
 #endif
 
 struct k_atm_aal_stats {
-#define __HANDLE_ITEM(i) atomic_t i
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
 	__AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 };
@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is requ
 	int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
 	int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
 	struct module *owner;
-};
+} __do_const ;
 
 struct atmphy_ops {
 	int (*start)(struct atm_dev *dev);
diff -ruNp linux-3.13.11/include/linux/audit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/audit.h
--- linux-3.13.11/include/linux/audit.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/audit.h	2014-07-09 12:00:15.000000000 +0200
@@ -195,7 +195,7 @@ static inline void audit_ptrace(struct t
 extern unsigned int audit_serial(void);
 extern int auditsc_get_stamp(struct audit_context *ctx,
 			      struct timespec *t, unsigned int *serial);
-extern int audit_set_loginuid(kuid_t loginuid);
+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
 
 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
 {
diff -ruNp linux-3.13.11/include/linux/binfmts.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/binfmts.h
--- linux-3.13.11/include/linux/binfmts.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/binfmts.h	2014-07-09 12:00:15.000000000 +0200
@@ -45,7 +45,7 @@ struct linux_binprm {
 	unsigned interp_data;
 	unsigned long loader, exec;
 	char tcomm[TASK_COMM_LEN];
-};
+} __randomize_layout;
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -74,8 +74,10 @@ struct linux_binfmt {
 	int (*load_binary)(struct linux_binprm *);
 	int (*load_shlib)(struct file *);
 	int (*core_dump)(struct coredump_params *cprm);
+	void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+	void (*handle_mmap)(struct file *);
 	unsigned long min_coredump;	/* minimal dump size */
-};
+} __do_const __randomize_layout;
 
 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
 
diff -ruNp linux-3.13.11/include/linux/bitops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/bitops.h
--- linux-3.13.11/include/linux/bitops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/bitops.h	2014-07-09 12:00:15.000000000 +0200
@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, un
  * @word: value to rotate
  * @shift: bits to roll
  */
-static inline __u32 rol32(__u32 word, unsigned int shift)
+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
 {
 	return (word << shift) | (word >> (32 - shift));
 }
@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, un
  * @word: value to rotate
  * @shift: bits to roll
  */
-static inline __u32 ror32(__u32 word, unsigned int shift)
+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
 {
 	return (word >> shift) | (word << (32 - shift));
 }
@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32
 	return (__s32)(value << shift) >> shift;
 }
 
-static inline unsigned fls_long(unsigned long l)
+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
 {
 	if (sizeof(l) == 4)
 		return fls(l);
diff -ruNp linux-3.13.11/include/linux/blkdev.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/blkdev.h
--- linux-3.13.11/include/linux/blkdev.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/blkdev.h	2014-07-09 12:00:15.000000000 +0200
@@ -1578,7 +1578,7 @@ struct block_device_operations {
 	/* this callback is with swap_lock and sometimes page table lock held */
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 	struct module *owner;
-};
+} __do_const;
 
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 				 unsigned long);
diff -ruNp linux-3.13.11/include/linux/blktrace_api.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/blktrace_api.h
--- linux-3.13.11/include/linux/blktrace_api.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/blktrace_api.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,7 @@ struct blk_trace {
 	struct dentry *dropped_file;
 	struct dentry *msg_file;
 	struct list_head running_list;
-	atomic_t dropped;
+	atomic_unchecked_t dropped;
 };
 
 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
diff -ruNp linux-3.13.11/include/linux/cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cache.h
--- linux-3.13.11/include/linux/cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,14 @@
 #define __read_mostly
 #endif
 
+#ifndef __read_only
+#ifdef CONFIG_PAX_KERNEXEC
+#error KERNEXEC requires __read_only
+#else
+#define __read_only __read_mostly
+#endif
+#endif
+
 #ifndef ____cacheline_aligned
 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
 #endif
diff -ruNp linux-3.13.11/include/linux/capability.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/capability.h
--- linux-3.13.11/include/linux/capability.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/capability.h	2014-07-09 12:00:15.000000000 +0200
@@ -212,8 +212,13 @@ extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool inode_capable(const struct inode *inode, int cap);
 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+extern bool capable_nolog(int cap);
+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
+extern bool inode_capable_nolog(const struct inode *inode, int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
 
+extern int is_privileged_binary(const struct dentry *dentry);
+
 #endif /* !_LINUX_CAPABILITY_H */
diff -ruNp linux-3.13.11/include/linux/cdrom.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cdrom.h
--- linux-3.13.11/include/linux/cdrom.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cdrom.h	2014-07-09 12:00:15.000000000 +0200
@@ -87,7 +87,6 @@ struct cdrom_device_ops {
 
 /* driver specifications */
 	const int capability;   /* capability flags */
-	int n_minors;           /* number of active minor devices */
 	/* handle uniform packets for scsi type devices (scsi,atapi) */
 	int (*generic_packet) (struct cdrom_device_info *,
 			       struct packet_command *);
diff -ruNp linux-3.13.11/include/linux/cleancache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cleancache.h
--- linux-3.13.11/include/linux/cleancache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cleancache.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,7 +31,7 @@ struct cleancache_ops {
 	void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
 	void (*invalidate_inode)(int, struct cleancache_filekey);
 	void (*invalidate_fs)(int);
-};
+} __no_const;
 
 extern struct cleancache_ops *
 	cleancache_register_ops(struct cleancache_ops *ops);
diff -ruNp linux-3.13.11/include/linux/clk-provider.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/clk-provider.h
--- linux-3.13.11/include/linux/clk-provider.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/clk-provider.h	2014-07-09 12:00:15.000000000 +0200
@@ -141,6 +141,7 @@ struct clk_ops {
 				    unsigned long);
 	void		(*init)(struct clk_hw *hw);
 };
+typedef struct clk_ops __no_const clk_ops_no_const;
 
 /**
  * struct clk_init_data - holds init data that's common to all clocks and is
diff -ruNp linux-3.13.11/include/linux/compat.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compat.h
--- linux-3.13.11/include/linux/compat.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compat.h	2014-07-09 12:00:15.000000000 +0200
@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, comp
 			   compat_size_t __user *len_ptr);
 
 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
 		compat_ssize_t msgsz, int msgflg);
@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct
 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			       compat_ulong_t addr, compat_ulong_t data);
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-				  compat_long_t addr, compat_long_t data);
+				  compat_ulong_t addr, compat_ulong_t data);
 
 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
 /*
diff -ruNp linux-3.13.11/include/linux/compiler-gcc4.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compiler-gcc4.h
--- linux-3.13.11/include/linux/compiler-gcc4.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compiler-gcc4.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,9 +39,34 @@
 # define __compiletime_warning(message) __attribute__((warning(message)))
 # define __compiletime_error(message) __attribute__((error(message)))
 #endif /* __CHECKER__ */
+
+#define __alloc_size(...)	__attribute((alloc_size(__VA_ARGS__)))
+#define __bos(ptr, arg)		__builtin_object_size((ptr), (arg))
+#define __bos0(ptr)		__bos((ptr), 0)
+#define __bos1(ptr)		__bos((ptr), 1)
 #endif /* GCC_VERSION >= 40300 */
 
 #if GCC_VERSION >= 40500
+
+#ifdef RANDSTRUCT_PLUGIN
+#define __randomize_layout __attribute__((randomize_layout))
+#define __no_randomize_layout __attribute__((no_randomize_layout))
+#endif
+
+#ifdef CONSTIFY_PLUGIN
+#define __no_const __attribute__((no_const))
+#define __do_const __attribute__((do_const))
+#endif
+
+#ifdef SIZE_OVERFLOW_PLUGIN
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
+#endif
+
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
diff -ruNp linux-3.13.11/include/linux/compiler.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compiler.h
--- linux-3.13.11/include/linux/compiler.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/compiler.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,11 +5,14 @@
 
 #ifdef __CHECKER__
 # define __user		__attribute__((noderef, address_space(1)))
+# define __force_user	__force __user
 # define __kernel	__attribute__((address_space(0)))
+# define __force_kernel	__force __kernel
 # define __safe		__attribute__((safe))
 # define __force	__attribute__((force))
 # define __nocast	__attribute__((nocast))
 # define __iomem	__attribute__((noderef, address_space(2)))
+# define __force_iomem	__force __iomem
 # define __must_hold(x)	__attribute__((context(x,1,1)))
 # define __acquires(x)	__attribute__((context(x,0,1)))
 # define __releases(x)	__attribute__((context(x,1,0)))
@@ -17,20 +20,37 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+# define __force_percpu	__force __percpu
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
+# define __force_rcu	__force __rcu
 #else
 # define __rcu
+# define __force_rcu
 #endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
-# define __user
-# define __kernel
+# ifdef CHECKER_PLUGIN
+//#  define __user
+//#  define __force_user
+//#  define __kernel
+//#  define __force_kernel
+# else
+#  ifdef STRUCTLEAK_PLUGIN
+#   define __user __attribute__((user))
+#  else
+#   define __user
+#  endif
+#  define __force_user
+#  define __kernel
+#  define __force_kernel
+# endif
 # define __safe
 # define __force
 # define __nocast
 # define __iomem
+# define __force_iomem
 # define __chk_user_ptr(x) (void)0
 # define __chk_io_ptr(x) (void)0
 # define __builtin_warning(x, y...) (1)
@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
 # define __percpu
+# define __force_percpu
 # define __rcu
+# define __force_rcu
 #endif
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -275,6 +297,34 @@ void ftrace_likely_update(struct ftrace_
 # define __attribute_const__	/* unimplemented */
 #endif
 
+#ifndef __randomize_layout
+# define __randomize_layout
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef __no_const
+# define __no_const
+#endif
+
+#ifndef __do_const
+# define __do_const
+#endif
+
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
+
+#ifndef __intentional_overflow
+# define __intentional_overflow(...)
+#endif
+
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
@@ -284,6 +334,22 @@ void ftrace_likely_update(struct ftrace_
 #define __cold
 #endif
 
+#ifndef __alloc_size
+#define __alloc_size(...)
+#endif
+
+#ifndef __bos
+#define __bos(ptr, arg)
+#endif
+
+#ifndef __bos0
+#define __bos0(ptr)
+#endif
+
+#ifndef __bos1
+#define __bos1(ptr)
+#endif
+
 /* Simple shorthand for a section definition */
 #ifndef __section
 # define __section(S) __attribute__ ((__section__(#S)))
@@ -349,7 +415,8 @@ void ftrace_likely_update(struct ftrace_
  * use is to mediate communication between process-level code and irq/NMI
  * handlers, all running on the same CPU.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
diff -ruNp linux-3.13.11/include/linux/completion.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/completion.h
--- linux-3.13.11/include/linux/completion.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/completion.h	2014-07-09 12:00:15.000000000 +0200
@@ -90,16 +90,16 @@ static inline void reinit_completion(str
 
 extern void wait_for_completion(struct completion *);
 extern void wait_for_completion_io(struct completion *);
-extern int wait_for_completion_interruptible(struct completion *x);
-extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
 extern unsigned long wait_for_completion_timeout(struct completion *x,
-						   unsigned long timeout);
+						   unsigned long timeout) __intentional_overflow(-1);
 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
-						    unsigned long timeout);
+						    unsigned long timeout) __intentional_overflow(-1);
 extern long wait_for_completion_interruptible_timeout(
-	struct completion *x, unsigned long timeout);
+	struct completion *x, unsigned long timeout) __intentional_overflow(-1);
 extern long wait_for_completion_killable_timeout(
-	struct completion *x, unsigned long timeout);
+	struct completion *x, unsigned long timeout) __intentional_overflow(-1);
 extern bool try_wait_for_completion(struct completion *x);
 extern bool completion_done(struct completion *x);
 
diff -ruNp linux-3.13.11/include/linux/configfs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/configfs.h
--- linux-3.13.11/include/linux/configfs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/configfs.h	2014-07-09 12:00:15.000000000 +0200
@@ -125,7 +125,7 @@ struct configfs_attribute {
 	const char		*ca_name;
 	struct module 		*ca_owner;
 	umode_t			ca_mode;
-};
+} __do_const;
 
 /*
  * Users often need to create attribute structures for their configurable
diff -ruNp linux-3.13.11/include/linux/cpufreq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpufreq.h
--- linux-3.13.11/include/linux/cpufreq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpufreq.h	2014-07-09 12:00:15.000000000 +0200
@@ -189,6 +189,7 @@ struct global_attr {
 	ssize_t (*store)(struct kobject *a, struct attribute *b,
 			 const char *c, size_t count);
 };
+typedef struct global_attr __no_const global_attr_no_const;
 
 #define define_one_global_ro(_name)		\
 static struct global_attr _name =		\
@@ -225,7 +226,7 @@ struct cpufreq_driver {
 	int	(*suspend)	(struct cpufreq_policy *policy);
 	int	(*resume)	(struct cpufreq_policy *policy);
 	struct freq_attr	**attr;
-};
+} __do_const;
 
 /* flags */
 #define CPUFREQ_STICKY		(1 << 0)	/* driver isn't removed even if
diff -ruNp linux-3.13.11/include/linux/cpuidle.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpuidle.h
--- linux-3.13.11/include/linux/cpuidle.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpuidle.h	2014-07-09 12:00:15.000000000 +0200
@@ -50,7 +50,8 @@ struct cpuidle_state {
 			int index);
 
 	int (*enter_dead) (struct cpuidle_device *dev, int index);
-};
+} __do_const;
+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
@@ -192,7 +193,7 @@ struct cpuidle_governor {
 	void (*reflect)		(struct cpuidle_device *dev, int index);
 
 	struct module 		*owner;
-};
+} __do_const;
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
diff -ruNp linux-3.13.11/include/linux/cpumask.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpumask.h
--- linux-3.13.11/include/linux/cpumask.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cpumask.h	2014-07-09 12:00:15.000000000 +0200
@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first
 }
 
 /* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
 {
 	return n+1;
 }
 
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
 {
 	return n+1;
 }
 
-static inline unsigned int cpumask_next_and(int n,
+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
 					    const struct cpumask *srcp,
 					    const struct cpumask *andp)
 {
@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first
  *
  * Returns >= nr_cpu_ids if no further cpus set.
  */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
 {
 	/* -1 is a legal arg here. */
 	if (n != -1)
@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(
  *
  * Returns >= nr_cpu_ids if no further cpus unset.
  */
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
 {
 	/* -1 is a legal arg here. */
 	if (n != -1)
@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_
 	return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
 }
 
-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 
 /**
diff -ruNp linux-3.13.11/include/linux/cred.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cred.h
--- linux-3.13.11/include/linux/cred.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/cred.h	2014-07-09 12:00:15.000000000 +0200
@@ -35,7 +35,7 @@ struct group_info {
 	int		nblocks;
 	kgid_t		small_block[NGROUPS_SMALL];
 	kgid_t		*blocks[0];
-};
+} __randomize_layout;
 
 /**
  * get_group_info - Get a reference to a group info structure
@@ -136,13 +136,14 @@ struct cred {
 	struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
 	struct group_info *group_info;	/* supplementary groups for euid/fsgid */
 	struct rcu_head	rcu;		/* RCU deletion hook */
-};
+} __randomize_layout;
 
 extern void __put_cred(struct cred *);
 extern void exit_creds(struct task_struct *);
 extern int copy_creds(struct task_struct *, unsigned long);
 extern const struct cred *get_task_cred(struct task_struct *);
 extern struct cred *cred_alloc_blank(void);
+extern struct cred *__prepare_creds(const struct cred *);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
 extern int commit_creds(struct cred *);
@@ -194,7 +195,35 @@ static inline void validate_creds_for_do
 static inline void validate_process_creds(void)
 {
 }
+static inline void validate_task_creds(struct task_struct *task)
+{
+}
+#endif
+
+static inline void set_cred_subscribers(struct cred *cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	atomic_set(&cred->subscribers, n);
 #endif
+}
+
+static inline int read_cred_subscribers(const struct cred *cred)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	return atomic_read(&cred->subscribers);
+#else
+	return 0;
+#endif
+}
+
+static inline void alter_cred_subscribers(const struct cred *_cred, int n)
+{
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	struct cred *cred = (struct cred *) _cred;
+
+	atomic_add(n, &cred->subscribers);
+#endif
+}
 
 /**
  * get_new_cred - Get a reference on a new set of credentials
diff -ruNp linux-3.13.11/include/linux/crypto.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/crypto.h
--- linux-3.13.11/include/linux/crypto.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/crypto.h	2014-07-09 12:00:15.000000000 +0200
@@ -373,7 +373,7 @@ struct cipher_tfm {
 	                  const u8 *key, unsigned int keylen);
 	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
+} __no_const;
 
 struct hash_tfm {
 	int (*init)(struct hash_desc *desc);
@@ -394,13 +394,13 @@ struct compress_tfm {
 	int (*cot_decompress)(struct crypto_tfm *tfm,
 	                      const u8 *src, unsigned int slen,
 	                      u8 *dst, unsigned int *dlen);
-};
+} __no_const;
 
 struct rng_tfm {
 	int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
 			      unsigned int dlen);
 	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
-};
+} __no_const;
 
 #define crt_ablkcipher	crt_u.ablkcipher
 #define crt_aead	crt_u.aead
diff -ruNp linux-3.13.11/include/linux/ctype.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ctype.h
--- linux-3.13.11/include/linux/ctype.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ctype.h	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ static inline unsigned char __toupper(un
  * Fast implementation of tolower() for internal usage. Do not use in your
  * code.
  */
-static inline char _tolower(const char c)
+static inline unsigned char _tolower(const unsigned char c)
 {
 	return c | 0x20;
 }
diff -ruNp linux-3.13.11/include/linux/dcache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dcache.h
--- linux-3.13.11/include/linux/dcache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dcache.h	2014-07-09 12:00:15.000000000 +0200
@@ -133,7 +133,7 @@ struct dentry {
 	} d_u;
 	struct list_head d_subdirs;	/* our children */
 	struct hlist_node d_alias;	/* inode alias list */
-};
+} __randomize_layout;
 
 /*
  * dentry->d_lock spinlock nesting subclasses:
diff -ruNp linux-3.13.11/include/linux/decompress/mm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/decompress/mm.h
--- linux-3.13.11/include/linux/decompress/mm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/decompress/mm.h	2014-07-09 12:00:15.000000000 +0200
@@ -77,7 +77,7 @@ static void free(void *where)
  * warnings when not needed (indeed large_malloc / large_free are not
  * needed by inflate */
 
-#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define malloc(a) kmalloc((a), GFP_KERNEL)
 #define free(a) kfree(a)
 
 #define large_malloc(a) vmalloc(a)
diff -ruNp linux-3.13.11/include/linux/devfreq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/devfreq.h
--- linux-3.13.11/include/linux/devfreq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/devfreq.h	2014-07-09 12:00:15.000000000 +0200
@@ -114,7 +114,7 @@ struct devfreq_governor {
 	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
 	int (*event_handler)(struct devfreq *devfreq,
 				unsigned int event, void *data);
-};
+} __do_const;
 
 /**
  * struct devfreq - Device devfreq structure
diff -ruNp linux-3.13.11/include/linux/device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/device.h
--- linux-3.13.11/include/linux/device.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/device.h	2014-07-09 12:00:15.000000000 +0200
@@ -310,7 +310,7 @@ struct subsys_interface {
 	struct list_head node;
 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
 	int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
-};
+} __do_const;
 
 int subsys_interface_register(struct subsys_interface *sif);
 void subsys_interface_unregister(struct subsys_interface *sif);
@@ -506,7 +506,7 @@ struct device_type {
 	void (*release)(struct device *dev);
 
 	const struct dev_pm_ops *pm;
-};
+} __do_const;
 
 /* interface for exporting device attributes */
 struct device_attribute {
@@ -516,11 +516,12 @@ struct device_attribute {
 	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
 			 const char *buf, size_t count);
 };
+typedef struct device_attribute __no_const device_attribute_no_const;
 
 struct dev_ext_attribute {
 	struct device_attribute attr;
 	void *var;
-};
+} __do_const;
 
 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
 			  char *buf);
diff -ruNp linux-3.13.11/include/linux/devpts_fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/devpts_fs.h
--- linux-3.13.11/include/linux/devpts_fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/devpts_fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -45,5 +45,4 @@ static inline void devpts_pty_kill(struc
 
 #endif
 
-
 #endif /* _LINUX_DEVPTS_FS_H */
diff -ruNp linux-3.13.11/include/linux/dma-mapping.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dma-mapping.h
--- linux-3.13.11/include/linux/dma-mapping.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dma-mapping.h	2014-07-09 12:00:15.000000000 +0200
@@ -54,7 +54,7 @@ struct dma_map_ops {
 	u64 (*get_required_mask)(struct device *dev);
 #endif
 	int is_phys;
-};
+} __do_const;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 
diff -ruNp linux-3.13.11/include/linux/dmaengine.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dmaengine.h
--- linux-3.13.11/include/linux/dmaengine.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/dmaengine.h	2014-07-09 12:00:15.000000000 +0200
@@ -1114,9 +1114,9 @@ struct dma_pinned_list {
 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
 
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
 	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
 	struct dma_pinned_list *pinned_list, struct page *page,
 	unsigned int offset, size_t len);
 
diff -ruNp linux-3.13.11/include/linux/efi.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/efi.h
--- linux-3.13.11/include/linux/efi.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/efi.h	2014-07-09 12:00:15.000000000 +0200
@@ -764,6 +764,7 @@ struct efivar_operations {
 	efi_set_variable_t *set_variable;
 	efi_query_variable_store_t *query_variable_store;
 };
+typedef struct efivar_operations __no_const efivar_operations_no_const;
 
 struct efivars {
 	/*
diff -ruNp linux-3.13.11/include/linux/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/elf.h
--- linux-3.13.11/include/linux/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
 #define elf_note	elf32_note
 #define elf_addr_t	Elf32_Off
 #define Elf_Half	Elf32_Half
+#define elf_dyn		Elf32_Dyn
 
 #else
 
@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
 #define elf_note	elf64_note
 #define elf_addr_t	Elf64_Off
 #define Elf_Half	Elf64_Half
+#define elf_dyn		Elf64_Dyn
 
 #endif
 
diff -ruNp linux-3.13.11/include/linux/err.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/err.h
--- linux-3.13.11/include/linux/err.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/err.h	2014-07-09 12:00:15.000000000 +0200
@@ -19,12 +19,12 @@
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
-static inline void * __must_check ERR_PTR(long error)
+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
 {
 	return (void *) error;
 }
 
-static inline long __must_check PTR_ERR(__force const void *ptr)
+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
 {
 	return (long) ptr;
 }
diff -ruNp linux-3.13.11/include/linux/extcon.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/extcon.h
--- linux-3.13.11/include/linux/extcon.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/extcon.h	2014-07-09 12:00:15.000000000 +0200
@@ -135,7 +135,7 @@ struct extcon_dev {
 	/* /sys/class/extcon/.../mutually_exclusive/... */
 	struct attribute_group attr_g_muex;
 	struct attribute **attrs_muex;
-	struct device_attribute *d_attrs_muex;
+	device_attribute_no_const *d_attrs_muex;
 };
 
 /**
diff -ruNp linux-3.13.11/include/linux/fb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fb.h
--- linux-3.13.11/include/linux/fb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fb.h	2014-07-09 12:00:15.000000000 +0200
@@ -304,7 +304,7 @@ struct fb_ops {
 	/* called at KDB enter and leave time to prepare the console */
 	int (*fb_debug_enter)(struct fb_info *info);
 	int (*fb_debug_leave)(struct fb_info *info);
-};
+} __do_const;
 
 #ifdef CONFIG_FB_TILEBLITTING
 #define FB_TILE_CURSOR_NONE        0
diff -ruNp linux-3.13.11/include/linux/fdtable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fdtable.h
--- linux-3.13.11/include/linux/fdtable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fdtable.h	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ struct files_struct *get_files_struct(st
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
 		int (*)(const void *, struct file *, unsigned),
diff -ruNp linux-3.13.11/include/linux/frontswap.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/frontswap.h
--- linux-3.13.11/include/linux/frontswap.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/frontswap.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@ struct frontswap_ops {
 	int (*load)(unsigned, pgoff_t, struct page *);
 	void (*invalidate_page)(unsigned, pgoff_t);
 	void (*invalidate_area)(unsigned);
-};
+} __no_const;
 
 extern bool frontswap_enabled;
 extern struct frontswap_ops *
diff -ruNp linux-3.13.11/include/linux/fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fs.h
--- linux-3.13.11/include/linux/fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -213,6 +213,7 @@ typedef void (dio_iodone_t)(struct kiocb
 #define ATTR_KILL_PRIV	(1 << 14)
 #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
 #define ATTR_TIMES_SET	(1 << 16)
+#define ATTR_TAG	(1 << 17)
 
 /*
  * This is the Inode Attributes structure, used for notify_change().  It
@@ -228,6 +229,7 @@ struct iattr {
 	umode_t		ia_mode;
 	kuid_t		ia_uid;
 	kgid_t		ia_gid;
+	ktag_t		ia_tag;
 	loff_t		ia_size;
 	struct timespec	ia_atime;
 	struct timespec	ia_mtime;
@@ -423,7 +425,7 @@ struct address_space {
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	void			*private_data;	/* ditto */
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
 	 * On most architectures that alignment is already the case; but
 	 * must be enforced here for CRIS, to let the least significant bit
@@ -466,7 +468,7 @@ struct block_device {
 	int			bd_fsfreeze_count;
 	/* Mutex for freeze */
 	struct mutex		bd_fsfreeze_mutex;
-};
+} __randomize_layout;
 
 /*
  * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -526,7 +528,9 @@ struct inode {
 	unsigned short		i_opflags;
 	kuid_t			i_uid;
 	kgid_t			i_gid;
-	unsigned int		i_flags;
+	ktag_t			i_tag;
+	unsigned short		i_flags;
+	unsigned short		i_vflags;
 
 #ifdef CONFIG_FS_POSIX_ACL
 	struct posix_acl	*i_acl;
@@ -555,6 +559,7 @@ struct inode {
 		unsigned int __i_nlink;
 	};
 	dev_t			i_rdev;
+	dev_t			i_mdev;
 	loff_t			i_size;
 	struct timespec		i_atime;
 	struct timespec		i_mtime;
@@ -610,7 +615,7 @@ struct inode {
 	atomic_t		i_readcount; /* struct files open RO */
 #endif
 	void			*i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
 
 static inline int inode_unhashed(struct inode *inode)
 {
@@ -713,6 +718,11 @@ static inline gid_t i_gid_read(const str
 	return from_kgid(&init_user_ns, inode->i_gid);
 }
 
+static inline vtag_t i_tag_read(const struct inode *inode)
+{
+	return from_ktag(&init_user_ns, inode->i_tag);
+}
+
 static inline void i_uid_write(struct inode *inode, uid_t uid)
 {
 	inode->i_uid = make_kuid(&init_user_ns, uid);
@@ -723,14 +733,19 @@ static inline void i_gid_write(struct in
 	inode->i_gid = make_kgid(&init_user_ns, gid);
 }
 
+static inline void i_tag_write(struct inode *inode, vtag_t tag)
+{
+	inode->i_tag = make_ktag(&init_user_ns, tag);
+}
+
 static inline unsigned iminor(const struct inode *inode)
 {
-	return MINOR(inode->i_rdev);
+	return MINOR(inode->i_mdev);
 }
 
 static inline unsigned imajor(const struct inode *inode)
 {
-	return MAJOR(inode->i_rdev);
+	return MAJOR(inode->i_mdev);
 }
 
 extern struct block_device *I_BDEV(struct inode *inode);
@@ -790,6 +805,7 @@ struct file {
 	loff_t			f_pos;
 	struct fown_struct	f_owner;
 	const struct cred	*f_cred;
+	vxid_t			f_xid;
 	struct file_ra_state	f_ra;
 
 	u64			f_version;
@@ -808,7 +824,7 @@ struct file {
 #ifdef CONFIG_DEBUG_WRITECOUNT
 	unsigned long f_mnt_write_state;
 #endif
-};
+} __randomize_layout;
 
 struct file_handle {
 	__u32 handle_bytes;
@@ -962,6 +978,7 @@ struct file_lock {
 	struct file *fl_file;
 	loff_t fl_start;
 	loff_t fl_end;
+	vxid_t fl_xid;
 
 	struct fasync_struct *	fl_fasync; /* for lease break notifications */
 	/* for lease breaks: */
@@ -978,7 +995,7 @@ struct file_lock {
 			int state;		/* state of grant or error if -ve */
 		} afs;
 	} fl_u;
-};
+} __randomize_layout;
 
 /* The following constant reflects the upper bound of the file/locking space */
 #ifndef OFFSET_MAX
@@ -1325,7 +1342,7 @@ struct super_block {
 	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
 	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
 	struct rcu_head		rcu;
-};
+} __randomize_layout;
 
 extern struct timespec current_fs_time(struct super_block *sb);
 
@@ -1547,7 +1564,8 @@ struct file_operations {
 	long (*fallocate)(struct file *file, int mode, loff_t offset,
 			  loff_t len);
 	int (*show_fdinfo)(struct seq_file *m, struct file *f);
-};
+} __do_const __randomize_layout;
+typedef struct file_operations __no_const file_operations_no_const;
 
 struct inode_operations {
 	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
@@ -1573,6 +1591,7 @@ struct inode_operations {
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
+	int (*sync_flags) (struct inode *, int, int);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
 	int (*update_time)(struct inode *, struct timespec *, int);
@@ -1586,6 +1605,7 @@ ssize_t rw_copy_check_uvector(int type,
 			      unsigned long nr_segs, unsigned long fast_segs,
 			      struct iovec *fast_pointer,
 			      struct iovec **ret_pointer);
+ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
 
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1639,6 +1659,14 @@ struct super_operations {
 #define S_IMA		1024	/* Inode has an associated IMA struct */
 #define S_AUTOMOUNT	2048	/* Automount/referral quasi-directory */
 #define S_NOSEC		4096	/* no suid or xattr security attributes */
+#define S_IXUNLINK	8192	/* Immutable Invert on unlink */
+
+/* Linux-VServer related Inode flags */
+
+#define V_VALID		1
+#define V_XATTR		2
+#define V_BARRIER	4	/* Barrier for chroot() */
+#define V_COW		8	/* Copy on Write */
 
 /*
  * Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1663,10 +1691,13 @@ struct super_operations {
 #define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
 #define IS_NOATIME(inode)	__IS_FLG(inode, MS_RDONLY|MS_NOATIME)
 #define IS_I_VERSION(inode)	__IS_FLG(inode, MS_I_VERSION)
+#define IS_TAGGED(inode)	__IS_FLG(inode, MS_TAGGED)
 
 #define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
 #define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
 #define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
+#define IS_IXUNLINK(inode)	((inode)->i_flags & S_IXUNLINK)
+#define IS_IXORUNLINK(inode)	((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
 #define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
 
 #define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
@@ -1677,6 +1708,16 @@ struct super_operations {
 #define IS_AUTOMOUNT(inode)	((inode)->i_flags & S_AUTOMOUNT)
 #define IS_NOSEC(inode)		((inode)->i_flags & S_NOSEC)
 
+#define IS_BARRIER(inode)	(S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
+
+#ifdef CONFIG_VSERVER_COWBL
+#  define IS_COW(inode)		(IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
+#  define IS_COW_LINK(inode)	(S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
+#else
+#  define IS_COW(inode)		(0)
+#  define IS_COW_LINK(inode)	(0)
+#endif
+
 /*
  * Inode state bits.  Protected by inode->i_lock
  *
@@ -1920,6 +1961,9 @@ extern struct kobject *fs_kobj;
 extern int locks_mandatory_locked(struct inode *);
 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
 
+#define ATTR_FLAG_BARRIER	512	/* Barrier for chroot() */
+#define ATTR_FLAG_IXUNLINK	1024	/* Immutable invert on unlink */
+
 /*
  * Candidates for mandatory locking have the setgid bit set
  * but no group execute bit -  an otherwise meaningless combination.
@@ -2605,6 +2649,7 @@ extern int dcache_dir_open(struct inode
 extern int dcache_dir_close(struct inode *, struct file *);
 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
 extern int dcache_readdir(struct file *, struct dir_context *);
+extern int dcache_readdir_filter(struct file *, struct dir_context *, int (*)(struct dentry *));
 extern int simple_setattr(struct dentry *, struct iattr *);
 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int simple_statfs(struct dentry *, struct kstatfs *);
@@ -2808,4 +2853,14 @@ static inline bool dir_relax(struct inod
 	return !IS_DEADDIR(inode);
 }
 
+static inline bool is_sidechannel_device(const struct inode *inode)
+{
+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
+	umode_t mode = inode->i_mode;
+	return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
+#else
+	return false;
+#endif
+}
+
 #endif /* _LINUX_FS_H */
diff -ruNp linux-3.13.11/include/linux/fs_struct.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fs_struct.h
--- linux-3.13.11/include/linux/fs_struct.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fs_struct.h	2014-07-09 12:00:15.000000000 +0200
@@ -6,13 +6,13 @@
 #include <linux/seqlock.h>
 
 struct fs_struct {
-	int users;
+	atomic_t users;
 	spinlock_t lock;
 	seqcount_t seq;
 	int umask;
 	int in_exec;
 	struct path root, pwd;
-};
+} __randomize_layout;
 
 extern struct kmem_cache *fs_cachep;
 
diff -ruNp linux-3.13.11/include/linux/fscache-cache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fscache-cache.h
--- linux-3.13.11/include/linux/fscache-cache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fscache-cache.h	2014-07-09 12:00:15.000000000 +0200
@@ -113,7 +113,7 @@ struct fscache_operation {
 	fscache_operation_release_t release;
 };
 
-extern atomic_t fscache_op_debug_id;
+extern atomic_unchecked_t fscache_op_debug_id;
 extern void fscache_op_work_func(struct work_struct *work);
 
 extern void fscache_enqueue_operation(struct fscache_operation *);
@@ -135,7 +135,7 @@ static inline void fscache_operation_ini
 	INIT_WORK(&op->work, fscache_op_work_func);
 	atomic_set(&op->usage, 1);
 	op->state = FSCACHE_OP_ST_INITIALISED;
-	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
 	op->processor = processor;
 	op->release = release;
 	INIT_LIST_HEAD(&op->pend_link);
diff -ruNp linux-3.13.11/include/linux/fscache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fscache.h
--- linux-3.13.11/include/linux/fscache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fscache.h	2014-07-09 12:00:15.000000000 +0200
@@ -152,7 +152,7 @@ struct fscache_cookie_def {
 	 * - this is mandatory for any object that may have data
 	 */
 	void (*now_uncached)(void *cookie_netfs_data);
-};
+} __do_const;
 
 /*
  * fscache cached network filesystem type
diff -ruNp linux-3.13.11/include/linux/fsnotify.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fsnotify.h
--- linux-3.13.11/include/linux/fsnotify.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/fsnotify.h	2014-07-09 12:00:15.000000000 +0200
@@ -195,6 +195,9 @@ static inline void fsnotify_access(struc
 	struct inode *inode = file_inode(file);
 	__u32 mask = FS_ACCESS;
 
+	if (is_sidechannel_device(inode))
+		return;
+
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_ISDIR;
 
@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struc
 	struct inode *inode = file_inode(file);
 	__u32 mask = FS_MODIFY;
 
+	if (is_sidechannel_device(inode))
+		return;
+
 	if (S_ISDIR(inode->i_mode))
 		mask |= FS_ISDIR;
 
@@ -315,7 +321,7 @@ static inline void fsnotify_change(struc
  */
 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
 {
-	return kstrdup(name, GFP_KERNEL);
+	return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
 }
 
 /*
diff -ruNp linux-3.13.11/include/linux/genhd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/genhd.h
--- linux-3.13.11/include/linux/genhd.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/genhd.h	2014-07-09 12:00:15.000000000 +0200
@@ -194,7 +194,7 @@ struct gendisk {
 	struct kobject *slave_dir;
 
 	struct timer_rand_state *random;
-	atomic_t sync_io;		/* RAID */
+	atomic_unchecked_t sync_io;	/* RAID */
 	struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
 	struct blk_integrity *integrity;
@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gen
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
 
 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);
 
 static inline sector_t get_start_sect(struct block_device *bdev)
diff -ruNp linux-3.13.11/include/linux/genl_magic_func.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/genl_magic_func.h
--- linux-3.13.11/include/linux/genl_magic_func.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/genl_magic_func.h	2014-07-09 12:00:15.000000000 +0200
@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _
 },
 
 #define ZZZ_genl_ops		CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
+static struct genl_ops ZZZ_genl_ops[] = {
 #include GENL_MAGIC_INCLUDE_FILE
 };
 
diff -ruNp linux-3.13.11/include/linux/gfp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gfp.h
--- linux-3.13.11/include/linux/gfp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gfp.h	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,13 @@ struct vm_area_struct;
 #define ___GFP_NO_KSWAPD	0x400000u
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define ___GFP_USERCOPY		0x2000000u
+#else
+#define ___GFP_USERCOPY		0
+#endif
+
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -92,6 +99,7 @@ struct vm_area_struct;
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_KMEMCG	((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
+#define __GFP_USERCOPY	((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
 
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
@@ -99,7 +107,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -153,6 +161,8 @@ struct vm_area_struct;
 /* 4GB DMA on some platforms */
 #define GFP_DMA32	__GFP_DMA32
 
+#define GFP_USERCOPY	__GFP_USERCOPY
+
 /* Convert GFP flags to their corresponding migrate type */
 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 {
diff -ruNp linux-3.13.11/include/linux/gracl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gracl.h
--- linux-3.13.11/include/linux/gracl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gracl.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,340 @@
+#ifndef GR_ACL_H
+#define GR_ACL_H
+
+#include <linux/grdefs.h>
+#include <linux/resource.h>
+#include <linux/capability.h>
+#include <linux/dcache.h>
+#include <asm/resource.h>
+
+/* Major status information */
+
+#define GR_VERSION  "grsecurity 3.0"
+#define GRSECURITY_VERSION 0x3000
+
+enum {
+	GR_SHUTDOWN = 0,
+	GR_ENABLE = 1,
+	GR_SPROLE = 2,
+	GR_OLDRELOAD = 3,
+	GR_SEGVMOD = 4,
+	GR_STATUS = 5,
+	GR_UNSPROLE = 6,
+	GR_PASSSET = 7,
+	GR_SPROLEPAM = 8,
+	GR_RELOAD = 9,
+};
+
+/* Password setup definitions
+ * kernel/grhash.c */
+enum {
+	GR_PW_LEN = 128,
+	GR_SALT_LEN = 16,
+	GR_SHA_LEN = 32,
+};
+
+enum {
+	GR_SPROLE_LEN = 64,
+};
+
+enum {
+	GR_NO_GLOB = 0,
+	GR_REG_GLOB,
+	GR_CREATE_GLOB
+};
+
+#define GR_NLIMITS 32
+
+/* Begin Data Structures */
+
+struct sprole_pw {
+	unsigned char *rolename;
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];	/* 256-bit SHA hash of the password */
+};
+
+struct name_entry {
+	__u32 key;
+	ino_t inode;
+	dev_t device;
+	char *name;
+	__u16 len;
+	__u8 deleted;
+	struct name_entry *prev;
+	struct name_entry *next;
+};
+
+struct inodev_entry {
+	struct name_entry *nentry;
+	struct inodev_entry *prev;
+	struct inodev_entry *next;
+};
+
+struct acl_role_db {
+	struct acl_role_label **r_hash;
+	__u32 r_size;
+};
+
+struct inodev_db {
+	struct inodev_entry **i_hash;
+	__u32 i_size;
+};
+
+struct name_db {
+	struct name_entry **n_hash;
+	__u32 n_size;
+};
+
+struct crash_uid {
+	uid_t uid;
+	unsigned long expires;
+};
+
+struct gr_hash_struct {
+	void **table;
+	void **nametable;
+	void *first;
+	__u32 table_size;
+	__u32 used_size;
+	int type;
+};
+
+/* Userspace Grsecurity ACL data structures */
+
+struct acl_subject_label {
+	char *filename;
+	ino_t inode;
+	dev_t device;
+	__u32 mode;
+	kernel_cap_t cap_mask;
+	kernel_cap_t cap_lower;
+	kernel_cap_t cap_invert_audit;
+
+	struct rlimit res[GR_NLIMITS];
+	__u32 resmask;
+
+	__u8 user_trans_type;
+	__u8 group_trans_type;
+	uid_t *user_transitions;
+	gid_t *group_transitions;
+	__u16 user_trans_num;
+	__u16 group_trans_num;
+
+	__u32 sock_families[2];
+	__u32 ip_proto[8];
+	__u32 ip_type;
+	struct acl_ip_label **ips;
+	__u32 ip_num;
+	__u32 inaddr_any_override;
+
+	__u32 crashes;
+	unsigned long expires;
+
+	struct acl_subject_label *parent_subject;
+	struct gr_hash_struct *hash;
+	struct acl_subject_label *prev;
+	struct acl_subject_label *next;
+
+	struct acl_object_label **obj_hash;
+	__u32 obj_hash_size;
+	__u16 pax_flags;
+};
+
+struct role_allowed_ip {
+	__u32 addr;
+	__u32 netmask;
+
+	struct role_allowed_ip *prev;
+	struct role_allowed_ip *next;
+};
+
+struct role_transition {
+	char *rolename;
+
+	struct role_transition *prev;
+	struct role_transition *next;
+};
+
+struct acl_role_label {
+	char *rolename;
+	uid_t uidgid;
+	__u16 roletype;
+
+	__u16 auth_attempts;
+	unsigned long expires;
+
+	struct acl_subject_label *root_label;
+	struct gr_hash_struct *hash;
+
+	struct acl_role_label *prev;
+	struct acl_role_label *next;
+
+	struct role_transition *transitions;
+	struct role_allowed_ip *allowed_ips;
+	uid_t *domain_children;
+	__u16 domain_child_num;
+
+	umode_t umask;
+
+	struct acl_subject_label **subj_hash;
+	__u32 subj_hash_size;
+};
+
+struct user_acl_role_db {
+	struct acl_role_label **r_table;
+	__u32 num_pointers;		/* Number of allocations to track */
+	__u32 num_roles;		/* Number of roles */
+	__u32 num_domain_children;	/* Number of domain children */
+	__u32 num_subjects;		/* Number of subjects */
+	__u32 num_objects;		/* Number of objects */
+};
+
+struct acl_object_label {
+	char *filename;
+	ino_t inode;
+	dev_t device;
+	__u32 mode;
+
+	struct acl_subject_label *nested;
+	struct acl_object_label *globbed;
+
+	/* next two structures not used */
+
+	struct acl_object_label *prev;
+	struct acl_object_label *next;
+};
+
+struct acl_ip_label {
+	char *iface;
+	__u32 addr;
+	__u32 netmask;
+	__u16 low, high;
+	__u8 mode;
+	__u32 type;
+	__u32 proto[8];
+
+	/* next two structures not used */
+
+	struct acl_ip_label *prev;
+	struct acl_ip_label *next;
+};
+
+struct gr_arg {
+	struct user_acl_role_db role_db;
+	unsigned char pw[GR_PW_LEN];
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];
+	unsigned char sp_role[GR_SPROLE_LEN];
+	struct sprole_pw *sprole_pws;
+	dev_t segv_device;
+	ino_t segv_inode;
+	uid_t segv_uid;
+	__u16 num_sprole_pws;
+	__u16 mode;
+};
+
+struct gr_arg_wrapper {
+	struct gr_arg *arg;
+	__u32 version;
+	__u32 size;
+};
+
+struct subject_map {
+	struct acl_subject_label *user;
+	struct acl_subject_label *kernel;
+	struct subject_map *prev;
+	struct subject_map *next;
+};
+
+struct acl_subj_map_db {
+	struct subject_map **s_hash;
+	__u32 s_size;
+};
+
+struct gr_policy_state {
+	struct sprole_pw **acl_special_roles;
+	__u16 num_sprole_pws;
+	struct acl_role_label *kernel_role;
+	struct acl_role_label *role_list;
+	struct acl_role_label *default_role;
+	struct acl_role_db acl_role_set;
+	struct acl_subj_map_db subj_map_set;
+	struct name_db name_set;
+	struct inodev_db inodev_set;
+};
+
+struct gr_alloc_state {
+	unsigned long alloc_stack_next;
+	unsigned long alloc_stack_size;
+	void **alloc_stack;
+};
+
+struct gr_reload_state {
+	struct gr_policy_state oldpolicy;
+	struct gr_alloc_state oldalloc;
+	struct gr_policy_state newpolicy;
+	struct gr_alloc_state newalloc;
+	struct gr_policy_state *oldpolicy_ptr;
+	struct gr_alloc_state *oldalloc_ptr;
+	unsigned char oldmode;
+};
+
+/* End Data Structures Section */
+
+/* Hash functions generated by empirical testing by Brad Spengler
+   Makes good use of the low bits of the inode.  Generally 0-1 times
+   in loop for successful match.  0-3 for unsuccessful match.
+   Shift/add algorithm with modulus of table size and an XOR*/
+
+static __inline__ unsigned int
+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
+{
+	return ((((uid + type) << (16 + type)) ^ uid) % sz);
+}
+
+ static __inline__ unsigned int
+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
+{
+	return ((const unsigned long)userp % sz);
+}
+
+static __inline__ unsigned int
+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
+{
+	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
+}
+
+static __inline__ unsigned int
+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
+{
+	return full_name_hash((const unsigned char *)name, len) % sz;
+}
+
+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
+	subj = NULL; \
+	iter = 0; \
+	while (iter < role->subj_hash_size) { \
+		if (subj == NULL) \
+			subj = role->subj_hash[iter]; \
+		if (subj == NULL) { \
+			iter++; \
+			continue; \
+		}
+
+#define FOR_EACH_SUBJECT_END(subj,iter) \
+		subj = subj->next; \
+		if (subj == NULL) \
+			iter++; \
+	}
+
+
+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
+	subj = role->hash->first; \
+	while (subj != NULL) {
+
+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
+		subj = subj->next; \
+	}
+
+#endif
+
diff -ruNp linux-3.13.11/include/linux/gracl_compat.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gracl_compat.h
--- linux-3.13.11/include/linux/gracl_compat.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gracl_compat.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,156 @@
+#ifndef GR_ACL_COMPAT_H
+#define GR_ACL_COMPAT_H
+
+#include <linux/resource.h>
+#include <asm/resource.h>
+
+struct sprole_pw_compat {
+	compat_uptr_t rolename;
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];
+};
+
+struct gr_hash_struct_compat {
+	compat_uptr_t table;
+	compat_uptr_t nametable;
+	compat_uptr_t first;
+	__u32 table_size;
+	__u32 used_size;
+	int type;
+};
+
+struct acl_subject_label_compat {
+	compat_uptr_t filename;
+	compat_ino_t inode;
+	__u32 device;
+	__u32 mode;
+	kernel_cap_t cap_mask;
+	kernel_cap_t cap_lower;
+	kernel_cap_t cap_invert_audit;
+
+	struct compat_rlimit res[GR_NLIMITS];
+	__u32 resmask;
+
+	__u8 user_trans_type;
+	__u8 group_trans_type;
+	compat_uptr_t user_transitions;
+	compat_uptr_t group_transitions;
+	__u16 user_trans_num;
+	__u16 group_trans_num;
+
+	__u32 sock_families[2];
+	__u32 ip_proto[8];
+	__u32 ip_type;
+	compat_uptr_t ips;
+	__u32 ip_num;
+	__u32 inaddr_any_override;
+
+	__u32 crashes;
+	compat_ulong_t expires;
+
+	compat_uptr_t parent_subject;
+	compat_uptr_t hash;
+	compat_uptr_t prev;
+	compat_uptr_t next;
+
+	compat_uptr_t obj_hash;
+	__u32 obj_hash_size;
+	__u16 pax_flags;
+};
+
+struct role_allowed_ip_compat {
+	__u32 addr;
+	__u32 netmask;
+
+	compat_uptr_t prev;
+	compat_uptr_t next;
+};
+
+struct role_transition_compat {
+	compat_uptr_t rolename;
+
+	compat_uptr_t prev;
+	compat_uptr_t next;
+};
+
+struct acl_role_label_compat {
+	compat_uptr_t rolename;
+	uid_t uidgid;
+	__u16 roletype;
+
+	__u16 auth_attempts;
+	compat_ulong_t expires;
+
+	compat_uptr_t root_label;
+	compat_uptr_t hash;
+
+	compat_uptr_t prev;
+	compat_uptr_t next;
+
+	compat_uptr_t transitions;
+	compat_uptr_t allowed_ips;
+	compat_uptr_t domain_children;
+	__u16 domain_child_num;
+
+	umode_t umask;
+
+	compat_uptr_t subj_hash;
+	__u32 subj_hash_size;
+};
+
+struct user_acl_role_db_compat {
+	compat_uptr_t r_table;
+	__u32 num_pointers;
+	__u32 num_roles;
+	__u32 num_domain_children;
+	__u32 num_subjects;
+	__u32 num_objects;
+};
+
+struct acl_object_label_compat {
+	compat_uptr_t filename;
+	compat_ino_t inode;
+	__u32 device;
+	__u32 mode;
+
+	compat_uptr_t nested;
+	compat_uptr_t globbed;
+
+	compat_uptr_t prev;
+	compat_uptr_t next;
+};
+
+struct acl_ip_label_compat {
+	compat_uptr_t iface;
+	__u32 addr;
+	__u32 netmask;
+	__u16 low, high;
+	__u8 mode;
+	__u32 type;
+	__u32 proto[8];
+
+	compat_uptr_t prev;
+	compat_uptr_t next;
+};
+
+struct gr_arg_compat {
+	struct user_acl_role_db_compat role_db;
+	unsigned char pw[GR_PW_LEN];
+	unsigned char salt[GR_SALT_LEN];
+	unsigned char sum[GR_SHA_LEN];
+	unsigned char sp_role[GR_SPROLE_LEN];
+	compat_uptr_t sprole_pws;
+	__u32 segv_device;
+	compat_ino_t segv_inode;
+	uid_t segv_uid;
+	__u16 num_sprole_pws;
+	__u16 mode;
+};
+
+struct gr_arg_wrapper_compat {
+	compat_uptr_t arg;
+	__u32 version;
+	__u32 size;
+};
+
+#endif
diff -ruNp linux-3.13.11/include/linux/gralloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gralloc.h
--- linux-3.13.11/include/linux/gralloc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/gralloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,9 @@
+#ifndef __GRALLOC_H
+#define __GRALLOC_H
+
+void acl_free_all(void);
+int acl_alloc_stack_init(unsigned long size);
+void *acl_alloc(unsigned long len);
+void *acl_alloc_num(unsigned long num, unsigned long len);
+
+#endif
diff -ruNp linux-3.13.11/include/linux/grdefs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grdefs.h
--- linux-3.13.11/include/linux/grdefs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grdefs.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,140 @@
+#ifndef GRDEFS_H
+#define GRDEFS_H
+
+/* Begin grsecurity status declarations */
+
+enum {
+	GR_READY = 0x01,
+	GR_STATUS_INIT = 0x00	// disabled state
+};
+
+/* Begin  ACL declarations */
+
+/* Role flags */
+
+enum {
+	GR_ROLE_USER = 0x0001,
+	GR_ROLE_GROUP = 0x0002,
+	GR_ROLE_DEFAULT = 0x0004,
+	GR_ROLE_SPECIAL = 0x0008,
+	GR_ROLE_AUTH = 0x0010,
+	GR_ROLE_NOPW = 0x0020,
+	GR_ROLE_GOD = 0x0040,
+	GR_ROLE_LEARN = 0x0080,
+	GR_ROLE_TPE = 0x0100,
+	GR_ROLE_DOMAIN = 0x0200,
+	GR_ROLE_PAM = 0x0400,
+	GR_ROLE_PERSIST = 0x0800
+};
+
+/* ACL Subject and Object mode flags */
+enum {
+	GR_DELETED = 0x80000000
+};
+
+/* ACL Object-only mode flags */
+enum {
+	GR_READ 	= 0x00000001,
+	GR_APPEND 	= 0x00000002,
+	GR_WRITE 	= 0x00000004,
+	GR_EXEC 	= 0x00000008,
+	GR_FIND 	= 0x00000010,
+	GR_INHERIT 	= 0x00000020,
+	GR_SETID 	= 0x00000040,
+	GR_CREATE 	= 0x00000080,
+	GR_DELETE 	= 0x00000100,
+	GR_LINK		= 0x00000200,
+	GR_AUDIT_READ 	= 0x00000400,
+	GR_AUDIT_APPEND = 0x00000800,
+	GR_AUDIT_WRITE 	= 0x00001000,
+	GR_AUDIT_EXEC 	= 0x00002000,
+	GR_AUDIT_FIND 	= 0x00004000,
+	GR_AUDIT_INHERIT= 0x00008000,
+	GR_AUDIT_SETID 	= 0x00010000,
+	GR_AUDIT_CREATE = 0x00020000,
+	GR_AUDIT_DELETE = 0x00040000,
+	GR_AUDIT_LINK	= 0x00080000,
+	GR_PTRACERD 	= 0x00100000,
+	GR_NOPTRACE	= 0x00200000,
+	GR_SUPPRESS 	= 0x00400000,
+	GR_NOLEARN 	= 0x00800000,
+	GR_INIT_TRANSFER= 0x01000000
+};
+
+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
+		   GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
+		   GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
+
+/* ACL subject-only mode flags */
+enum {
+	GR_KILL 	= 0x00000001,
+	GR_VIEW 	= 0x00000002,
+	GR_PROTECTED 	= 0x00000004,
+	GR_LEARN 	= 0x00000008,
+	GR_OVERRIDE 	= 0x00000010,
+	/* just a placeholder, this mode is only used in userspace */
+	GR_DUMMY 	= 0x00000020,
+	GR_PROTSHM	= 0x00000040,
+	GR_KILLPROC	= 0x00000080,
+	GR_KILLIPPROC	= 0x00000100,
+	/* just a placeholder, this mode is only used in userspace */
+	GR_NOTROJAN	= 0x00000200,
+	GR_PROTPROCFD	= 0x00000400,
+	GR_PROCACCT	= 0x00000800,
+	GR_RELAXPTRACE	= 0x00001000,
+	//GR_NESTED	= 0x00002000,
+	GR_INHERITLEARN	= 0x00004000,
+	GR_PROCFIND	= 0x00008000,
+	GR_POVERRIDE	= 0x00010000,
+	GR_KERNELAUTH	= 0x00020000,
+	GR_ATSECURE	= 0x00040000,
+	GR_SHMEXEC	= 0x00080000
+};
+
+enum {
+	GR_PAX_ENABLE_SEGMEXEC	= 0x0001,
+	GR_PAX_ENABLE_PAGEEXEC	= 0x0002,
+	GR_PAX_ENABLE_MPROTECT	= 0x0004,
+	GR_PAX_ENABLE_RANDMMAP	= 0x0008,
+	GR_PAX_ENABLE_EMUTRAMP	= 0x0010,
+	GR_PAX_DISABLE_SEGMEXEC	= 0x0100,
+	GR_PAX_DISABLE_PAGEEXEC	= 0x0200,
+	GR_PAX_DISABLE_MPROTECT	= 0x0400,
+	GR_PAX_DISABLE_RANDMMAP	= 0x0800,
+	GR_PAX_DISABLE_EMUTRAMP	= 0x1000,
+};
+
+enum {
+	GR_ID_USER	= 0x01,
+	GR_ID_GROUP	= 0x02,
+};
+
+enum {
+	GR_ID_ALLOW	= 0x01,
+	GR_ID_DENY	= 0x02,
+};
+
+#define GR_CRASH_RES	31
+#define GR_UIDTABLE_MAX 500
+
+/* begin resource learning section */
+enum {
+	GR_RLIM_CPU_BUMP = 60,
+	GR_RLIM_FSIZE_BUMP = 50000,
+	GR_RLIM_DATA_BUMP = 10000,
+	GR_RLIM_STACK_BUMP = 1000,
+	GR_RLIM_CORE_BUMP = 10000,
+	GR_RLIM_RSS_BUMP = 500000,
+	GR_RLIM_NPROC_BUMP = 1,
+	GR_RLIM_NOFILE_BUMP = 5,
+	GR_RLIM_MEMLOCK_BUMP = 50000,
+	GR_RLIM_AS_BUMP = 500000,
+	GR_RLIM_LOCKS_BUMP = 2,
+	GR_RLIM_SIGPENDING_BUMP = 5,
+	GR_RLIM_MSGQUEUE_BUMP = 10000,
+	GR_RLIM_NICE_BUMP = 1,
+	GR_RLIM_RTPRIO_BUMP = 1,
+	GR_RLIM_RTTIME_BUMP = 1000000
+};
+
+#endif
diff -ruNp linux-3.13.11/include/linux/grinternal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grinternal.h
--- linux-3.13.11/include/linux/grinternal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grinternal.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,229 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
+
+#ifdef CONFIG_GRKERNSEC
+
+#include <linux/fs.h>
+#include <linux/mnt_namespace.h>
+#include <linux/nsproxy.h>
+#include <linux/gracl.h>
+#include <linux/grdefs.h>
+#include <linux/grmsg.h>
+
+void gr_add_learn_entry(const char *fmt, ...)
+	__attribute__ ((format (printf, 1, 2)));
+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
+			    const struct vfsmount *mnt);
+__u32 gr_check_create(const struct dentry *new_dentry,
+			     const struct dentry *parent,
+			     const struct vfsmount *mnt, const __u32 mode);
+int gr_check_protected_task(const struct task_struct *task);
+__u32 to_gr_audit(const __u32 reqmode);
+int gr_set_acls(const int type);
+int gr_acl_is_enabled(void);
+char gr_roletype_to_char(void);
+
+void gr_handle_alertkill(struct task_struct *task);
+char *gr_to_filename(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+char *gr_to_filename1(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+char *gr_to_filename2(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+char *gr_to_filename3(const struct dentry *dentry,
+			    const struct vfsmount *mnt);
+
+extern int grsec_enable_ptrace_readexec;
+extern int grsec_enable_harden_ptrace;
+extern int grsec_enable_link;
+extern int grsec_enable_fifo;
+extern int grsec_enable_execve;
+extern int grsec_enable_shm;
+extern int grsec_enable_execlog;
+extern int grsec_enable_signal;
+extern int grsec_enable_audit_ptrace;
+extern int grsec_enable_forkfail;
+extern int grsec_enable_time;
+extern int grsec_enable_rofs;
+extern int grsec_deny_new_usb;
+extern int grsec_enable_chroot_shmat;
+extern int grsec_enable_chroot_mount;
+extern int grsec_enable_chroot_double;
+extern int grsec_enable_chroot_pivot;
+extern int grsec_enable_chroot_chdir;
+extern int grsec_enable_chroot_chmod;
+extern int grsec_enable_chroot_mknod;
+extern int grsec_enable_chroot_fchdir;
+extern int grsec_enable_chroot_nice;
+extern int grsec_enable_chroot_execlog;
+extern int grsec_enable_chroot_caps;
+extern int grsec_enable_chroot_sysctl;
+extern int grsec_enable_chroot_unix;
+extern int grsec_enable_symlinkown;
+extern kgid_t grsec_symlinkown_gid;
+extern int grsec_enable_tpe;
+extern kgid_t grsec_tpe_gid;
+extern int grsec_enable_tpe_all;
+extern int grsec_enable_tpe_invert;
+extern int grsec_enable_socket_all;
+extern kgid_t grsec_socket_all_gid;
+extern int grsec_enable_socket_client;
+extern kgid_t grsec_socket_client_gid;
+extern int grsec_enable_socket_server;
+extern kgid_t grsec_socket_server_gid;
+extern kgid_t grsec_audit_gid;
+extern int grsec_enable_group;
+extern int grsec_enable_log_rwxmaps;
+extern int grsec_enable_mount;
+extern int grsec_enable_chdir;
+extern int grsec_resource_logging;
+extern int grsec_enable_blackhole;
+extern int grsec_lastack_retries;
+extern int grsec_enable_brute;
+extern int grsec_enable_harden_ipc;
+extern int grsec_lock;
+
+extern spinlock_t grsec_alert_lock;
+extern unsigned long grsec_alert_wtime;
+extern unsigned long grsec_alert_fyet;
+
+extern spinlock_t grsec_audit_lock;
+
+extern rwlock_t grsec_exec_file_lock;
+
+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
+			gr_to_filename2((tsk)->exec_file->f_path.dentry, \
+			(tsk)->exec_file->f_path.mnt) : "/")
+
+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
+			gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
+			(tsk)->real_parent->exec_file->f_path.mnt) : "/")
+
+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
+			gr_to_filename((tsk)->exec_file->f_path.dentry, \
+			(tsk)->exec_file->f_path.mnt) : "/")
+
+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
+			gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
+			(tsk)->real_parent->exec_file->f_path.mnt) : "/")
+
+#define proc_is_chrooted(tsk_a)  ((tsk_a)->gr_is_chrooted)
+
+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
+
+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
+{
+	if (file1 && file2) {
+		const struct inode *inode1 = file1->f_path.dentry->d_inode;
+		const struct inode *inode2 = file2->f_path.dentry->d_inode;
+		if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
+			return true;
+	}
+
+	return false;
+}
+
+#define GR_CHROOT_CAPS {{ \
+	CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
+	CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
+	CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
+	CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
+	CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
+	CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
+	CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
+
+#define security_learn(normal_msg,args...) \
+({ \
+	read_lock(&grsec_exec_file_lock); \
+	gr_add_learn_entry(normal_msg "\n", ## args); \
+	read_unlock(&grsec_exec_file_lock); \
+})
+
+enum {
+	GR_DO_AUDIT,
+	GR_DONT_AUDIT,
+	/* used for non-audit messages that we shouldn't kill the task on */
+	GR_DONT_AUDIT_GOOD
+};
+
+enum {
+	GR_TTYSNIFF,
+	GR_RBAC,
+	GR_RBAC_STR,
+	GR_STR_RBAC,
+	GR_RBAC_MODE2,
+	GR_RBAC_MODE3,
+	GR_FILENAME,
+	GR_SYSCTL_HIDDEN,
+	GR_NOARGS,
+	GR_ONE_INT,
+	GR_ONE_INT_TWO_STR,
+	GR_ONE_STR,
+	GR_STR_INT,
+	GR_TWO_STR_INT,
+	GR_TWO_INT,
+	GR_TWO_U64,
+	GR_THREE_INT,
+	GR_FIVE_INT_TWO_STR,
+	GR_TWO_STR,
+	GR_THREE_STR,
+	GR_FOUR_STR,
+	GR_STR_FILENAME,
+	GR_FILENAME_STR,
+	GR_FILENAME_TWO_INT,
+	GR_FILENAME_TWO_INT_STR,
+	GR_TEXTREL,
+	GR_PTRACE,
+	GR_RESOURCE,
+	GR_CAP,
+	GR_SIG,
+	GR_SIG2,
+	GR_CRASH1,
+	GR_CRASH2,
+	GR_PSACCT,
+	GR_RWXMAP,
+	GR_RWXMAPVMA
+};
+
+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
+
+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
+
+#endif
+
+#endif
diff -ruNp linux-3.13.11/include/linux/grmsg.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grmsg.h
--- linux-3.13.11/include/linux/grmsg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grmsg.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,116 @@
+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
+#define GR_STOPMOD_MSG "denied modification of module state by "
+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
+#define GR_IOPERM_MSG "denied use of ioperm() by "
+#define GR_IOPL_MSG "denied use of iopl() by "
+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
+#define GR_FAILFORK_MSG "failed fork with errno %s by "
+#define GR_NICE_CHROOT_MSG "denied priority change by "
+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
+#define GR_TIME_MSG "time set by "
+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
+#define GR_BIND_MSG "denied bind() by "
+#define GR_CONNECT_MSG "denied connect() by "
+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
+#define GR_CAP_ACL_MSG "use of %s denied for "
+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
+#define GR_VM86_MSG "denied use of vm86 by "
+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds.  Please investigate the crash report for "
+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes.  Please investigate the crash report for "
+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
diff -ruNp linux-3.13.11/include/linux/grsecurity.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grsecurity.h
--- linux-3.13.11/include/linux/grsecurity.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grsecurity.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,246 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+#include <linux/fs.h>
+#include <linux/fs_struct.h>
+#include <linux/binfmts.h>
+#include <linux/gracl.h>
+
+/* notify of brain-dead configs */
+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both
be enabled."
+#endif
+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
+#endif
+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
+#endif
+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
+#error "CONFIG_PAX enabled, but no PaX options are enabled."
+#endif
+
+int gr_handle_new_usb(void);
+
+void gr_handle_brute_attach(int dumpable);
+void gr_handle_brute_check(void);
+void gr_handle_kernel_exploit(void);
+
+char gr_roletype_to_char(void);
+
+int gr_acl_enable_at_secure(void);
+
+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
+
+void gr_del_task_from_ip_table(struct task_struct *p);
+
+int gr_pid_is_chrooted(struct task_struct *p);
+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
+int gr_handle_chroot_nice(void);
+int gr_handle_chroot_sysctl(const int op);
+int gr_handle_chroot_setpriority(struct task_struct *p,
+					const int niceval);
+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
+int gr_handle_chroot_chroot(const struct dentry *dentry,
+				   const struct vfsmount *mnt);
+void gr_handle_chroot_chdir(const struct path *path);
+int gr_handle_chroot_chmod(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int mode);
+int gr_handle_chroot_mknod(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int mode);
+int gr_handle_chroot_mount(const struct dentry *dentry,
+				  const struct vfsmount *mnt,
+				  const char *dev_name);
+int gr_handle_chroot_pivot(void);
+int gr_handle_chroot_unix(const pid_t pid);
+
+int gr_handle_rawio(const struct inode *inode);
+
+void gr_handle_ioperm(void);
+void gr_handle_iopl(void);
+void gr_handle_msr_write(void);
+
+umode_t gr_acl_umask(void);
+
+int gr_tpe_allow(const struct file *file);
+
+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
+void gr_clear_chroot_entries(struct task_struct *task);
+
+void gr_log_forkfail(const int retval);
+void gr_log_timechange(void);
+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
+void gr_log_chdir(const struct dentry *dentry,
+			 const struct vfsmount *mnt);
+void gr_log_chroot_exec(const struct dentry *dentry,
+			       const struct vfsmount *mnt);
+void gr_log_remount(const char *devname, const int retval);
+void gr_log_unmount(const char *devname, const int retval);
+void gr_log_mount(const char *from, const char *to, const int retval);
+void gr_log_textrel(struct vm_area_struct *vma);
+void gr_log_ptgnustack(struct file *file);
+void gr_log_rwxmmap(struct file *file);
+void gr_log_rwxmprotect(struct vm_area_struct *vma);
+
+int gr_handle_follow_link(const struct inode *parent,
+				 const struct inode *inode,
+				 const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+int gr_handle_fifo(const struct dentry *dentry,
+			  const struct vfsmount *mnt,
+			  const struct dentry *dir, const int flag,
+			  const int acc_mode);
+int gr_handle_hardlink(const struct dentry *dentry,
+			      const struct vfsmount *mnt,
+			      struct inode *inode,
+			      const int mode, const struct filename *to);
+
+int gr_is_capable(const int cap);
+int gr_is_capable_nolog(const int cap);
+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
+
+void gr_copy_label(struct task_struct *tsk);
+void gr_handle_crash(struct task_struct *task, const int sig);
+int gr_handle_signal(const struct task_struct *p, const int sig);
+int gr_check_crash_uid(const kuid_t uid);
+int gr_check_protected_task(const struct task_struct *task);
+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
+int gr_acl_handle_mmap(const struct file *file,
+			      const unsigned long prot);
+int gr_acl_handle_mprotect(const struct file *file,
+				  const unsigned long prot);
+int gr_check_hidden_task(const struct task_struct *tsk);
+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
+				    const struct vfsmount *mnt);
+__u32 gr_acl_handle_utime(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+__u32 gr_acl_handle_access(const struct dentry *dentry,
+				  const struct vfsmount *mnt, const int fmode);
+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
+				 const struct vfsmount *mnt, umode_t *mode);
+__u32 gr_acl_handle_chown(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+int gr_handle_ptrace(struct task_struct *task, const long request);
+int gr_handle_proc_ptrace(struct task_struct *task);
+__u32 gr_acl_handle_execve(const struct dentry *dentry,
+				  const struct vfsmount *mnt);
+int gr_check_crash_exec(const struct file *filp);
+int gr_acl_is_enabled(void);
+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
+			      const kgid_t gid);
+int gr_set_proc_label(const struct dentry *dentry,
+			const struct vfsmount *mnt,
+			const int unsafe_flags);
+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
+				const struct vfsmount *mnt);
+__u32 gr_acl_handle_open(const struct dentry *dentry,
+				const struct vfsmount *mnt, int acc_mode);
+__u32 gr_acl_handle_creat(const struct dentry *dentry,
+				 const struct dentry *p_dentry,
+				 const struct vfsmount *p_mnt,
+				 int open_flags, int acc_mode, const int imode);
+void gr_handle_create(const struct dentry *dentry,
+			     const struct vfsmount *mnt);
+void gr_handle_proc_create(const struct dentry *dentry,
+			   const struct inode *inode);
+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
+				 const struct dentry *parent_dentry,
+				 const struct vfsmount *parent_mnt,
+				 const int mode);
+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
+				 const struct dentry *parent_dentry,
+				 const struct vfsmount *parent_mnt);
+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
+				 const struct vfsmount *mnt);
+void gr_handle_delete(const ino_t ino, const dev_t dev);
+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
+				  const struct vfsmount *mnt);
+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
+				   const struct dentry *parent_dentry,
+				   const struct vfsmount *parent_mnt,
+				   const struct filename *from);
+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
+				const struct dentry *parent_dentry,
+				const struct vfsmount *parent_mnt,
+				const struct dentry *old_dentry,
+				const struct vfsmount *old_mnt, const struct filename *to);
+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
+int gr_acl_handle_rename(struct dentry *new_dentry,
+				struct dentry *parent_dentry,
+				const struct vfsmount *parent_mnt,
+				struct dentry *old_dentry,
+				struct inode *old_parent_inode,
+				struct vfsmount *old_mnt, const struct filename *newname);
+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
+				struct dentry *old_dentry,
+				struct dentry *new_dentry,
+				struct vfsmount *mnt, const __u8 replace);
+__u32 gr_check_link(const struct dentry *new_dentry,
+			   const struct dentry *parent_dentry,
+			   const struct vfsmount *parent_mnt,
+			   const struct dentry *old_dentry,
+			   const struct vfsmount *old_mnt);
+int gr_acl_handle_filldir(const struct file *file, const char *name,
+				 const unsigned int namelen, const ino_t ino);
+
+__u32 gr_acl_handle_unix(const struct dentry *dentry,
+				const struct vfsmount *mnt);
+void gr_acl_handle_exit(void);
+void gr_acl_handle_psacct(struct task_struct *task, const long code);
+int gr_acl_handle_procpidmem(const struct task_struct *task);
+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
+void gr_audit_ptrace(struct task_struct *task);
+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
+void gr_put_exec_file(struct task_struct *task);
+
+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
+
+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
+extern void gr_learn_resource(const struct task_struct *task, const int res,
+			      const unsigned long wanted, const int gt);
+#else
+static inline void gr_learn_resource(const struct task_struct *task, const int res,
+				     const unsigned long wanted, const int gt)
+{
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC_RESLOG
+extern void gr_log_resource(const struct task_struct *task, const int res,
+				   const unsigned long wanted, const int gt);
+#else
+static inline void gr_log_resource(const struct task_struct *task, const int res,
+				   const unsigned long wanted, const int gt)
+{
+}
+#endif
+
+#ifdef CONFIG_GRKERNSEC
+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
+void gr_handle_vm86(void);
+void gr_handle_mem_readwrite(u64 from, u64 to);
+
+void gr_log_badprocpid(const char *entry);
+
+extern int grsec_enable_dmesg;
+extern int grsec_disable_privio;
+
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+extern kgid_t grsec_proc_gid;
+#endif
+
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+extern int grsec_enable_chroot_findtask;
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern int grsec_enable_setxid;
+#endif
+#endif
+
+#endif
diff -ruNp linux-3.13.11/include/linux/grsock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grsock.h
--- linux-3.13.11/include/linux/grsock.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/grsock.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,19 @@
+#ifndef __GRSOCK_H
+#define __GRSOCK_H
+
+extern void gr_attach_curr_ip(const struct sock *sk);
+extern int gr_handle_sock_all(const int family, const int type,
+			      const int protocol);
+extern int gr_handle_sock_server(const struct sockaddr *sck);
+extern int gr_handle_sock_server_other(const struct sock *sck);
+extern int gr_handle_sock_client(const struct sockaddr *sck);
+extern int gr_search_connect(struct socket * sock,
+			     struct sockaddr_in * addr);
+extern int gr_search_bind(struct socket * sock,
+			  struct sockaddr_in * addr);
+extern int gr_search_listen(struct socket * sock);
+extern int gr_search_accept(struct socket * sock);
+extern int gr_search_socket(const int domain, const int type,
+			    const int protocol);
+
+#endif
diff -ruNp linux-3.13.11/include/linux/highmem.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/highmem.h
--- linux-3.13.11/include/linux/highmem.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/highmem.h	2014-07-09 12:00:15.000000000 +0200
@@ -189,6 +189,18 @@ static inline void clear_highpage(struct
 	kunmap_atomic(kaddr);
 }
 
+static inline void sanitize_highpage(struct page *page)
+{
+	void *kaddr;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kaddr = kmap_atomic(page);
+	clear_page(kaddr);
+	kunmap_atomic(kaddr);
+	local_irq_restore(flags);
+}
+
 static inline void zero_user_segments(struct page *page,
 	unsigned start1, unsigned end1,
 	unsigned start2, unsigned end2)
diff -ruNp linux-3.13.11/include/linux/hwmon-sysfs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/hwmon-sysfs.h
--- linux-3.13.11/include/linux/hwmon-sysfs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/hwmon-sysfs.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,8 @@
 struct sensor_device_attribute{
 	struct device_attribute dev_attr;
 	int index;
-};
+} __do_const;
+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
 #define to_sensor_dev_attr(_dev_attr) \
 	container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
 
@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
 	struct device_attribute dev_attr;
 	u8 index;
 	u8 nr;
-};
+} __do_const;
+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
 #define to_sensor_dev_attr_2(_dev_attr) \
 	container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
 
diff -ruNp linux-3.13.11/include/linux/i2c.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/i2c.h
--- linux-3.13.11/include/linux/i2c.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/i2c.h	2014-07-09 12:00:15.000000000 +0200
@@ -364,6 +364,7 @@ struct i2c_algorithm {
 	/* To determine what the adapter supports */
 	u32 (*functionality) (struct i2c_adapter *);
 };
+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
 
 /**
  * struct i2c_bus_recovery_info - I2C bus recovery information
diff -ruNp linux-3.13.11/include/linux/i2o.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/i2o.h
--- linux-3.13.11/include/linux/i2o.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/i2o.h	2014-07-09 12:00:15.000000000 +0200
@@ -565,7 +565,7 @@ struct i2o_controller {
 	struct i2o_device *exec;	/* Executive */
 #if BITS_PER_LONG == 64
 	spinlock_t context_list_lock;	/* lock for context_list */
-	atomic_t context_list_counter;	/* needed for unique contexts */
+	atomic_unchecked_t context_list_counter;	/* needed for unique contexts */
 	struct list_head context_list;	/* list of context id's
 					   and pointers */
 #endif
diff -ruNp linux-3.13.11/include/linux/if_pppox.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/if_pppox.h
--- linux-3.13.11/include/linux/if_pppox.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/if_pppox.h	2014-07-09 12:00:15.000000000 +0200
@@ -76,7 +76,7 @@ struct pppox_proto {
 	int		(*ioctl)(struct socket *sock, unsigned int cmd,
 				 unsigned long arg);
 	struct module	*owner;
-};
+} __do_const;
 
 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
 extern void unregister_pppox_proto(int proto_num);
diff -ruNp linux-3.13.11/include/linux/init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/init.h
--- linux-3.13.11/include/linux/init.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/init.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,9 +37,17 @@
  * section.
  */
 
+#define add_init_latent_entropy __latent_entropy
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define add_meminit_latent_entropy
+#else
+#define add_meminit_latent_entropy __latent_entropy
+#endif
+
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace add_init_latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__constsection(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -100,7 +108,7 @@
 #define __cpuexitconst
 
 /* Used for MEMORY_HOTPLUG */
-#define __meminit        __section(.meminit.text) __cold notrace
+#define __meminit        __section(.meminit.text) __cold notrace add_meminit_latent_entropy
 #define __meminitdata    __section(.meminit.data)
 #define __meminitconst   __constsection(.meminit.rodata)
 #define __memexit        __section(.memexit.text) __exitused __cold notrace
diff -ruNp linux-3.13.11/include/linux/init_task.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/init_task.h
--- linux-3.13.11/include/linux/init_task.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/init_task.h	2014-07-09 12:00:15.000000000 +0200
@@ -154,6 +154,12 @@ extern struct task_group root_task_group
 
 #define INIT_TASK_COMM "swapper"
 
+#ifdef CONFIG_X86
+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
+#else
+#define INIT_TASK_THREAD_INFO
+#endif
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -193,6 +199,7 @@ extern struct task_group root_task_group
 	RCU_POINTER_INITIALIZER(cred, &init_cred),			\
 	.comm		= INIT_TASK_COMM,				\
 	.thread		= INIT_THREAD,					\
+	INIT_TASK_THREAD_INFO						\
 	.fs		= &init_fs,					\
 	.files		= &init_files,					\
 	.signal		= &init_signals,				\
@@ -222,6 +229,10 @@ extern struct task_group root_task_group
 	INIT_TASK_RCU_PREEMPT(tsk)					\
 	INIT_CPUSET_SEQ(tsk)						\
 	INIT_VTIME(tsk)							\
+	.xid		= 0,						\
+	.vx_info	= NULL,						\
+	.nid		= 0,						\
+	.nx_info	= NULL,						\
 }
 
 
diff -ruNp linux-3.13.11/include/linux/interrupt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/interrupt.h
--- linux-3.13.11/include/linux/interrupt.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/interrupt.h	2014-07-09 12:00:15.000000000 +0200
@@ -360,7 +360,7 @@ enum
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
  */
-extern char *softirq_to_name[NR_SOFTIRQS];
+extern const char * const softirq_to_name[NR_SOFTIRQS];
 
 /* softirq mask and active fields moved to irq_cpustat_t in
  * asm/hardirq.h to get better cache usage.  KAO
@@ -368,8 +368,8 @@ extern char *softirq_to_name[NR_SOFTIRQS
 
 struct softirq_action
 {
-	void	(*action)(struct softirq_action *);
-};
+	void	(*action)(void);
+} __no_const;
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
@@ -383,7 +383,7 @@ static inline void do_softirq_own_stack(
 }
 #endif
 
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
 
diff -ruNp linux-3.13.11/include/linux/iommu.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/iommu.h
--- linux-3.13.11/include/linux/iommu.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/iommu.h	2014-07-09 12:00:15.000000000 +0200
@@ -130,7 +130,7 @@ struct iommu_ops {
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
 
 	unsigned long pgsize_bitmap;
-};
+} __do_const;
 
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
diff -ruNp linux-3.13.11/include/linux/ioport.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ioport.h
--- linux-3.13.11/include/linux/ioport.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ioport.h	2014-07-09 12:00:15.000000000 +0200
@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct
 int adjust_resource(struct resource *res, resource_size_t start,
 		    resource_size_t size);
 resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(const struct resource *res)
+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
 {
 	return res->end - res->start + 1;
 }
diff -ruNp linux-3.13.11/include/linux/ipc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ipc.h
--- linux-3.13.11/include/linux/ipc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ipc.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@ struct kern_ipc_perm
 	key_t		key;
 	kuid_t		uid;
 	kgid_t		gid;
+	vxid_t		xid;
 	kuid_t		cuid;
 	kgid_t		cgid;
 	umode_t		mode; 
diff -ruNp linux-3.13.11/include/linux/ipc_namespace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ipc_namespace.h
--- linux-3.13.11/include/linux/ipc_namespace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ipc_namespace.h	2014-07-09 12:00:15.000000000 +0200
@@ -70,7 +70,7 @@ struct ipc_namespace {
 	struct user_namespace *user_ns;
 
 	unsigned int	proc_inum;
-};
+} __randomize_layout;
 
 extern struct ipc_namespace init_ipc_ns;
 extern atomic_t nr_ipc_ns;
diff -ruNp linux-3.13.11/include/linux/irq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/irq.h
--- linux-3.13.11/include/linux/irq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/irq.h	2014-07-09 12:00:15.000000000 +0200
@@ -338,7 +338,8 @@ struct irq_chip {
 	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
 
 	unsigned long	flags;
-};
+} __do_const;
+typedef struct irq_chip __no_const irq_chip_no_const;
 
 /*
  * irq_chip specific flags
diff -ruNp linux-3.13.11/include/linux/irqchip/arm-gic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/irqchip/arm-gic.h
--- linux-3.13.11/include/linux/irqchip/arm-gic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/irqchip/arm-gic.h	2014-07-09 12:00:15.000000000 +0200
@@ -61,9 +61,11 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/irq.h>
+
 struct device_node;
 
-extern struct irq_chip gic_arch_extn;
+extern irq_chip_no_const gic_arch_extn;
 
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
 		    u32 offset, struct device_node *);
diff -ruNp linux-3.13.11/include/linux/jiffies.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/jiffies.h
--- linux-3.13.11/include/linux/jiffies.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/jiffies.h	2014-07-09 12:00:15.000000000 +0200
@@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
 /*
  * Convert various time units to each other:
  */
-extern unsigned int jiffies_to_msecs(const unsigned long j);
-extern unsigned int jiffies_to_usecs(const unsigned long j);
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
-extern unsigned long timespec_to_jiffies(const struct timespec *value);
+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
 extern void jiffies_to_timespec(const unsigned long jiffies,
 				struct timespec *value);
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
 extern void jiffies_to_timeval(const unsigned long jiffies,
 			       struct timeval *value);
 
diff -ruNp linux-3.13.11/include/linux/kallsyms.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kallsyms.h
--- linux-3.13.11/include/linux/kallsyms.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kallsyms.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,7 +15,8 @@
 
 struct module;
 
-#ifdef CONFIG_KALLSYMS
+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 /* Lookup the address for a symbol. Returns 0 if not found. */
 unsigned long kallsyms_lookup_name(const char *name);
 
@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(un
 /* Stupid that this does nothing, but I didn't create this mess. */
 #define __print_symbol(fmt, addr)
 #endif /*CONFIG_KALLSYMS*/
+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
+	arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
+extern unsigned long kallsyms_lookup_name(const char *name);
+extern void __print_symbol(const char *fmt, unsigned long address);
+extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
+const char *kallsyms_lookup(unsigned long addr,
+			    unsigned long *symbolsize,
+			    unsigned long *offset,
+			    char **modname, char *namebuf);
+extern int kallsyms_lookup_size_offset(unsigned long addr,
+				  unsigned long *symbolsize,
+				  unsigned long *offset);
+#endif
 
 /* This macro allows us to keep printk typechecking */
 static __printf(1, 2)
diff -ruNp linux-3.13.11/include/linux/key-type.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/key-type.h
--- linux-3.13.11/include/linux/key-type.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/key-type.h	2014-07-09 12:00:15.000000000 +0200
@@ -131,7 +131,7 @@ struct key_type {
 	/* internal fields */
 	struct list_head	link;		/* link in types list */
 	struct lock_class_key	lock_class;	/* key->sem lock class */
-};
+} __do_const;
 
 extern struct key_type key_type_keyring;
 
diff -ruNp linux-3.13.11/include/linux/kgdb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kgdb.h
--- linux-3.13.11/include/linux/kgdb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kgdb.h	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ extern int kgdb_connected;
 extern int kgdb_io_module_registered;
 
 extern atomic_t			kgdb_setting_breakpoint;
-extern atomic_t			kgdb_cpu_doing_single_step;
+extern atomic_unchecked_t	kgdb_cpu_doing_single_step;
 
 extern struct task_struct	*kgdb_usethread;
 extern struct task_struct	*kgdb_contthread;
@@ -254,7 +254,7 @@ struct kgdb_arch {
 	void	(*correct_hw_break)(void);
 
 	void	(*enable_nmi)(bool on);
-};
+} __do_const;
 
 /**
  * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
@@ -279,7 +279,7 @@ struct kgdb_io {
 	void			(*pre_exception) (void);
 	void			(*post_exception) (void);
 	int			is_console;
-};
+} __do_const;
 
 extern struct kgdb_arch		arch_kgdb_ops;
 
diff -ruNp linux-3.13.11/include/linux/kmod.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kmod.h
--- linux-3.13.11/include/linux/kmod.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kmod.h	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysc
  * usually useless though. */
 extern __printf(2, 3)
 int __request_module(bool wait, const char *name, ...);
+extern __printf(3, 4)
+int ___request_module(bool wait, char *param_name, const char *name, ...);
 #define request_module(mod...) __request_module(true, mod)
 #define request_module_nowait(mod...) __request_module(false, mod)
 #define try_then_request_module(x, mod...) \
@@ -57,6 +59,9 @@ struct subprocess_info {
 	struct work_struct work;
 	struct completion *complete;
 	char *path;
+#ifdef CONFIG_GRKERNSEC
+	char *origpath;
+#endif
 	char **argv;
 	char **envp;
 	int wait;
diff -ruNp linux-3.13.11/include/linux/kobject.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kobject.h
--- linux-3.13.11/include/linux/kobject.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kobject.h	2014-07-09 12:00:15.000000000 +0200
@@ -116,7 +116,7 @@ struct kobj_type {
 	struct attribute **default_attrs;
 	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
 	const void *(*namespace)(struct kobject *kobj);
-};
+} __do_const;
 
 struct kobj_uevent_env {
 	char *envp[UEVENT_NUM_ENVP];
@@ -139,6 +139,7 @@ struct kobj_attribute {
 	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
 			 const char *buf, size_t count);
 };
+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
 
 extern const struct sysfs_ops kobj_sysfs_ops;
 
@@ -166,7 +167,7 @@ struct kset {
 	spinlock_t list_lock;
 	struct kobject kobj;
 	const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
 
 extern void kset_init(struct kset *kset);
 extern int __must_check kset_register(struct kset *kset);
diff -ruNp linux-3.13.11/include/linux/kobject_ns.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kobject_ns.h
--- linux-3.13.11/include/linux/kobject_ns.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kobject_ns.h	2014-07-09 12:00:15.000000000 +0200
@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
 	const void *(*netlink_ns)(struct sock *sk);
 	const void *(*initial_ns)(void);
 	void (*drop_ns)(void *);
-};
+} __do_const;
 
 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
 int kobj_ns_type_registered(enum kobj_ns_type type);
diff -ruNp linux-3.13.11/include/linux/kref.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kref.h
--- linux-3.13.11/include/linux/kref.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kref.h	2014-07-09 12:00:15.000000000 +0200
@@ -68,7 +68,7 @@ static inline void kref_get(struct kref
 static inline int kref_sub(struct kref *kref, unsigned int count,
 	     void (*release)(struct kref *kref))
 {
-	WARN_ON(release == NULL);
+	BUG_ON(release == NULL);
 
 	if (atomic_sub_and_test((int) count, &kref->refcount)) {
 		release(kref);
diff -ruNp linux-3.13.11/include/linux/kvm_host.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kvm_host.h
--- linux-3.13.11/include/linux/kvm_host.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/kvm_host.h	2014-07-09 12:00:15.000000000 +0200
@@ -457,7 +457,7 @@ static inline void kvm_irqfd_exit(void)
 {
 }
 #endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		  struct module *module);
 void kvm_exit(void);
 
@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
 					struct kvm_guest_debug *dbg);
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 
-int kvm_arch_init(void *opaque);
+int kvm_arch_init(const void *opaque);
 void kvm_arch_exit(void);
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
diff -ruNp linux-3.13.11/include/linux/libata.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/libata.h
--- linux-3.13.11/include/linux/libata.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/libata.h	2014-07-09 12:00:15.000000000 +0200
@@ -975,7 +975,7 @@ struct ata_port_operations {
 	 * fields must be pointers.
 	 */
 	const struct ata_port_operations	*inherits;
-};
+} __do_const;
 
 struct ata_port_info {
 	unsigned long		flags;
diff -ruNp linux-3.13.11/include/linux/linkage.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/linkage.h
--- linux-3.13.11/include/linux/linkage.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/linkage.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,7 @@
 #endif
 
 #define __page_aligned_data	__section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_rodata	__read_only __aligned(PAGE_SIZE)
 #define __page_aligned_bss	__section(.bss..page_aligned) __aligned(PAGE_SIZE)
 
 /*
diff -ruNp linux-3.13.11/include/linux/list.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/list.h
--- linux-3.13.11/include/linux/list.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/list.h	2014-07-09 12:00:15.000000000 +0200
@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list
 extern void list_del(struct list_head *entry);
 #endif
 
+extern void __pax_list_add(struct list_head *new,
+			      struct list_head *prev,
+			      struct list_head *next);
+static inline void pax_list_add(struct list_head *new, struct list_head *head)
+{
+	__pax_list_add(new, head, head->next);
+}
+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__pax_list_add(new, head->prev, head);
+}
+extern void pax_list_del(struct list_head *entry);
+
 /**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
@@ -145,6 +158,8 @@ static inline void list_del_init(struct
 	INIT_LIST_HEAD(entry);
 }
 
+extern void pax_list_del_init(struct list_head *entry);
+
 /**
  * list_move - delete from one list and add as another's head
  * @list: the entry to move
diff -ruNp linux-3.13.11/include/linux/math64.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/math64.h
--- linux-3.13.11/include/linux/math64.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/math64.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,7 +15,7 @@
  * This is commonly provided by 32bit archs to provide an optimized 64bit
  * divide.
  */
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	*remainder = dividend % divisor;
 	return dividend / divisor;
@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 divi
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
-static inline u64 div64_u64(u64 dividend, u64 divisor)
+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
 {
 	return dividend / divisor;
 }
@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend
 #define div64_ul(x, y)   div_u64((x), (y))
 
 #ifndef div_u64_rem
-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
 	*remainder = do_div(dividend, divisor);
 	return dividend;
@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u
 #endif
 
 #ifndef div64_u64
-extern u64 div64_u64(u64 dividend, u64 divisor);
+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
 #endif
 
 #ifndef div64_s64
@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 d
  * divide.
  */
 #ifndef div_u64
-static inline u64 div_u64(u64 dividend, u32 divisor)
+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
 {
 	u32 remainder;
 	return div_u64_rem(dividend, divisor, &remainder);
diff -ruNp linux-3.13.11/include/linux/memcontrol.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/memcontrol.h
--- linux-3.13.11/include/linux/memcontrol.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/memcontrol.h	2014-07-09 12:00:15.000000000 +0200
@@ -99,6 +99,13 @@ extern struct mem_cgroup *try_get_mem_cg
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
 
+extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member);
+extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member);
+
+extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem);
+extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem);
+
 static inline
 bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
 {
diff -ruNp linux-3.13.11/include/linux/mempolicy.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mempolicy.h
--- linux-3.13.11/include/linux/mempolicy.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mempolicy.h	2014-07-09 12:00:15.000000000 +0200
@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup
 }
 
 #define vma_policy(vma) ((vma)->vm_policy)
+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
+{
+	vma->vm_policy = pol;
+}
 
 static inline void mpol_get(struct mempolicy *pol)
 {
@@ -241,6 +245,9 @@ mpol_shared_policy_lookup(struct shared_
 }
 
 #define vma_policy(vma) NULL
+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
+{
+}
 
 static inline int
 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff -ruNp linux-3.13.11/include/linux/mm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mm.h
--- linux-3.13.11/include/linux/mm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mm.h	2014-07-09 12:00:15.000000000 +0200
@@ -117,6 +117,11 @@ extern unsigned int kobjsize(const void
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+#define VM_PAGEEXEC	0x02000000	/* vma->vm_page_prot needs special handling */
+#endif
+
 #define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -219,8 +224,8 @@ struct vm_operations_struct {
 	/* called by access_process_vm when get_user_pages() fails, typically
 	 * for use by special VMAs that can switch between memory and hardware
 	 */
-	int (*access)(struct vm_area_struct *vma, unsigned long addr,
-		      void *buf, int len, int write);
+	ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
+		      void *buf, size_t len, int write);
 #ifdef CONFIG_NUMA
 	/*
 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
@@ -250,6 +255,7 @@ struct vm_operations_struct {
 	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
 			   unsigned long size, pgoff_t pgoff);
 };
+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
 
 struct mmu_gather;
 struct inode;
@@ -1074,8 +1080,8 @@ int follow_pfn(struct vm_area_struct *vm
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags, unsigned long *prot, resource_size_t *phys);
-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-			void *buf, int len, int write);
+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+			void *buf, size_t len, int write);
 
 static inline void unmap_shared_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen)
@@ -1114,9 +1120,9 @@ static inline int fixup_user_fault(struc
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write);
+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, size_t len, int write);
 
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      unsigned long start, unsigned long nr_pages,
@@ -1148,34 +1154,6 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-					     unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-					   unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSUP) &&
-		(vma->vm_end == addr) &&
-		!vma_growsup(vma->vm_next, addr);
-}
-
 extern pid_t
 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
 
@@ -1275,6 +1253,15 @@ static inline void sync_mm_rss(struct mm
 }
 #endif
 
+#ifdef CONFIG_MMU
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
+{
+	return __pgprot(0);
+}
+#endif
+
 int vma_wants_writenotify(struct vm_area_struct *vma);
 
 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -1293,8 +1280,15 @@ static inline int __pud_alloc(struct mm_
 {
 	return 0;
 }
+
+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
+						unsigned long address)
+{
+	return 0;
+}
 #else
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
 #endif
 
 #ifdef __PAGETABLE_PMD_FOLDED
@@ -1303,8 +1297,15 @@ static inline int __pmd_alloc(struct mm_
 {
 	return 0;
 }
+
+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
+						unsigned long address)
+{
+	return 0;
+}
 #else
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
 #endif
 
 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -1322,11 +1323,23 @@ static inline pud_t *pud_alloc(struct mm
 		NULL: pud_offset(pgd, address);
 }
 
+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
+		NULL: pud_offset(pgd, address);
+}
+
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
 		NULL: pmd_offset(pud, address);
 }
+
+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+	return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
+		NULL: pmd_offset(pud, address);
+}
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
@@ -1704,7 +1717,7 @@ extern int install_special_mapping(struc
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
@@ -1712,6 +1725,7 @@ extern unsigned long do_mmap_pgoff(struc
 	unsigned long len, unsigned long prot, unsigned long flags,
 	unsigned long pgoff, unsigned long *populate);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
 
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
@@ -1740,10 +1754,11 @@ struct vm_unmapped_area_info {
 	unsigned long high_limit;
 	unsigned long align_mask;
 	unsigned long align_offset;
+	unsigned long threadstack_offset;
 };
 
-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
 
 /*
  * Search for an unmapped address range.
@@ -1755,7 +1770,7 @@ extern unsigned long unmapped_area_topdo
  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
  */
 static inline unsigned long
-vm_unmapped_area(struct vm_unmapped_area_info *info)
+vm_unmapped_area(const struct vm_unmapped_area_info *info)
 {
 	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
 		return unmapped_area(info);
@@ -1818,6 +1833,10 @@ extern struct vm_area_struct * find_vma(
 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
 					     struct vm_area_struct **pprev);
 
+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
+
 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    NULL if none.  Assume start_addr < end_addr. */
 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
@@ -1846,15 +1865,6 @@ static inline struct vm_area_struct *fin
 	return vma;
 }
 
-#ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
-#else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
-	return __pgprot(0);
-}
-#endif
-
 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
 unsigned long change_prot_numa(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
@@ -1906,6 +1916,11 @@ void vm_stat_account(struct mm_struct *,
 static inline void vm_stat_account(struct mm_struct *mm,
 			unsigned long flags, struct file *file, long pages)
 {
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
+
 	mm->total_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
@@ -1987,7 +2002,7 @@ extern int unpoison_memory(unsigned long
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
+extern atomic_long_unchecked_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
 extern void dump_page(struct page *page);
@@ -2024,5 +2039,11 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
+#else
+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff -ruNp linux-3.13.11/include/linux/mm_types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mm_types.h
--- linux-3.13.11/include/linux/mm_types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mm_types.h	2014-07-09 12:00:15.000000000 +0200
@@ -307,7 +307,9 @@ struct vm_area_struct {
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
-};
+
+	struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
+} __randomize_layout;
 
 struct core_thread {
 	struct task_struct *task;
@@ -397,6 +399,7 @@ struct mm_struct {
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
+	struct vx_info *mm_vx_info;
 
 	unsigned long flags; /* Must use atomic bitops to access the bits */
 
@@ -453,7 +456,25 @@ struct mm_struct {
 	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
-};
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+	unsigned long pax_flags;
+#endif
+
+#ifdef CONFIG_PAX_DLRESOLVE
+	unsigned long call_dl_resolve;
+#endif
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
+	unsigned long call_syscall;
+#endif
+
+#ifdef CONFIG_PAX_ASLR
+	unsigned long delta_mmap;		/* randomized offset */
+	unsigned long delta_stack;		/* randomized offset */
+#endif
+
+} __randomize_layout;
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
diff -ruNp linux-3.13.11/include/linux/mmiotrace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mmiotrace.h
--- linux-3.13.11/include/linux/mmiotrace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mmiotrace.h	2014-07-09 12:00:15.000000000 +0200
@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs
 /* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
 							void __iomem *addr);
-extern void mmiotrace_iounmap(volatile void __iomem *addr);
+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
 
 /* For anyone to insert markers. Remember trailing newline. */
 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(res
 {
 }
 
-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
 {
 }
 
diff -ruNp linux-3.13.11/include/linux/mmzone.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mmzone.h
--- linux-3.13.11/include/linux/mmzone.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mmzone.h	2014-07-09 12:00:15.000000000 +0200
@@ -396,7 +396,7 @@ struct zone {
 	unsigned long		flags;		   /* zone flags, see below */
 
 	/* Zone statistics */
-	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
+	atomic_long_unchecked_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
 	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
diff -ruNp linux-3.13.11/include/linux/mod_devicetable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mod_devicetable.h
--- linux-3.13.11/include/linux/mod_devicetable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mod_devicetable.h	2014-07-09 12:00:15.000000000 +0200
@@ -13,7 +13,7 @@
 typedef unsigned long kernel_ulong_t;
 #endif
 
-#define PCI_ANY_ID (~0)
+#define PCI_ANY_ID ((__u16)~0)
 
 struct pci_device_id {
 	__u32 vendor, device;		/* Vendor and device ID or PCI_ANY_ID*/
@@ -139,7 +139,7 @@ struct usb_device_id {
 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL	0x0200
 #define USB_DEVICE_ID_MATCH_INT_NUMBER		0x0400
 
-#define HID_ANY_ID				(~0)
+#define HID_ANY_ID				(~0U)
 #define HID_BUS_ANY				0xffff
 #define HID_GROUP_ANY				0x0000
 
@@ -467,7 +467,7 @@ struct dmi_system_id {
 	const char *ident;
 	struct dmi_strmatch matches[4];
 	void *driver_data;
-};
+} __do_const;
 /*
  * struct dmi_device_id appears during expansion of
  * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
diff -ruNp linux-3.13.11/include/linux/module.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/module.h
--- linux-3.13.11/include/linux/module.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/module.h	2014-07-09 12:00:15.000000000 +0200
@@ -17,9 +17,11 @@
 #include <linux/moduleparam.h>
 #include <linux/tracepoint.h>
 #include <linux/export.h>
+#include <linux/fs.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
+#include <asm/pgtable.h>
 
 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
 #define MODULE_SIG_STRING "~Module signature appended~\n"
@@ -43,7 +45,7 @@ struct module_kobject {
 	struct kobject *drivers_dir;
 	struct module_param_attrs *mp;
 	struct completion *kobj_completion;
-};
+} __randomize_layout;
 
 struct module_attribute {
 	struct attribute attr;
@@ -55,12 +57,13 @@ struct module_attribute {
 	int (*test)(struct module *);
 	void (*free)(struct module *);
 };
+typedef struct module_attribute __no_const module_attribute_no_const;
 
 struct module_version_attribute {
 	struct module_attribute mattr;
 	const char *module_name;
 	const char *version;
-} __attribute__ ((__aligned__(sizeof(void *))));
+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
 
 extern ssize_t __modver_version_show(struct module_attribute *,
 				     struct module_kobject *, char *);
@@ -238,7 +241,7 @@ struct module
 
 	/* Sysfs stuff. */
 	struct module_kobject mkobj;
-	struct module_attribute *modinfo_attrs;
+	module_attribute_no_const *modinfo_attrs;
 	const char *version;
 	const char *srcversion;
 	struct kobject *holders_dir;
@@ -287,19 +290,16 @@ struct module
 	int (*init)(void);
 
 	/* If this is non-NULL, vfree after init() returns */
-	void *module_init;
+	void *module_init_rx, *module_init_rw;
 
 	/* Here is the actual code + data, vfree'd on unload. */
-	void *module_core;
+	void *module_core_rx, *module_core_rw;
 
 	/* Here are the sizes of the init and core sections */
-	unsigned int init_size, core_size;
+	unsigned int init_size_rw, core_size_rw;
 
 	/* The size of the executable code in each section.  */
-	unsigned int init_text_size, core_text_size;
-
-	/* Size of RO sections of the module (text+rodata) */
-	unsigned int init_ro_size, core_ro_size;
+	unsigned int init_size_rx, core_size_rx;
 
 	/* Arch-specific module values */
 	struct mod_arch_specific arch;
@@ -355,6 +355,10 @@ struct module
 #ifdef CONFIG_EVENT_TRACING
 	struct ftrace_event_call **trace_events;
 	unsigned int num_trace_events;
+	struct file_operations trace_id;
+	struct file_operations trace_enable;
+	struct file_operations trace_format;
+	struct file_operations trace_filter;
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	unsigned int num_ftrace_callsites;
@@ -378,7 +382,7 @@ struct module
 	ctor_fn_t *ctors;
 	unsigned int num_ctors;
 #endif
-};
+} __randomize_layout;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
 #endif
@@ -399,16 +403,46 @@ bool is_module_address(unsigned long add
 bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);
 
+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
+{
+
+#ifdef CONFIG_PAX_KERNEXEC
+	if (ktla_ktva(addr) >= (unsigned long)start &&
+	    ktla_ktva(addr) < (unsigned long)start + size)
+		return 1;
+#endif
+
+	return ((void *)addr >= start && (void *)addr < start + size);
+}
+
+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
+{
+	return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
+}
+
+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
+{
+	return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
+}
+
+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
+{
+	return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
+}
+
+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
+{
+	return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
+}
+
 static inline int within_module_core(unsigned long addr, const struct module *mod)
 {
-	return (unsigned long)mod->module_core <= addr &&
-	       addr < (unsigned long)mod->module_core + mod->core_size;
+	return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
 }
 
 static inline int within_module_init(unsigned long addr, const struct module *mod)
 {
-	return (unsigned long)mod->module_init <= addr &&
-	       addr < (unsigned long)mod->module_init + mod->init_size;
+	return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
 }
 
 /* Search for module by name: must hold module_mutex. */
diff -ruNp linux-3.13.11/include/linux/moduleloader.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/moduleloader.h
--- linux-3.13.11/include/linux/moduleloader.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/moduleloader.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(st
    sections.  Returns NULL on failure. */
 void *module_alloc(unsigned long size);
 
+#ifdef CONFIG_PAX_KERNEXEC
+void *module_alloc_exec(unsigned long size);
+#else
+#define module_alloc_exec(x) module_alloc(x)
+#endif
+
 /* Free memory returned from module_alloc. */
 void module_free(struct module *mod, void *module_region);
 
+#ifdef CONFIG_PAX_KERNEXEC
+void module_free_exec(struct module *mod, void *module_region);
+#else
+#define module_free_exec(x, y) module_free((x), (y))
+#endif
+
 /*
  * Apply the given relocation to the (simplified) ELF.  Return -error
  * or 0.
@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shd
 				 unsigned int relsec,
 				 struct module *me)
 {
+#ifdef CONFIG_MODULES
 	printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
+#endif
 	return -ENOEXEC;
 }
 #endif
@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf
 				     unsigned int relsec,
 				     struct module *me)
 {
+#ifdef CONFIG_MODULES
 	printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
+#endif
 	return -ENOEXEC;
 }
 #endif
diff -ruNp linux-3.13.11/include/linux/moduleparam.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/moduleparam.h
--- linux-3.13.11/include/linux/moduleparam.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/moduleparam.h	2014-07-09 12:00:15.000000000 +0200
@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock
  * @len is usually just sizeof(string).
  */
 #define module_param_string(name, string, len, perm)			\
-	static const struct kparam_string __param_string_##name		\
+	static const struct kparam_string __param_string_##name __used	\
 		= { len, string };					\
 	__module_param_call(MODULE_PARAM_PREFIX, name,			\
 			    &param_ops_string,				\
@@ -434,7 +434,7 @@ extern int param_set_bint(const char *va
  */
 #define module_param_array_named(name, array, type, nump, perm)		\
 	param_check_##type(name, &(array)[0]);				\
-	static const struct kparam_array __param_arr_##name		\
+	static const struct kparam_array __param_arr_##name __used	\
 	= { .max = ARRAY_SIZE(array), .num = nump,                      \
 	    .ops = &param_ops_##type,					\
 	    .elemsize = sizeof(array[0]), .elem = array };		\
diff -ruNp linux-3.13.11/include/linux/mount.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mount.h
--- linux-3.13.11/include/linux/mount.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/mount.h	2014-07-09 12:00:15.000000000 +0200
@@ -52,11 +52,14 @@ struct mnt_namespace;
 #define MNT_DOOMED		0x1000000
 #define MNT_SYNC_UMOUNT		0x2000000
 
+#define MNT_TAGID	0x10000
+#define MNT_NOTAG	0x20000
+
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */
 	struct super_block *mnt_sb;	/* pointer to superblock */
 	int mnt_flags;
-};
+} __randomize_layout;
 
 struct file; /* forward dec */
 
diff -ruNp linux-3.13.11/include/linux/namei.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/namei.h
--- linux-3.13.11/include/linux/namei.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/namei.h	2014-07-09 12:00:15.000000000 +0200
@@ -19,7 +19,7 @@ struct nameidata {
 	unsigned	seq, m_seq;
 	int		last_type;
 	unsigned	depth;
-	char *saved_names[MAX_NESTED_LINKS + 1];
+	const char *saved_names[MAX_NESTED_LINKS + 1];
 };
 
 /*
@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry
 
 extern void nd_jump_link(struct nameidata *nd, struct path *path);
 
-static inline void nd_set_link(struct nameidata *nd, char *path)
+static inline void nd_set_link(struct nameidata *nd, const char *path)
 {
 	nd->saved_names[nd->depth] = path;
 }
 
-static inline char *nd_get_link(struct nameidata *nd)
+static inline const char *nd_get_link(const struct nameidata *nd)
 {
 	return nd->saved_names[nd->depth];
 }
diff -ruNp linux-3.13.11/include/linux/net.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/net.h
--- linux-3.13.11/include/linux/net.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/net.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,7 @@ struct net;
 #define SOCK_PASSCRED		3
 #define SOCK_PASSSEC		4
 #define SOCK_EXTERNALLY_ALLOCATED 5
+#define SOCK_USER_SOCKET	6
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
@@ -192,7 +193,7 @@ struct net_proto_family {
 	int		(*create)(struct net *net, struct socket *sock,
 				  int protocol, int kern);
 	struct module	*owner;
-};
+} __do_const;
 
 struct iovec;
 struct kvec;
diff -ruNp linux-3.13.11/include/linux/netdevice.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netdevice.h
--- linux-3.13.11/include/linux/netdevice.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netdevice.h	2014-07-09 12:00:15.000000000 +0200
@@ -1129,6 +1129,7 @@ struct net_device_ops {
 							struct net_device *dev,
 							void *priv);
 };
+typedef struct net_device_ops __no_const net_device_ops_no_const;
 
 /*
  *	The DEVICE structure.
@@ -1211,7 +1212,7 @@ struct net_device {
 	int			iflink;
 
 	struct net_device_stats	stats;
-	atomic_long_t		rx_dropped; /* dropped packets by core network
+	atomic_long_unchecked_t	rx_dropped; /* dropped packets by core network
 					     * Do not use this in drivers.
 					     */
 
@@ -1839,6 +1840,7 @@ int init_dummy_netdev(struct net_device
 
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
diff -ruNp linux-3.13.11/include/linux/netfilter/nfnetlink.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter/nfnetlink.h
--- linux-3.13.11/include/linux/netfilter/nfnetlink.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter/nfnetlink.h	2014-07-09 12:00:15.000000000 +0200
@@ -19,7 +19,7 @@ struct nfnl_callback {
 			  const struct nlattr * const cda[]);
 	const struct nla_policy *policy;	/* netlink attribute policy */
 	const u_int16_t attr_count;		/* number of nlattr's */
-};
+} __do_const;
 
 struct nfnetlink_subsystem {
 	const char *name;
diff -ruNp linux-3.13.11/include/linux/netfilter/xt_gradm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter/xt_gradm.h
--- linux-3.13.11/include/linux/netfilter/xt_gradm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter/xt_gradm.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,9 @@
+#ifndef _LINUX_NETFILTER_XT_GRADM_H
+#define _LINUX_NETFILTER_XT_GRADM_H 1
+
+struct xt_gradm_mtinfo {
+	__u16 flags;
+	__u16 invflags;
+};
+
+#endif
diff -ruNp linux-3.13.11/include/linux/netfilter.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter.h
--- linux-3.13.11/include/linux/netfilter.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/netfilter.h	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
 #endif
 	/* Use the module struct to lock set/get code in place */
 	struct module *owner;
-};
+} __do_const;
 
 /* Function to register/unregister hook points. */
 int nf_register_hook(struct nf_hook_ops *reg);
diff -ruNp linux-3.13.11/include/linux/nls.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/nls.h
--- linux-3.13.11/include/linux/nls.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/nls.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,7 +31,7 @@ struct nls_table {
 	const unsigned char *charset2upper;
 	struct module *owner;
 	struct nls_table *next;
-};
+} __do_const;
 
 /* this value hold the maximum octet of charset */
 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
diff -ruNp linux-3.13.11/include/linux/notifier.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/notifier.h
--- linux-3.13.11/include/linux/notifier.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/notifier.h	2014-07-09 12:00:15.000000000 +0200
@@ -54,7 +54,8 @@ struct notifier_block {
 	notifier_fn_t notifier_call;
 	struct notifier_block __rcu *next;
 	int priority;
-};
+} __do_const;
+typedef struct notifier_block __no_const notifier_block_no_const;
 
 struct atomic_notifier_head {
 	spinlock_t lock;
diff -ruNp linux-3.13.11/include/linux/nsproxy.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/nsproxy.h
--- linux-3.13.11/include/linux/nsproxy.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/nsproxy.h	2014-07-09 12:00:15.000000000 +0200
@@ -3,6 +3,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/sched.h>
+#include <linux/vserver/debug.h>
 
 struct mnt_namespace;
 struct uts_namespace;
@@ -67,6 +68,7 @@ static inline struct nsproxy *task_nspro
 }
 
 int copy_namespaces(unsigned long flags, struct task_struct *tsk);
+struct nsproxy *copy_nsproxy(struct nsproxy *orig);
 void exit_task_namespaces(struct task_struct *tsk);
 void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
 void free_nsproxy(struct nsproxy *ns);
@@ -74,16 +76,26 @@ int unshare_nsproxy_namespaces(unsigned
 	struct cred *, struct fs_struct *);
 int __init nsproxy_cache_init(void);
 
-static inline void put_nsproxy(struct nsproxy *ns)
+#define	get_nsproxy(n)	__get_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __get_nsproxy(struct nsproxy *ns,
+	const char *_file, int _line)
 {
-	if (atomic_dec_and_test(&ns->count)) {
-		free_nsproxy(ns);
-	}
+	vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
+		ns, atomic_read(&ns->count), _file, _line);
+	atomic_inc(&ns->count);
 }
 
-static inline void get_nsproxy(struct nsproxy *ns)
+#define	put_nsproxy(n)	__put_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __put_nsproxy(struct nsproxy *ns,
+	const char *_file, int _line)
 {
-	atomic_inc(&ns->count);
+	vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
+		ns, atomic_read(&ns->count), _file, _line);
+	if (atomic_dec_and_test(&ns->count)) {
+		free_nsproxy(ns);
+	}
 }
 
 #endif
diff -ruNp linux-3.13.11/include/linux/oprofile.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/oprofile.h
--- linux-3.13.11/include/linux/oprofile.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/oprofile.h	2014-07-09 12:00:15.000000000 +0200
@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentr
 int oprofilefs_create_ro_ulong(struct dentry * root,
 	char const * name, ulong * val);
  
-/** Create a file for read-only access to an atomic_t. */
+/** Create a file for read-only access to an atomic_unchecked_t. */
 int oprofilefs_create_ro_atomic(struct dentry * root,
-	char const * name, atomic_t * val);
+	char const * name, atomic_unchecked_t * val);
  
 /** create a directory */
 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
diff -ruNp linux-3.13.11/include/linux/padata.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/padata.h
--- linux-3.13.11/include/linux/padata.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/padata.h	2014-07-09 12:00:15.000000000 +0200
@@ -129,7 +129,7 @@ struct parallel_data {
 	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;
-	atomic_t			seq_nr;
+	atomic_unchecked_t		seq_nr;
 	struct padata_cpumask		cpumask;
 	spinlock_t                      lock ____cacheline_aligned;
 	unsigned int			processed;
diff -ruNp linux-3.13.11/include/linux/path.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/path.h
--- linux-3.13.11/include/linux/path.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/path.h	2014-07-09 12:00:15.000000000 +0200
@@ -1,13 +1,15 @@
 #ifndef _LINUX_PATH_H
 #define _LINUX_PATH_H
 
+#include <linux/compiler.h>
+
 struct dentry;
 struct vfsmount;
 
 struct path {
 	struct vfsmount *mnt;
 	struct dentry *dentry;
-};
+} __randomize_layout;
 
 extern void path_get(const struct path *);
 extern void path_put(const struct path *);
diff -ruNp linux-3.13.11/include/linux/pci_hotplug.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pci_hotplug.h
--- linux-3.13.11/include/linux/pci_hotplug.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pci_hotplug.h	2014-07-09 12:00:15.000000000 +0200
@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
 	int (*get_latch_status)		(struct hotplug_slot *slot, u8 *value);
 	int (*get_adapter_status)	(struct hotplug_slot *slot, u8 *value);
 	int (*reset_slot)		(struct hotplug_slot *slot, int probe);
-};
+} __do_const;
+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
 
 /**
  * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
diff -ruNp linux-3.13.11/include/linux/perf_event.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/perf_event.h
--- linux-3.13.11/include/linux/perf_event.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/perf_event.h	2014-07-09 12:00:15.000000000 +0200
@@ -327,8 +327,8 @@ struct perf_event {
 
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
-	local64_t			count;
-	atomic64_t			child_count;
+	local64_t			count; /* PaX: fix it one day */
+	atomic64_unchecked_t		child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -379,8 +379,8 @@ struct perf_event {
 	 * These accumulate total time (in nanoseconds) that children
 	 * events have been enabled and running, respectively.
 	 */
-	atomic64_t			child_total_time_enabled;
-	atomic64_t			child_total_time_running;
+	atomic64_unchecked_t		child_total_time_enabled;
+	atomic64_unchecked_t		child_total_time_running;
 
 	/*
 	 * Protect attach/detach and child_list:
@@ -707,7 +707,7 @@ static inline void perf_callchain_store(
 		entry->ip[entry->nr++] = ip;
 }
 
-extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_legitimately_concerned;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 extern int sysctl_perf_cpu_time_max_percent;
@@ -722,19 +722,24 @@ extern int perf_cpu_time_max_percent_han
 		loff_t *ppos);
 
 
+static inline bool perf_paranoid_any(void)
+{
+	return sysctl_perf_event_legitimately_concerned > 2;
+}
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
-	return sysctl_perf_event_paranoid > -1;
+	return sysctl_perf_event_legitimately_concerned > -1;
 }
 
 static inline bool perf_paranoid_cpu(void)
 {
-	return sysctl_perf_event_paranoid > 0;
+	return sysctl_perf_event_legitimately_concerned > 0;
 }
 
 static inline bool perf_paranoid_kernel(void)
 {
-	return sysctl_perf_event_paranoid > 1;
+	return sysctl_perf_event_legitimately_concerned > 1;
 }
 
 extern void perf_event_init(void);
@@ -850,7 +855,7 @@ struct perf_pmu_events_attr {
 	struct device_attribute attr;
 	u64 id;
 	const char *event_str;
-};
+} __do_const;
 
 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
 static struct perf_pmu_events_attr _var = {				\
diff -ruNp linux-3.13.11/include/linux/pid.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pid.h
--- linux-3.13.11/include/linux/pid.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pid.h	2014-07-09 12:00:15.000000000 +0200
@@ -8,7 +8,8 @@ enum pid_type
 	PIDTYPE_PID,
 	PIDTYPE_PGID,
 	PIDTYPE_SID,
-	PIDTYPE_MAX
+	PIDTYPE_MAX,
+	PIDTYPE_REALPID
 };
 
 /*
@@ -170,6 +171,7 @@ static inline pid_t pid_nr(struct pid *p
 }
 
 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
 pid_t pid_vnr(struct pid *pid);
 
 #define do_each_pid_task(pid, type, task)				\
diff -ruNp linux-3.13.11/include/linux/pid_namespace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pid_namespace.h
--- linux-3.13.11/include/linux/pid_namespace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pid_namespace.h	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@ struct pid_namespace {
 	int hide_pid;
 	int reboot;	/* group exit code if this pidns was rebooted */
 	unsigned int proc_inum;
-};
+} __randomize_layout;
 
 extern struct pid_namespace init_pid_ns;
 
diff -ruNp linux-3.13.11/include/linux/pipe_fs_i.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pipe_fs_i.h
--- linux-3.13.11/include/linux/pipe_fs_i.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pipe_fs_i.h	2014-07-09 12:00:15.000000000 +0200
@@ -47,10 +47,10 @@ struct pipe_inode_info {
 	struct mutex mutex;
 	wait_queue_head_t wait;
 	unsigned int nrbufs, curbuf, buffers;
-	unsigned int readers;
-	unsigned int writers;
-	unsigned int files;
-	unsigned int waiting_writers;
+	atomic_t readers;
+	atomic_t writers;
+	atomic_t files;
+	atomic_t waiting_writers;
 	unsigned int r_counter;
 	unsigned int w_counter;
 	struct page *tmp_page;
diff -ruNp linux-3.13.11/include/linux/pm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm.h
--- linux-3.13.11/include/linux/pm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm.h	2014-07-09 12:00:15.000000000 +0200
@@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct
 struct dev_pm_domain {
 	struct dev_pm_ops	ops;
 };
+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
 
 /*
  * The PM_EVENT_ messages are also used by drivers implementing the legacy
diff -ruNp linux-3.13.11/include/linux/pm_domain.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm_domain.h
--- linux-3.13.11/include/linux/pm_domain.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm_domain.h	2014-07-09 12:00:15.000000000 +0200
@@ -44,11 +44,11 @@ struct gpd_dev_ops {
 	int (*thaw_early)(struct device *dev);
 	int (*thaw)(struct device *dev);
 	bool (*active_wakeup)(struct device *dev);
-};
+} __no_const;
 
 struct gpd_cpu_data {
 	unsigned int saved_exit_latency;
-	struct cpuidle_state *idle_state;
+	cpuidle_state_no_const *idle_state;
 };
 
 struct generic_pm_domain {
diff -ruNp linux-3.13.11/include/linux/pm_runtime.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm_runtime.h
--- linux-3.13.11/include/linux/pm_runtime.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pm_runtime.h	2014-07-09 12:00:15.000000000 +0200
@@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-	ACCESS_ONCE(dev->power.last_busy) = jiffies;
+	ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
 }
 
 #else /* !CONFIG_PM_RUNTIME */
diff -ruNp linux-3.13.11/include/linux/pnp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pnp.h
--- linux-3.13.11/include/linux/pnp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/pnp.h	2014-07-09 12:00:15.000000000 +0200
@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struc
 struct pnp_fixup {
 	char id[7];
 	void (*quirk_function) (struct pnp_dev * dev);	/* fixup function */
-};
+} __do_const;
 
 /* config parameters */
 #define PNP_CONFIG_NORMAL	0x0001
diff -ruNp linux-3.13.11/include/linux/poison.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/poison.h
--- linux-3.13.11/include/linux/poison.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/poison.h	2014-07-09 12:00:15.000000000 +0200
@@ -19,8 +19,8 @@
  * under normal circumstances, used to verify that nobody uses
  * non-initialized list entries.
  */
-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
+#define LIST_POISON1  ((void *) (long)0xFFFFFF01)
+#define LIST_POISON2  ((void *) (long)0xFFFFFF02)
 
 /********** include/linux/timer.h **********/
 /*
diff -ruNp linux-3.13.11/include/linux/power/smartreflex.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/power/smartreflex.h
--- linux-3.13.11/include/linux/power/smartreflex.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/power/smartreflex.h	2014-07-09 12:00:15.000000000 +0200
@@ -238,7 +238,7 @@ struct omap_sr_class_data {
 	int (*notify)(struct omap_sr *sr, u32 status);
 	u8 notify_flags;
 	u8 class_type;
-};
+} __do_const;
 
 /**
  * struct omap_sr_nvalue_table	- Smartreflex n-target value info
diff -ruNp linux-3.13.11/include/linux/ppp-comp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ppp-comp.h
--- linux-3.13.11/include/linux/ppp-comp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/ppp-comp.h	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ struct compressor {
 	struct module *owner;
 	/* Extra skb space needed by the compressor algorithm */
 	unsigned int comp_extra;
-};
+} __do_const;
 
 /*
  * The return value from decompress routine is the length of the
diff -ruNp linux-3.13.11/include/linux/preempt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/preempt.h
--- linux-3.13.11/include/linux/preempt.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/preempt.h	2014-07-09 12:00:15.000000000 +0200
@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
+#define raw_preempt_count_add(val)	__preempt_count_add(val)
+#define raw_preempt_count_sub(val)	__preempt_count_sub(val)
+
 #define __preempt_count_inc() __preempt_count_add(1)
 #define __preempt_count_dec() __preempt_count_sub(1)
 
 #define preempt_count_inc() preempt_count_add(1)
+#define raw_preempt_count_inc() raw_preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
@@ -41,6 +46,12 @@ do { \
 	barrier(); \
 } while (0)
 
+#define raw_preempt_disable() \
+do { \
+	raw_preempt_count_inc(); \
+	barrier(); \
+} while (0)
+
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
@@ -49,6 +60,12 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define raw_preempt_enable_no_resched() \
+do { \
+	barrier(); \
+	raw_preempt_count_dec(); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -105,8 +122,10 @@ do { \
  * region.
  */
 #define preempt_disable()			barrier()
+#define raw_preempt_disable()			barrier()
 #define sched_preempt_enable_no_resched()	barrier()
 #define preempt_enable_no_resched()		barrier()
+#define raw_preempt_enable_no_resched()		barrier()
 #define preempt_enable()			barrier()
 #define preempt_check_resched()			do { } while (0)
 
diff -ruNp linux-3.13.11/include/linux/printk.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/printk.h
--- linux-3.13.11/include/linux/printk.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/printk.h	2014-07-09 12:00:15.000000000 +0200
@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
 void early_printk(const char *s, ...) { }
 #endif
 
+extern int kptr_restrict;
+
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(5, 0)
 int vprintk_emit(int facility, int level,
@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsig
 
 extern int printk_delay_msec;
 extern int dmesg_restrict;
-extern int kptr_restrict;
 
 extern void wake_up_klogd(void);
 
diff -ruNp linux-3.13.11/include/linux/proc_fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/proc_fs.h
--- linux-3.13.11/include/linux/proc_fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/proc_fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *pro
 	return proc_create_data(name, mode, parent, proc_fops, NULL);
 }
 
+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
+	struct proc_dir_entry *parent, const struct file_operations *proc_fops)
+{
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
+#else
+	return proc_create_data(name, mode, parent, proc_fops, NULL);
+#endif
+}
+
+
 extern void proc_set_size(struct proc_dir_entry *, loff_t);
 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
 extern void *PDE_DATA(const struct inode *);
diff -ruNp linux-3.13.11/include/linux/proc_ns.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/proc_ns.h
--- linux-3.13.11/include/linux/proc_ns.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/proc_ns.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,7 +14,7 @@ struct proc_ns_operations {
 	void (*put)(void *ns);
 	int (*install)(struct nsproxy *nsproxy, void *ns);
 	unsigned int (*inum)(void *ns);
-};
+} __do_const __randomize_layout;
 
 struct proc_ns {
 	void *ns;
diff -ruNp linux-3.13.11/include/linux/quota.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/quota.h
--- linux-3.13.11/include/linux/quota.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/quota.h	2014-07-09 12:00:15.000000000 +0200
@@ -70,7 +70,7 @@ struct kqid {			/* Type in which we stor
 
 extern bool qid_eq(struct kqid left, struct kqid right);
 extern bool qid_lt(struct kqid left, struct kqid right);
-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
 extern bool qid_valid(struct kqid qid);
 
diff -ruNp linux-3.13.11/include/linux/quotaops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/quotaops.h
--- linux-3.13.11/include/linux/quotaops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/quotaops.h	2014-07-09 12:00:15.000000000 +0200
@@ -8,6 +8,7 @@
 #define _LINUX_QUOTAOPS_
 
 #include <linux/fs.h>
+#include <linux/vs_dlimit.h>
 
 #define DQUOT_SPACE_WARN	0x1
 #define DQUOT_SPACE_RESERVE	0x2
@@ -207,11 +208,12 @@ static inline void dquot_drop(struct ino
 
 static inline int dquot_alloc_inode(const struct inode *inode)
 {
-	return 0;
+	return dl_alloc_inode(inode);
 }
 
 static inline void dquot_free_inode(const struct inode *inode)
 {
+	dl_free_inode(inode);
 }
 
 static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
@@ -222,6 +224,10 @@ static inline int dquot_transfer(struct
 static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
 		int flags)
 {
+	int ret = 0;
+
+	if ((ret = dl_alloc_space(inode, number)))
+		return ret;
 	if (!(flags & DQUOT_SPACE_RESERVE))
 		inode_add_bytes(inode, number);
 	return 0;
@@ -232,6 +238,7 @@ static inline void __dquot_free_space(st
 {
 	if (!(flags & DQUOT_SPACE_RESERVE))
 		inode_sub_bytes(inode, number);
+	dl_free_space(inode, number);
 }
 
 static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
diff -ruNp linux-3.13.11/include/linux/random.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/random.h
--- linux-3.13.11/include/linux/random.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/random.h	2014-07-09 12:00:15.000000000 +0200
@@ -10,9 +10,19 @@
 
 
 extern void add_device_randomness(const void *, unsigned int);
+
+static inline void add_latent_entropy(void)
+{
+
+#ifdef LATENT_ENTROPY_PLUGIN
+	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+#endif
+
+}
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
-				 unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+				 unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -23,10 +33,10 @@ extern int random_int_secret_init(void);
 extern const struct file_operations random_fops, urandom_fops;
 #endif
 
-unsigned int get_random_int(void);
+unsigned int __intentional_overflow(-1) get_random_int(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
-u32 prandom_u32(void);
+u32 prandom_u32(void) __intentional_overflow(-1);
 void prandom_bytes(void *buf, int nbytes);
 void prandom_seed(u32 seed);
 void prandom_reseed_late(void);
@@ -38,6 +48,11 @@ struct rnd_state {
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
 
+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
+{
+	return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
+}
+
 /*
  * Handle minimum values for seeds
  */
diff -ruNp linux-3.13.11/include/linux/rbtree_augmented.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rbtree_augmented.h
--- linux-3.13.11/include/linux/rbtree_augmented.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rbtree_augmented.h	2014-07-09 12:00:15.000000000 +0200
@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old
 	old->rbaugmented = rbcompute(old);				\
 }									\
 rbstatic const struct rb_augment_callbacks rbname = {			\
-	rbname ## _propagate, rbname ## _copy, rbname ## _rotate	\
+	.propagate = rbname ## _propagate,				\
+	.copy = rbname ## _copy,					\
+	.rotate = rbname ## _rotate					\
 };
 
 
diff -ruNp linux-3.13.11/include/linux/rculist.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rculist.h
--- linux-3.13.11/include/linux/rculist.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rculist.h	2014-07-09 12:00:15.000000000 +0200
@@ -29,8 +29,8 @@
  */
 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 {
-	ACCESS_ONCE(list->next) = list;
-	ACCESS_ONCE(list->prev) = list;
+	ACCESS_ONCE_RW(list->next) = list;
+	ACCESS_ONCE_RW(list->prev) = list;
 }
 
 /*
@@ -59,6 +59,9 @@ extern void __list_add_rcu(struct list_h
 		struct list_head *prev, struct list_head *next);
 #endif
 
+extern void __pax_list_add_rcu(struct list_head *new,
+		struct list_head *prev, struct list_head *next);
+
 /**
  * list_add_rcu - add a new entry to rcu-protected list
  * @new: new entry to be added
@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct l
 	__list_add_rcu(new, head, head->next);
 }
 
+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
+{
+	__pax_list_add_rcu(new, head, head->next);
+}
+
 /**
  * list_add_tail_rcu - add a new entry to rcu-protected list
  * @new: new entry to be added
@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(str
 	__list_add_rcu(new, head->prev, head);
 }
 
+static inline void pax_list_add_tail_rcu(struct list_head *new,
+					struct list_head *head)
+{
+	__pax_list_add_rcu(new, head->prev, head);
+}
+
 /**
  * list_del_rcu - deletes entry from list without re-initialization
  * @entry: the element to delete from the list.
@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct l
 	entry->prev = LIST_POISON2;
 }
 
+extern void pax_list_del_rcu(struct list_head *entry);
+
 /**
  * hlist_del_init_rcu - deletes entry from hash list with re-initialization
  * @n: the element to delete from the hash list.
diff -ruNp linux-3.13.11/include/linux/reboot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/reboot.h
--- linux-3.13.11/include/linux/reboot.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/reboot.h	2014-07-09 12:00:15.000000000 +0200
@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(st
  */
 
 extern void migrate_to_reboot_cpu(void);
-extern void machine_restart(char *cmd);
-extern void machine_halt(void);
-extern void machine_power_off(void);
+extern void machine_restart(char *cmd) __noreturn;
+extern void machine_halt(void) __noreturn;
+extern void machine_power_off(void) __noreturn;
 
 extern void machine_shutdown(void);
 struct pt_regs;
@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struc
  */
 
 extern void kernel_restart_prepare(char *cmd);
-extern void kernel_restart(char *cmd);
-extern void kernel_halt(void);
-extern void kernel_power_off(void);
+extern void kernel_restart(char *cmd) __noreturn;
+extern void kernel_halt(void) __noreturn;
+extern void kernel_power_off(void) __noreturn;
 
 extern int C_A_D; /* for sysctl */
 void ctrl_alt_del(void);
@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
  * Emergency restart, callable from an interrupt handler.
  */
 
-extern void emergency_restart(void);
+extern void emergency_restart(void) __noreturn;
 #include <asm/emergency-restart.h>
 
 #endif /* _LINUX_REBOOT_H */
diff -ruNp linux-3.13.11/include/linux/regset.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/regset.h
--- linux-3.13.11/include/linux/regset.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/regset.h	2014-07-09 12:00:15.000000000 +0200
@@ -161,7 +161,8 @@ struct user_regset {
 	unsigned int 			align;
 	unsigned int 			bias;
 	unsigned int 			core_note_type;
-};
+} __do_const;
+typedef struct user_regset __no_const user_regset_no_const;
 
 /**
  * struct user_regset_view - available regsets
diff -ruNp linux-3.13.11/include/linux/relay.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/relay.h
--- linux-3.13.11/include/linux/relay.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/relay.h	2014-07-09 12:00:15.000000000 +0200
@@ -157,7 +157,7 @@ struct rchan_callbacks
 	 * The callback should return 0 if successful, negative if not.
 	 */
 	int (*remove_buf_file)(struct dentry *dentry);
-};
+} __no_const;
 
 /*
  * CONFIG_RELAY kernel API, kernel/relay.c
diff -ruNp linux-3.13.11/include/linux/rio.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rio.h
--- linux-3.13.11/include/linux/rio.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rio.h	2014-07-09 12:00:15.000000000 +0200
@@ -355,7 +355,7 @@ struct rio_ops {
 	int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
 			u64 rstart, u32 size, u32 flags);
 	void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
-};
+} __no_const;
 
 #define RIO_RESOURCE_MEM	0x00000100
 #define RIO_RESOURCE_DOORBELL	0x00000200
diff -ruNp linux-3.13.11/include/linux/rmap.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rmap.h
--- linux-3.13.11/include/linux/rmap.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/rmap.h	2014-07-09 12:00:15.000000000 +0200
@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(
 void anon_vma_init(void);	/* create anon_vma_cachep */
 int  anon_vma_prepare(struct vm_area_struct *);
 void unlink_anon_vmas(struct vm_area_struct *);
-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
 
 static inline void anon_vma_merge(struct vm_area_struct *vma,
 				  struct vm_area_struct *next)
diff -ruNp linux-3.13.11/include/linux/sched/sysctl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sched/sysctl.h
--- linux-3.13.11/include/linux/sched/sysctl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sched/sysctl.h	2014-07-09 12:00:15.000000000 +0200
@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0
 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 
 extern int sysctl_max_map_count;
+extern unsigned long sysctl_heap_stack_gap;
 
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
diff -ruNp linux-3.13.11/include/linux/sched.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sched.h
--- linux-3.13.11/include/linux/sched.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sched.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,6 +63,7 @@ struct bio_list;
 struct fs_struct;
 struct perf_event_context;
 struct blk_plug;
+struct linux_binprm;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -304,7 +305,7 @@ extern char __sched_text_start[], __sche
 extern int in_sched_functions(unsigned long addr);
 
 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
-extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
@@ -315,6 +316,19 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
+#else
+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
+{
+	return 0;
+}
+#endif
+
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -600,6 +614,17 @@ struct signal_struct {
 #ifdef CONFIG_TASKSTATS
 	struct taskstats *stats;
 #endif
+
+#ifdef CONFIG_GRKERNSEC
+	u32 curr_ip;
+	u32 saved_ip;
+	u32 gr_saddr;
+	u32 gr_daddr;
+	u16 gr_sport;
+	u16 gr_dport;
+	u8 used_accept:1;
+#endif
+
 #ifdef CONFIG_AUDIT
 	unsigned audit_tty;
 	unsigned audit_tty_log_passwd;
@@ -626,7 +651,7 @@ struct signal_struct {
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
 					 * (notably. ptrace) */
-};
+} __randomize_layout;
 
 /*
  * Bits in flags field of signal_struct.
@@ -680,6 +705,14 @@ struct user_struct {
 	struct key *session_keyring;	/* UID's default session keyring */
 #endif
 
+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
+	unsigned char kernel_banned;
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
+	unsigned char suid_banned;
+	unsigned long suid_ban_expires;
+#endif
+
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	kuid_t uid;
@@ -687,7 +720,7 @@ struct user_struct {
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
-};
+} __randomize_layout;
 
 extern int uids_sysfs_init(void);
 
@@ -1162,8 +1195,8 @@ struct task_struct {
 	struct list_head thread_group;
 
 	struct completion *vfork_done;		/* for vfork() */
-	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
-	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
+	pid_t __user *set_child_tid;		/* CLONE_CHILD_SETTID */
+	pid_t __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
@@ -1188,11 +1221,6 @@ struct task_struct {
 	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
-/* process credentials */
-	const struct cred __rcu *real_cred; /* objective and real subjective task
-					 * credentials (COW) */
-	const struct cred __rcu *cred;	/* effective (overridable) subjective task
-					 * credentials (COW) */
 	char comm[TASK_COMM_LEN]; /* executable name excluding path
 				     - access with [gs]et_task_comm (which lock
 				       it with task_lock())
@@ -1209,6 +1237,10 @@ struct task_struct {
 #endif
 /* CPU-specific state of this task */
 	struct thread_struct thread;
+/* thread_info moved to task_struct */
+#ifdef CONFIG_X86
+	struct thread_info tinfo;
+#endif
 /* filesystem information */
 	struct fs_struct *fs;
 /* open file information */
@@ -1237,6 +1269,14 @@ struct task_struct {
 #endif
 	struct seccomp seccomp;
 
+/* vserver context data */
+	struct vx_info *vx_info;
+	struct nx_info *nx_info;
+
+	vxid_t xid;
+	vnid_t nid;
+	vtag_t tag;
+
 /* Thread group tracking */
    	u32 parent_exec_id;
    	u32 self_exec_id;
@@ -1282,6 +1322,10 @@ struct task_struct {
 	gfp_t lockdep_reclaim_gfp;
 #endif
 
+/* process credentials */
+	const struct cred __rcu *real_cred; /* objective and real subjective task
+					 * credentials (COW) */
+
 /* journalling filesystem info */
 	void *journal_info;
 
@@ -1320,6 +1364,10 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
 	struct list_head cg_list;
 #endif
+
+	const struct cred __rcu *cred;	/* effective (overridable) subjective task
+					 * credentials (COW) */
+
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
@@ -1454,7 +1502,78 @@ struct task_struct {
 	unsigned int	sequential_io;
 	unsigned int	sequential_io_avg;
 #endif
-};
+
+#ifdef CONFIG_GRKERNSEC
+	/* grsecurity */
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	u64 exec_id;
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
+	const struct cred *delayed_cred;
+#endif
+	struct dentry *gr_chroot_dentry;
+	struct acl_subject_label *acl;
+	struct acl_subject_label *tmpacl;
+	struct acl_role_label *role;
+	struct file *exec_file;
+	unsigned long brute_expires;
+	u16 acl_role_id;
+	u8 inherited;
+	/* is this the task that authenticated to the special role */
+	u8 acl_sp_role;
+	u8 is_writable;
+	u8 brute;
+	u8 gr_is_chrooted;
+#endif
+
+} __randomize_layout;
+
+#define MF_PAX_PAGEEXEC		0x01000000	/* Paging based non-executable pages */
+#define MF_PAX_EMUTRAMP		0x02000000	/* Emulate trampolines */
+#define MF_PAX_MPROTECT		0x04000000	/* Restrict mprotect() */
+#define MF_PAX_RANDMMAP		0x08000000	/* Randomize mmap() base */
+/*#define MF_PAX_RANDEXEC		0x10000000*/	/* Randomize ET_EXEC base */
+#define MF_PAX_SEGMEXEC		0x20000000	/* Segmentation based non-executable pages */
+
+#ifdef CONFIG_PAX_SOFTMODE
+extern int pax_softmode;
+#endif
+
+extern int pax_check_flags(unsigned long *);
+#define PAX_PARSE_FLAGS_FALLBACK	(~0UL)
+
+/* if tsk != current then task_lock must be held on it */
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+static inline unsigned long pax_get_flags(struct task_struct *tsk)
+{
+	if (likely(tsk->mm))
+		return tsk->mm->pax_flags;
+	else
+		return 0UL;
+}
+
+/* if tsk != current then task_lock must be held on it */
+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
+{
+	if (likely(tsk->mm)) {
+		tsk->mm->pax_flags = flags;
+		return 0;
+	}
+	return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
+extern void pax_set_initial_flags(struct linux_binprm *bprm);
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
+#endif
+
+struct path;
+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -1531,7 +1650,12 @@ struct pid_namespace;
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
 			struct pid_namespace *ns);
 
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+#include <linux/vserver/base.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/pid.h>
+
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
 {
 	return tsk->pid;
 }
@@ -1544,7 +1668,8 @@ static inline pid_t task_pid_nr_ns(struc
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	// return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+	return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL));
 }
 
 
@@ -1557,7 +1682,7 @@ pid_t task_tgid_nr_ns(struct task_struct
 
 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 {
-	return pid_vnr(task_tgid(tsk));
+	return vx_map_tgid(pid_vnr(task_tgid(tsk)));
 }
 
 
@@ -1981,7 +2106,9 @@ void yield(void);
 extern struct exec_domain	default_exec_domain;
 
 union thread_union {
+#ifndef CONFIG_X86
 	struct thread_info thread_info;
+#endif
 	unsigned long stack[THREAD_SIZE/sizeof(long)];
 };
 
@@ -2014,6 +2141,7 @@ extern struct pid_namespace init_pid_ns;
  */
 
 extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
 		struct pid_namespace *ns);
 
@@ -2178,7 +2306,7 @@ extern void __cleanup_sighand(struct sig
 extern void exit_itimers(struct signal_struct *);
 extern void flush_itimer_signals(void);
 
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
 
 extern int allow_signal(int);
 extern int disallow_signal(int);
@@ -2369,9 +2497,9 @@ static inline unsigned long *end_of_stac
 
 #endif
 
-static inline int object_is_on_stack(void *obj)
+static inline int object_starts_on_stack(void *obj)
 {
-	void *stack = task_stack_page(current);
+	const void *stack = task_stack_page(current);
 
 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
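
An illustrative sketch, not part of the patch: how the MF_PAX_* flag bits and the
pax_get_flags() helper added to sched.h above might be consumed by a caller.  The
helper name below is hypothetical.

static inline bool example_task_restricts_mprotect(struct task_struct *tsk)
{
	/* pax_get_flags() returns tsk->mm->pax_flags, or 0 when there is no mm */
	return (pax_get_flags(tsk) & MF_PAX_MPROTECT) != 0;
}
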
diff -ruNp linux-3.13.11/include/linux/security.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/security.h
--- linux-3.13.11/include/linux/security.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/security.h	2014-07-09 12:00:15.000000000 +0200
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/grsecurity.h>
 
 struct linux_binprm;
 struct cred;
@@ -116,8 +117,6 @@ struct seq_file;
 
 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
 
-void reset_security_ops(void);
-
 #ifdef CONFIG_MMU
 extern unsigned long mmap_min_addr;
 extern unsigned long dac_mmap_min_addr;
@@ -1718,7 +1717,7 @@ struct security_operations {
 				 struct audit_context *actx);
 	void (*audit_rule_free) (void *lsmrule);
 #endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
 
 /* prototypes */
 extern int security_init(void);
diff -ruNp linux-3.13.11/include/linux/semaphore.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/semaphore.h
--- linux-3.13.11/include/linux/semaphore.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/semaphore.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@ static inline void sema_init(struct sema
 }
 
 extern void down(struct semaphore *sem);
-extern int __must_check down_interruptible(struct semaphore *sem);
+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
 extern int __must_check down_killable(struct semaphore *sem);
 extern int __must_check down_trylock(struct semaphore *sem);
 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
diff -ruNp linux-3.13.11/include/linux/seq_file.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/seq_file.h
--- linux-3.13.11/include/linux/seq_file.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/seq_file.h	2014-07-09 12:00:15.000000000 +0200
@@ -27,6 +27,9 @@ struct seq_file {
 	struct mutex lock;
 	const struct seq_operations *op;
 	int poll_event;
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	u64 exec_id;
+#endif
 #ifdef CONFIG_USER_NS
 	struct user_namespace *user_ns;
 #endif
@@ -39,6 +42,7 @@ struct seq_operations {
 	void * (*next) (struct seq_file *m, void *v, loff_t *pos);
 	int (*show) (struct seq_file *m, void *v);
 };
+typedef struct seq_operations __no_const seq_operations_no_const;
 
 #define SEQ_SKIP 1
 
diff -ruNp linux-3.13.11/include/linux/shm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/shm.h
--- linux-3.13.11/include/linux/shm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/shm.h	2014-07-09 12:00:15.000000000 +0200
@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the ke
 
 	/* The task created the shm object.  NULL if the task is dead. */
 	struct task_struct	*shm_creator;
+#ifdef CONFIG_GRKERNSEC
+	time_t			shm_createtime;
+	pid_t			shm_lapid;
+#endif
 };
 
 /* shm_mode upper byte flags */
diff -ruNp linux-3.13.11/include/linux/shmem_fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/shmem_fs.h
--- linux-3.13.11/include/linux/shmem_fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/shmem_fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -9,6 +9,9 @@
 
 /* inode in-kernel data */
 
+#define TMPFS_SUPER_MAGIC	0x01021994
+
+
 struct shmem_inode_info {
 	spinlock_t		lock;
 	unsigned long		flags;
diff -ruNp linux-3.13.11/include/linux/skbuff.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/skbuff.h
--- linux-3.13.11/include/linux/skbuff.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/skbuff.h	2014-07-09 12:00:15.000000000 +0200
@@ -643,7 +643,7 @@ bool skb_try_coalesce(struct sk_buff *to
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
 			    int node);
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
-static inline struct sk_buff *alloc_skb(unsigned int size,
+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
 					gfp_t priority)
 {
 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
@@ -750,7 +750,7 @@ static inline struct skb_shared_hwtstamp
  */
 static inline int skb_queue_empty(const struct sk_buff_head *list)
 {
-	return list->next == (struct sk_buff *)list;
+	return list->next == (const struct sk_buff *)list;
 }
 
 /**
@@ -763,7 +763,7 @@ static inline int skb_queue_empty(const
 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
 				     const struct sk_buff *skb)
 {
-	return skb->next == (struct sk_buff *)list;
+	return skb->next == (const struct sk_buff *)list;
 }
 
 /**
@@ -776,7 +776,7 @@ static inline bool skb_queue_is_last(con
 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
 				      const struct sk_buff *skb)
 {
-	return skb->prev == (struct sk_buff *)list;
+	return skb->prev == (const struct sk_buff *)list;
 }
 
 /**
@@ -1686,7 +1686,7 @@ static inline u32 skb_inner_network_head
 	return skb->inner_transport_header - skb->inner_network_header;
 }
 
-static inline int skb_network_offset(const struct sk_buff *skb)
+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
 {
 	return skb_network_header(skb) - skb->data;
 }
@@ -1746,7 +1746,7 @@ static inline int pskb_network_may_pull(
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
+#define NET_SKB_PAD	max(_AC(32,UL), L1_CACHE_BYTES)
 #endif
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -2345,7 +2345,7 @@ struct sk_buff *skb_recv_datagram(struct
 				  int *err);
 unsigned int datagram_poll(struct file *file, struct socket *sock,
 			   struct poll_table_struct *wait);
-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
 			    struct iovec *to, int size);
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
 				     struct iovec *iov);
@@ -2618,6 +2618,9 @@ static inline void nf_reset(struct sk_bu
 	nf_bridge_put(skb->nf_bridge);
 	skb->nf_bridge = NULL;
 #endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+	skb->nf_trace = 0;
+#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
diff -ruNp linux-3.13.11/include/linux/slab.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slab.h
--- linux-3.13.11/include/linux/slab.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slab.h	2014-07-09 12:00:15.000000000 +0200
@@ -14,15 +14,29 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
-
+#include <linux/err.h>
 
 /*
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+#define SLAB_USERCOPY		0x00000200UL	/* PaX: Allow copying objs to/from userland */
+#else
+#define SLAB_USERCOPY		0x00000000UL
+#endif
+
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#define SLAB_NO_SANITIZE	0x00001000UL	/* PaX: Do not sanitize objs on free */
+#else
+#define SLAB_NO_SANITIZE	0x00000000UL
+#endif
+
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
@@ -98,10 +112,13 @@
  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
  * Both make kfree a no-op.
  */
-#define ZERO_SIZE_PTR ((void *)16)
+#define ZERO_SIZE_PTR				\
+({						\
+	BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
+	(void *)(-MAX_ERRNO-1L);		\
+})
 
-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
-				(unsigned long)ZERO_SIZE_PTR)
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
 
 #include <linux/kmemleak.h>
 
@@ -142,6 +159,8 @@ void * __must_check krealloc(const void
 void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
+const char *check_heap_object(const void *ptr, unsigned long n);
+bool is_usercopy_object(const void *ptr);
 
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
@@ -174,7 +193,7 @@ struct kmem_cache {
 	unsigned int align;	/* Alignment as calculated */
 	unsigned long flags;	/* Active flags on the slab */
 	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
+	atomic_t refcount;	/* Use counter */
 	void (*ctor)(void *);	/* Called on object slot creation */
 	struct list_head list;	/* List of all slab caches on the system */
 };
@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches
 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
 /*
  * Figure out which kmalloc slab an allocation of a certain size
  * belongs to.
@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_ca
  * 2 = 120 .. 192 bytes
  * n = 2^(n-1) .. 2^n -1
  */
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
 {
 	if (!size)
 		return 0;
@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags);
+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
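
An illustrative sketch, not part of the patch: a userspace demonstration of the
single-comparison trick behind the reworked ZERO_OR_NULL_PTR() above.  Subtracting 1
makes NULL wrap around to ULONG_MAX, so one unsigned compare covers both NULL and any
pointer at or above ZERO_SIZE_PTR.  The ZSP value below is a stand-in, not the kernel's.

#include <stdio.h>

#define ZSP		((unsigned long)-4096L)	/* stand-in for (void *)(-MAX_ERRNO-1L) */
#define ZERO_OR_NULL(x)	((unsigned long)(x) - 1 >= ZSP - 1)

int main(void)
{
	printf("NULL         -> %d\n", ZERO_OR_NULL((void *)0));	/* prints 1 */
	printf("ZSP itself   -> %d\n", ZERO_OR_NULL((void *)ZSP));	/* prints 1 */
	printf("real pointer -> %d\n", ZERO_OR_NULL((void *)0x1000));	/* prints 0 */
	return 0;
}
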
diff -ruNp linux-3.13.11/include/linux/slab_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slab_def.h
--- linux-3.13.11/include/linux/slab_def.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slab_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ struct kmem_cache {
 /* 4) cache creation/removal */
 	const char *name;
 	struct list_head list;
-	int refcount;
+	atomic_t refcount;
 	int object_size;
 	int align;
 
@@ -52,10 +52,14 @@ struct kmem_cache {
 	unsigned long node_allocs;
 	unsigned long node_frees;
 	unsigned long node_overflow;
-	atomic_t allochit;
-	atomic_t allocmiss;
-	atomic_t freehit;
-	atomic_t freemiss;
+	atomic_unchecked_t allochit;
+	atomic_unchecked_t allocmiss;
+	atomic_unchecked_t freehit;
+	atomic_unchecked_t freemiss;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	atomic_unchecked_t sanitized;
+	atomic_unchecked_t not_sanitized;
+#endif
 
 	/*
 	 * If debugging is enabled, then the allocator can add additional
diff -ruNp linux-3.13.11/include/linux/slub_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slub_def.h
--- linux-3.13.11/include/linux/slub_def.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/slub_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -74,7 +74,7 @@ struct kmem_cache {
 	struct kmem_cache_order_objects max;
 	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
-	int refcount;		/* Refcount for slab cache destroy */
+	atomic_t refcount;	/* Refcount for slab cache destroy */
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
diff -ruNp linux-3.13.11/include/linux/smp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/smp.h
--- linux-3.13.11/include/linux/smp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/smp.h	2014-07-09 12:00:15.000000000 +0200
@@ -176,7 +176,9 @@ static inline void kick_all_cpus_sync(vo
 #endif
 
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
+#define raw_get_cpu()		({ raw_preempt_disable(); raw_smp_processor_id(); })
 #define put_cpu()		preempt_enable()
+#define raw_put_cpu_no_resched()	raw_preempt_enable_no_resched()
 
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
diff -ruNp linux-3.13.11/include/linux/sock_diag.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sock_diag.h
--- linux-3.13.11/include/linux/sock_diag.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sock_diag.h	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@ struct sock;
 struct sock_diag_handler {
 	__u8 family;
 	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
-};
+} __do_const;
 
 int sock_diag_register(const struct sock_diag_handler *h);
 void sock_diag_unregister(const struct sock_diag_handler *h);
diff -ruNp linux-3.13.11/include/linux/sonet.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sonet.h
--- linux-3.13.11/include/linux/sonet.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sonet.h	2014-07-09 12:00:15.000000000 +0200
@@ -7,7 +7,7 @@
 #include <uapi/linux/sonet.h>
 
 struct k_sonet_stats {
-#define __HANDLE_ITEM(i) atomic_t i
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
 	__SONET_ITEMS
 #undef __HANDLE_ITEM
 };
diff -ruNp linux-3.13.11/include/linux/stat.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/stat.h
--- linux-3.13.11/include/linux/stat.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/stat.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@ struct kstat {
 	unsigned int	nlink;
 	kuid_t		uid;
 	kgid_t		gid;
+	ktag_t		tag;
 	dev_t		rdev;
 	loff_t		size;
 	struct timespec  atime;
diff -ruNp linux-3.13.11/include/linux/sunrpc/addr.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/addr.h
--- linux-3.13.11/include/linux/sunrpc/addr.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/addr.h	2014-07-09 12:00:15.000000000 +0200
@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_por
 {
 	switch (sap->sa_family) {
 	case AF_INET:
-		return ntohs(((struct sockaddr_in *)sap)->sin_port);
+		return ntohs(((const struct sockaddr_in *)sap)->sin_port);
 	case AF_INET6:
-		return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+		return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
 	}
 	return 0;
 }
@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const
 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
 				    const struct sockaddr *src)
 {
-	const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+	const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
 	struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
 
 	dsin->sin_family = ssin->sin_family;
@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const
 	if (sa->sa_family != AF_INET6)
 		return 0;
 
-	return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+	return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
 }
 
 #endif /* _LINUX_SUNRPC_ADDR_H */
diff -ruNp linux-3.13.11/include/linux/sunrpc/auth.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/auth.h
--- linux-3.13.11/include/linux/sunrpc/auth.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/auth.h	2014-07-09 12:00:15.000000000 +0200
@@ -36,6 +36,7 @@ enum {
 struct auth_cred {
 	kuid_t	uid;
 	kgid_t	gid;
+	ktag_t	tag;
 	struct group_info *group_info;
 	const char *principal;
 	unsigned long ac_flags;
diff -ruNp linux-3.13.11/include/linux/sunrpc/clnt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/clnt.h
--- linux-3.13.11/include/linux/sunrpc/clnt.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/clnt.h	2014-07-09 12:00:15.000000000 +0200
@@ -51,7 +51,8 @@ struct rpc_clnt {
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_noretranstimeo: 1,/* No retransmit timeouts */
 				cl_autobind : 1,/* use getport() */
-				cl_chatty   : 1;/* be verbose */
+				cl_chatty   : 1,/* be verbose */
+				cl_tag      : 1;/* context tagging */
 
 	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
 	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
@@ -97,7 +98,7 @@ struct rpc_procinfo {
 	unsigned int		p_timer;	/* Which RTT timer to use */
 	u32			p_statidx;	/* Which procedure to account */
 	const char *		p_name;		/* name of procedure */
-};
+} __do_const;
 
 #ifdef __KERNEL__
 
diff -ruNp linux-3.13.11/include/linux/sunrpc/svc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svc.h
--- linux-3.13.11/include/linux/sunrpc/svc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svc.h	2014-07-09 12:00:15.000000000 +0200
@@ -410,7 +410,7 @@ struct svc_procedure {
 	unsigned int		pc_count;	/* call count */
 	unsigned int		pc_cachetype;	/* cache info (NFS) */
 	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
-};
+} __do_const;
 
 /*
  * Function prototypes.
diff -ruNp linux-3.13.11/include/linux/sunrpc/svc_rdma.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svc_rdma.h
--- linux-3.13.11/include/linux/sunrpc/svc_rdma.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svc_rdma.h	2014-07-09 12:00:15.000000000 +0200
@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
 extern unsigned int svcrdma_max_requests;
 extern unsigned int svcrdma_max_req_size;
 
-extern atomic_t rdma_stat_recv;
-extern atomic_t rdma_stat_read;
-extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
-extern atomic_t rdma_stat_rq_starve;
-extern atomic_t rdma_stat_rq_poll;
-extern atomic_t rdma_stat_rq_prod;
-extern atomic_t rdma_stat_sq_poll;
-extern atomic_t rdma_stat_sq_prod;
+extern atomic_unchecked_t rdma_stat_recv;
+extern atomic_unchecked_t rdma_stat_read;
+extern atomic_unchecked_t rdma_stat_write;
+extern atomic_unchecked_t rdma_stat_sq_starve;
+extern atomic_unchecked_t rdma_stat_rq_starve;
+extern atomic_unchecked_t rdma_stat_rq_poll;
+extern atomic_unchecked_t rdma_stat_rq_prod;
+extern atomic_unchecked_t rdma_stat_sq_poll;
+extern atomic_unchecked_t rdma_stat_sq_prod;
 
 #define RPCRDMA_VERSION 1
 
diff -ruNp linux-3.13.11/include/linux/sunrpc/svcauth.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svcauth.h
--- linux-3.13.11/include/linux/sunrpc/svcauth.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sunrpc/svcauth.h	2014-07-09 12:00:15.000000000 +0200
@@ -120,7 +120,7 @@ struct auth_ops {
 	int	(*release)(struct svc_rqst *rq);
 	void	(*domain_release)(struct auth_domain *);
 	int	(*set_client)(struct svc_rqst *rq);
-};
+} __do_const;
 
 #define	SVC_GARBAGE	1
 #define	SVC_SYSERR	2
diff -ruNp linux-3.13.11/include/linux/swiotlb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/swiotlb.h
--- linux-3.13.11/include/linux/swiotlb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/swiotlb.h	2014-07-09 12:00:15.000000000 +0200
@@ -60,7 +60,8 @@ extern void
 
 extern void
 swiotlb_free_coherent(struct device *hwdev, size_t size,
-		      void *vaddr, dma_addr_t dma_handle);
+		      void *vaddr, dma_addr_t dma_handle,
+		      struct dma_attrs *attrs);
 
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 				   unsigned long offset, size_t size,
diff -ruNp linux-3.13.11/include/linux/syscalls.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/syscalls.h
--- linux-3.13.11/include/linux/syscalls.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/syscalls.h	2014-07-09 12:00:15.000000000 +0200
@@ -97,8 +97,14 @@ struct sigaltstack;
 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
 
 #define __SC_DECL(t, a)	t a
+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+#define __SC_LONG(t, a)	__typeof(				\
+	__builtin_choose_expr(					\
+		sizeof(t) > sizeof(int),			\
+		(t) 0,						\
+		__builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L)	\
+	)) a
 #define __SC_CAST(t, a)	(t) a
 #define __SC_ARGS(t, a)	a
 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
 asmlinkage long sys_fsync(unsigned int fd);
 asmlinkage long sys_fdatasync(unsigned int fd);
 asmlinkage long sys_bdflush(int func, long data);
-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
-				char __user *type, unsigned long flags,
+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
+				const char __user *type, unsigned long flags,
 				void __user *data);
-asmlinkage long sys_umount(char __user *name, int flags);
-asmlinkage long sys_oldumount(char __user *name);
+asmlinkage long sys_umount(const char __user *name, int flags);
+asmlinkage long sys_oldumount(const char __user *name);
 asmlinkage long sys_truncate(const char __user *path, long length);
 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
 asmlinkage long sys_stat(const char __user *filename,
@@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, str
 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
-				struct sockaddr __user *, int);
+				struct sockaddr __user *, int) __intentional_overflow(0);
 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
 			     unsigned int vlen, unsigned flags);
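
An illustrative sketch, not part of the patch: the point of the __TYPE_IS_U()/__SC_LONG()
rework above is to widen small unsigned syscall arguments through unsigned long instead
of long.  The userspace snippet below shows the difference on a 64-bit host.

#include <stdio.h>

int main(void)
{
	unsigned int arg = 0xffffffffu;	/* e.g. a 32-bit unsigned syscall argument */

	printf("widened as long:          %ld\n", (long)(int)arg);	/* -1 (sign-extended)  */
	printf("widened as unsigned long: %lu\n", (unsigned long)arg);	/* 4294967295          */
	return 0;
}
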
diff -ruNp linux-3.13.11/include/linux/syscore_ops.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/syscore_ops.h
--- linux-3.13.11/include/linux/syscore_ops.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/syscore_ops.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,7 +16,7 @@ struct syscore_ops {
 	int (*suspend)(void);
 	void (*resume)(void);
 	void (*shutdown)(void);
-};
+} __do_const;
 
 extern void register_syscore_ops(struct syscore_ops *ops);
 extern void unregister_syscore_ops(struct syscore_ops *ops);
diff -ruNp linux-3.13.11/include/linux/sysctl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysctl.h
--- linux-3.13.11/include/linux/sysctl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysctl.h	2014-07-09 12:00:15.000000000 +0200
@@ -34,13 +34,13 @@ struct ctl_table_root;
 struct ctl_table_header;
 struct ctl_dir;
 
-typedef struct ctl_table ctl_table;
-
 typedef int proc_handler (struct ctl_table *ctl, int write,
 			  void __user *buffer, size_t *lenp, loff_t *ppos);
 
 extern int proc_dostring(struct ctl_table *, int,
 			 void __user *, size_t *, loff_t *);
+extern int proc_dostring_modpriv(struct ctl_table *, int,
+			 void __user *, size_t *, loff_t *);
 extern int proc_dointvec(struct ctl_table *, int,
 			 void __user *, size_t *, loff_t *);
 extern int proc_dointvec_minmax(struct ctl_table *, int,
@@ -115,7 +115,9 @@ struct ctl_table
 	struct ctl_table_poll *poll;
 	void *extra1;
 	void *extra2;
-};
+} __do_const __randomize_layout;
+typedef struct ctl_table __no_const ctl_table_no_const;
+typedef struct ctl_table ctl_table;
 
 struct ctl_node {
 	struct rb_node node;
diff -ruNp linux-3.13.11/include/linux/sysfs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysfs.h
--- linux-3.13.11/include/linux/sysfs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysfs.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,8 @@
 #include <linux/stat.h>
 #include <linux/atomic.h>
 
+#define SYSFS_SUPER_MAGIC	0x62656572
+
 struct kobject;
 struct module;
 struct bin_attribute;
@@ -33,7 +35,8 @@ struct attribute {
 	struct lock_class_key	*key;
 	struct lock_class_key	skey;
 #endif
-};
+} __do_const;
+typedef struct attribute __no_const attribute_no_const;
 
 /**
  *	sysfs_attr_init - initialize a dynamically allocated sysfs attribute
@@ -62,7 +65,8 @@ struct attribute_group {
 					      struct attribute *, int);
 	struct attribute	**attrs;
 	struct bin_attribute	**bin_attrs;
-};
+} __do_const;
+typedef struct attribute_group __no_const attribute_group_no_const;
 
 /**
  * Use these macros to make defining attributes easier. See include/linux/device.h
@@ -126,7 +130,8 @@ struct bin_attribute {
 			 char *, loff_t, size_t);
 	int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
 		    struct vm_area_struct *vma);
-};
+} __do_const;
+typedef struct bin_attribute __no_const bin_attribute_no_const;
 
 /**
  *	sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
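
An illustrative sketch, not part of the patch: with the constify plugin these sysfs
structures become read-only, so the *_no_const typedefs added above are meant for the
few objects that genuinely need run-time modification.  The attribute below is hypothetical.

static attribute_no_const example_attr = {
	.name = "example",
	.mode = 0444,		/* may be adjusted at runtime before registration */
};
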
diff -ruNp linux-3.13.11/include/linux/sysrq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysrq.h
--- linux-3.13.11/include/linux/sysrq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/sysrq.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
 
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
@@ -33,7 +34,7 @@ struct sysrq_key_op {
 	char *help_msg;
 	char *action_msg;
 	int enable_mask;
-};
+} __do_const;
 
 #ifdef CONFIG_MAGIC_SYSRQ
 
diff -ruNp linux-3.13.11/include/linux/thread_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/thread_info.h
--- linux-3.13.11/include/linux/thread_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/thread_info.h	2014-07-09 12:00:15.000000000 +0200
@@ -161,6 +161,15 @@ static inline bool test_and_clear_restor
 #error "no set_restore_sigmask() provided and default one won't work"
 #endif
 
+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+#ifndef CONFIG_PAX_USERCOPY_DEBUG
+	if (!__builtin_constant_p(n))
+#endif
+		__check_object_size(ptr, n, to_user);
+}
+
 #endif	/* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
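
An illustrative sketch, not part of the patch: the intended call pattern for the
check_object_size() helper added above is an arch copy routine validating the
kernel-side object before the copy takes place; the wrapper below is hypothetical.

static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* false: the kernel object is the destination */
	return __copy_from_user(to, from, n);
}
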
diff -ruNp linux-3.13.11/include/linux/tty.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty.h
--- linux-3.13.11/include/linux/tty.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty.h	2014-07-09 12:00:15.000000000 +0200
@@ -196,7 +196,7 @@ struct tty_port {
 	const struct tty_port_operations *ops;	/* Port operations */
 	spinlock_t		lock;		/* Lock protecting tty field */
 	int			blocked_open;	/* Waiting to open */
-	int			count;		/* Usage count */
+	atomic_t		count;		/* Usage count */
 	wait_queue_head_t	open_wait;	/* Open waiters */
 	wait_queue_head_t	close_wait;	/* Close waiters */
 	wait_queue_head_t	delta_msr_wait;	/* Modem status change */
@@ -278,7 +278,7 @@ struct tty_struct {
 	/* If the tty has a pending do_SAK, queue it here - akpm */
 	struct work_struct SAK_work;
 	struct tty_port *port;
-};
+} __randomize_layout;
 
 /* Each of a tty's open files has private_data pointing to tty_file_private */
 struct tty_file_private {
@@ -545,7 +545,7 @@ extern int tty_port_open(struct tty_port
 				struct tty_struct *tty, struct file *filp);
 static inline int tty_port_users(struct tty_port *port)
 {
-	return port->count + port->blocked_open;
+	return atomic_read(&port->count) + port->blocked_open;
 }
 
 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
diff -ruNp linux-3.13.11/include/linux/tty_driver.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty_driver.h
--- linux-3.13.11/include/linux/tty_driver.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty_driver.h	2014-07-09 12:00:15.000000000 +0200
@@ -285,7 +285,7 @@ struct tty_operations {
 	void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
 #endif
 	const struct file_operations *proc_fops;
-};
+} __do_const __randomize_layout;
 
 struct tty_driver {
 	int	magic;		/* magic number for this structure */
@@ -319,7 +319,7 @@ struct tty_driver {
 
 	const struct tty_operations *ops;
 	struct list_head tty_drivers;
-};
+} __randomize_layout;
 
 extern struct list_head tty_drivers;
 
diff -ruNp linux-3.13.11/include/linux/tty_ldisc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty_ldisc.h
--- linux-3.13.11/include/linux/tty_ldisc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/tty_ldisc.h	2014-07-09 12:00:15.000000000 +0200
@@ -211,7 +211,7 @@ struct tty_ldisc_ops {
 
 	struct  module *owner;
 
-	int refcount;
+	atomic_t refcount;
 };
 
 struct tty_ldisc {
diff -ruNp linux-3.13.11/include/linux/types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/types.h
--- linux-3.13.11/include/linux/types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/types.h	2014-07-09 12:00:15.000000000 +0200
@@ -32,6 +32,9 @@ typedef __kernel_uid32_t	uid_t;
 typedef __kernel_gid32_t	gid_t;
 typedef __kernel_uid16_t        uid16_t;
 typedef __kernel_gid16_t        gid16_t;
+typedef unsigned int		vxid_t;
+typedef unsigned int		vnid_t;
+typedef unsigned int		vtag_t;
 
 typedef unsigned long		uintptr_t;
 
@@ -176,10 +179,26 @@ typedef struct {
 	int counter;
 } atomic_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+	int counter;
+} atomic_unchecked_t;
+#else
+typedef atomic_t atomic_unchecked_t;
+#endif
+
 #ifdef CONFIG_64BIT
 typedef struct {
 	long counter;
 } atomic64_t;
+
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+	long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
 #endif
 
 struct list_head {
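
An illustrative sketch, not part of the patch: the intent of the atomic_unchecked_t
type added above is to mark counters that are allowed to wrap, so PAX_REFCOUNT
overflow detection only guards real reference counts.  The struct below is hypothetical.

struct example_counters {
	atomic_t		refcnt;		/* overflow would be a bug: stays checked    */
	atomic_unchecked_t	rx_packets;	/* free-running statistic: allowed to wrap   */
};
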
diff -ruNp linux-3.13.11/include/linux/uaccess.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/uaccess.h
--- linux-3.13.11/include/linux/uaccess.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/uaccess.h	2014-07-09 12:00:15.000000000 +0200
@@ -72,11 +72,11 @@ static inline unsigned long __copy_from_
 		long ret;				\
 		mm_segment_t old_fs = get_fs();		\
 							\
-		set_fs(KERNEL_DS);			\
 		pagefault_disable();			\
-		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));		\
-		pagefault_enable();			\
+		set_fs(KERNEL_DS);			\
+		ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval));		\
 		set_fs(old_fs);				\
+		pagefault_enable();			\
 		ret;					\
 	})
 
diff -ruNp linux-3.13.11/include/linux/uidgid.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/uidgid.h
--- linux-3.13.11/include/linux/uidgid.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/uidgid.h	2014-07-09 12:00:15.000000000 +0200
@@ -23,13 +23,17 @@ typedef struct {
 	uid_t val;
 } kuid_t;
 
-
 typedef struct {
 	gid_t val;
 } kgid_t;
 
+typedef struct {
+	vtag_t val;
+} ktag_t;
+
 #define KUIDT_INIT(value) (kuid_t){ value }
 #define KGIDT_INIT(value) (kgid_t){ value }
+#define KTAGT_INIT(value) (ktag_t){ value }
 
 static inline uid_t __kuid_val(kuid_t uid)
 {
@@ -41,10 +45,16 @@ static inline gid_t __kgid_val(kgid_t gi
 	return gid.val;
 }
 
+static inline vtag_t __ktag_val(ktag_t tag)
+{
+	return tag.val;
+}
+
 #else
 
 typedef uid_t kuid_t;
 typedef gid_t kgid_t;
+typedef vtag_t ktag_t;
 
 static inline uid_t __kuid_val(kuid_t uid)
 {
@@ -56,16 +66,24 @@ static inline gid_t __kgid_val(kgid_t gi
 	return gid;
 }
 
+static inline vtag_t __ktag_val(ktag_t tag)
+{
+	return tag;
+}
+
 #define KUIDT_INIT(value) ((kuid_t) value )
 #define KGIDT_INIT(value) ((kgid_t) value )
+#define KTAGT_INIT(value) ((ktag_t) value )
 
 #endif
 
 #define GLOBAL_ROOT_UID KUIDT_INIT(0)
 #define GLOBAL_ROOT_GID KGIDT_INIT(0)
+#define GLOBAL_ROOT_TAG KTAGT_INIT(0)
 
 #define INVALID_UID KUIDT_INIT(-1)
 #define INVALID_GID KGIDT_INIT(-1)
+#define INVALID_TAG KTAGT_INIT(-1)
 
 static inline bool uid_eq(kuid_t left, kuid_t right)
 {
@@ -77,6 +95,11 @@ static inline bool gid_eq(kgid_t left, k
 	return __kgid_val(left) == __kgid_val(right);
 }
 
+static inline bool tag_eq(ktag_t left, ktag_t right)
+{
+	return __ktag_val(left) == __ktag_val(right);
+}
+
 static inline bool uid_gt(kuid_t left, kuid_t right)
 {
 	return __kuid_val(left) > __kuid_val(right);
@@ -127,13 +150,21 @@ static inline bool gid_valid(kgid_t gid)
 	return !gid_eq(gid, INVALID_GID);
 }
 
+static inline bool tag_valid(ktag_t tag)
+{
+	return !tag_eq(tag, INVALID_TAG);
+}
+
 #ifdef CONFIG_USER_NS
 
 extern kuid_t make_kuid(struct user_namespace *from, uid_t uid);
 extern kgid_t make_kgid(struct user_namespace *from, gid_t gid);
+extern ktag_t make_ktag(struct user_namespace *from, vtag_t tag);
 
 extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
 extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
+extern vtag_t from_ktag(struct user_namespace *to, ktag_t tag);
+
 extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
 extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);
 
@@ -159,6 +190,11 @@ static inline kgid_t make_kgid(struct us
 	return KGIDT_INIT(gid);
 }
 
+static inline ktag_t make_ktag(struct user_namespace *from, vtag_t tag)
+{
+	return KTAGT_INIT(tag);
+}
+
 static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
 {
 	return __kuid_val(kuid);
@@ -169,6 +205,11 @@ static inline gid_t from_kgid(struct use
 	return __kgid_val(kgid);
 }
 
+static inline vtag_t from_ktag(struct user_namespace *to, ktag_t ktag)
+{
+	return __ktag_val(ktag);
+}
+
 static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
 {
 	uid_t uid = from_kuid(to, kuid);
@@ -197,4 +238,9 @@ static inline bool kgid_has_mapping(stru
 
 #endif /* CONFIG_USER_NS */
 
+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
+
 #endif /* _LINUX_UIDGID_H */
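
An illustrative sketch, not part of the patch: how the ktag_t helpers added above
compose, mirroring the existing kuid_t/kgid_t pattern; the function below is hypothetical.

static inline bool example_tag_is_set(struct user_namespace *ns, vtag_t raw)
{
	ktag_t ktag = make_ktag(ns, raw);

	return tag_valid(ktag) && !tag_eq(ktag, GLOBAL_ROOT_TAG);
}
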
diff -ruNp linux-3.13.11/include/linux/unaligned/access_ok.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/unaligned/access_ok.h
--- linux-3.13.11/include/linux/unaligned/access_ok.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/unaligned/access_ok.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,34 +4,34 @@
 #include <linux/kernel.h>
 #include <asm/byteorder.h>
 
-static inline u16 get_unaligned_le16(const void *p)
+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
 {
-	return le16_to_cpup((__le16 *)p);
+	return le16_to_cpup((const __le16 *)p);
 }
 
-static inline u32 get_unaligned_le32(const void *p)
+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
 {
-	return le32_to_cpup((__le32 *)p);
+	return le32_to_cpup((const __le32 *)p);
 }
 
-static inline u64 get_unaligned_le64(const void *p)
+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
 {
-	return le64_to_cpup((__le64 *)p);
+	return le64_to_cpup((const __le64 *)p);
 }
 
-static inline u16 get_unaligned_be16(const void *p)
+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
 {
-	return be16_to_cpup((__be16 *)p);
+	return be16_to_cpup((const __be16 *)p);
 }
 
-static inline u32 get_unaligned_be32(const void *p)
+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
 {
-	return be32_to_cpup((__be32 *)p);
+	return be32_to_cpup((const __be32 *)p);
 }
 
-static inline u64 get_unaligned_be64(const void *p)
+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
 {
-	return be64_to_cpup((__be64 *)p);
+	return be64_to_cpup((const __be64 *)p);
 }
 
 static inline void put_unaligned_le16(u16 val, void *p)
diff -ruNp linux-3.13.11/include/linux/usb/renesas_usbhs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/usb/renesas_usbhs.h
--- linux-3.13.11/include/linux/usb/renesas_usbhs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/usb/renesas_usbhs.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,7 +39,7 @@ enum {
  */
 struct renesas_usbhs_driver_callback {
 	int (*notify_hotplug)(struct platform_device *pdev);
-};
+} __no_const;
 
 /*
  * callback functions for platform
diff -ruNp linux-3.13.11/include/linux/usb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/usb.h
--- linux-3.13.11/include/linux/usb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/usb.h	2014-07-09 12:00:15.000000000 +0200
@@ -563,7 +563,7 @@ struct usb_device {
 	int maxchild;
 
 	u32 quirks;
-	atomic_t urbnum;
+	atomic_unchecked_t urbnum;
 
 	unsigned long active_duration;
 
@@ -1641,7 +1641,7 @@ void usb_buffer_unmap_sg(const struct us
 
 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
 	__u8 request, __u8 requesttype, __u16 value, __u16 index,
-	void *data, __u16 size, int timeout);
+	void *data, __u16 size, int timeout) __intentional_overflow(-1);
 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
 	void *data, int len, int *actual_length, int timeout);
 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
diff -ruNp linux-3.13.11/include/linux/user_namespace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/user_namespace.h
--- linux-3.13.11/include/linux/user_namespace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/user_namespace.h	2014-07-09 12:00:15.000000000 +0200
@@ -33,7 +33,7 @@ struct user_namespace {
 	struct key		*persistent_keyring_register;
 	struct rw_semaphore	persistent_keyring_register_sem;
 #endif
-};
+} __randomize_layout;
 
 extern struct user_namespace init_user_ns;
 
diff -ruNp linux-3.13.11/include/linux/utsname.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/utsname.h
--- linux-3.13.11/include/linux/utsname.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/utsname.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@ struct uts_namespace {
 	struct new_utsname name;
 	struct user_namespace *user_ns;
 	unsigned int proc_inum;
-};
+} __randomize_layout;
 extern struct uts_namespace init_uts_ns;
 
 #ifdef CONFIG_UTS_NS
diff -ruNp linux-3.13.11/include/linux/vermagic.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vermagic.h
--- linux-3.13.11/include/linux/vermagic.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vermagic.h	2014-07-09 12:00:15.000000000 +0200
@@ -25,9 +25,42 @@
 #define MODULE_ARCH_VERMAGIC ""
 #endif
 
+#ifdef CONFIG_PAX_REFCOUNT
+#define MODULE_PAX_REFCOUNT "REFCOUNT "
+#else
+#define MODULE_PAX_REFCOUNT ""
+#endif
+
+#ifdef CONSTIFY_PLUGIN
+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
+#else
+#define MODULE_CONSTIFY_PLUGIN ""
+#endif
+
+#ifdef STACKLEAK_PLUGIN
+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
+#else
+#define MODULE_STACKLEAK_PLUGIN ""
+#endif
+
+#ifdef RANDSTRUCT_PLUGIN
+#include <generated/randomize_layout_hash.h>
+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
+#else
+#define MODULE_RANDSTRUCT_PLUGIN
+#endif
+
+#ifdef CONFIG_GRKERNSEC
+#define MODULE_GRSEC "GRSEC "
+#else
+#define MODULE_GRSEC ""
+#endif
+
 #define VERMAGIC_STRING 						\
 	UTS_RELEASE " "							\
 	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT 			\
 	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS	\
-	MODULE_ARCH_VERMAGIC
+	MODULE_ARCH_VERMAGIC						\
+	MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
+	MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
 
diff -ruNp linux-3.13.11/include/linux/vga_switcheroo.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vga_switcheroo.h
--- linux-3.13.11/include/linux/vga_switcheroo.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vga_switcheroo.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(stru
 
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
 #else
 
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_cli
 
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
 
 #endif
 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff -ruNp linux-3.13.11/include/linux/vmalloc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vmalloc.h
--- linux-3.13.11/include/linux/vmalloc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vmalloc.h	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,11 @@ struct vm_area_struct;		/* vma defining
 #define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
 #define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
 #define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+#define VM_KERNEXEC		0x00000040	/* allocate from executable kernel memory range */
+#endif
+
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struc
 
 /* for /dev/kmem */
 extern long vread(char *buf, char *addr, unsigned long count);
-extern long vwrite(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
 
 /*
  *	Internals.  Dont't use..
diff -ruNp linux-3.13.11/include/linux/vmstat.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vmstat.h
--- linux-3.13.11/include/linux/vmstat.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vmstat.h	2014-07-09 12:00:15.000000000 +0200
@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(in
 /*
  * Zone based page accounting with per cpu differentials.
  */
-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 static inline void zone_page_state_add(long x, struct zone *zone,
 				 enum zone_stat_item item)
 {
-	atomic_long_add(x, &zone->vm_stat[item]);
-	atomic_long_add(x, &vm_stat[item]);
+	atomic_long_add_unchecked(x, &zone->vm_stat[item]);
+	atomic_long_add_unchecked(x, &vm_stat[item]);
 }
 
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
 {
-	long x = atomic_long_read(&vm_stat[item]);
+	long x = atomic_long_read_unchecked(&vm_stat[item]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -109,10 +109,10 @@ static inline unsigned long global_page_
 	return x;
 }
 
-static inline unsigned long zone_page_state(struct zone *zone,
+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
 					enum zone_stat_item item)
 {
-	long x = atomic_long_read(&zone->vm_stat[item]);
+	long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -129,7 +129,7 @@ static inline unsigned long zone_page_st
 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 					enum zone_stat_item item)
 {
-	long x = atomic_long_read(&zone->vm_stat[item]);
+	long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
 
 #ifdef CONFIG_SMP
 	int cpu;
@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state
 
 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	atomic_long_inc(&zone->vm_stat[item]);
-	atomic_long_inc(&vm_stat[item]);
+	atomic_long_inc_unchecked(&zone->vm_stat[item]);
+	atomic_long_inc_unchecked(&vm_stat[item]);
 }
 
 static inline void __inc_zone_page_state(struct page *page,
@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state
 
 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	atomic_long_dec(&zone->vm_stat[item]);
-	atomic_long_dec(&vm_stat[item]);
+	atomic_long_dec_unchecked(&zone->vm_stat[item]);
+	atomic_long_dec_unchecked(&vm_stat[item]);
 }
 
 static inline void __dec_zone_page_state(struct page *page,
diff -ruNp linux-3.13.11/include/linux/vroot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vroot.h
--- linux-3.13.11/include/linux/vroot.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vroot.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,51 @@
+
+/*
+ * include/linux/vroot.h
+ *
+ * written by Herbert Pötzl, 9/11/2002
+ * ported to 2.6 by Herbert Pötzl, 30/12/2004
+ *
+ * Copyright (C) 2002-2007 by Herbert Pötzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ */
+
+#ifndef _LINUX_VROOT_H
+#define _LINUX_VROOT_H
+
+
+#ifdef __KERNEL__
+
+/* Possible states of device */
+enum {
+	Vr_unbound,
+	Vr_bound,
+};
+
+struct vroot_device {
+	int		vr_number;
+	int		vr_refcnt;
+
+	struct semaphore	vr_ctl_mutex;
+	struct block_device    *vr_device;
+	int			vr_state;
+};
+
+
+typedef struct block_device *(vroot_grb_func)(struct block_device *);
+
+extern int register_vroot_grb(vroot_grb_func *);
+extern int unregister_vroot_grb(vroot_grb_func *);
+
+#endif /* __KERNEL__ */
+
+#define MAX_VROOT_DEFAULT	8
+
+/*
+ * IOCTL commands --- we will commandeer 0x56 ('V')
+ */
+
+#define VROOT_SET_DEV		0x5600
+#define VROOT_CLR_DEV		0x5601
+
+#endif /* _LINUX_VROOT_H */
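
For orientation: the ioctls defined above are driven from userspace by whatever
tool binds a vroot node to the real root block device before a guest starts.  The
sketch below is illustrative only; the node name /dev/vroot0, the use of /dev/sda1
as backing device, and the loop-driver-style convention of passing the backing
device's file descriptor as the VROOT_SET_DEV argument are assumptions on my part,
not guarantees made by this header.

/* minimal userspace sketch, under the assumptions stated above */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VROOT_SET_DEV	0x5600		/* same values as in vroot.h */
#define VROOT_CLR_DEV	0x5601

int main(void)
{
	int vr = open("/dev/vroot0", O_RDONLY);	/* vroot node (assumed name) */
	int bd = open("/dev/sda1", O_RDONLY);	/* backing device (example) */

	if (vr < 0 || bd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(vr, VROOT_SET_DEV, bd) < 0) {	/* bind backing device (assumed fd argument) */
		perror("VROOT_SET_DEV");
		return 1;
	}
	/* ... hand /dev/vroot0 to the guest as its root device ... */
	ioctl(vr, VROOT_CLR_DEV, 0);		/* unbind again */
	close(bd);
	close(vr);
	return 0;
}
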
diff -ruNp linux-3.13.11/include/linux/vs_base.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_base.h
--- linux-3.13.11/include/linux/vs_base.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_base.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,10 @@
+#ifndef _VS_BASE_H
+#define _VS_BASE_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_context.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_context.h
--- linux-3.13.11/include/linux/vs_context.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_context.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,242 @@
+#ifndef _VS_CONTEXT_H
+#define _VS_CONTEXT_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/history.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (!vxi)
+		return NULL;
+
+	vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_get_vx_info(vxi, _here);
+
+	atomic_inc(&vxi->vx_usecnt);
+	return vxi;
+}
+
+
+extern void free_vx_info(struct vx_info *);
+
+#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline void __put_vx_info(struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (!vxi)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_put_vx_info(vxi, _here);
+
+	if (atomic_dec_and_test(&vxi->vx_usecnt))
+		free_vx_info(vxi);
+}
+
+
+#define init_vx_info(p, i) \
+	__init_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	if (vxi) {
+		vxlprintk(VXD_CBIT(xid, 3),
+			"init_vx_info(%p[#%d.%d])",
+			vxi, vxi ? vxi->vx_id : 0,
+			vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+			_file, _line);
+		__vxh_init_vx_info(vxi, vxp, _here);
+
+		atomic_inc(&vxi->vx_usecnt);
+	}
+	*vxp = vxi;
+}
+
+
+#define set_vx_info(p, i) \
+	__set_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxo;
+
+	if (!vxi)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_set_vx_info(vxi, vxp, _here);
+
+	atomic_inc(&vxi->vx_usecnt);
+	vxo = xchg(vxp, vxi);
+	BUG_ON(vxo);
+}
+
+
+#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline void __clr_vx_info(struct vx_info **vxp,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxo;
+
+	vxo = xchg(vxp, NULL);
+	if (!vxo)
+		return;
+
+	vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])",
+		vxo, vxo ? vxo->vx_id : 0,
+		vxo ? atomic_read(&vxo->vx_usecnt) : 0,
+		_file, _line);
+	__vxh_clr_vx_info(vxo, vxp, _here);
+
+	if (atomic_dec_and_test(&vxo->vx_usecnt))
+		free_vx_info(vxo);
+}
+
+
+#define claim_vx_info(v, p) \
+	__claim_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __claim_vx_info(struct vx_info *vxi,
+	struct task_struct *task,
+	const char *_file, int _line, void *_here)
+{
+	vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		vxi ? atomic_read(&vxi->vx_tasks) : 0,
+		task, _file, _line);
+	__vxh_claim_vx_info(vxi, task, _here);
+
+	atomic_inc(&vxi->vx_tasks);
+}
+
+
+extern void unhash_vx_info(struct vx_info *);
+
+#define release_vx_info(v, p) \
+	__release_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __release_vx_info(struct vx_info *vxi,
+	struct task_struct *task,
+	const char *_file, int _line, void *_here)
+{
+	vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p",
+		vxi, vxi ? vxi->vx_id : 0,
+		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+		vxi ? atomic_read(&vxi->vx_tasks) : 0,
+		task, _file, _line);
+	__vxh_release_vx_info(vxi, task, _here);
+
+	might_sleep();
+
+	if (atomic_dec_and_test(&vxi->vx_tasks))
+		unhash_vx_info(vxi);
+}
+
+
+#define task_get_vx_info(p) \
+	__task_get_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__task_get_vx_info(struct task_struct *p,
+	const char *_file, int _line, void *_here)
+{
+	struct vx_info *vxi;
+
+	task_lock(p);
+	vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
+		p, _file, _line);
+	vxi = __get_vx_info(p->vx_info, _file, _line, _here);
+	task_unlock(p);
+	return vxi;
+}
+
+
+static inline void __wakeup_vx_info(struct vx_info *vxi)
+{
+	if (waitqueue_active(&vxi->vx_wait))
+		wake_up_interruptible(&vxi->vx_wait);
+}
+
+
+#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__)
+
+static inline void __enter_vx_info(struct vx_info *vxi,
+	struct vx_info_save *vxis, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]",
+		vxi, vxi ? vxi->vx_id : 0, vxis, current,
+		current->xid, current->vx_info, _file, _line);
+	vxis->vxi = xchg(&current->vx_info, vxi);
+	vxis->xid = current->xid;
+	current->xid = vxi ? vxi->vx_id : 0;
+}
+
+#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__)
+
+static inline void __leave_vx_info(struct vx_info_save *vxis,
+	const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]",
+		vxis, vxis->xid, vxis->vxi, current,
+		current->xid, current->vx_info, _file, _line);
+	(void)xchg(&current->vx_info, vxis->vxi);
+	current->xid = vxis->xid;
+}
+
+
+static inline void __enter_vx_admin(struct vx_info_save *vxis)
+{
+	vxis->vxi = xchg(&current->vx_info, NULL);
+	vxis->xid = xchg(&current->xid, (vxid_t)0);
+}
+
+static inline void __leave_vx_admin(struct vx_info_save *vxis)
+{
+	(void)xchg(&current->xid, vxis->xid);
+	(void)xchg(&current->vx_info, vxis->vxi);
+}
+
+#define task_is_init(p) \
+	__task_is_init(p, __FILE__, __LINE__, __HERE__)
+
+static inline int __task_is_init(struct task_struct *p,
+	const char *_file, int _line, void *_here)
+{
+	int is_init = is_global_init(p);
+
+	task_lock(p);
+	if (p->vx_info)
+		is_init = p->vx_info->vx_initpid == p->pid;
+	task_unlock(p);
+	return is_init;
+}
+
+extern void exit_vx_info(struct task_struct *, int);
+extern void exit_vx_info_early(struct task_struct *, int);
+
+
+#else
+#warning duplicate inclusion
+#endif
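
For orientation: the helpers added above follow a strict get/put discipline on
vx_usecnt; every reference taken through one of the get/claim macros must be
dropped again, or the context never reaches free_vx_info().  A minimal in-kernel
usage sketch (illustrative only: it assumes the patched tree, and
example_report_xid is a made-up function name, not part of the patch):

static vxid_t example_report_xid(struct task_struct *p)
{
	struct vx_info *vxi = task_get_vx_info(p);	/* takes a vx_usecnt reference */
	vxid_t xid = 0;

	if (vxi) {
		xid = vxi->vx_id;
		put_vx_info(vxi);			/* drops it; frees on last put */
	}
	return xid;
}
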
diff -ruNp linux-3.13.11/include/linux/vs_cowbl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_cowbl.h
--- linux-3.13.11/include/linux/vs_cowbl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_cowbl.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,48 @@
+#ifndef _VS_COWBL_H
+#define _VS_COWBL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+
+extern struct dentry *cow_break_link(const char *pathname);
+
+static inline int cow_check_and_break(struct path *path)
+{
+	struct inode *inode = path->dentry->d_inode;
+	int error = 0;
+
+	/* do we need this check? */
+	if (IS_RDONLY(inode))
+		return -EROFS;
+
+	if (IS_COW(inode)) {
+		if (IS_COW_LINK(inode)) {
+			struct dentry *new_dentry, *old_dentry = path->dentry;
+			char *pp, *buf;
+
+			buf = kmalloc(PATH_MAX, GFP_KERNEL);
+			if (!buf) {
+				return -ENOMEM;
+			}
+			pp = d_path(path, buf, PATH_MAX);
+			new_dentry = cow_break_link(pp);
+			kfree(buf);
+			if (!IS_ERR(new_dentry)) {
+				path->dentry = new_dentry;
+				dput(old_dentry);
+			} else
+				error = PTR_ERR(new_dentry);
+		} else {
+			inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
+			inode->i_ctime = CURRENT_TIME;
+			mark_inode_dirty(inode);
+		}
+	}
+	return error;
+}
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_cvirt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_cvirt.h
--- linux-3.13.11/include/linux/vs_cvirt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_cvirt.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,50 @@
+#ifndef _VS_CVIRT_H
+#define _VS_CVIRT_H
+
+#include "vserver/cvirt.h"
+#include "vserver/context.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+
+static inline void vx_activate_task(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info)) {
+		vx_update_load(vxi);
+		atomic_inc(&vxi->cvirt.nr_running);
+	}
+}
+
+static inline void vx_deactivate_task(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info)) {
+		vx_update_load(vxi);
+		atomic_dec(&vxi->cvirt.nr_running);
+	}
+}
+
+static inline void vx_uninterruptible_inc(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info))
+		atomic_inc(&vxi->cvirt.nr_uninterruptible);
+}
+
+static inline void vx_uninterruptible_dec(struct task_struct *p)
+{
+	struct vx_info *vxi;
+
+	if ((vxi = p->vx_info))
+		atomic_dec(&vxi->cvirt.nr_uninterruptible);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_device.h
--- linux-3.13.11/include/linux/vs_device.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_device.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,45 @@
+#ifndef _VS_DEVICE_H
+#define _VS_DEVICE_H
+
+#include "vserver/base.h"
+#include "vserver/device.h"
+#include "vserver/debug.h"
+
+
+#ifdef CONFIG_VSERVER_DEVICE
+
+int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t);
+
+#define vs_device_perm(v, d, m, p) \
+	((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p))
+
+#else
+
+static inline
+int vs_map_device(struct vx_info *vxi,
+	dev_t device, dev_t *target, umode_t mode)
+{
+	if (target)
+		*target = device;
+	return ~0;
+}
+
+#define vs_device_perm(v, d, m, p) ((p) == (p))
+
+#endif
+
+
+#define vs_map_chrdev(d, t, p) \
+	((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p))
+#define vs_map_blkdev(d, t, p) \
+	((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p))
+
+#define vs_chrdev_perm(d, p) \
+	vs_device_perm(current_vx_info(), d, S_IFCHR, p)
+#define vs_blkdev_perm(d, p) \
+	vs_device_perm(current_vx_info(), d, S_IFBLK, p)
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_dlimit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_dlimit.h
--- linux-3.13.11/include/linux/vs_dlimit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_dlimit.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,215 @@
+#ifndef _VS_DLIMIT_H
+#define _VS_DLIMIT_H
+
+#include <linux/fs.h>
+
+#include "vserver/dlimit.h"
+#include "vserver/base.h"
+#include "vserver/debug.h"
+
+
+#define get_dl_info(i)	__get_dl_info(i, __FILE__, __LINE__)
+
+static inline struct dl_info *__get_dl_info(struct dl_info *dli,
+	const char *_file, int _line)
+{
+	if (!dli)
+		return NULL;
+	vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
+		dli, dli ? dli->dl_tag : 0,
+		dli ? atomic_read(&dli->dl_usecnt) : 0,
+		_file, _line);
+	atomic_inc(&dli->dl_usecnt);
+	return dli;
+}
+
+
+#define free_dl_info(i) \
+	call_rcu(&(i)->dl_rcu, rcu_free_dl_info)
+
+#define put_dl_info(i)	__put_dl_info(i, __FILE__, __LINE__)
+
+static inline void __put_dl_info(struct dl_info *dli,
+	const char *_file, int _line)
+{
+	if (!dli)
+		return;
+	vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
+		dli, dli ? dli->dl_tag : 0,
+		dli ? atomic_read(&dli->dl_usecnt) : 0,
+		_file, _line);
+	if (atomic_dec_and_test(&dli->dl_usecnt))
+		free_dl_info(dli);
+}
+
+
+#define __dlimit_char(d)	((d) ? '*' : ' ')
+
+static inline int __dl_alloc_space(struct super_block *sb,
+	vtag_t tag, dlsize_t nr, const char *file, int line)
+{
+	struct dl_info *dli = NULL;
+	int ret = 0;
+
+	if (nr == 0)
+		goto out;
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	ret = (dli->dl_space_used + nr > dli->dl_space_total);
+	if (!ret)
+		dli->dl_space_used += nr;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 1),
+		"ALLOC (%p,#%d)%c %lld bytes (%d)",
+		sb, tag, __dlimit_char(dli), (long long)nr,
+		ret, file, line);
+	return ret ? -ENOSPC : 0;
+}
+
+static inline void __dl_free_space(struct super_block *sb,
+	vtag_t tag, dlsize_t nr, const char *_file, int _line)
+{
+	struct dl_info *dli = NULL;
+
+	if (nr == 0)
+		goto out;
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_space_used > nr)
+		dli->dl_space_used -= nr;
+	else
+		dli->dl_space_used = 0;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 1),
+		"FREE  (%p,#%d)%c %lld bytes",
+		sb, tag, __dlimit_char(dli), (long long)nr,
+		_file, _line);
+}
+
+static inline int __dl_alloc_inode(struct super_block *sb,
+	vtag_t tag, const char *_file, int _line)
+{
+	struct dl_info *dli;
+	int ret = 0;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	dli->dl_inodes_used++;
+	ret = (dli->dl_inodes_used > dli->dl_inodes_total);
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 0),
+		"ALLOC (%p,#%d)%c inode (%d)",
+		sb, tag, __dlimit_char(dli), ret, _file, _line);
+	return ret ? -ENOSPC : 0;
+}
+
+static inline void __dl_free_inode(struct super_block *sb,
+	vtag_t tag, const char *_file, int _line)
+{
+	struct dl_info *dli;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		goto out;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_inodes_used > 1)
+		dli->dl_inodes_used--;
+	else
+		dli->dl_inodes_used = 0;
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+out:
+	vxlprintk(VXD_CBIT(dlim, 0),
+		"FREE  (%p,#%d)%c inode",
+		sb, tag, __dlimit_char(dli), _file, _line);
+}
+
+static inline void __dl_adjust_block(struct super_block *sb, vtag_t tag,
+	unsigned long long *free_blocks, unsigned long long *root_blocks,
+	const char *_file, int _line)
+{
+	struct dl_info *dli;
+	uint64_t broot, bfree;
+
+	dli = locate_dl_info(sb, tag);
+	if (!dli)
+		return;
+
+	spin_lock(&dli->dl_lock);
+	broot = (dli->dl_space_total -
+		(dli->dl_space_total >> 10) * dli->dl_nrlmult)
+		>> sb->s_blocksize_bits;
+	bfree = (dli->dl_space_total - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+	spin_unlock(&dli->dl_lock);
+
+	vxlprintk(VXD_CBIT(dlim, 2),
+		"ADJUST: %lld,%lld on %lld,%lld [mult=%d]",
+		(long long)bfree, (long long)broot,
+		*free_blocks, *root_blocks, dli->dl_nrlmult,
+		_file, _line);
+	if (free_blocks) {
+		if (*free_blocks > bfree)
+			*free_blocks = bfree;
+	}
+	if (root_blocks) {
+		if (*root_blocks > broot)
+			*root_blocks = broot;
+	}
+	put_dl_info(dli);
+}
+
+#define dl_prealloc_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_alloc_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_reserve_space(in, bytes) \
+	__dl_alloc_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_claim_space(in, bytes) (0)
+
+#define dl_release_space(in, bytes) \
+	__dl_free_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+#define dl_free_space(in, bytes) \
+	__dl_free_space((in)->i_sb, i_tag_read(in), (dlsize_t)(bytes), \
+		__FILE__, __LINE__ )
+
+
+
+#define dl_alloc_inode(in) \
+	__dl_alloc_inode((in)->i_sb, i_tag_read(in), __FILE__, __LINE__ )
+
+#define dl_free_inode(in) \
+	__dl_free_inode((in)->i_sb, i_tag_read(in), __FILE__, __LINE__ )
+
+
+#define dl_adjust_block(sb, tag, fb, rb) \
+	__dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ )
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_inet.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_inet.h
--- linux-3.13.11/include/linux/vs_inet.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_inet.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,364 @@
+#ifndef _VS_INET_H
+#define _VS_INET_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#define IPI_LOOPBACK	htonl(INADDR_LOOPBACK)
+
+#define NXAV4(a)	NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \
+			NIPQUAD((a)->mask), (a)->type
+#define NXAV4_FMT	"[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]"
+
+#define NIPQUAD(addr) \
+	((unsigned char *)&addr)[0], \
+	((unsigned char *)&addr)[1], \
+	((unsigned char *)&addr)[2], \
+	((unsigned char *)&addr)[3]
+
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+
+
+static inline
+int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask)
+{
+	__be32 ip = nxa->ip[0].s_addr;
+	__be32 mask = nxa->mask.s_addr;
+	__be32 bcast = ip | ~mask;
+	int ret = 0;
+
+	switch (nxa->type & tmask) {
+	case NXA_TYPE_MASK:
+		ret = (ip == (addr & mask));
+		break;
+	case NXA_TYPE_ADDR:
+		ret = 3;
+		if (addr == ip)
+			break;
+		/* fall through to broadcast */
+	case NXA_MOD_BCAST:
+		ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast));
+		break;
+	case NXA_TYPE_RANGE:
+		ret = ((nxa->ip[0].s_addr <= addr) &&
+			(nxa->ip[1].s_addr > addr));
+		break;
+	case NXA_TYPE_ANY:
+		ret = 2;
+		break;
+	}
+
+	vxdprintk(VXD_CBIT(net, 0),
+		"v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d",
+		nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret);
+	return ret;
+}
+
+static inline
+int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask)
+{
+	struct nx_addr_v4 *nxa;
+	unsigned long irqflags;
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+
+	ret = 2;
+	/* allow 127.0.0.1 when remapping lback */
+	if ((tmask & NXA_LOOPBACK) &&
+		(addr == IPI_LOOPBACK) &&
+		nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+		goto out;
+	ret = 3;
+	/* check for lback address */
+	if ((tmask & NXA_MOD_LBACK) &&
+		(nxi->v4_lback.s_addr == addr))
+		goto out;
+	ret = 4;
+	/* check for broadcast address */
+	if ((tmask & NXA_MOD_BCAST) &&
+		(nxi->v4_bcast.s_addr == addr))
+		goto out;
+	ret = 5;
+
+	/* check for v4 addresses */
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	for (nxa = &nxi->v4; nxa; nxa = nxa->next)
+		if (v4_addr_match(nxa, addr, tmask))
+			goto out_unlock;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+out:
+	vxdprintk(VXD_CBIT(net, 0),
+		"v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d",
+		nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret);
+	return ret;
+}
+
+static inline
+int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask)
+{
+	/* FIXME: needs full range checks */
+	return v4_addr_match(nxa, addr->ip[0].s_addr, mask);
+}
+
+static inline
+int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask)
+{
+	struct nx_addr_v4 *ptr;
+	unsigned long irqflags;
+	int ret = 1;
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	for (ptr = &nxi->v4; ptr; ptr = ptr->next)
+		if (v4_nx_addr_match(ptr, nxa, mask))
+			goto out_unlock;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	return ret;
+}
+
+#include <net/inet_sock.h>
+
+/*
+ *	Check if a given address matches for a socket
+ *
+ *	nxi:		the socket's nx_info if any
+ *	addr:		to be verified address
+ */
+static inline
+int v4_sock_addr_match (
+	struct nx_info *nxi,
+	struct inet_sock *inet,
+	__be32 addr)
+{
+	__be32 saddr = inet->inet_rcv_saddr;
+	__be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST;
+
+	if (addr && (saddr == addr || bcast == addr))
+		return 1;
+	if (!saddr)
+		return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND);
+	return 0;
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+#ifdef CONFIG_INET
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_sock.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v4_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v4_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+/*
+ *	check if address is covered by socket
+ *
+ *	sk:	the socket to check against
+ *	addr:	the address in question (must be != 0)
+ */
+
+static inline
+int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa)
+{
+	struct nx_info *nxi = sk->sk_nx_info;
+	__be32 saddr = sk->sk_rcv_saddr;
+
+	vxdprintk(VXD_CBIT(net, 5),
+		"__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
+		sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	if (saddr) {		/* direct address match */
+		return v4_addr_match(nxa, saddr, -1);
+	} else if (nxi) {	/* match against nx_info */
+		return v4_nx_addr_in_nx_info(nxi, nxa, -1);
+	} else {		/* unrestricted any socket */
+		return 1;
+	}
+}
+
+
+
+static inline
+int nx_dev_visible(struct nx_info *nxi, struct net_device *dev)
+{
+	vxdprintk(VXD_CBIT(net, 1),
+		"nx_dev_visible(%p[#%u],%p " VS_Q("%s") ") %d",
+		nxi, nxi ? nxi->nx_id : 0, dev, dev->name,
+		nxi ? dev_in_nx_info(dev, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (dev_in_nx_info(dev, nxi))
+		return 1;
+	return 0;
+}
+
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (!ifa)
+		return 0;
+	return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW);
+}
+
+static inline
+int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d",
+		nxi, nxi ? nxi->nx_id : 0, ifa,
+		nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (v4_ifa_in_nx_info(ifa, nxi))
+		return 1;
+	return 0;
+}
+
+
+struct nx_v4_sock_addr {
+	__be32 saddr;	/* Address used for validation */
+	__be32 baddr;	/* Address used for socket bind */
+};
+
+static inline
+int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr,
+	struct nx_v4_sock_addr *nsa)
+{
+	struct sock *sk = &inet->sk;
+	struct nx_info *nxi = sk->sk_nx_info;
+	__be32 saddr = addr->sin_addr.s_addr;
+	__be32 baddr = saddr;
+
+	vxdprintk(VXD_CBIT(net, 3),
+		"inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT,
+		sk, sk->sk_nx_info, sk->sk_socket,
+		(sk->sk_socket ? sk->sk_socket->flags : 0),
+		NIPQUAD(saddr));
+
+	if (nxi) {
+		if (saddr == INADDR_ANY) {
+			if (nx_info_flags(nxi, NXF_SINGLE_IP, 0))
+				baddr = nxi->v4.ip[0].s_addr;
+		} else if (saddr == IPI_LOOPBACK) {
+			if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+				baddr = nxi->v4_lback.s_addr;
+		} else if (!ipv4_is_multicast(saddr) ||
+			!nx_info_ncaps(nxi, NXC_MULTICAST)) {
+			/* normal address bind */
+			if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND))
+				return -EADDRNOTAVAIL;
+		}
+	}
+
+	vxdprintk(VXD_CBIT(net, 3),
+		"inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT,
+		sk, NIPQUAD(saddr), NIPQUAD(baddr));
+
+	nsa->saddr = saddr;
+	nsa->baddr = baddr;
+	return 0;
+}
+
+static inline
+void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa)
+{
+	inet->inet_saddr = nsa->baddr;
+	inet->inet_rcv_saddr = nsa->baddr;
+}
+
+
+/*
+ *      helper to simplify inet_lookup_listener
+ *
+ *      nxi:	the socket's nx_info if any
+ *      addr:	to be verified address
+ *      saddr:	socket address
+ */
+static inline int v4_inet_addr_match (
+	struct nx_info *nxi,
+	__be32 addr,
+	__be32 saddr)
+{
+	if (addr && (saddr == addr))
+		return 1;
+	if (!saddr)
+		return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1;
+	return 0;
+}
+
+static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr)
+{
+	if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) &&
+		(addr == nxi->v4_lback.s_addr))
+		return IPI_LOOPBACK;
+	return addr;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (NX_IPV4(nxi))
+		return 1;
+	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+		return 1;
+	return 0;
+}
+
+#else /* CONFIG_INET */
+
+static inline
+int nx_dev_visible(struct nx_info *n, struct net_device *d)
+{
+	return 1;
+}
+
+static inline
+int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+	return 1;
+}
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+	return 1;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+	return 0;
+}
+
+#endif /* CONFIG_INET */
+
+#define current_nx_info_has_v4() \
+	nx_info_has_v4(current_nx_info())
+
+#else
+// #warning duplicate inclusion
+#endif
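
For orientation: the NXA_TYPE_MASK branch of v4_addr_match() above reduces to
"ip == (addr & mask)", so a guest entry of 192.168.1.0/255.255.255.0 covers
192.168.1.42 but not 192.168.2.42.  A standalone demonstration of just that
arithmetic, outside the kernel types (mask_match is a made-up helper name):

/* standalone illustration of the NXA_TYPE_MASK comparison */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int mask_match(const char *net, const char *mask, const char *addr)
{
	uint32_t ip = inet_addr(net);	/* all values stay in network byte order */
	uint32_t m  = inet_addr(mask);
	uint32_t a  = inet_addr(addr);

	return ip == (a & m);
}

int main(void)
{
	printf("%d\n", mask_match("192.168.1.0", "255.255.255.0", "192.168.1.42"));	/* prints 1 */
	printf("%d\n", mask_match("192.168.1.0", "255.255.255.0", "192.168.2.42"));	/* prints 0 */
	return 0;
}
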
diff -ruNp linux-3.13.11/include/linux/vs_inet6.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_inet6.h
--- linux-3.13.11/include/linux/vs_inet6.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_inet6.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,257 @@
+#ifndef _VS_INET6_H
+#define _VS_INET6_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#include <net/ipv6.h>
+
+#define NXAV6(a)	&(a)->ip, &(a)->mask, (a)->prefix, (a)->type
+#define NXAV6_FMT	"[%pI6/%pI6/%d:%04x]"
+
+
+#ifdef	CONFIG_IPV6
+
+static inline
+int v6_addr_match(struct nx_addr_v6 *nxa,
+	const struct in6_addr *addr, uint16_t mask)
+{
+	int ret = 0;
+
+	switch (nxa->type & mask) {
+	case NXA_TYPE_MASK:
+		ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr);
+		break;
+	case NXA_TYPE_ADDR:
+		ret = ipv6_addr_equal(&nxa->ip, addr);
+		break;
+	case NXA_TYPE_ANY:
+		ret = 1;
+		break;
+	}
+	vxdprintk(VXD_CBIT(net, 0),
+		"v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d",
+		nxa, NXAV6(nxa), addr, mask, ret);
+	return ret;
+}
+
+static inline
+int v6_addr_in_nx_info(struct nx_info *nxi,
+	const struct in6_addr *addr, uint16_t mask)
+{
+	struct nx_addr_v6 *nxa;
+	unsigned long irqflags;
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	for (nxa = &nxi->v6; nxa; nxa = nxa->next)
+		if (v6_addr_match(nxa, addr, mask))
+			goto out_unlock;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+out:
+	vxdprintk(VXD_CBIT(net, 0),
+		"v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d",
+		nxi, nxi ? nxi->nx_id : 0, addr, mask, ret);
+	return ret;
+}
+
+static inline
+int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask)
+{
+	/* FIXME: needs full range checks */
+	return v6_addr_match(nxa, &addr->ip, mask);
+}
+
+static inline
+int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask)
+{
+	struct nx_addr_v6 *ptr;
+	unsigned long irqflags;
+	int ret = 1;
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	for (ptr = &nxi->v6; ptr; ptr = ptr->next)
+		if (v6_nx_addr_match(ptr, nxa, mask))
+			goto out_unlock;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	return ret;
+}
+
+
+/*
+ *	Check if a given address matches for a socket
+ *
+ *	nxi:		the socket's nx_info if any
+ *	addr:		to be verified address
+ */
+static inline
+int v6_sock_addr_match (
+	struct nx_info *nxi,
+	struct inet_sock *inet,
+	struct in6_addr *addr)
+{
+	struct sock *sk = &inet->sk;
+	const struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	if (!ipv6_addr_any(addr) &&
+		ipv6_addr_equal(saddr, addr))
+		return 1;
+	if (ipv6_addr_any(saddr))
+		return v6_addr_in_nx_info(nxi, addr, -1);
+	return 0;
+}
+
+/*
+ *	check if address is covered by socket
+ *
+ *	sk:	the socket to check against
+ *	addr:	the address in question (must be != 0)
+ */
+
+static inline
+int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa)
+{
+	struct nx_info *nxi = sk->sk_nx_info;
+	const struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	vxdprintk(VXD_CBIT(net, 5),
+		"__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx",
+		sk, NXAV6(nxa), nxi, saddr, sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	if (!ipv6_addr_any(saddr)) {	/* direct address match */
+		return v6_addr_match(nxa, saddr, -1);
+	} else if (nxi) {		/* match against nx_info */
+		return v6_nx_addr_in_nx_info(nxi, nxa, -1);
+	} else {			/* unrestricted any socket */
+		return 1;
+	}
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v6_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v6_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+
+static inline
+int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (!ifa)
+		return 0;
+	return v6_addr_in_nx_info(nxi, &ifa->addr, -1);
+}
+
+static inline
+int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d",
+		nxi, nxi ? nxi->nx_id : 0, ifa,
+		nxi ? v6_ifa_in_nx_info(ifa, nxi) : 0);
+
+	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+		return 1;
+	if (v6_ifa_in_nx_info(ifa, nxi))
+		return 1;
+	return 0;
+}
+
+
+struct nx_v6_sock_addr {
+	struct in6_addr saddr;	/* Address used for validation */
+	struct in6_addr baddr;	/* Address used for socket bind */
+};
+
+static inline
+int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr,
+	struct nx_v6_sock_addr *nsa)
+{
+	// struct sock *sk = &inet->sk;
+	// struct nx_info *nxi = sk->sk_nx_info;
+	struct in6_addr saddr = addr->sin6_addr;
+	struct in6_addr baddr = saddr;
+
+	nsa->saddr = saddr;
+	nsa->baddr = baddr;
+	return 0;
+}
+
+static inline
+void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa)
+{
+	// struct sock *sk = &inet->sk;
+	// struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+	// *saddr = nsa->baddr;
+	// inet->inet_saddr = nsa->baddr;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+	if (!nxi)
+		return 1;
+	if (NX_IPV6(nxi))
+		return 1;
+	return 0;
+}
+
+#else /* CONFIG_IPV6 */
+
+static inline
+int nx_v6_dev_visible(struct nx_info *n, struct net_device *d)
+{
+	return 1;
+}
+
+
+static inline
+int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+	return 1;
+}
+
+static inline
+int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+	return 1;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+	return 0;
+}
+
+#endif /* CONFIG_IPV6 */
+
+#define current_nx_info_has_v6() \
+	nx_info_has_v6(current_nx_info())
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_limit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_limit.h
--- linux-3.13.11/include/linux/vs_limit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_limit.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,140 @@
+#ifndef _VS_LIMIT_H
+#define _VS_LIMIT_H
+
+#include "vserver/limit.h"
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/context.h"
+#include "vserver/limit_int.h"
+
+
+#define vx_acc_cres(v, d, p, r) \
+	__vx_acc_cres(v, r, d, p, __FILE__, __LINE__)
+
+#define vx_acc_cres_cond(x, d, p, r) \
+	__vx_acc_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
+	r, d, p, __FILE__, __LINE__)
+
+
+#define vx_add_cres(v, a, p, r) \
+	__vx_add_cres(v, r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres(v, a, p, r)		vx_add_cres(v, -(a), p, r)
+
+#define vx_add_cres_cond(x, a, p, r) \
+	__vx_add_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
+	r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres_cond(x, a, p, r)	vx_add_cres_cond(x, -(a), p, r)
+
+
+/* process and file limits */
+
+#define vx_nproc_inc(p) \
+	vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC)
+
+#define vx_nproc_dec(p) \
+	vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC)
+
+#define vx_files_inc(f) \
+	vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE)
+
+#define vx_files_dec(f) \
+	vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE)
+
+#define vx_locks_inc(l) \
+	vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS)
+
+#define vx_locks_dec(l) \
+	vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS)
+
+#define vx_openfd_inc(f) \
+	vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD)
+
+#define vx_openfd_dec(f) \
+	vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD)
+
+
+#define vx_cres_avail(v, n, r) \
+	__vx_cres_avail(v, r, n, __FILE__, __LINE__)
+
+
+#define vx_nproc_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC)
+
+#define vx_files_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE)
+
+#define vx_locks_avail(n) \
+	vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS)
+
+#define vx_openfd_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD)
+
+
+/* dentry limits */
+
+#define vx_dentry_inc(d) do {						\
+	if (d_count(d) == 1)						\
+		vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY);	\
+	} while (0)
+
+#define vx_dentry_dec(d) do {						\
+	if (d_count(d) == 0)						\
+		vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY);	\
+	} while (0)
+
+#define vx_dentry_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY)
+
+
+/* socket limits */
+
+#define vx_sock_inc(s) \
+	vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK)
+
+#define vx_sock_dec(s) \
+	vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK)
+
+#define vx_sock_avail(n) \
+	vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK)
+
+
+/* ipc resource limits */
+
+#define vx_ipcmsg_add(v, u, a) \
+	vx_add_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_sub(v, u, a) \
+	vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_avail(v, a) \
+	vx_cres_avail(v, a, RLIMIT_MSGQUEUE)
+
+
+#define vx_ipcshm_add(v, k, a) \
+	vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_sub(v, k, a) \
+	vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_avail(v, a) \
+	vx_cres_avail(v, a, VLIMIT_SHMEM)
+
+
+#define vx_semary_inc(a) \
+	vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY)
+
+#define vx_semary_dec(a) \
+	vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY)
+
+
+#define vx_nsems_add(a,n) \
+	vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
+
+#define vx_nsems_sub(a,n) \
+	vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_network.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_network.h
--- linux-3.13.11/include/linux/vs_network.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_network.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,169 @@
+#ifndef _NX_VS_NETWORK_H
+#define _NX_VS_NETWORK_H
+
+#include "vserver/context.h"
+#include "vserver/network.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
+
+static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	if (!nxi)
+		return NULL;
+
+	vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	return nxi;
+}
+
+
+extern void free_nx_info(struct nx_info *);
+
+#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
+
+static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
+{
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxi->nx_usecnt))
+		free_nx_info(nxi);
+}
+
+
+#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+		const char *_file, int _line)
+{
+	if (nxi) {
+		vxlprintk(VXD_CBIT(nid, 3),
+			"init_nx_info(%p[#%d.%d])",
+			nxi, nxi ? nxi->nx_id : 0,
+			nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+			_file, _line);
+
+		atomic_inc(&nxi->nx_usecnt);
+	}
+	*nxp = nxi;
+}
+
+
+#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	if (!nxi)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		_file, _line);
+
+	atomic_inc(&nxi->nx_usecnt);
+	nxo = xchg(nxp, nxi);
+	BUG_ON(nxo);
+}
+
+#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
+
+static inline void __clr_nx_info(struct nx_info **nxp,
+	const char *_file, int _line)
+{
+	struct nx_info *nxo;
+
+	nxo = xchg(nxp, NULL);
+	if (!nxo)
+		return;
+
+	vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
+		nxo, nxo ? nxo->nx_id : 0,
+		nxo ? atomic_read(&nxo->nx_usecnt) : 0,
+		_file, _line);
+
+	if (atomic_dec_and_test(&nxo->nx_usecnt))
+		free_nx_info(nxo);
+}
+
+
+#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __claim_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi?atomic_read(&nxi->nx_usecnt):0,
+		nxi?atomic_read(&nxi->nx_tasks):0,
+		task, _file, _line);
+
+	atomic_inc(&nxi->nx_tasks);
+}
+
+
+extern void unhash_nx_info(struct nx_info *);
+
+#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __release_nx_info(struct nx_info *nxi,
+	struct task_struct *task, const char *_file, int _line)
+{
+	vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
+		nxi, nxi ? nxi->nx_id : 0,
+		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+		nxi ? atomic_read(&nxi->nx_tasks) : 0,
+		task, _file, _line);
+
+	might_sleep();
+
+	if (atomic_dec_and_test(&nxi->nx_tasks))
+		unhash_nx_info(nxi);
+}
+
+
+#define task_get_nx_info(i)	__task_get_nx_info(i, __FILE__, __LINE__)
+
+static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
+	const char *_file, int _line)
+{
+	struct nx_info *nxi;
+
+	task_lock(p);
+	vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
+		p, _file, _line);
+	nxi = __get_nx_info(p->nx_info, _file, _line);
+	task_unlock(p);
+	return nxi;
+}
+
+
+static inline void exit_nx_info(struct task_struct *p)
+{
+	if (p->nx_info)
+		release_nx_info(p->nx_info, p);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_pid.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_pid.h
--- linux-3.13.11/include/linux/vs_pid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_pid.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,50 @@
+#ifndef _VS_PID_H
+#define _VS_PID_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/pid.h"
+#include <linux/pid_namespace.h>
+
+
+#define VXF_FAKE_INIT	(VXF_INFO_INIT | VXF_STATE_INIT)
+
+static inline
+int vx_proc_task_visible(struct task_struct *task)
+{
+	if ((task->pid == 1) &&
+		!vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
+		/* show a blend through init */
+		goto visible;
+	if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
+		goto visible;
+	return 0;
+visible:
+	return 1;
+}
+
+#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns)
+
+
+static inline
+struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
+{
+	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
+
+	if (task && !vx_proc_task_visible(task)) {
+		vxdprintk(VXD_CBIT(misc, 6),
+			"dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
+			task, task->xid, task->pid,
+			current, current->xid, current->pid);
+		put_task_struct(task);
+		task = NULL;
+	}
+	return task;
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_sched.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_sched.h
--- linux-3.13.11/include/linux/vs_sched.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_sched.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,40 @@
+#ifndef _VS_SCHED_H
+#define _VS_SCHED_H
+
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/sched.h"
+
+
+#define MAX_PRIO_BIAS		 20
+#define MIN_PRIO_BIAS		-20
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi)
+		prio += vx_cpu(vxi, sched_pc).prio_bias;
+	return prio;
+}
+
+static inline void vx_account_user(struct vx_info *vxi,
+	cputime_t cputime, int nice)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).user_ticks += cputime;
+}
+
+static inline void vx_account_system(struct vx_info *vxi,
+	cputime_t cputime, int idle)
+{
+	if (!vxi)
+		return;
+	vx_cpu(vxi, sched_pc).sys_ticks += cputime;
+}
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_socket.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_socket.h
--- linux-3.13.11/include/linux/vs_socket.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_socket.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,67 @@
+#ifndef _VS_SOCKET_H
+#define _VS_SOCKET_H
+
+#include "vserver/debug.h"
+#include "vserver/base.h"
+#include "vserver/cacct.h"
+#include "vserver/context.h"
+#include "vserver/tag.h"
+
+
+/* socket accounting */
+
+#include <linux/socket.h>
+
+static inline int vx_sock_type(int family)
+{
+	switch (family) {
+	case PF_UNSPEC:
+		return VXA_SOCK_UNSPEC;
+	case PF_UNIX:
+		return VXA_SOCK_UNIX;
+	case PF_INET:
+		return VXA_SOCK_INET;
+	case PF_INET6:
+		return VXA_SOCK_INET6;
+	case PF_PACKET:
+		return VXA_SOCK_PACKET;
+	default:
+		return VXA_SOCK_OTHER;
+	}
+}
+
+#define vx_acc_sock(v, f, p, s) \
+	__vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
+
+static inline void __vx_acc_sock(struct vx_info *vxi,
+	int family, int pos, int size, char *file, int line)
+{
+	if (vxi) {
+		int type = vx_sock_type(family);
+
+		atomic_long_inc(&vxi->cacct.sock[type][pos].count);
+		atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
+	}
+}
+
+#define vx_sock_recv(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
+#define vx_sock_send(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
+#define vx_sock_fail(sk, s) \
+	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
+
+
+#define sock_vx_init(s) do {		\
+	(s)->sk_xid = 0;		\
+	(s)->sk_vx_info = NULL;		\
+	} while (0)
+
+#define sock_nx_init(s) do {		\
+	(s)->sk_nid = 0;		\
+	(s)->sk_nx_info = NULL;		\
+	} while (0)
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_tag.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_tag.h
--- linux-3.13.11/include/linux/vs_tag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_tag.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,47 @@
+#ifndef _VS_TAG_H
+#define _VS_TAG_H
+
+#include <linux/vserver/tag.h>
+
+/* check conditions */
+
+#define DX_ADMIN	0x0001
+#define DX_WATCH	0x0002
+#define DX_HOSTID	0x0008
+
+#define DX_IDENT	0x0010
+
+#define DX_ARG_MASK	0x0010
+
+
+#define dx_task_tag(t)	((t)->tag)
+
+#define dx_current_tag() dx_task_tag(current)
+
+#define dx_check(c, m)	__dx_check(dx_current_tag(), c, m)
+
+#define dx_weak_check(c, m)	((m) ? dx_check(c, m) : 1)
+
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __dx_check(vtag_t cid, vtag_t id, unsigned int mode)
+{
+	if (mode & DX_ARG_MASK) {
+		if ((mode & DX_IDENT) && (id == cid))
+			return 1;
+	}
+	return (((mode & DX_ADMIN) && (cid == 0)) ||
+		((mode & DX_WATCH) && (cid == 1)) ||
+		((mode & DX_HOSTID) && (id == 0)));
+}
+
+struct inode;
+int dx_permission(const struct inode *inode, int mask);
+
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vs_time.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_time.h
--- linux-3.13.11/include/linux/vs_time.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vs_time.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,19 @@
+#ifndef _VS_TIME_H
+#define _VS_TIME_H
+
+
+/* time faking stuff */
+
+#ifdef CONFIG_VSERVER_VTIME
+
+extern void vx_adjust_timespec(struct timespec *ts);
+extern int vx_settimeofday(const struct timespec *ts);
+
+#else
+#define	vx_adjust_timespec(t)	do { } while (0)
+#define	vx_settimeofday(t)	do_settimeofday(t)
+#endif
+
+#else
+#warning duplicate inclusion
+#endif
diff -ruNp linux-3.13.11/include/linux/vserver/base.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/base.h
--- linux-3.13.11/include/linux/vserver/base.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/base.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,184 @@
+#ifndef _VSERVER_BASE_H
+#define _VSERVER_BASE_H
+
+
+/* context state changes */
+
+enum {
+	VSC_STARTUP = 1,
+	VSC_SHUTDOWN,
+
+	VSC_NETUP,
+	VSC_NETDOWN,
+};
+
+
+
+#define vx_task_xid(t)	((t)->xid)
+
+#define vx_current_xid() vx_task_xid(current)
+
+#define current_vx_info() (current->vx_info)
+
+
+#define nx_task_nid(t)	((t)->nid)
+
+#define nx_current_nid() nx_task_nid(current)
+
+#define current_nx_info() (current->nx_info)
+
+
+/* generic flag merging */
+
+#define vs_check_flags(v, m, f)	(((v) & (m)) ^ (f))
+
+#define vs_mask_flags(v, f, m)	(((v) & ~(m)) | ((f) & (m)))
+
+#define vs_mask_mask(v, f, m)	(((v) & ~(m)) | ((v) & (f) & (m)))
+
+#define vs_check_bit(v, n)	((v) & (1LL << (n)))
+
+
+/* context flags */
+
+#define __vx_flags(v)	((v) ? (v)->vx_flags : 0)
+
+#define vx_current_flags()	__vx_flags(current_vx_info())
+
+#define vx_info_flags(v, m, f) \
+	vs_check_flags(__vx_flags(v), m, f)
+
+#define task_vx_flags(t, m, f) \
+	((t) && vx_info_flags((t)->vx_info, m, f))
+
+#define vx_flags(m, f)	vx_info_flags(current_vx_info(), m, f)
+
+
+/* context caps */
+
+#define __vx_ccaps(v)	((v) ? (v)->vx_ccaps : 0)
+
+#define vx_current_ccaps()	__vx_ccaps(current_vx_info())
+
+#define vx_info_ccaps(v, c)	(__vx_ccaps(v) & (c))
+
+#define vx_ccaps(c)	vx_info_ccaps(current_vx_info(), (c))
+
+
+
+/* network flags */
+
+#define __nx_flags(n)	((n) ? (n)->nx_flags : 0)
+
+#define nx_current_flags()	__nx_flags(current_nx_info())
+
+#define nx_info_flags(n, m, f) \
+	vs_check_flags(__nx_flags(n), m, f)
+
+#define task_nx_flags(t, m, f) \
+	((t) && nx_info_flags((t)->nx_info, m, f))
+
+#define nx_flags(m, f)	nx_info_flags(current_nx_info(), m, f)
+
+
+/* network caps */
+
+#define __nx_ncaps(n)	((n) ? (n)->nx_ncaps : 0)
+
+#define nx_current_ncaps()	__nx_ncaps(current_nx_info())
+
+#define nx_info_ncaps(n, c)	(__nx_ncaps(n) & (c))
+
+#define nx_ncaps(c)	nx_info_ncaps(current_nx_info(), c)
+
+
+/* context mask capabilities */
+
+#define __vx_mcaps(v)	((v) ? (v)->vx_ccaps >> 32UL : ~0 )
+
+#define vx_info_mcaps(v, c)	(__vx_mcaps(v) & (c))
+
+#define vx_mcaps(c)	vx_info_mcaps(current_vx_info(), c)
+
+
+/* context bcap mask */
+
+#define __vx_bcaps(v)		((v)->vx_bcaps)
+
+#define vx_current_bcaps()	__vx_bcaps(current_vx_info())
+
+
+/* mask given bcaps */
+
+#define vx_info_mbcaps(v, c)	((v) ? cap_intersect(__vx_bcaps(v), c) : c)
+
+#define vx_mbcaps(c)		vx_info_mbcaps(current_vx_info(), c)
+
+
+/* masked cap_bset */
+
+#define vx_info_cap_bset(v)	vx_info_mbcaps(v, current->cap_bset)
+
+#define vx_current_cap_bset()	vx_info_cap_bset(current_vx_info())
+
+#if 0
+#define vx_info_mbcap(v, b) \
+	(!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \
+	vx_info_bcaps(v, b) : (b))
+
+#define task_vx_mbcap(t, b) \
+	vx_info_mbcap((t)->vx_info, (t)->b)
+
+#define vx_mbcap(b)	task_vx_mbcap(current, b)
+#endif
+
+#define vx_cap_raised(v, c, f)	cap_raised(vx_info_mbcaps(v, c), f)
+
+#define vx_capable(b, c) (capable(b) || \
+	(cap_raised(current_cap(), b) && vx_ccaps(c)))
+
+#define vx_ns_capable(n, b, c) (ns_capable(n, b) || \
+	(cap_raised(current_cap(), b) && vx_ccaps(c)))
+
+#define nx_capable(b, c) (capable(b) || \
+	(cap_raised(current_cap(), b) && nx_ncaps(c)))
+
+#define nx_ns_capable(n, b, c) (ns_capable(n, b) || \
+	(cap_raised(current_cap(), b) && nx_ncaps(c)))
+
+#define vx_task_initpid(t, n) \
+	((t)->vx_info && \
+	((t)->vx_info->vx_initpid == (n)))
+
+#define vx_current_initpid(n)	vx_task_initpid(current, n)
+
+
+/* context unshare mask */
+
+#define __vx_umask(v)		((v)->vx_umask)
+
+#define vx_current_umask()	__vx_umask(current_vx_info())
+
+#define vx_can_unshare(b, f) (capable(b) || \
+	(cap_raised(current_cap(), b) && \
+	!((f) & ~vx_current_umask())))
+
+#define vx_ns_can_unshare(n, b, f) (ns_capable(n, b) || \
+	(cap_raised(current_cap(), b) && \
+	!((f) & ~vx_current_umask())))
+
+#define __vx_wmask(v)		((v)->vx_wmask)
+
+#define vx_current_wmask()	__vx_wmask(current_vx_info())
+
+
+#define __vx_state(v)	((v) ? ((v)->vx_state) : 0)
+
+#define vx_info_state(v, m)	(__vx_state(v) & (m))
+
+
+#define __nx_state(n)	((n) ? ((n)->nx_state) : 0)
+
+#define nx_info_state(n, m)	(__nx_state(n) & (m))
+
+#endif
diff -ruNp linux-3.13.11/include/linux/vserver/cacct.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct.h
--- linux-3.13.11/include/linux/vserver/cacct.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,15 @@
+#ifndef _VSERVER_CACCT_H
+#define _VSERVER_CACCT_H
+
+
+enum sock_acc_field {
+	VXA_SOCK_UNSPEC = 0,
+	VXA_SOCK_UNIX,
+	VXA_SOCK_INET,
+	VXA_SOCK_INET6,
+	VXA_SOCK_PACKET,
+	VXA_SOCK_OTHER,
+	VXA_SOCK_SIZE	/* array size */
+};
+
+#endif	/* _VSERVER_CACCT_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cacct_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_cmd.h
--- linux-3.13.11/include/linux/vserver/cacct_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,10 @@
+#ifndef _VSERVER_CACCT_CMD_H
+#define _VSERVER_CACCT_CMD_H
+
+
+#include <linux/compiler.h>
+#include <uapi/vserver/cacct_cmd.h>
+
+extern int vc_sock_stat(struct vx_info *, void __user *);
+
+#endif	/* _VSERVER_CACCT_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cacct_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_def.h
--- linux-3.13.11/include/linux/vserver/cacct_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,43 @@
+#ifndef _VSERVER_CACCT_DEF_H
+#define _VSERVER_CACCT_DEF_H
+
+#include <asm/atomic.h>
+#include <linux/vserver/cacct.h>
+
+
+struct _vx_sock_acc {
+	atomic_long_t count;
+	atomic_long_t total;
+};
+
+/* context sub struct */
+
+struct _vx_cacct {
+	struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
+	atomic_t slab[8];
+	atomic_t page[6][8];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
+{
+	int i, j;
+
+	printk("\t_vx_cacct:");
+	for (i = 0; i < 6; i++) {
+		struct _vx_sock_acc *ptr = cacct->sock[i];
+
+		printk("\t [%d] =", i);
+		for (j = 0; j < 3; j++) {
+			printk(" [%d] = %8lu, %8lu", j,
+				atomic_long_read(&ptr[j].count),
+				atomic_long_read(&ptr[j].total));
+		}
+		printk("\n");
+	}
+}
+
+#endif
+
+#endif	/* _VSERVER_CACCT_DEF_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cacct_int.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_int.h
--- linux-3.13.11/include/linux/vserver/cacct_int.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cacct_int.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,17 @@
+#ifndef _VSERVER_CACCT_INT_H
+#define _VSERVER_CACCT_INT_H
+
+static inline
+unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
+{
+	return atomic_long_read(&cacct->sock[type][pos].count);
+}
+
+
+static inline
+unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
+{
+	return atomic_long_read(&cacct->sock[type][pos].total);
+}
+
+#endif	/* _VSERVER_CACCT_INT_H */
diff -ruNp linux-3.13.11/include/linux/vserver/check.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/check.h
--- linux-3.13.11/include/linux/vserver/check.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/check.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,89 @@
+#ifndef _VSERVER_CHECK_H
+#define _VSERVER_CHECK_H
+
+
+#define MAX_S_CONTEXT	65535	/* Arbitrary limit */
+
+#ifdef	CONFIG_VSERVER_DYNAMIC_IDS
+#define MIN_D_CONTEXT	49152	/* dynamic contexts start here */
+#else
+#define MIN_D_CONTEXT	65536
+#endif
+
+/* check conditions */
+
+#define VS_ADMIN	0x0001
+#define VS_WATCH	0x0002
+#define VS_HIDE		0x0004
+#define VS_HOSTID	0x0008
+
+#define VS_IDENT	0x0010
+#define VS_EQUIV	0x0020
+#define VS_PARENT	0x0040
+#define VS_CHILD	0x0080
+
+#define VS_ARG_MASK	0x00F0
+
+#define VS_DYNAMIC	0x0100
+#define VS_STATIC	0x0200
+
+#define VS_ATR_MASK	0x0F00
+
+#ifdef	CONFIG_VSERVER_PRIVACY
+#define VS_ADMIN_P	(0)
+#define VS_WATCH_P	(0)
+#else
+#define VS_ADMIN_P	VS_ADMIN
+#define VS_WATCH_P	VS_WATCH
+#endif
+
+#define VS_HARDIRQ	0x1000
+#define VS_SOFTIRQ	0x2000
+#define VS_IRQ		0x4000
+
+#define VS_IRQ_MASK	0xF000
+
+#include <linux/hardirq.h>
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __vs_check(int cid, int id, unsigned int mode)
+{
+	if (mode & VS_ARG_MASK) {
+		if ((mode & VS_IDENT) && (id == cid))
+			return 1;
+	}
+	if (mode & VS_ATR_MASK) {
+		if ((mode & VS_DYNAMIC) &&
+			(id >= MIN_D_CONTEXT) &&
+			(id <= MAX_S_CONTEXT))
+			return 1;
+		if ((mode & VS_STATIC) &&
+			(id > 1) && (id < MIN_D_CONTEXT))
+			return 1;
+	}
+	if (mode & VS_IRQ_MASK) {
+		if ((mode & VS_IRQ) && unlikely(in_interrupt()))
+			return 1;
+		if ((mode & VS_HARDIRQ) && unlikely(in_irq()))
+			return 1;
+		if ((mode & VS_SOFTIRQ) && unlikely(in_softirq()))
+			return 1;
+	}
+	return (((mode & VS_ADMIN) && (cid == 0)) ||
+		((mode & VS_WATCH) && (cid == 1)) ||
+		((mode & VS_HOSTID) && (id == 0)));
+}
+
+#define vx_check(c, m)	__vs_check(vx_current_xid(), c, (m) | VS_IRQ)
+
+#define vx_weak_check(c, m)	((m) ? vx_check(c, m) : 1)
+
+
+#define nx_check(c, m)	__vs_check(nx_current_nid(), c, m)
+
+#define nx_weak_check(c, m)	((m) ? nx_check(c, m) : 1)
+
+#endif
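
For reference, a small user-space sketch of how the VS_* masks above combine in __vs_check(); the constants are copied from this header, the IRQ and dynamic/static range checks are left out, and the demo names are not kernel API:

#include <stdio.h>

#define VS_ADMIN	0x0001
#define VS_WATCH	0x0002
#define VS_HOSTID	0x0008
#define VS_IDENT	0x0010
#define VS_ARG_MASK	0x00F0

/* simplified __vs_check(): identity check first, then the special contexts */
static int vs_check_demo(int cid, int id, unsigned int mode)
{
	if ((mode & VS_ARG_MASK) && (mode & VS_IDENT) && (id == cid))
		return 1;
	return ((mode & VS_ADMIN) && (cid == 0)) ||
	       ((mode & VS_WATCH) && (cid == 1)) ||
	       ((mode & VS_HOSTID) && (id == 0));
}

int main(void)
{
	/* the host context (cid 0) passes an ADMIN check against any id */
	printf("%d\n", vs_check_demo(0, 42, VS_ADMIN | VS_IDENT));	/* 1 */
	/* a guest only passes when asking about its own id */
	printf("%d\n", vs_check_demo(42, 42, VS_ADMIN | VS_IDENT));	/* 1 */
	printf("%d\n", vs_check_demo(42, 7, VS_ADMIN | VS_IDENT));	/* 0 */
	return 0;
}
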
diff -ruNp linux-3.13.11/include/linux/vserver/context.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/context.h
--- linux-3.13.11/include/linux/vserver/context.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/context.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,110 @@
+#ifndef _VSERVER_CONTEXT_H
+#define _VSERVER_CONTEXT_H
+
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <uapi/vserver/context.h>
+
+#include "limit_def.h"
+#include "sched_def.h"
+#include "cvirt_def.h"
+#include "cacct_def.h"
+#include "device_def.h"
+
+#define VX_SPACES	2
+
+struct _vx_info_pc {
+	struct _vx_sched_pc sched_pc;
+	struct _vx_cvirt_pc cvirt_pc;
+};
+
+struct _vx_space {
+	unsigned long vx_nsmask;		/* assignment mask */
+	struct nsproxy *vx_nsproxy;             /* private namespaces */
+	struct fs_struct *vx_fs;                /* private namespace fs */
+	const struct cred *vx_cred;             /* task credentials */
+};
+
+struct vx_info {
+	struct hlist_node vx_hlist;		/* linked list of contexts */
+	vxid_t vx_id;				/* context id */
+	atomic_t vx_usecnt;			/* usage count */
+	atomic_t vx_tasks;			/* tasks count */
+	struct vx_info *vx_parent;		/* parent context */
+	int vx_state;				/* context state */
+
+	struct _vx_space space[VX_SPACES];	/* namespace store */
+
+	uint64_t vx_flags;			/* context flags */
+	uint64_t vx_ccaps;			/* context caps (vserver) */
+	uint64_t vx_umask;			/* unshare mask (guest) */
+	uint64_t vx_wmask;			/* warn mask (guest) */
+	kernel_cap_t vx_bcaps;			/* bounding caps (system) */
+
+	struct task_struct *vx_reaper;		/* guest reaper process */
+	pid_t vx_initpid;			/* PID of guest init */
+	int64_t vx_badness_bias;		/* OOM points bias */
+
+	struct _vx_limit limit;			/* vserver limits */
+	struct _vx_sched sched;			/* vserver scheduler */
+	struct _vx_cvirt cvirt;			/* virtual/bias stuff */
+	struct _vx_cacct cacct;			/* context accounting */
+
+	struct _vx_device dmap;			/* default device map targets */
+
+#ifndef CONFIG_SMP
+	struct _vx_info_pc info_pc;		/* per cpu data */
+#else
+	struct _vx_info_pc *ptr_pc;		/* per cpu array */
+#endif
+
+	wait_queue_head_t vx_wait;		/* context exit waitqueue */
+	int reboot_cmd;				/* last sys_reboot() cmd */
+	int exit_code;				/* last process exit code */
+
+	char vx_name[65];			/* vserver name */
+};
+
+#ifndef CONFIG_SMP
+#define	vx_ptr_pc(vxi)		(&(vxi)->info_pc)
+#define	vx_per_cpu(vxi, v, id)	vx_ptr_pc(vxi)->v
+#else
+#define	vx_ptr_pc(vxi)		((vxi)->ptr_pc)
+#define	vx_per_cpu(vxi, v, id)	per_cpu_ptr(vx_ptr_pc(vxi), id)->v
+#endif
+
+#define	vx_cpu(vxi, v)		vx_per_cpu(vxi, v, smp_processor_id())
+
+
+struct vx_info_save {
+	struct vx_info *vxi;
+	vxid_t xid;
+};
+
+
+/* status flags */
+
+#define VXS_HASHED	0x0001
+#define VXS_PAUSED	0x0010
+#define VXS_SHUTDOWN	0x0100
+#define VXS_HELPER	0x1000
+#define VXS_RELEASED	0x8000
+
+
+extern void claim_vx_info(struct vx_info *, struct task_struct *);
+extern void release_vx_info(struct vx_info *, struct task_struct *);
+
+extern struct vx_info *lookup_vx_info(int);
+extern struct vx_info *lookup_or_create_vx_info(int);
+
+extern int get_xid_list(int, unsigned int *, int);
+extern int xid_is_hashed(vxid_t);
+
+extern int vx_migrate_task(struct task_struct *, struct vx_info *, int);
+
+extern long vs_state_change(struct vx_info *, unsigned int);
+
+
+#endif	/* _VSERVER_CONTEXT_H */
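
The UP/SMP split above keeps one _vx_info_pc per CPU, either embedded in vx_info or behind per_cpu_ptr(). A user-space sketch of the same accessor pattern, with a plain array and made-up names standing in for the per-cpu machinery:

#include <stdio.h>

#define DEMO_NR_CPUS	4			/* stand-in for the real CPU count */

struct demo_pc {
	unsigned long user_ticks;		/* like _vx_sched_pc.user_ticks */
};

struct demo_info {
	struct demo_pc pc[DEMO_NR_CPUS];	/* what per_cpu_ptr() hides on SMP */
};

/* same shape as vx_per_cpu(vxi, v, id), minus the UP/SMP #ifdef */
#define demo_per_cpu(vxi, field, cpu)	((vxi)->pc[cpu].field)

int main(void)
{
	struct demo_info vxi = { { { 0 } } };

	demo_per_cpu(&vxi, user_ticks, 2) += 10;	/* account on CPU 2 only */
	printf("cpu2=%lu cpu0=%lu\n",
	       demo_per_cpu(&vxi, user_ticks, 2),
	       demo_per_cpu(&vxi, user_ticks, 0));
	return 0;
}
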
diff -ruNp linux-3.13.11/include/linux/vserver/context_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/context_cmd.h
--- linux-3.13.11/include/linux/vserver/context_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/context_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,33 @@
+#ifndef _VSERVER_CONTEXT_CMD_H
+#define _VSERVER_CONTEXT_CMD_H
+
+#include <uapi/vserver/context_cmd.h>
+
+extern int vc_task_xid(uint32_t);
+
+extern int vc_vx_info(struct vx_info *, void __user *);
+
+extern int vc_ctx_stat(struct vx_info *, void __user *);
+
+extern int vc_ctx_create(uint32_t, void __user *);
+extern int vc_ctx_migrate(struct vx_info *, void __user *);
+
+extern int vc_get_cflags(struct vx_info *, void __user *);
+extern int vc_set_cflags(struct vx_info *, void __user *);
+
+extern int vc_get_ccaps(struct vx_info *, void __user *);
+extern int vc_set_ccaps(struct vx_info *, void __user *);
+
+extern int vc_get_bcaps(struct vx_info *, void __user *);
+extern int vc_set_bcaps(struct vx_info *, void __user *);
+
+extern int vc_get_umask(struct vx_info *, void __user *);
+extern int vc_set_umask(struct vx_info *, void __user *);
+
+extern int vc_get_wmask(struct vx_info *, void __user *);
+extern int vc_set_wmask(struct vx_info *, void __user *);
+
+extern int vc_get_badness(struct vx_info *, void __user *);
+extern int vc_set_badness(struct vx_info *, void __user *);
+
+#endif	/* _VSERVER_CONTEXT_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cvirt.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt.h
--- linux-3.13.11/include/linux/vserver/cvirt.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,18 @@
+#ifndef _VSERVER_CVIRT_H
+#define _VSERVER_CVIRT_H
+
+struct timespec;
+
+void vx_vsi_boottime(struct timespec *);
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+int vx_do_syslog(int, char __user *, int);
+
+#endif	/* _VSERVER_CVIRT_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cvirt_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt_cmd.h
--- linux-3.13.11/include/linux/vserver/cvirt_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,13 @@
+#ifndef _VSERVER_CVIRT_CMD_H
+#define _VSERVER_CVIRT_CMD_H
+
+
+#include <linux/compiler.h>
+#include <uapi/vserver/cvirt_cmd.h>
+
+extern int vc_set_vhi_name(struct vx_info *, void __user *);
+extern int vc_get_vhi_name(struct vx_info *, void __user *);
+
+extern int vc_virt_stat(struct vx_info *, void __user *);
+
+#endif	/* _VSERVER_CVIRT_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/cvirt_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt_def.h
--- linux-3.13.11/include/linux/vserver/cvirt_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/cvirt_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,80 @@
+#ifndef _VSERVER_CVIRT_DEF_H
+#define _VSERVER_CVIRT_DEF_H
+
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <asm/atomic.h>
+
+
+struct _vx_usage_stat {
+	uint64_t user;
+	uint64_t nice;
+	uint64_t system;
+	uint64_t softirq;
+	uint64_t irq;
+	uint64_t idle;
+	uint64_t iowait;
+};
+
+struct _vx_syslog {
+	wait_queue_head_t log_wait;
+	spinlock_t logbuf_lock;		/* lock for the log buffer */
+
+	unsigned long log_start;	/* next char to be read by syslog() */
+	unsigned long con_start;	/* next char to be sent to consoles */
+	unsigned long log_end;	/* most-recently-written-char + 1 */
+	unsigned long logged_chars;	/* #chars since last read+clear operation */
+
+	char log_buf[1024];
+};
+
+
+/* context sub struct */
+
+struct _vx_cvirt {
+	atomic_t nr_threads;		/* number of current threads */
+	atomic_t nr_running;		/* number of running threads */
+	atomic_t nr_uninterruptible;	/* number of uninterruptible threads */
+
+	atomic_t nr_onhold;		/* processes on hold */
+	uint32_t onhold_last;		/* jiffies when put on hold */
+
+	struct timespec bias_ts;	/* time offset to the host */
+	struct timespec bias_idle;
+	struct timespec bias_uptime;	/* context creation point */
+	uint64_t bias_clock;		/* offset in clock_t */
+
+	spinlock_t load_lock;		/* lock for the load averages */
+	atomic_t load_updates;		/* nr of load updates done so far */
+	uint32_t load_last;		/* last time load was calculated */
+	uint32_t load[3];		/* load averages 1,5,15 */
+
+	atomic_t total_forks;		/* number of forks so far */
+
+	struct _vx_syslog syslog;
+};
+
+struct _vx_cvirt_pc {
+	struct _vx_usage_stat cpustat;
+};
+
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
+{
+	printk("\t_vx_cvirt:\n");
+	printk("\t threads: %4d, %4d, %4d, %4d\n",
+		atomic_read(&cvirt->nr_threads),
+		atomic_read(&cvirt->nr_running),
+		atomic_read(&cvirt->nr_uninterruptible),
+		atomic_read(&cvirt->nr_onhold));
+	/* add rest here */
+	printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
+}
+
+#endif
+
+#endif	/* _VSERVER_CVIRT_DEF_H */
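
bias_uptime records the host uptime at context creation, so the guest uptime reported through vx_vsi_uptime() is presumably the host uptime minus that bias. A user-space sketch of that arithmetic only (illustrative values, no kernel types):

#include <stdio.h>
#include <time.h>

static double ts_sub(struct timespec a, struct timespec b)
{
	return (double)(a.tv_sec - b.tv_sec) + (a.tv_nsec - b.tv_nsec) / 1e9;
}

int main(void)
{
	struct timespec bias_uptime = { 100, 0 };		/* host uptime at guest creation */
	struct timespec host_uptime = { 160, 500000000 };	/* host uptime now */

	printf("guest uptime: %.1fs\n", ts_sub(host_uptime, bias_uptime));	/* 60.5s */
	return 0;
}
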
diff -ruNp linux-3.13.11/include/linux/vserver/debug.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/debug.h
--- linux-3.13.11/include/linux/vserver/debug.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/debug.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,146 @@
+#ifndef _VSERVER_DEBUG_H
+#define _VSERVER_DEBUG_H
+
+
+#define VXD_CBIT(n, m)	(vs_debug_ ## n & (1 << (m)))
+#define VXD_CMIN(n, m)	(vs_debug_ ## n > (m))
+#define VXD_MASK(n, m)	(vs_debug_ ## n & (m))
+
+#define VXD_DEV(d)	(d), (d)->bd_inode->i_ino,		\
+			imajor((d)->bd_inode), iminor((d)->bd_inode)
+#define VXF_DEV		"%p[%lu,%d:%d]"
+
+#if	defined(CONFIG_QUOTES_UTF8)
+#define	VS_Q_LQM	"\xc2\xbb"
+#define	VS_Q_RQM	"\xc2\xab"
+#elif	defined(CONFIG_QUOTES_ASCII)
+#define	VS_Q_LQM	"\x27"
+#define	VS_Q_RQM	"\x27"
+#else
+#define	VS_Q_LQM	"\xbb"
+#define	VS_Q_RQM	"\xab"
+#endif
+
+#define	VS_Q(f)		VS_Q_LQM f VS_Q_RQM
+
+
+#define vxd_path(p)						\
+	({ static char _buffer[PATH_MAX];			\
+	   d_path(p, _buffer, sizeof(_buffer)); })
+
+#define vxd_cond_path(n)					\
+	((n) ? vxd_path(&(n)->path) : "<null>" )
+
+
+#ifdef	CONFIG_VSERVER_DEBUG
+
+extern unsigned int vs_debug_switch;
+extern unsigned int vs_debug_xid;
+extern unsigned int vs_debug_nid;
+extern unsigned int vs_debug_tag;
+extern unsigned int vs_debug_net;
+extern unsigned int vs_debug_limit;
+extern unsigned int vs_debug_cres;
+extern unsigned int vs_debug_dlim;
+extern unsigned int vs_debug_quota;
+extern unsigned int vs_debug_cvirt;
+extern unsigned int vs_debug_space;
+extern unsigned int vs_debug_perm;
+extern unsigned int vs_debug_misc;
+
+
+#define VX_LOGLEVEL	"vxD: "
+#define VX_PROC_FMT	"%p: "
+#define VX_PROCESS	current
+
+#define vxdprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL VX_PROC_FMT f "\n",	\
+				VX_PROCESS , ##x);		\
+	} while (0)
+
+#define vxlprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL f " @%s:%d\n", x);	\
+	} while (0)
+
+#define vxfprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \
+	} while (0)
+
+
+struct vx_info;
+
+void dump_vx_info(struct vx_info *, int);
+void dump_vx_info_inactive(int);
+
+#else	/* CONFIG_VSERVER_DEBUG */
+
+#define vs_debug_switch	0
+#define vs_debug_xid	0
+#define vs_debug_nid	0
+#define vs_debug_tag	0
+#define vs_debug_net	0
+#define vs_debug_limit	0
+#define vs_debug_cres	0
+#define vs_debug_dlim	0
+#define vs_debug_quota	0
+#define vs_debug_cvirt	0
+#define vs_debug_space	0
+#define vs_debug_perm	0
+#define vs_debug_misc	0
+
+#define vxdprintk(x...) do { } while (0)
+#define vxlprintk(x...) do { } while (0)
+#define vxfprintk(x...) do { } while (0)
+
+#endif	/* CONFIG_VSERVER_DEBUG */
+
+
+#ifdef	CONFIG_VSERVER_WARN
+
+#define VX_WARNLEVEL	KERN_WARNING "vxW: "
+#define VX_WARN_TASK	"[" VS_Q("%s") ",%u:#%u|%u|%u] "
+#define VX_WARN_XID	"[xid #%u] "
+#define VX_WARN_NID	"[nid #%u] "
+#define VX_WARN_TAG	"[tag #%u] "
+
+#define vxwprintk(c, f, x...)					\
+	do {							\
+		if (c)						\
+			printk(VX_WARNLEVEL f "\n", ##x);	\
+	} while (0)
+
+#else	/* CONFIG_VSERVER_WARN */
+
+#define vxwprintk(x...) do { } while (0)
+
+#endif	/* CONFIG_VSERVER_WARN */
+
+#define vxwprintk_task(c, f, x...)				\
+	vxwprintk(c, VX_WARN_TASK f,				\
+		current->comm, current->pid,			\
+		current->xid, current->nid, 			\
+		current->tag, ##x)
+#define vxwprintk_xid(c, f, x...)				\
+	vxwprintk(c, VX_WARN_XID f, current->xid, x)
+#define vxwprintk_nid(c, f, x...)				\
+	vxwprintk(c, VX_WARN_NID f, current->nid, x)
+#define vxwprintk_tag(c, f, x...)				\
+	vxwprintk(c, VX_WARN_TAG f, current->tag, x)
+
+#ifdef	CONFIG_VSERVER_DEBUG
+#define vxd_assert_lock(l)	assert_spin_locked(l)
+#define vxd_assert(c, f, x...)	vxlprintk(!(c), \
+	"assertion [" f "] failed.", ##x, __FILE__, __LINE__)
+#else
+#define vxd_assert_lock(l)	do { } while (0)
+#define vxd_assert(c, f, x...)	do { } while (0)
+#endif
+
+
+#endif /* _VSERVER_DEBUG_H */
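
vxdprintk() and friends are classic conditional-printk macros: the condition is usually a VXD_CBIT() test against one of the vs_debug_* masks, so a disabled category costs a single test. A user-space analogue (fprintf stands in for printk, the names are illustrative):

#include <stdio.h>

static unsigned int demo_debug_mask = 0x05;	/* categories 0 and 2 enabled */

#define DEMO_CBIT(mask, bit)	((mask) & (1U << (bit)))

/* ##__VA_ARGS__ is the gcc/clang extension the kernel macros also rely on */
#define demo_dprintk(c, f, ...)						\
	do {								\
		if (c)							\
			fprintf(stderr, "vxD: " f "\n", ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	demo_dprintk(DEMO_CBIT(demo_debug_mask, 2), "limit hit on res %d", 7);	/* printed */
	demo_dprintk(DEMO_CBIT(demo_debug_mask, 1), "never printed %d", 1);	/* condition false */
	return 0;
}
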
diff -ruNp linux-3.13.11/include/linux/vserver/debug_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/debug_cmd.h
--- linux-3.13.11/include/linux/vserver/debug_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/debug_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,37 @@
+#ifndef _VSERVER_DEBUG_CMD_H
+#define _VSERVER_DEBUG_CMD_H
+
+#include <uapi/vserver/debug_cmd.h>
+
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_read_history_v0_x32 {
+	uint32_t index;
+	uint32_t count;
+	compat_uptr_t data_ptr;
+};
+
+struct	vcmd_read_monitor_v0_x32 {
+	uint32_t index;
+	uint32_t count;
+	compat_uptr_t data_ptr;
+};
+
+#endif  /* CONFIG_COMPAT */
+
+extern int vc_dump_history(uint32_t);
+
+extern int vc_read_history(uint32_t, void __user *);
+extern int vc_read_monitor(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_read_history_x32(uint32_t, void __user *);
+extern int vc_read_monitor_x32(uint32_t, void __user *);
+
+#endif  /* CONFIG_COMPAT */
+
+#endif	/* _VSERVER_DEBUG_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device.h
--- linux-3.13.11/include/linux/vserver/device.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,9 @@
+#ifndef _VSERVER_DEVICE_H
+#define _VSERVER_DEVICE_H
+
+
+#include <uapi/vserver/device.h>
+
+#else	/* _VSERVER_DEVICE_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_DEVICE_H */
diff -ruNp linux-3.13.11/include/linux/vserver/device_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device_cmd.h
--- linux-3.13.11/include/linux/vserver/device_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,31 @@
+#ifndef _VSERVER_DEVICE_CMD_H
+#define _VSERVER_DEVICE_CMD_H
+
+#include <uapi/vserver/device_cmd.h>
+
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_set_mapping_v0_x32 {
+	compat_uptr_t device_ptr;
+	compat_uptr_t target_ptr;
+	uint32_t flags;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_set_mapping(struct vx_info *, void __user *);
+extern int vc_unset_mapping(struct vx_info *, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_set_mapping_x32(struct vx_info *, void __user *);
+extern int vc_unset_mapping_x32(struct vx_info *, void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* _VSERVER_DEVICE_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/device_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device_def.h
--- linux-3.13.11/include/linux/vserver/device_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/device_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,17 @@
+#ifndef _VSERVER_DEVICE_DEF_H
+#define _VSERVER_DEVICE_DEF_H
+
+#include <linux/types.h>
+
+struct vx_dmap_target {
+	dev_t target;
+	uint32_t flags;
+};
+
+struct _vx_device {
+#ifdef CONFIG_VSERVER_DEVICE
+	struct vx_dmap_target targets[2];
+#endif
+};
+
+#endif	/* _VSERVER_DEVICE_DEF_H */
diff -ruNp linux-3.13.11/include/linux/vserver/dlimit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/dlimit.h
--- linux-3.13.11/include/linux/vserver/dlimit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/dlimit.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,54 @@
+#ifndef _VSERVER_DLIMIT_H
+#define _VSERVER_DLIMIT_H
+
+#include "switch.h"
+
+
+#ifdef	__KERNEL__
+
+/*      keep in sync with CDLIM_INFINITY	*/
+
+#define DLIM_INFINITY		(~0ULL)
+
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+struct super_block;
+
+struct dl_info {
+	struct hlist_node dl_hlist;		/* linked list of contexts */
+	struct rcu_head dl_rcu;			/* the rcu head */
+	vtag_t dl_tag;				/* context tag */
+	atomic_t dl_usecnt;			/* usage count */
+	atomic_t dl_refcnt;			/* reference count */
+
+	struct super_block *dl_sb;		/* associated superblock */
+
+	spinlock_t dl_lock;			/* protect the values */
+
+	unsigned long long dl_space_used;	/* used space in bytes */
+	unsigned long long dl_space_total;	/* maximum space in bytes */
+	unsigned long dl_inodes_used;		/* used inodes */
+	unsigned long dl_inodes_total;		/* maximum inodes */
+
+	unsigned int dl_nrlmult;		/* non root limit mult */
+};
+
+struct rcu_head;
+
+extern void rcu_free_dl_info(struct rcu_head *);
+extern void unhash_dl_info(struct dl_info *);
+
+extern struct dl_info *locate_dl_info(struct super_block *, vtag_t);
+
+
+struct kstatfs;
+
+extern void vx_vsi_statfs(struct super_block *, struct kstatfs *);
+
+typedef uint64_t dlsize_t;
+
+#endif	/* __KERNEL__ */
+#else	/* _VSERVER_DLIMIT_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_DLIMIT_H */
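
A dl_info space check essentially compares used plus requested space against dl_space_total, with DLIM_INFINITY meaning "no limit". A user-space sketch of that comparison (the nrlmult non-root reserve is left out and the helper name is made up):

#include <stdio.h>

#define DLIM_INFINITY	(~0ULL)

/* hypothetical helper: would the request still fit under dl_space_total? */
static int demo_dspace_ok(unsigned long long used,
			  unsigned long long total,
			  unsigned long long bytes)
{
	if (total == DLIM_INFINITY)
		return 1;
	return used + bytes <= total;
}

int main(void)
{
	printf("%d %d\n",
	       demo_dspace_ok(900, 1000, 50),	/* 1: fits */
	       demo_dspace_ok(990, 1000, 50));	/* 0: over the limit */
	return 0;
}
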
diff -ruNp linux-3.13.11/include/linux/vserver/dlimit_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/dlimit_cmd.h
--- linux-3.13.11/include/linux/vserver/dlimit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/dlimit_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,46 @@
+#ifndef _VSERVER_DLIMIT_CMD_H
+#define _VSERVER_DLIMIT_CMD_H
+
+#include <uapi/vserver/dlimit_cmd.h>
+
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_ctx_dlimit_base_v0_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t flags;
+};
+
+struct	vcmd_ctx_dlimit_v0_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t space_used;			/* used space in kbytes */
+	uint32_t space_total;			/* maximum space in kbytes */
+	uint32_t inodes_used;			/* used inodes */
+	uint32_t inodes_total;			/* maximum inodes */
+	uint32_t reserved;			/* reserved for root in % */
+	uint32_t flags;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_add_dlimit(uint32_t, void __user *);
+extern int vc_rem_dlimit(uint32_t, void __user *);
+
+extern int vc_set_dlimit(uint32_t, void __user *);
+extern int vc_get_dlimit(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_add_dlimit_x32(uint32_t, void __user *);
+extern int vc_rem_dlimit_x32(uint32_t, void __user *);
+
+extern int vc_set_dlimit_x32(uint32_t, void __user *);
+extern int vc_get_dlimit_x32(uint32_t, void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* _VSERVER_DLIMIT_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/global.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/global.h
--- linux-3.13.11/include/linux/vserver/global.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/global.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,19 @@
+#ifndef _VSERVER_GLOBAL_H
+#define _VSERVER_GLOBAL_H
+
+
+extern atomic_t vx_global_ctotal;
+extern atomic_t vx_global_cactive;
+
+extern atomic_t nx_global_ctotal;
+extern atomic_t nx_global_cactive;
+
+extern atomic_t vs_global_nsproxy;
+extern atomic_t vs_global_fs;
+extern atomic_t vs_global_mnt_ns;
+extern atomic_t vs_global_uts_ns;
+extern atomic_t vs_global_user_ns;
+extern atomic_t vs_global_pid_ns;
+
+
+#endif /* _VSERVER_GLOBAL_H */
diff -ruNp linux-3.13.11/include/linux/vserver/history.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/history.h
--- linux-3.13.11/include/linux/vserver/history.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/history.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,197 @@
+#ifndef _VSERVER_HISTORY_H
+#define _VSERVER_HISTORY_H
+
+
+enum {
+	VXH_UNUSED = 0,
+	VXH_THROW_OOPS = 1,
+
+	VXH_GET_VX_INFO,
+	VXH_PUT_VX_INFO,
+	VXH_INIT_VX_INFO,
+	VXH_SET_VX_INFO,
+	VXH_CLR_VX_INFO,
+	VXH_CLAIM_VX_INFO,
+	VXH_RELEASE_VX_INFO,
+	VXH_ALLOC_VX_INFO,
+	VXH_DEALLOC_VX_INFO,
+	VXH_HASH_VX_INFO,
+	VXH_UNHASH_VX_INFO,
+	VXH_LOC_VX_INFO,
+	VXH_LOOKUP_VX_INFO,
+	VXH_CREATE_VX_INFO,
+};
+
+struct _vxhe_vxi {
+	struct vx_info *ptr;
+	unsigned xid;
+	unsigned usecnt;
+	unsigned tasks;
+};
+
+struct _vxhe_set_clr {
+	void *data;
+};
+
+struct _vxhe_loc_lookup {
+	unsigned arg;
+};
+
+struct _vx_hist_entry {
+	void *loc;
+	unsigned short seq;
+	unsigned short type;
+	struct _vxhe_vxi vxi;
+	union {
+		struct _vxhe_set_clr sc;
+		struct _vxhe_loc_lookup ll;
+	};
+};
+
+#ifdef	CONFIG_VSERVER_HISTORY
+
+extern unsigned volatile int vxh_active;
+
+struct _vx_hist_entry *vxh_advance(void *loc);
+
+
+static inline
+void	__vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
+{
+	entry->vxi.ptr = vxi;
+	if (vxi) {
+		entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
+		entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
+		entry->vxi.xid = vxi->vx_id;
+	}
+}
+
+
+#define	__HERE__ current_text_addr()
+
+#define __VXH_BODY(__type, __data, __here)	\
+	struct _vx_hist_entry *entry;		\
+						\
+	preempt_disable();			\
+	entry = vxh_advance(__here);		\
+	__data;					\
+	entry->type = __type;			\
+	preempt_enable();
+
+
+	/* pass vxi only */
+
+#define __VXH_SMPL				\
+	__vxh_copy_vxi(entry, vxi)
+
+static inline
+void	__vxh_smpl(struct vx_info *vxi, int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_SMPL, __here)
+}
+
+	/* pass vxi and data (void *) */
+
+#define __VXH_DATA				\
+	__vxh_copy_vxi(entry, vxi);		\
+	entry->sc.data = data
+
+static inline
+void	__vxh_data(struct vx_info *vxi, void *data,
+			int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_DATA, __here)
+}
+
+	/* pass vxi and arg (long) */
+
+#define __VXH_LONG				\
+	__vxh_copy_vxi(entry, vxi);		\
+	entry->ll.arg = arg
+
+static inline
+void	__vxh_long(struct vx_info *vxi, long arg,
+			int __type, void *__here)
+{
+	__VXH_BODY(__type, __VXH_LONG, __here)
+}
+
+
+static inline
+void	__vxh_throw_oops(void *__here)
+{
+	__VXH_BODY(VXH_THROW_OOPS, {}, __here);
+	/* prevent further acquisition */
+	vxh_active = 0;
+}
+
+
+#define vxh_throw_oops()	__vxh_throw_oops(__HERE__);
+
+#define __vxh_get_vx_info(v, h)	__vxh_smpl(v, VXH_GET_VX_INFO, h);
+#define __vxh_put_vx_info(v, h)	__vxh_smpl(v, VXH_PUT_VX_INFO, h);
+
+#define __vxh_init_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_INIT_VX_INFO, h);
+#define __vxh_set_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_SET_VX_INFO, h);
+#define __vxh_clr_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_CLR_VX_INFO, h);
+
+#define __vxh_claim_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_CLAIM_VX_INFO, h);
+#define __vxh_release_vx_info(v, d, h) \
+	__vxh_data(v, d, VXH_RELEASE_VX_INFO, h);
+
+#define vxh_alloc_vx_info(v) \
+	__vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
+#define vxh_dealloc_vx_info(v) \
+	__vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
+
+#define vxh_hash_vx_info(v) \
+	__vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
+#define vxh_unhash_vx_info(v) \
+	__vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
+
+#define vxh_loc_vx_info(v, l) \
+	__vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__);
+#define vxh_lookup_vx_info(v, l) \
+	__vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__);
+#define vxh_create_vx_info(v, l) \
+	__vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__);
+
+extern void vxh_dump_history(void);
+
+
+#else  /* CONFIG_VSERVER_HISTORY */
+
+#define	__HERE__	0
+
+#define vxh_throw_oops()		do { } while (0)
+
+#define __vxh_get_vx_info(v, h)		do { } while (0)
+#define __vxh_put_vx_info(v, h)		do { } while (0)
+
+#define __vxh_init_vx_info(v, d, h)	do { } while (0)
+#define __vxh_set_vx_info(v, d, h)	do { } while (0)
+#define __vxh_clr_vx_info(v, d, h)	do { } while (0)
+
+#define __vxh_claim_vx_info(v, d, h)	do { } while (0)
+#define __vxh_release_vx_info(v, d, h)	do { } while (0)
+
+#define vxh_alloc_vx_info(v)		do { } while (0)
+#define vxh_dealloc_vx_info(v)		do { } while (0)
+
+#define vxh_hash_vx_info(v)		do { } while (0)
+#define vxh_unhash_vx_info(v)		do { } while (0)
+
+#define vxh_loc_vx_info(v, l)		do { } while (0)
+#define vxh_lookup_vx_info(v, l)	do { } while (0)
+#define vxh_create_vx_info(v, l)	do { } while (0)
+
+#define vxh_dump_history()		do { } while (0)
+
+
+#endif /* CONFIG_VSERVER_HISTORY */
+
+#endif /* _VSERVER_HISTORY_H */
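
__VXH_BODY() is a fixed-size ring buffer: advance to the next slot, fill it, then store the type; the preempt_disable()/preempt_enable() pair keeps the slot private while it is written and has no user-space equivalent here. A sketch of the ring part only (sizes and names are illustrative):

#include <stdio.h>

#define HIST_SIZE 8				/* illustrative, not the kernel size */

struct demo_entry {
	void *loc;
	unsigned short seq;
	unsigned short type;
};

static struct demo_entry hist[HIST_SIZE];
static unsigned int hist_pos;

/* like vxh_advance(): hand out the next slot and remember where it was taken */
static struct demo_entry *demo_advance(void *loc)
{
	struct demo_entry *e = &hist[hist_pos++ % HIST_SIZE];

	e->loc = loc;
	e->seq = (unsigned short)hist_pos;
	return e;
}

int main(void)
{
	static int marker;			/* stands in for __HERE__ */
	struct demo_entry *e = demo_advance(&marker);

	e->type = 2;				/* e.g. VXH_GET_VX_INFO */
	printf("seq %hu type %hu loc %p\n", e->seq, e->type, e->loc);
	return 0;
}
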
diff -ruNp linux-3.13.11/include/linux/vserver/inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/inode.h
--- linux-3.13.11/include/linux/vserver/inode.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,19 @@
+#ifndef _VSERVER_INODE_H
+#define _VSERVER_INODE_H
+
+#include <uapi/vserver/inode.h>
+
+
+#ifdef	CONFIG_VSERVER_PROC_SECURE
+#define IATTR_PROC_DEFAULT	( IATTR_ADMIN | IATTR_HIDE )
+#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
+#else
+#define IATTR_PROC_DEFAULT	( IATTR_ADMIN )
+#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
+#endif
+
+#define vx_hide_check(c, m)	(((m) & IATTR_HIDE) ? vx_check(c, m) : 1)
+
+#else	/* _VSERVER_INODE_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_INODE_H */
diff -ruNp linux-3.13.11/include/linux/vserver/inode_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/inode_cmd.h
--- linux-3.13.11/include/linux/vserver/inode_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/inode_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,36 @@
+#ifndef _VSERVER_INODE_CMD_H
+#define _VSERVER_INODE_CMD_H
+
+#include <uapi/vserver/inode_cmd.h>
+
+
+
+#ifdef	CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct	vcmd_ctx_iattr_v1_x32 {
+	compat_uptr_t name_ptr;
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+#endif	/* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_get_iattr(void __user *);
+extern int vc_set_iattr(void __user *);
+
+extern int vc_fget_iattr(uint32_t, void __user *);
+extern int vc_fset_iattr(uint32_t, void __user *);
+
+#ifdef	CONFIG_COMPAT
+
+extern int vc_get_iattr_x32(void __user *);
+extern int vc_set_iattr_x32(void __user *);
+
+#endif	/* CONFIG_COMPAT */
+
+#endif	/* _VSERVER_INODE_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/limit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit.h
--- linux-3.13.11/include/linux/vserver/limit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,61 @@
+#ifndef _VSERVER_LIMIT_H
+#define _VSERVER_LIMIT_H
+
+#include <uapi/vserver/limit.h>
+
+
+#define	VLIM_NOCHECK	((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS))
+
+/*	keep in sync with CRLIM_INFINITY */
+
+#define	VLIM_INFINITY	(~0ULL)
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#ifndef RLIM_INFINITY
+#warning RLIM_INFINITY is undefined
+#endif
+
+#define __rlim_val(l, r, v)	((l)->res[r].v)
+
+#define __rlim_soft(l, r)	__rlim_val(l, r, soft)
+#define __rlim_hard(l, r)	__rlim_val(l, r, hard)
+
+#define __rlim_rcur(l, r)	__rlim_val(l, r, rcur)
+#define __rlim_rmin(l, r)	__rlim_val(l, r, rmin)
+#define __rlim_rmax(l, r)	__rlim_val(l, r, rmax)
+
+#define __rlim_lhit(l, r)	__rlim_val(l, r, lhit)
+#define __rlim_hit(l, r)	atomic_inc(&__rlim_lhit(l, r))
+
+typedef atomic_long_t rlim_atomic_t;
+typedef unsigned long rlim_t;
+
+#define __rlim_get(l, r)	atomic_long_read(&__rlim_rcur(l, r))
+#define __rlim_set(l, r, v)	atomic_long_set(&__rlim_rcur(l, r), v)
+#define __rlim_inc(l, r)	atomic_long_inc(&__rlim_rcur(l, r))
+#define __rlim_dec(l, r)	atomic_long_dec(&__rlim_rcur(l, r))
+#define __rlim_add(l, r, v)	atomic_long_add(v, &__rlim_rcur(l, r))
+#define __rlim_sub(l, r, v)	atomic_long_sub(v, &__rlim_rcur(l, r))
+
+
+#if	(RLIM_INFINITY == VLIM_INFINITY)
+#define	VX_VLIM(r) ((long long)(long)(r))
+#define	VX_RLIM(v) ((rlim_t)(v))
+#else
+#define	VX_VLIM(r) (((r) == RLIM_INFINITY) \
+		? VLIM_INFINITY : (long long)(r))
+#define	VX_RLIM(v) (((v) == VLIM_INFINITY) \
+		? RLIM_INFINITY : (rlim_t)(v))
+#endif
+
+struct sysinfo;
+
+void vx_vsi_meminfo(struct sysinfo *);
+void vx_vsi_swapinfo(struct sysinfo *);
+long vx_vsi_cached(struct sysinfo *);
+
+#define NUM_LIMITS	24
+
+#endif	/* _VSERVER_LIMIT_H */
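
VX_VLIM()/VX_RLIM() just map between the kernel rlim_t and the 64-bit vserver limit value, treating the two infinity encodings specially. A quick user-space check of that mapping (using the userspace rlim_t and RLIM_INFINITY from <sys/resource.h> as stand-ins):

#include <stdio.h>
#include <sys/resource.h>

#define VLIM_INFINITY	(~0ULL)

static unsigned long long to_vlim(rlim_t r)		/* like VX_VLIM() */
{
	return (r == RLIM_INFINITY) ? VLIM_INFINITY : (unsigned long long)r;
}

static rlim_t to_rlim(unsigned long long v)		/* like VX_RLIM() */
{
	return (v == VLIM_INFINITY) ? RLIM_INFINITY : (rlim_t)v;
}

int main(void)
{
	printf("%d %d\n",
	       to_vlim(RLIM_INFINITY) == VLIM_INFINITY,	/* 1 */
	       to_rlim(4096) == (rlim_t)4096);		/* 1 */
	return 0;
}
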
diff -ruNp linux-3.13.11/include/linux/vserver/limit_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_cmd.h
--- linux-3.13.11/include/linux/vserver/limit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,35 @@
+#ifndef _VSERVER_LIMIT_CMD_H
+#define _VSERVER_LIMIT_CMD_H
+
+#include <uapi/vserver/limit_cmd.h>
+
+
+#ifdef	CONFIG_IA32_EMULATION
+
+struct	vcmd_ctx_rlimit_v0_x32 {
+	uint32_t id;
+	uint64_t minimum;
+	uint64_t softlimit;
+	uint64_t maximum;
+} __attribute__ ((packed));
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+#include <linux/compiler.h>
+
+extern int vc_get_rlimit_mask(uint32_t, void __user *);
+extern int vc_get_rlimit(struct vx_info *, void __user *);
+extern int vc_set_rlimit(struct vx_info *, void __user *);
+extern int vc_reset_hits(struct vx_info *, void __user *);
+extern int vc_reset_minmax(struct vx_info *, void __user *);
+
+extern int vc_rlimit_stat(struct vx_info *, void __user *);
+
+#ifdef	CONFIG_IA32_EMULATION
+
+extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
+extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+#endif	/* _VSERVER_LIMIT_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/limit_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_def.h
--- linux-3.13.11/include/linux/vserver/limit_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,47 @@
+#ifndef _VSERVER_LIMIT_DEF_H
+#define _VSERVER_LIMIT_DEF_H
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#include "limit.h"
+
+
+struct _vx_res_limit {
+	rlim_t soft;		/* Context soft limit */
+	rlim_t hard;		/* Context hard limit */
+
+	rlim_atomic_t rcur;	/* Current value */
+	rlim_t rmin;		/* Context minimum */
+	rlim_t rmax;		/* Context maximum */
+
+	atomic_t lhit;		/* Limit hits */
+};
+
+/* context sub struct */
+
+struct _vx_limit {
+	struct _vx_res_limit res[NUM_LIMITS];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_limit(struct _vx_limit *limit)
+{
+	int i;
+
+	printk("\t_vx_limit:");
+	for (i = 0; i < NUM_LIMITS; i++) {
+		printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
+			i, (unsigned long)__rlim_get(limit, i),
+			(unsigned long)__rlim_rmin(limit, i),
+			(unsigned long)__rlim_rmax(limit, i),
+			(long)__rlim_soft(limit, i),
+			(long)__rlim_hard(limit, i),
+			atomic_read(&__rlim_lhit(limit, i)));
+	}
+}
+
+#endif
+
+#endif	/* _VSERVER_LIMIT_DEF_H */
diff -ruNp linux-3.13.11/include/linux/vserver/limit_int.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_int.h
--- linux-3.13.11/include/linux/vserver/limit_int.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/limit_int.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,193 @@
+#ifndef _VSERVER_LIMIT_INT_H
+#define _VSERVER_LIMIT_INT_H
+
+#define VXD_RCRES_COND(r)	VXD_CBIT(cres, r)
+#define VXD_RLIMIT_COND(r)	VXD_CBIT(limit, r)
+
+extern const char *vlimit_name[NUM_LIMITS];
+
+static inline void __vx_acc_cres(struct vx_info *vxi,
+	int res, int dir, void *_data, char *_file, int _line)
+{
+	if (VXD_RCRES_COND(res))
+		vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			(dir > 0) ? "++" : "--", _data, _file, _line);
+	if (!vxi)
+		return;
+
+	if (dir > 0)
+		__rlim_inc(&vxi->limit, res);
+	else
+		__rlim_dec(&vxi->limit, res);
+}
+
+static inline void __vx_add_cres(struct vx_info *vxi,
+	int res, int amount, void *_data, char *_file, int _line)
+{
+	if (VXD_RCRES_COND(res))
+		vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			amount, _data, _file, _line);
+	if (amount == 0)
+		return;
+	if (!vxi)
+		return;
+	__rlim_add(&vxi->limit, res, amount);
+}
+
+static inline
+int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value)
+{
+	int cond = (value > __rlim_rmax(limit, res));
+
+	if (cond)
+		__rlim_rmax(limit, res) = value;
+	return cond;
+}
+
+static inline
+int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value)
+{
+	int cond = (value < __rlim_rmin(limit, res));
+
+	if (cond)
+		__rlim_rmin(limit, res) = value;
+	return cond;
+}
+
+static inline
+void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value)
+{
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+}
+
+
+/*	return values:
+	 -1 ... below the soft limit (or no soft limit set)
+	 +1 ... over soft, but still below the hard limit
+	  0 ... over the hard limit		*/
+
+static inline int __vx_cres_avail(struct vx_info *vxi,
+	int res, int num, char *_file, int _line)
+{
+	struct _vx_limit *limit;
+	rlim_t value;
+
+	if (VXD_RLIMIT_COND(res))
+		vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
+			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+			(vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
+			(vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
+			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+			num, _file, _line);
+	if (!vxi)
+		return 1;
+
+	limit = &vxi->limit;
+	value = __rlim_get(limit, res);
+
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+
+	if (num == 0)
+		return 1;
+
+	if (__rlim_soft(limit, res) == RLIM_INFINITY)
+		return -1;
+	if (value + num <= __rlim_soft(limit, res))
+		return -1;
+
+	if (__rlim_hard(limit, res) == RLIM_INFINITY)
+		return 1;
+	if (value + num <= __rlim_hard(limit, res))
+		return 1;
+
+	__rlim_hit(limit, res);
+	return 0;
+}
+
+
+static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 };
+
+static inline
+rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array)
+{
+	rlim_t value, sum = 0;
+	int res;
+
+	while ((res = *array++)) {
+		value = __rlim_get(limit, res);
+		__vx_cres_fixup(limit, res, value);
+		sum += value;
+	}
+	return sum;
+}
+
+static inline
+rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array)
+{
+	rlim_t value = __vx_cres_array_sum(limit, array + 1);
+	int res = *array;
+
+	if (value == __rlim_get(limit, res))
+		return value;
+
+	__rlim_set(limit, res, value);
+	/* now adjust min/max */
+	if (!__vx_cres_adjust_max(limit, res, value))
+		__vx_cres_adjust_min(limit, res, value);
+
+	return value;
+}
+
+static inline int __vx_cres_array_avail(struct vx_info *vxi,
+	const int *array, int num, char *_file, int _line)
+{
+	struct _vx_limit *limit;
+	rlim_t value = 0;
+	int res;
+
+	if (num == 0)
+		return 1;
+	if (!vxi)
+		return 1;
+
+	limit = &vxi->limit;
+	res = *array;
+	value = __vx_cres_array_sum(limit, array + 1);
+
+	__rlim_set(limit, res, value);
+	__vx_cres_fixup(limit, res, value);
+
+	return __vx_cres_avail(vxi, res, num, _file, _line);
+}
+
+
+static inline void vx_limit_fixup(struct _vx_limit *limit, int id)
+{
+	rlim_t value;
+	int res;
+
+	/* complex resources first */
+	if ((id < 0) || (id == RLIMIT_RSS))
+		__vx_cres_array_fixup(limit, VLA_RSS);
+
+	for (res = 0; res < NUM_LIMITS; res++) {
+		if ((id > 0) && (res != id))
+			continue;
+
+		value = __rlim_get(limit, res);
+		__vx_cres_fixup(limit, res, value);
+
+		/* not supposed to happen, maybe warn? */
+		if (__rlim_rmax(limit, res) > __rlim_hard(limit, res))
+			__rlim_rmax(limit, res) = __rlim_hard(limit, res);
+	}
+}
+
+
+#endif	/* _VSERVER_LIMIT_INT_H */
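
As implemented, __vx_cres_avail() returns -1 while the request stays below the soft limit, +1 when it is over soft but still below hard, and 0 (after counting a hit) when the hard limit would be exceeded; callers treat any non-zero value as success. A user-space sketch of that decision with plain integers:

#include <stdio.h>

#define DEMO_INFINITY	(~0UL)

static int demo_avail(unsigned long cur, unsigned long soft,
		      unsigned long hard, int num)
{
	if (num == 0)
		return 1;
	if (soft == DEMO_INFINITY || cur + num <= soft)
		return -1;			/* still within the soft limit */
	if (hard == DEMO_INFINITY || cur + num <= hard)
		return 1;			/* over soft, hard limit not hit */
	return 0;				/* __rlim_hit() would fire here */
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_avail(10, 100, 200, 5),	/* -1 */
	       demo_avail(150, 100, 200, 5),	/*  1 */
	       demo_avail(198, 100, 200, 5));	/*  0 */
	return 0;
}
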
diff -ruNp linux-3.13.11/include/linux/vserver/monitor.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/monitor.h
--- linux-3.13.11/include/linux/vserver/monitor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/monitor.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,6 @@
+#ifndef _VSERVER_MONITOR_H
+#define _VSERVER_MONITOR_H
+
+#include <uapi/vserver/monitor.h>
+
+#endif /* _VSERVER_MONITOR_H */
diff -ruNp linux-3.13.11/include/linux/vserver/network.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/network.h
--- linux-3.13.11/include/linux/vserver/network.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/network.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,76 @@
+#ifndef _VSERVER_NETWORK_H
+#define _VSERVER_NETWORK_H
+
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <asm/atomic.h>
+#include <uapi/vserver/network.h>
+
+struct nx_addr_v4 {
+	struct nx_addr_v4 *next;
+	struct in_addr ip[2];
+	struct in_addr mask;
+	uint16_t type;
+	uint16_t flags;
+};
+
+struct nx_addr_v6 {
+	struct nx_addr_v6 *next;
+	struct in6_addr ip;
+	struct in6_addr mask;
+	uint32_t prefix;
+	uint16_t type;
+	uint16_t flags;
+};
+
+struct nx_info {
+	struct hlist_node nx_hlist;	/* linked list of nxinfos */
+	vnid_t nx_id;			/* vnet id */
+	atomic_t nx_usecnt;		/* usage count */
+	atomic_t nx_tasks;		/* tasks count */
+	int nx_state;			/* context state */
+
+	uint64_t nx_flags;		/* network flag word */
+	uint64_t nx_ncaps;		/* network capabilities */
+
+	spinlock_t addr_lock;		/* protect address changes */
+	struct in_addr v4_lback;	/* Loopback address */
+	struct in_addr v4_bcast;	/* Broadcast address */
+	struct nx_addr_v4 v4;		/* First/Single ipv4 address */
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 v6;		/* First/Single ipv6 address */
+#endif
+	char nx_name[65];		/* network context name */
+};
+
+
+/* status flags */
+
+#define NXS_HASHED      0x0001
+#define NXS_SHUTDOWN    0x0100
+#define NXS_RELEASED    0x8000
+
+extern struct nx_info *lookup_nx_info(int);
+
+extern int get_nid_list(int, unsigned int *, int);
+extern int nid_is_hashed(vnid_t);
+
+extern int nx_migrate_task(struct task_struct *, struct nx_info *);
+
+extern long vs_net_change(struct nx_info *, unsigned int);
+
+struct sock;
+
+
+#define NX_IPV4(n)	((n)->v4.type != NXA_TYPE_NONE)
+#ifdef  CONFIG_IPV6
+#define NX_IPV6(n)	((n)->v6.type != NXA_TYPE_NONE)
+#else
+#define NX_IPV6(n)	(0)
+#endif
+
+#endif	/* _VSERVER_NETWORK_H */
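
Each nx_addr_v4 entry carries an address pair, a mask and a type; the common case is a masked compare of a candidate address against the stored one. A user-space sketch of that compare only, with the range/type handling of the in-kernel helpers omitted and the helper name made up:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* hypothetical helper: does addr fall into the stored ip/mask entry? */
static int demo_v4_match(uint32_t addr, uint32_t ip, uint32_t mask)
{
	return (addr & mask) == (ip & mask);
}

int main(void)
{
	uint32_t ip   = inet_addr("10.0.0.0");
	uint32_t mask = inet_addr("255.255.255.0");

	printf("%d %d\n",
	       demo_v4_match(inet_addr("10.0.0.42"), ip, mask),	/* 1 */
	       demo_v4_match(inet_addr("10.0.1.42"), ip, mask));	/* 0 */
	return 0;
}
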
diff -ruNp linux-3.13.11/include/linux/vserver/network_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/network_cmd.h
--- linux-3.13.11/include/linux/vserver/network_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/network_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,37 @@
+#ifndef _VSERVER_NETWORK_CMD_H
+#define _VSERVER_NETWORK_CMD_H
+
+#include <uapi/vserver/network_cmd.h>
+
+extern int vc_task_nid(uint32_t);
+
+extern int vc_nx_info(struct nx_info *, void __user *);
+
+extern int vc_net_create(uint32_t, void __user *);
+extern int vc_net_migrate(struct nx_info *, void __user *);
+
+extern int vc_net_add(struct nx_info *, void __user *);
+extern int vc_net_remove(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv4_v1(struct nx_info *, void __user *);
+extern int vc_net_add_ipv4(struct nx_info *, void __user *);
+
+extern int vc_net_rem_ipv4_v1(struct nx_info *, void __user *);
+extern int vc_net_rem_ipv4(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv6(struct nx_info *, void __user *);
+extern int vc_net_remove_ipv6(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv4(struct nx_info *, void __user *);
+extern int vc_get_match_ipv4(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv6(struct nx_info *, void __user *);
+extern int vc_get_match_ipv6(struct nx_info *, void __user *);
+
+extern int vc_get_nflags(struct nx_info *, void __user *);
+extern int vc_set_nflags(struct nx_info *, void __user *);
+
+extern int vc_get_ncaps(struct nx_info *, void __user *);
+extern int vc_set_ncaps(struct nx_info *, void __user *);
+
+#endif	/* _VSERVER_NETWORK_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/percpu.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/percpu.h
--- linux-3.13.11/include/linux/vserver/percpu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/percpu.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef _VSERVER_PERCPU_H
+#define _VSERVER_PERCPU_H
+
+#include "cvirt_def.h"
+#include "sched_def.h"
+
+struct	_vx_percpu {
+	struct _vx_cvirt_pc cvirt;
+	struct _vx_sched_pc sched;
+};
+
+#define	PERCPU_PERCTX	(sizeof(struct _vx_percpu))
+
+#endif	/* _VSERVER_PERCPU_H */
diff -ruNp linux-3.13.11/include/linux/vserver/pid.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/pid.h
--- linux-3.13.11/include/linux/vserver/pid.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/pid.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,51 @@
+#ifndef _VSERVER_PID_H
+#define _VSERVER_PID_H
+
+/* pid faking stuff */
+
+#define vx_info_map_pid(v, p) \
+	__vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__)
+#define vx_info_map_tgid(v,p)  vx_info_map_pid(v,p)
+#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p)
+#define vx_map_tgid(p) vx_map_pid(p)
+
+static inline int __vx_info_map_pid(struct vx_info *vxi, int pid,
+	const char *func, const char *file, int line)
+{
+	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+		vxfprintk(VXD_CBIT(cvirt, 2),
+			"vx_map_tgid: %p/%llx: %d -> %d",
+			vxi, (long long)vxi->vx_flags, pid,
+			(pid && pid == vxi->vx_initpid) ? 1 : pid,
+			func, file, line);
+		if (pid == 0)
+			return 0;
+		if (pid == vxi->vx_initpid)
+			return 1;
+	}
+	return pid;
+}
+
+#define vx_info_rmap_pid(v, p) \
+	__vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__)
+#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p)
+#define vx_rmap_tgid(p) vx_rmap_pid(p)
+
+static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid,
+	const char *func, const char *file, int line)
+{
+	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+		vxfprintk(VXD_CBIT(cvirt, 2),
+			"vx_rmap_tgid: %p/%llx: %d -> %d",
+			vxi, (long long)vxi->vx_flags, pid,
+			(pid == 1) ? vxi->vx_initpid : pid,
+			func, file, line);
+		if ((pid == 1) && vxi->vx_initpid)
+			return vxi->vx_initpid;
+		if (pid == vxi->vx_initpid)
+			return ~0U;
+	}
+	return pid;
+}
+
+#endif
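
The two helpers above make the guest's init show up as pid 1 and translate pid 1 back to the real init pid. A user-space sketch of that mapping (the ~0U case of the reverse map is left out, the init pid below is made up):

#include <stdio.h>

static int map_pid(int pid, int initpid)	/* kernel pid -> guest view */
{
	if (pid == 0)
		return 0;
	return (pid == initpid) ? 1 : pid;
}

static int rmap_pid(int pid, int initpid)	/* guest view -> kernel pid */
{
	if (pid == 1 && initpid)
		return initpid;
	return pid;
}

int main(void)
{
	int initpid = 4711;			/* illustrative guest init pid */

	printf("%d %d\n",
	       map_pid(4711, initpid),		/* 1: init shows up as pid 1 */
	       rmap_pid(1, initpid));		/* 4711: and maps back again */
	return 0;
}
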
diff -ruNp linux-3.13.11/include/linux/vserver/sched.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched.h
--- linux-3.13.11/include/linux/vserver/sched.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,23 @@
+#ifndef _VSERVER_SCHED_H
+#define _VSERVER_SCHED_H
+
+
+#ifdef	__KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+void vx_update_sched_param(struct _vx_sched *sched,
+	struct _vx_sched_pc *sched_pc);
+
+#endif	/* __KERNEL__ */
+#else	/* _VSERVER_SCHED_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_SCHED_H */
diff -ruNp linux-3.13.11/include/linux/vserver/sched_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched_cmd.h
--- linux-3.13.11/include/linux/vserver/sched_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,11 @@
+#ifndef _VSERVER_SCHED_CMD_H
+#define _VSERVER_SCHED_CMD_H
+
+
+#include <linux/compiler.h>
+#include <uapi/vserver/sched_cmd.h>
+
+extern int vc_set_prio_bias(struct vx_info *, void __user *);
+extern int vc_get_prio_bias(struct vx_info *, void __user *);
+
+#endif	/* _VSERVER_SCHED_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/sched_def.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched_def.h
--- linux-3.13.11/include/linux/vserver/sched_def.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/sched_def.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,38 @@
+#ifndef _VSERVER_SCHED_DEF_H
+#define _VSERVER_SCHED_DEF_H
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <asm/atomic.h>
+#include <asm/param.h>
+
+
+/* context sub struct */
+
+struct _vx_sched {
+	int prio_bias;			/* bias offset for priority */
+
+	cpumask_t update;		/* CPUs which should update */
+};
+
+struct _vx_sched_pc {
+	int prio_bias;			/* bias offset for priority */
+
+	uint64_t user_ticks;		/* token tick events */
+	uint64_t sys_ticks;		/* token tick events */
+	uint64_t hold_ticks;		/* token ticks paused */
+};
+
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_sched(struct _vx_sched *sched)
+{
+	printk("\t_vx_sched:\n");
+	printk("\t priority = %4d\n", sched->prio_bias);
+}
+
+#endif
+
+#endif	/* _VSERVER_SCHED_DEF_H */
diff -ruNp linux-3.13.11/include/linux/vserver/signal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/signal.h
--- linux-3.13.11/include/linux/vserver/signal.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/signal.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef _VSERVER_SIGNAL_H
+#define _VSERVER_SIGNAL_H
+
+
+#ifdef	__KERNEL__
+
+struct vx_info;
+
+int vx_info_kill(struct vx_info *, int, int);
+
+#endif	/* __KERNEL__ */
+#else	/* _VSERVER_SIGNAL_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_SIGNAL_H */
diff -ruNp linux-3.13.11/include/linux/vserver/signal_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/signal_cmd.h
--- linux-3.13.11/include/linux/vserver/signal_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/signal_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef _VSERVER_SIGNAL_CMD_H
+#define _VSERVER_SIGNAL_CMD_H
+
+#include <uapi/vserver/signal_cmd.h>
+
+
+extern int vc_ctx_kill(struct vx_info *, void __user *);
+extern int vc_wait_exit(struct vx_info *, void __user *);
+
+
+extern int vc_get_pflags(uint32_t pid, void __user *);
+extern int vc_set_pflags(uint32_t pid, void __user *);
+
+#endif	/* _VSERVER_SIGNAL_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/space.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/space.h
--- linux-3.13.11/include/linux/vserver/space.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/space.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,12 @@
+#ifndef _VSERVER_SPACE_H
+#define _VSERVER_SPACE_H
+
+#include <linux/types.h>
+
+struct vx_info;
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index);
+
+#else	/* _VSERVER_SPACE_H */
+#warning duplicate inclusion
+#endif	/* _VSERVER_SPACE_H */
diff -ruNp linux-3.13.11/include/linux/vserver/space_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/space_cmd.h
--- linux-3.13.11/include/linux/vserver/space_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/space_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,13 @@
+#ifndef _VSERVER_SPACE_CMD_H
+#define _VSERVER_SPACE_CMD_H
+
+#include <uapi/vserver/space_cmd.h>
+
+
+extern int vc_enter_space_v1(struct vx_info *, void __user *);
+extern int vc_set_space_v1(struct vx_info *, void __user *);
+extern int vc_enter_space(struct vx_info *, void __user *);
+extern int vc_set_space(struct vx_info *, void __user *);
+extern int vc_get_space_mask(void __user *, int);
+
+#endif	/* _VSERVER_SPACE_CMD_H */
diff -ruNp linux-3.13.11/include/linux/vserver/switch.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/switch.h
--- linux-3.13.11/include/linux/vserver/switch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/switch.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,8 @@
+#ifndef _VSERVER_SWITCH_H
+#define _VSERVER_SWITCH_H
+
+
+#include <linux/errno.h>
+#include <uapi/vserver/switch.h>
+
+#endif	/* _VSERVER_SWITCH_H */
diff -ruNp linux-3.13.11/include/linux/vserver/tag.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/tag.h
--- linux-3.13.11/include/linux/vserver/tag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/tag.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,160 @@
+#ifndef _DX_TAG_H
+#define _DX_TAG_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+
+#define DX_TAG(in)	(IS_TAGGED(in))
+
+
+#ifdef CONFIG_TAG_NFSD
+#define DX_TAG_NFSD	1
+#else
+#define DX_TAG_NFSD	0
+#endif
+
+
+#ifdef CONFIG_TAGGING_NONE
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	(0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_GID16
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0x0000FFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	\
+	((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_ID24
+
+#define MAX_UID		0x00FFFFFF
+#define MAX_GID		0x00FFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	\
+	((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag)	\
+	((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_UID16
+
+#define MAX_UID		0x0000FFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	\
+	((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_INTERN
+
+#define MAX_UID		0xFFFFFFFF
+#define MAX_GID		0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag)	\
+	((cond) ? (tag) : 0)
+
+#define TAGINO_UID(cond, uid, tag)	(uid)
+#define TAGINO_GID(cond, gid, tag)	(gid)
+
+#endif
+
+
+#ifndef CONFIG_TAGGING_NONE
+#define dx_current_fstag(sb)	\
+	((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
+#else
+#define dx_current_fstag(sb)	(0)
+#endif
+
+#ifndef CONFIG_TAGGING_INTERN
+#define TAGINO_TAG(cond, tag)	(0)
+#else
+#define TAGINO_TAG(cond, tag)	((cond) ? (tag) : 0)
+#endif
+
+#define TAGINO_KUID(cond, kuid, ktag)	\
+	KUIDT_INIT(TAGINO_UID(cond, __kuid_val(kuid), __ktag_val(ktag)))
+#define TAGINO_KGID(cond, kgid, ktag)	\
+	KGIDT_INIT(TAGINO_GID(cond, __kgid_val(kgid), __ktag_val(ktag)))
+#define TAGINO_KTAG(cond, ktag)		\
+	KTAGT_INIT(TAGINO_TAG(cond, __ktag_val(ktag)))
+
+
+#define INOTAG_UID(cond, uid, gid)	\
+	((cond) ? ((uid) & MAX_UID) : (uid))
+#define INOTAG_GID(cond, uid, gid)	\
+	((cond) ? ((gid) & MAX_GID) : (gid))
+
+#define INOTAG_KUID(cond, kuid, kgid)	\
+	KUIDT_INIT(INOTAG_UID(cond, __kuid_val(kuid), __kgid_val(kgid)))
+#define INOTAG_KGID(cond, kuid, kgid)	\
+	KGIDT_INIT(INOTAG_GID(cond, __kuid_val(kuid), __kgid_val(kgid)))
+#define INOTAG_KTAG(cond, kuid, kgid, ktag) \
+	KTAGT_INIT(INOTAG_TAG(cond, \
+		__kuid_val(kuid), __kgid_val(kgid), __ktag_val(ktag)))
+
+
+static inline uid_t dx_map_uid(uid_t uid)
+{
+	if ((uid > MAX_UID) && (uid != -1))
+		uid = -2;
+	return (uid & MAX_UID);
+}
+
+static inline gid_t dx_map_gid(gid_t gid)
+{
+	if ((gid > MAX_GID) && (gid != -1))
+		gid = -2;
+	return (gid & MAX_GID);
+}
+
+struct peer_tag {
+	int32_t xid;
+	int32_t nid;
+};
+
+#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK))
+
+int dx_parse_tag(char *string, vtag_t *tag, int remove, int *mnt_flags,
+		 unsigned long *flags);
+
+#ifdef	CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
+
+#define dx_propagate_tag(n, i)	__dx_propagate_tag(n, i)
+
+#else
+#define dx_propagate_tag(n, i)	do { } while (0)
+#endif
+
+#endif /* _DX_TAG_H */
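
With CONFIG_TAGGING_GID16 the context tag lives in the upper 16 bits of the on-disk gid and is masked off again on the way in, as the TAGINO_*/INOTAG_* macros above show. A user-space check of that packing (plain uint32_t stands in for the kernel gid/tag types):

#include <stdio.h>
#include <stdint.h>

static uint32_t tagino_gid(uint32_t gid, uint32_t tag)	/* like TAGINO_GID */
{
	return (gid & 0xFFFF) | (tag << 16);
}

static uint32_t inotag_tag(uint32_t gid)		/* like INOTAG_TAG */
{
	return (gid >> 16) & 0xFFFF;
}

static uint32_t inotag_gid(uint32_t gid)		/* like INOTAG_GID, MAX_GID = 0xFFFF */
{
	return gid & 0xFFFF;
}

int main(void)
{
	uint32_t disk_gid = tagino_gid(1000, 42);	/* gid 1000 tagged for context 42 */

	printf("disk=%u tag=%u gid=%u\n",
	       disk_gid, inotag_tag(disk_gid), inotag_gid(disk_gid));
	/* disk=2753512 tag=42 gid=1000 */
	return 0;
}
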
diff -ruNp linux-3.13.11/include/linux/vserver/tag_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/tag_cmd.h
--- linux-3.13.11/include/linux/vserver/tag_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/vserver/tag_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,10 @@
+#ifndef _VSERVER_TAG_CMD_H
+#define _VSERVER_TAG_CMD_H
+
+#include <uapi/vserver/tag_cmd.h>
+
+extern int vc_task_tag(uint32_t);
+
+extern int vc_tag_migrate(uint32_t);
+
+#endif	/* _VSERVER_TAG_CMD_H */
diff -ruNp linux-3.13.11/include/linux/xattr.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/xattr.h
--- linux-3.13.11/include/linux/xattr.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/xattr.h	2014-07-09 12:00:15.000000000 +0200
@@ -28,7 +28,7 @@ struct xattr_handler {
 		   size_t size, int handler_flags);
 	int (*set)(struct dentry *dentry, const char *name, const void *buffer,
 		   size_t size, int flags, int handler_flags);
-};
+} __do_const;
 
 struct xattr {
 	const char *name;
@@ -37,6 +37,9 @@ struct xattr {
 };
 
 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t pax_getxattr(struct dentry *, void *, size_t);
+#endif
 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
diff -ruNp linux-3.13.11/include/linux/zlib.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/zlib.h
--- linux-3.13.11/include/linux/zlib.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/linux/zlib.h	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,7 @@
 #define _ZLIB_H
 
 #include <linux/zconf.h>
+#include <linux/compiler.h>
 
 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
 
                         /* basic functions */
 
-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
 /*
    Returns the number of bytes that needs to be allocated for a per-
    stream workspace with the specified parameters.  A pointer to this
diff -ruNp linux-3.13.11/include/media/v4l2-dev.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/media/v4l2-dev.h
--- linux-3.13.11/include/media/v4l2-dev.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/media/v4l2-dev.h	2014-07-09 12:00:15.000000000 +0200
@@ -76,7 +76,7 @@ struct v4l2_file_operations {
 	int (*mmap) (struct file *, struct vm_area_struct *);
 	int (*open) (struct file *);
 	int (*release) (struct file *);
-};
+} __do_const;
 
 /*
  * Newer version of video_device, handled by videodev2.c
diff -ruNp linux-3.13.11/include/media/v4l2-device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/media/v4l2-device.h
--- linux-3.13.11/include/media/v4l2-device.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/media/v4l2-device.h	2014-07-09 12:00:15.000000000 +0200
@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
    this function returns 0. If the name ends with a digit (e.g. cx18),
    then the name will be set to cx18-0 since cx180 looks really odd. */
 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
-						atomic_t *instance);
+						atomic_unchecked_t *instance);
 
 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
    Since the parent disappears this ensures that v4l2_dev doesn't have an
diff -ruNp linux-3.13.11/include/net/9p/transport.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/9p/transport.h
--- linux-3.13.11/include/net/9p/transport.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/9p/transport.h	2014-07-09 12:00:15.000000000 +0200
@@ -60,7 +60,7 @@ struct p9_trans_module {
 	int (*cancel) (struct p9_client *, struct p9_req_t *req);
 	int (*zc_request)(struct p9_client *, struct p9_req_t *,
 			  char *, char *, int , int, int, int);
-};
+} __do_const;
 
 void v9fs_register_trans(struct p9_trans_module *m);
 void v9fs_unregister_trans(struct p9_trans_module *m);
diff -ruNp linux-3.13.11/include/net/addrconf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/addrconf.h
--- linux-3.13.11/include/net/addrconf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/addrconf.h	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(str
 
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
 		       const struct in6_addr *daddr, unsigned int srcprefs,
-		       struct in6_addr *saddr);
+		       struct in6_addr *saddr, struct nx_info *nxi);
 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
 		      unsigned char banned_flags);
 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
diff -ruNp linux-3.13.11/include/net/af_unix.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/af_unix.h
--- linux-3.13.11/include/net/af_unix.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/af_unix.h	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,7 @@
 #include <linux/socket.h>
 #include <linux/un.h>
 #include <linux/mutex.h>
+#include <linux/vs_base.h>
 #include <net/sock.h>
 
 void unix_inflight(struct file *fp);
@@ -36,7 +37,7 @@ struct unix_skb_parms {
 	u32			secid;		/* Security ID		*/
 #endif
 	u32			consumed;
-};
+} __randomize_layout;
 
 #define UNIXCB(skb) 	(*(struct unix_skb_parms *)&((skb)->cb))
 #define UNIXSID(skb)	(&UNIXCB((skb)).secid)
diff -ruNp linux-3.13.11/include/net/bluetooth/l2cap.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/bluetooth/l2cap.h
--- linux-3.13.11/include/net/bluetooth/l2cap.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/bluetooth/l2cap.h	2014-07-09 12:00:15.000000000 +0200
@@ -557,7 +557,7 @@ struct l2cap_ops {
 	long			(*get_sndtimeo) (struct l2cap_chan *chan);
 	struct sk_buff		*(*alloc_skb) (struct l2cap_chan *chan,
 					       unsigned long len, int nb);
-};
+} __do_const;
 
 struct l2cap_conn {
 	struct hci_conn		*hcon;
diff -ruNp linux-3.13.11/include/net/caif/cfctrl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/caif/cfctrl.h
--- linux-3.13.11/include/net/caif/cfctrl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/caif/cfctrl.h	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ struct cfctrl_rsp {
 	void (*radioset_rsp)(void);
 	void (*reject_rsp)(struct cflayer *layer, u8 linkid,
 				struct cflayer *client_layer);
-};
+} __no_const;
 
 /* Link Setup Parameters for CAIF-Links. */
 struct cfctrl_link_param {
@@ -101,8 +101,8 @@ struct cfctrl_request_info {
 struct cfctrl {
 	struct cfsrvl serv;
 	struct cfctrl_rsp res;
-	atomic_t req_seq_no;
-	atomic_t rsp_seq_no;
+	atomic_unchecked_t req_seq_no;
+	atomic_unchecked_t rsp_seq_no;
 	struct list_head list;
 	/* Protects from simultaneous access to first_req list */
 	spinlock_t info_list_lock;
diff -ruNp linux-3.13.11/include/net/flow.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/flow.h
--- linux-3.13.11/include/net/flow.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/flow.h	2014-07-09 12:00:15.000000000 +0200
@@ -222,6 +222,6 @@ struct flow_cache_object *flow_cache_loo
 
 void flow_cache_flush(void);
 void flow_cache_flush_deferred(void);
-extern atomic_t flow_cache_genid;
+extern atomic_unchecked_t flow_cache_genid;
 
 #endif
diff -ruNp linux-3.13.11/include/net/genetlink.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/genetlink.h
--- linux-3.13.11/include/net/genetlink.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/genetlink.h	2014-07-09 12:00:15.000000000 +0200
@@ -118,7 +118,7 @@ struct genl_ops {
 	u8			cmd;
 	u8			internal_flags;
 	u8			flags;
-};
+} __do_const;
 
 int __genl_register_family(struct genl_family *family);
 
diff -ruNp linux-3.13.11/include/net/gro_cells.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/gro_cells.h
--- linux-3.13.11/include/net/gro_cells.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/gro_cells.h	2014-07-09 12:00:15.000000000 +0200
@@ -29,7 +29,7 @@ static inline void gro_cells_receive(str
 		cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
 
 	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
-		atomic_long_inc(&dev->rx_dropped);
+		atomic_long_inc_unchecked(&dev->rx_dropped);
 		kfree_skb(skb);
 		return;
 	}
diff -ruNp linux-3.13.11/include/net/inet_connection_sock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inet_connection_sock.h
--- linux-3.13.11/include/net/inet_connection_sock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inet_connection_sock.h	2014-07-09 12:00:15.000000000 +0200
@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
 	int	    (*bind_conflict)(const struct sock *sk,
 				     const struct inet_bind_bucket *tb, bool relax);
-};
+} __do_const;
 
 /** inet_connection_sock - INET connection oriented sock
  *
diff -ruNp linux-3.13.11/include/net/inet_timewait_sock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inet_timewait_sock.h
--- linux-3.13.11/include/net/inet_timewait_sock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inet_timewait_sock.h	2014-07-09 12:00:15.000000000 +0200
@@ -121,6 +121,10 @@ struct inet_timewait_sock {
 #define tw_v6_rcv_saddr    	__tw_common.skc_v6_rcv_saddr
 #define tw_dport		__tw_common.skc_dport
 #define tw_num			__tw_common.skc_num
+#define tw_xid			__tw_common.skc_xid
+#define tw_vx_info		__tw_common.skc_vx_info
+#define tw_nid			__tw_common.skc_nid
+#define tw_nx_info		__tw_common.skc_nx_info
 
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
diff -ruNp linux-3.13.11/include/net/inetpeer.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inetpeer.h
--- linux-3.13.11/include/net/inetpeer.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/inetpeer.h	2014-07-09 12:00:15.000000000 +0200
@@ -47,8 +47,8 @@ struct inet_peer {
 	 */
 	union {
 		struct {
-			atomic_t			rid;		/* Frag reception counter */
-			atomic_t			ip_id_count;	/* IP ID for the next packet */
+			atomic_unchecked_t		rid;		/* Frag reception counter */
+			atomic_unchecked_t		ip_id_count;	/* IP ID for the next packet */
 		};
 		struct rcu_head         rcu;
 		struct inet_peer	*gc_next;
@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(co
 /* can be called with or without local BH being disabled */
 static inline int inet_getid(struct inet_peer *p, int more)
 {
-	int old, new;
+	int id;
 	more++;
 	inet_peer_refcheck(p);
-	do {
-		old = atomic_read(&p->ip_id_count);
-		new = old + more;
-		if (!new)
-			new = 1;
-	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
-	return new;
+	id = atomic_add_return_unchecked(more, &p->ip_id_count);
+	if (!id)
+		id = atomic_inc_return_unchecked(&p->ip_id_count);
+	return id;
 }
 
 #endif /* _NET_INETPEER_H */
diff -ruNp linux-3.13.11/include/net/ip.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip.h
--- linux-3.13.11/include/net/ip.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip.h	2014-07-09 12:00:15.000000000 +0200
@@ -219,7 +219,7 @@ static inline void snmp_mib_free(void __
 
 void inet_get_local_port_range(struct net *net, int *low, int *high);
 
-extern unsigned long *sysctl_local_reserved_ports;
+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
 static inline int inet_is_reserved_local_port(int port)
 {
 	return test_bit(port, sysctl_local_reserved_ports);
diff -ruNp linux-3.13.11/include/net/ip6_route.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip6_route.h
--- linux-3.13.11/include/net/ip6_route.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip6_route.h	2014-07-09 12:00:15.000000000 +0200
@@ -90,7 +90,7 @@ int ip6_del_rt(struct rt6_info *);
 
 int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
 			const struct in6_addr *daddr, unsigned int prefs,
-			struct in6_addr *saddr);
+			struct in6_addr *saddr, struct nx_info *nxi);
 
 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
 			    const struct in6_addr *saddr, int oif, int flags);
diff -ruNp linux-3.13.11/include/net/ip_fib.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip_fib.h
--- linux-3.13.11/include/net/ip_fib.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip_fib.h	2014-07-09 12:00:15.000000000 +0200
@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct n
 
 #define FIB_RES_SADDR(net, res)				\
 	((FIB_RES_NH(res).nh_saddr_genid ==		\
-	  atomic_read(&(net)->ipv4.dev_addr_genid)) ?	\
+	  atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ?	\
 	 FIB_RES_NH(res).nh_saddr :			\
 	 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
 #define FIB_RES_GW(res)			(FIB_RES_NH(res).nh_gw)
diff -ruNp linux-3.13.11/include/net/ip_vs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip_vs.h
--- linux-3.13.11/include/net/ip_vs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ip_vs.h	2014-07-09 12:00:15.000000000 +0200
@@ -558,7 +558,7 @@ struct ip_vs_conn {
 	struct ip_vs_conn       *control;       /* Master control connection */
 	atomic_t                n_control;      /* Number of controlled ones */
 	struct ip_vs_dest       *dest;          /* real server */
-	atomic_t                in_pkts;        /* incoming packet counter */
+	atomic_unchecked_t      in_pkts;        /* incoming packet counter */
 
 	/* packet transmitter for different forwarding methods.  If it
 	   mangles the packet, it must return NF_DROP or better NF_STOLEN,
@@ -705,7 +705,7 @@ struct ip_vs_dest {
 	__be16			port;		/* port number of the server */
 	union nf_inet_addr	addr;		/* IP address of the server */
 	volatile unsigned int	flags;		/* dest status flags */
-	atomic_t		conn_flags;	/* flags to copy to conn */
+	atomic_unchecked_t	conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
 
 	atomic_t		refcnt;		/* reference counter */
@@ -960,11 +960,11 @@ struct netns_ipvs {
 	/* ip_vs_lblc */
 	int			sysctl_lblc_expiration;
 	struct ctl_table_header	*lblc_ctl_header;
-	struct ctl_table	*lblc_ctl_table;
+	ctl_table_no_const	*lblc_ctl_table;
 	/* ip_vs_lblcr */
 	int			sysctl_lblcr_expiration;
 	struct ctl_table_header	*lblcr_ctl_header;
-	struct ctl_table	*lblcr_ctl_table;
+	ctl_table_no_const	*lblcr_ctl_table;
 	/* ip_vs_est */
 	struct list_head	est_list;	/* estimator list */
 	spinlock_t		est_lock;
diff -ruNp linux-3.13.11/include/net/irda/ircomm_tty.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/irda/ircomm_tty.h
--- linux-3.13.11/include/net/irda/ircomm_tty.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/irda/ircomm_tty.h	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,7 @@
 #include <linux/termios.h>
 #include <linux/timer.h>
 #include <linux/tty.h>		/* struct tty_struct */
+#include <asm/local.h>
 
 #include <net/irda/irias_object.h>
 #include <net/irda/ircomm_core.h>
diff -ruNp linux-3.13.11/include/net/iucv/af_iucv.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/iucv/af_iucv.h
--- linux-3.13.11/include/net/iucv/af_iucv.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/iucv/af_iucv.h	2014-07-09 12:00:15.000000000 +0200
@@ -149,7 +149,7 @@ struct iucv_skb_cb {
 struct iucv_sock_list {
 	struct hlist_head head;
 	rwlock_t	  lock;
-	atomic_t	  autobind_name;
+	atomic_unchecked_t autobind_name;
 };
 
 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
diff -ruNp linux-3.13.11/include/net/llc_c_ac.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_ac.h
--- linux-3.13.11/include/net/llc_c_ac.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_ac.h	2014-07-09 12:00:15.000000000 +0200
@@ -87,7 +87,7 @@
 #define LLC_CONN_AC_STOP_SENDACK_TMR			70
 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING	71
 
-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
 
 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
diff -ruNp linux-3.13.11/include/net/llc_c_ev.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_ev.h
--- linux-3.13.11/include/net/llc_c_ev.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_ev.h	2014-07-09 12:00:15.000000000 +0200
@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_
 	return (struct llc_conn_state_ev *)skb->cb;
 }
 
-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
 
 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
diff -ruNp linux-3.13.11/include/net/llc_c_st.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_st.h
--- linux-3.13.11/include/net/llc_c_st.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_c_st.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
 	u8		   next_state;
 	llc_conn_ev_qfyr_t *ev_qualifiers;
 	llc_conn_action_t  *ev_actions;
-};
+} __do_const;
 
 struct llc_conn_state {
 	u8			    current_state;
diff -ruNp linux-3.13.11/include/net/llc_s_ac.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_s_ac.h
--- linux-3.13.11/include/net/llc_s_ac.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_s_ac.h	2014-07-09 12:00:15.000000000 +0200
@@ -23,7 +23,7 @@
 #define SAP_ACT_TEST_IND	9
 
 /* All action functions must look like this */
-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
 
 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
diff -ruNp linux-3.13.11/include/net/llc_s_st.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_s_st.h
--- linux-3.13.11/include/net/llc_s_st.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/llc_s_st.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
 	llc_sap_ev_t	  ev;
 	u8		  next_state;
 	llc_sap_action_t *ev_actions;
-};
+} __do_const;
 
 struct llc_sap_state {
 	u8			   curr_state;
diff -ruNp linux-3.13.11/include/net/mac80211.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/mac80211.h
--- linux-3.13.11/include/net/mac80211.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/mac80211.h	2014-07-09 12:00:15.000000000 +0200
@@ -4407,7 +4407,7 @@ struct rate_control_ops {
 	void (*add_sta_debugfs)(void *priv, void *priv_sta,
 				struct dentry *dir);
 	void (*remove_sta_debugfs)(void *priv, void *priv_sta);
-};
+} __do_const;
 
 static inline int rate_supported(struct ieee80211_sta *sta,
 				 enum ieee80211_band band,
diff -ruNp linux-3.13.11/include/net/neighbour.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/neighbour.h
--- linux-3.13.11/include/net/neighbour.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/neighbour.h	2014-07-09 12:00:15.000000000 +0200
@@ -123,7 +123,7 @@ struct neigh_ops {
 	void			(*error_report)(struct neighbour *, struct sk_buff *);
 	int			(*output)(struct neighbour *, struct sk_buff *);
 	int			(*connected_output)(struct neighbour *, struct sk_buff *);
-};
+} __do_const;
 
 struct pneigh_entry {
 	struct pneigh_entry	*next;
@@ -163,7 +163,6 @@ struct neigh_table {
 	void			(*proxy_redo)(struct sk_buff *skb);
 	char			*id;
 	struct neigh_parms	parms;
-	/* HACK. gc_* should follow parms without a gap! */
 	int			gc_interval;
 	int			gc_thresh1;
 	int			gc_thresh2;
@@ -178,7 +177,7 @@ struct neigh_table {
 	struct neigh_statistics	__percpu *stats;
 	struct neigh_hash_table __rcu *nht;
 	struct pneigh_entry	**phash_buckets;
-};
+} __randomize_layout;
 
 #define NEIGH_PRIV_ALIGN	sizeof(long long)
 #define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)
diff -ruNp linux-3.13.11/include/net/net_namespace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/net_namespace.h
--- linux-3.13.11/include/net/net_namespace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/net_namespace.h	2014-07-09 12:00:15.000000000 +0200
@@ -124,8 +124,8 @@ struct net {
 	struct netns_ipvs	*ipvs;
 #endif
 	struct sock		*diag_nlsk;
-	atomic_t		fnhe_genid;
-};
+	atomic_unchecked_t	fnhe_genid;
+} __randomize_layout;
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -281,7 +281,11 @@ static inline struct net *read_pnet(stru
 #define __net_init	__init
 #define __net_exit	__exit_refok
 #define __net_initdata	__initdata
+#ifdef CONSTIFY_PLUGIN
 #define __net_initconst	__initconst
+#else
+#define __net_initconst	__initdata
+#endif
 #endif
 
 struct pernet_operations {
@@ -291,7 +295,7 @@ struct pernet_operations {
 	void (*exit_batch)(struct list_head *net_exit_list);
 	int *id;
 	size_t size;
-};
+} __do_const;
 
 /*
  * Use these carefully.  If you implement a network device and it
@@ -339,23 +343,23 @@ static inline void unregister_net_sysctl
 
 static inline int rt_genid_ipv4(struct net *net)
 {
-	return atomic_read(&net->ipv4.rt_genid);
+	return atomic_read_unchecked(&net->ipv4.rt_genid);
 }
 
 static inline void rt_genid_bump_ipv4(struct net *net)
 {
-	atomic_inc(&net->ipv4.rt_genid);
+	atomic_inc_unchecked(&net->ipv4.rt_genid);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static inline int rt_genid_ipv6(struct net *net)
 {
-	return atomic_read(&net->ipv6.rt_genid);
+	return atomic_read_unchecked(&net->ipv6.rt_genid);
 }
 
 static inline void rt_genid_bump_ipv6(struct net *net)
 {
-	atomic_inc(&net->ipv6.rt_genid);
+	atomic_inc_unchecked(&net->ipv6.rt_genid);
 }
 #else
 static inline int rt_genid_ipv6(struct net *net)
@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(str
 
 static inline int fnhe_genid(struct net *net)
 {
-	return atomic_read(&net->fnhe_genid);
+	return atomic_read_unchecked(&net->fnhe_genid);
 }
 
 static inline void fnhe_genid_bump(struct net *net)
 {
-	atomic_inc(&net->fnhe_genid);
+	atomic_inc_unchecked(&net->fnhe_genid);
 }
 
 #endif /* __NET_NET_NAMESPACE_H */
diff -ruNp linux-3.13.11/include/net/netdma.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netdma.h
--- linux-3.13.11/include/net/netdma.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netdma.h	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list);
 
diff -ruNp linux-3.13.11/include/net/netfilter/nf_conntrack_extend.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netfilter/nf_conntrack_extend.h
--- linux-3.13.11/include/net/netfilter/nf_conntrack_extend.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netfilter/nf_conntrack_extend.h	2014-07-09 12:00:15.000000000 +0200
@@ -47,8 +47,8 @@ enum nf_ct_ext_id {
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
 	struct rcu_head rcu;
-	u8 offset[NF_CT_EXT_NUM];
-	u8 len;
+	u16 offset[NF_CT_EXT_NUM];
+	u16 len;
 	char data[0];
 };
 
diff -ruNp linux-3.13.11/include/net/netlink.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netlink.h
--- linux-3.13.11/include/net/netlink.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netlink.h	2014-07-09 12:00:15.000000000 +0200
@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct
 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
 {
 	if (mark)
-		skb_trim(skb, (unsigned char *) mark - skb->data);
+		skb_trim(skb, (const unsigned char *) mark - skb->data);
 }
 
 /**
diff -ruNp linux-3.13.11/include/net/netns/conntrack.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/conntrack.h
--- linux-3.13.11/include/net/netns/conntrack.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/conntrack.h	2014-07-09 12:00:15.000000000 +0200
@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
 struct nf_proto_net {
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *ctl_table_header;
-	struct ctl_table        *ctl_table;
+	ctl_table_no_const      *ctl_table;
 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
 	struct ctl_table_header *ctl_compat_header;
-	struct ctl_table        *ctl_compat_table;
+	ctl_table_no_const      *ctl_compat_table;
 #endif
 #endif
 	unsigned int		users;
@@ -58,7 +58,7 @@ struct nf_ip_net {
 	struct nf_icmp_net	icmpv6;
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
 	struct ctl_table_header *ctl_table_header;
-	struct ctl_table	*ctl_table;
+	ctl_table_no_const	*ctl_table;
 #endif
 };
 
diff -ruNp linux-3.13.11/include/net/netns/ipv4.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/ipv4.h
--- linux-3.13.11/include/net/netns/ipv4.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/ipv4.h	2014-07-09 12:00:15.000000000 +0200
@@ -72,7 +72,7 @@ struct netns_ipv4 {
 
 	kgid_t sysctl_ping_group_range[2];
 
-	atomic_t dev_addr_genid;
+	atomic_unchecked_t dev_addr_genid;
 
 #ifdef CONFIG_IP_MROUTE
 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -82,6 +82,6 @@ struct netns_ipv4 {
 	struct fib_rules_ops	*mr_rules_ops;
 #endif
 #endif
-	atomic_t	rt_genid;
+	atomic_unchecked_t	rt_genid;
 };
 #endif
diff -ruNp linux-3.13.11/include/net/netns/ipv6.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/ipv6.h
--- linux-3.13.11/include/net/netns/ipv6.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/netns/ipv6.h	2014-07-09 12:00:15.000000000 +0200
@@ -71,8 +71,8 @@ struct netns_ipv6 {
 	struct fib_rules_ops	*mr6_rules_ops;
 #endif
 #endif
-	atomic_t		dev_addr_genid;
-	atomic_t		rt_genid;
+	atomic_unchecked_t	dev_addr_genid;
+	atomic_unchecked_t	rt_genid;
 };
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff -ruNp linux-3.13.11/include/net/ping.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ping.h
--- linux-3.13.11/include/net/ping.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/ping.h	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@ struct ping_iter_state {
 extern struct proto ping_prot;
 extern struct ping_table ping_table;
 #if IS_ENABLED(CONFIG_IPV6)
-extern struct pingv6_ops pingv6_ops;
+extern struct pingv6_ops *pingv6_ops;
 #endif
 
 struct pingfakehdr {
diff -ruNp linux-3.13.11/include/net/protocol.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/protocol.h
--- linux-3.13.11/include/net/protocol.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/protocol.h	2014-07-09 12:00:15.000000000 +0200
@@ -44,7 +44,7 @@ struct net_protocol {
 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 	unsigned int		no_policy:1,
 				netns_ok:1;
-};
+} __do_const;
 
 #if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
@@ -57,7 +57,7 @@ struct inet6_protocol {
 			       u8 type, u8 code, int offset,
 			       __be32 info);
 	unsigned int	flags;	/* INET6_PROTO_xxx */
-};
+} __do_const;
 
 #define INET6_PROTO_NOPOLICY	0x1
 #define INET6_PROTO_FINAL	0x2
diff -ruNp linux-3.13.11/include/net/route.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/route.h
--- linux-3.13.11/include/net/route.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/route.h	2014-07-09 12:00:15.000000000 +0200
@@ -203,6 +203,9 @@ static inline void ip_rt_put(struct rtab
 	dst_release(&rt->dst);
 }
 
+#include <linux/vs_base.h>
+#include <linux/vs_inet.h>
+
 #define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)
 
 extern const __u8 ip_tos2prio[16];
@@ -252,6 +255,9 @@ static inline void ip_route_connect_init
 			   protocol, flow_flags, dst, src, dport, sport);
 }
 
+extern struct rtable *ip_v4_find_src(struct net *net, struct nx_info *,
+	struct flowi4 *);
+
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
 					      __be32 dst, __be32 src, u32 tos,
 					      int oif, u8 protocol,
@@ -260,11 +266,25 @@ static inline struct rtable *ip_route_co
 {
 	struct net *net = sock_net(sk);
 	struct rtable *rt;
+	struct nx_info *nx_info = current_nx_info();
 
 	ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
 			      sport, dport, sk, can_sleep);
 
-	if (!dst || !src) {
+	if (sk)
+		nx_info = sk->sk_nx_info;
+
+	vxdprintk(VXD_CBIT(net, 4),
+		"ip_route_connect(%p) %p,%p;%lx",
+		sk, nx_info, sk->sk_socket,
+		(sk->sk_socket?sk->sk_socket->flags:0));
+
+	rt = ip_v4_find_src(net, nx_info, fl4);
+	if (IS_ERR(rt))
+		return rt;
+	ip_rt_put(rt);
+
+	if (!fl4->daddr || !fl4->saddr) {
 		rt = __ip_route_output_key(net, fl4);
 		if (IS_ERR(rt))
 			return rt;
diff -ruNp linux-3.13.11/include/net/rtnetlink.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/rtnetlink.h
--- linux-3.13.11/include/net/rtnetlink.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/rtnetlink.h	2014-07-09 12:00:15.000000000 +0200
@@ -79,7 +79,7 @@ struct rtnl_link_ops {
 					       const struct net_device *dev);
 	unsigned int		(*get_num_tx_queues)(void);
 	unsigned int		(*get_num_rx_queues)(void);
-};
+} __do_const;
 
 int __rtnl_link_register(struct rtnl_link_ops *ops);
 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
diff -ruNp linux-3.13.11/include/net/sctp/checksum.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/checksum.h
--- linux-3.13.11/include/net/sctp/checksum.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/checksum.h	2014-07-09 12:00:15.000000000 +0200
@@ -62,8 +62,8 @@ static inline __le32 sctp_compute_cksum(
 					unsigned int offset)
 {
 	struct sctphdr *sh = sctp_hdr(skb);
-        __le32 ret, old = sh->checksum;
-	const struct skb_checksum_ops ops = {
+	__le32 ret, old = sh->checksum;
+	static const struct skb_checksum_ops ops = {
 		.update  = sctp_csum_update,
 		.combine = sctp_csum_combine,
 	};
diff -ruNp linux-3.13.11/include/net/sctp/sm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/sm.h
--- linux-3.13.11/include/net/sctp/sm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/sm.h	2014-07-09 12:00:15.000000000 +0200
@@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsig
 typedef struct {
 	sctp_state_fn_t *fn;
 	const char *name;
-} sctp_sm_table_entry_t;
+} __do_const sctp_sm_table_entry_t;
 
 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
  * currently in use.
@@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sct
 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
 
 /* Extern declarations for major data structures.  */
-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
 
 
 /* Get the size of a DATA chunk payload. */
diff -ruNp linux-3.13.11/include/net/sctp/structs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/structs.h
--- linux-3.13.11/include/net/sctp/structs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sctp/structs.h	2014-07-09 12:00:15.000000000 +0200
@@ -508,7 +508,7 @@ struct sctp_pf {
 					  struct sctp_association *asoc);
 	void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
 	struct sctp_af *af;
-};
+} __do_const;
 
 
 /* Structure to track chunk fragments that have been acked, but peer
diff -ruNp linux-3.13.11/include/net/sock.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sock.h
--- linux-3.13.11/include/net/sock.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/sock.h	2014-07-09 12:00:15.000000000 +0200
@@ -191,6 +191,10 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
 #endif
+	vxid_t			skc_xid;
+	struct vx_info		*skc_vx_info;
+	vnid_t			skc_nid;
+	struct nx_info		*skc_nx_info;
 
 #if IS_ENABLED(CONFIG_IPV6)
 	struct in6_addr		skc_v6_daddr;
@@ -321,7 +325,11 @@ struct sock {
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
 #define sk_v6_daddr		__sk_common.skc_v6_daddr
-#define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
+#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
+#define sk_xid			__sk_common.skc_xid
+#define sk_vx_info		__sk_common.skc_vx_info
+#define sk_nid			__sk_common.skc_nid
+#define sk_nx_info		__sk_common.skc_nx_info
 
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
@@ -348,7 +356,7 @@ struct sock {
 	unsigned int		sk_napi_id;
 	unsigned int		sk_ll_usec;
 #endif
-	atomic_t		sk_drops;
+	atomic_unchecked_t	sk_drops;
 	int			sk_rcvbuf;
 
 	struct sk_filter __rcu	*sk_filter;
@@ -1022,7 +1030,7 @@ struct proto {
 	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
 	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
-};
+} __randomize_layout;
 
 /*
  * Bits in struct cg_proto.flags
@@ -1209,7 +1217,7 @@ static inline u64 memcg_memory_allocated
 	return ret >> PAGE_SHIFT;
 }
 
-static inline long
+static inline long __intentional_overflow(-1)
 sk_memory_allocated(const struct sock *sk)
 {
 	struct proto *prot = sk->sk_prot;
@@ -1354,7 +1362,7 @@ struct sock_iocb {
 	struct scm_cookie	*scm;
 	struct msghdr		*msg, async_msg;
 	struct kiocb		*kiocb;
-};
+} __randomize_layout;
 
 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
 {
@@ -1818,7 +1826,7 @@ static inline void sk_nocaps_add(struct
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-					   char __user *from, char *to,
+					   char __user *from, unsigned char *to,
 					   int copy, int offset)
 {
 	if (skb->ip_summed == CHECKSUM_NONE) {
@@ -2080,7 +2088,7 @@ static inline void sk_stream_moderate_sn
 	}
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
 
 /**
  * sk_page_frag - return an appropriate page_frag
diff -ruNp linux-3.13.11/include/net/tcp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/tcp.h
--- linux-3.13.11/include/net/tcp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/tcp.h	2014-07-09 12:00:15.000000000 +0200
@@ -541,7 +541,7 @@ void tcp_retransmit_timer(struct sock *s
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 
 void tcp_send_probe0(struct sock *);
 void tcp_send_partial(struct sock *);
@@ -712,8 +712,8 @@ struct tcp_skb_cb {
 		struct inet6_skb_parm	h6;
 #endif
 	} header;	/* For incoming frames		*/
-	__u32		seq;		/* Starting sequence number	*/
-	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
+	__u32		seq __intentional_overflow(0);	/* Starting sequence number	*/
+	__u32		end_seq __intentional_overflow(0);	/* SEQ + FIN + SYN + datalen	*/
 	__u32		when;		/* used to compute rtt's	*/
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
 
@@ -727,7 +727,7 @@ struct tcp_skb_cb {
 
 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
 	/* 1 byte hole */
-	__u32		ack_seq;	/* Sequence number ACK'd	*/
+	__u32		ack_seq __intentional_overflow(0);	/* Sequence number ACK'd	*/
 };
 
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
diff -ruNp linux-3.13.11/include/net/xfrm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/xfrm.h
--- linux-3.13.11/include/net/xfrm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/net/xfrm.h	2014-07-09 12:00:15.000000000 +0200
@@ -287,7 +287,6 @@ struct xfrm_dst;
 struct xfrm_policy_afinfo {
 	unsigned short		family;
 	struct dst_ops		*dst_ops;
-	void			(*garbage_collect)(struct net *net);
 	struct dst_entry	*(*dst_lookup)(struct net *net, int tos,
 					       const xfrm_address_t *saddr,
 					       const xfrm_address_t *daddr);
@@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
 					    struct net_device *dev,
 					    const struct flowi *fl);
 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
-};
+} __do_const;
 
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
@@ -344,7 +343,7 @@ struct xfrm_state_afinfo {
 	int			(*transport_finish)(struct sk_buff *skb,
 						    int async);
 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
-};
+} __do_const;
 
 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -429,7 +428,7 @@ struct xfrm_mode {
 	struct module *owner;
 	unsigned int encap;
 	int flags;
-};
+} __do_const;
 
 /* Flags for xfrm_mode. */
 enum {
@@ -526,7 +525,7 @@ struct xfrm_policy {
 	struct timer_list	timer;
 
 	struct flow_cache_object flo;
-	atomic_t		genid;
+	atomic_unchecked_t	genid;
 	u32			priority;
 	u32			index;
 	struct xfrm_mark	mark;
@@ -1166,6 +1165,7 @@ static inline void xfrm_sk_free_policy(s
 }
 
 void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect_deferred(struct net *net);
 
 #else
 
@@ -1204,6 +1204,9 @@ static inline int xfrm6_policy_check_rev
 static inline void xfrm_garbage_collect(struct net *net)
 {
 }
+static inline void xfrm_garbage_collect_deferred(struct net *net)
+{
+}
 #endif
 
 static __inline__
diff -ruNp linux-3.13.11/include/rdma/iw_cm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/rdma/iw_cm.h
--- linux-3.13.11/include/rdma/iw_cm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/rdma/iw_cm.h	2014-07-09 12:00:15.000000000 +0200
@@ -122,7 +122,7 @@ struct iw_cm_verbs {
 					 int backlog);
 
 	int		(*destroy_listen)(struct iw_cm_id *cm_id);
-};
+} __no_const;
 
 /**
  * iw_create_cm_id - Create an IW CM identifier.
diff -ruNp linux-3.13.11/include/scsi/libfc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/libfc.h
--- linux-3.13.11/include/scsi/libfc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/libfc.h	2014-07-09 12:00:15.000000000 +0200
@@ -771,6 +771,7 @@ struct libfc_function_template {
 	 */
 	void (*disc_stop_final) (struct fc_lport *);
 };
+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
 
 /**
  * struct fc_disc - Discovery context
@@ -875,7 +876,7 @@ struct fc_lport {
 	struct fc_vport		       *vport;
 
 	/* Operational Information */
-	struct libfc_function_template tt;
+	libfc_function_template_no_const tt;
 	u8			       link_up;
 	u8			       qfull;
 	enum fc_lport_state	       state;
diff -ruNp linux-3.13.11/include/scsi/scsi_device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/scsi_device.h
--- linux-3.13.11/include/scsi/scsi_device.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/scsi_device.h	2014-07-09 12:00:15.000000000 +0200
@@ -180,9 +180,9 @@ struct scsi_device {
 	unsigned int max_device_blocked; /* what device_blocked counts down from  */
 #define SCSI_DEFAULT_DEVICE_BLOCKED	3
 
-	atomic_t iorequest_cnt;
-	atomic_t iodone_cnt;
-	atomic_t ioerr_cnt;
+	atomic_unchecked_t iorequest_cnt;
+	atomic_unchecked_t iodone_cnt;
+	atomic_unchecked_t ioerr_cnt;
 
 	struct device		sdev_gendev,
 				sdev_dev;
diff -ruNp linux-3.13.11/include/scsi/scsi_transport_fc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/scsi_transport_fc.h
--- linux-3.13.11/include/scsi/scsi_transport_fc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/scsi/scsi_transport_fc.h	2014-07-09 12:00:15.000000000 +0200
@@ -751,7 +751,8 @@ struct fc_function_template {
 	unsigned long	show_host_system_hostname:1;
 
 	unsigned long	disable_target_scan:1;
-};
+} __do_const;
+typedef struct fc_function_template __no_const fc_function_template_no_const;
 
 
 /**
diff -ruNp linux-3.13.11/include/sound/compress_driver.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/sound/compress_driver.h
--- linux-3.13.11/include/sound/compress_driver.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/sound/compress_driver.h	2014-07-09 12:00:15.000000000 +0200
@@ -128,7 +128,7 @@ struct snd_compr_ops {
 			struct snd_compr_caps *caps);
 	int (*get_codec_caps) (struct snd_compr_stream *stream,
 			struct snd_compr_codec_caps *codec);
-};
+} __no_const;
 
 /**
  * struct snd_compr: Compressed device
diff -ruNp linux-3.13.11/include/sound/soc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/sound/soc.h
--- linux-3.13.11/include/sound/soc.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/sound/soc.h	2014-07-09 12:00:15.000000000 +0200
@@ -763,7 +763,7 @@ struct snd_soc_codec_driver {
 	/* probe ordering - for components with runtime dependencies */
 	int probe_order;
 	int remove_order;
-};
+} __do_const;
 
 /* SoC platform interface */
 struct snd_soc_platform_driver {
@@ -809,7 +809,7 @@ struct snd_soc_platform_driver {
 	unsigned int (*read)(struct snd_soc_platform *, unsigned int);
 	int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
 	int (*bespoke_trigger)(struct snd_pcm_substream *, int);
-};
+} __do_const;
 
 struct snd_soc_platform {
 	const char *name;
diff -ruNp linux-3.13.11/include/target/target_core_base.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/target/target_core_base.h
--- linux-3.13.11/include/target/target_core_base.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/target/target_core_base.h	2014-07-09 12:00:15.000000000 +0200
@@ -687,7 +687,7 @@ struct se_device {
 	atomic_long_t		write_bytes;
 	/* Active commands on this virtual SE device */
 	atomic_t		simple_cmds;
-	atomic_t		dev_ordered_id;
+	atomic_unchecked_t	dev_ordered_id;
 	atomic_t		dev_ordered_sync;
 	atomic_t		dev_qf_count;
 	int			export_count;
diff -ruNp linux-3.13.11/include/trace/events/fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/trace/events/fs.h
--- linux-3.13.11/include/trace/events/fs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/trace/events/fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,53 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fs
+
+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FS_H
+
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(do_sys_open,
+
+	TP_PROTO(const char *filename, int flags, int mode),
+
+	TP_ARGS(filename, flags, mode),
+
+	TP_STRUCT__entry(
+		__string(	filename, filename		)
+		__field(	int, flags			)
+		__field(	int, mode			)
+	),
+
+	TP_fast_assign(
+		__assign_str(filename, filename);
+		__entry->flags = flags;
+		__entry->mode = mode;
+	),
+
+	TP_printk("\"%s\" %x %o",
+		  __get_str(filename), __entry->flags, __entry->mode)
+);
+
+TRACE_EVENT(open_exec,
+
+	TP_PROTO(const char *filename),
+
+	TP_ARGS(filename),
+
+	TP_STRUCT__entry(
+		__string(	filename, filename		)
+	),
+
+	TP_fast_assign(
+		__assign_str(filename, filename);
+	),
+
+	TP_printk("\"%s\"",
+		  __get_str(filename))
+);
+
+#endif /* _TRACE_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff -ruNp linux-3.13.11/include/trace/events/irq.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/trace/events/irq.h
--- linux-3.13.11/include/trace/events/irq.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/trace/events/irq.h	2014-07-09 12:00:15.000000000 +0200
@@ -36,7 +36,7 @@ struct softirq_action;
  */
 TRACE_EVENT(irq_handler_entry,
 
-	TP_PROTO(int irq, struct irqaction *action),
+	TP_PROTO(int irq, const struct irqaction *action),
 
 	TP_ARGS(irq, action),
 
@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
  */
 TRACE_EVENT(irq_handler_exit,
 
-	TP_PROTO(int irq, struct irqaction *action, int ret),
+	TP_PROTO(int irq, const struct irqaction *action, int ret),
 
 	TP_ARGS(irq, action, ret),
 
diff -ruNp linux-3.13.11/include/uapi/Kbuild linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/Kbuild
--- linux-3.13.11/include/uapi/Kbuild	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/Kbuild	2014-07-09 12:00:15.000000000 +0200
@@ -12,3 +12,4 @@ header-y += video/
 header-y += drm/
 header-y += xen/
 header-y += scsi/
+header-y += vserver/
diff -ruNp linux-3.13.11/include/uapi/linux/a.out.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/a.out.h
--- linux-3.13.11/include/uapi/linux/a.out.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/a.out.h	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,14 @@ enum machine_type {
   M_MIPS2 = 152		/* MIPS R6000/R4000 binary */
 };
 
+/* Constants for the N_FLAGS field */
+#define F_PAX_PAGEEXEC	1	/* Paging based non-executable pages */
+#define F_PAX_EMUTRAMP	2	/* Emulate trampolines */
+#define F_PAX_MPROTECT	4	/* Restrict mprotect() */
+#define F_PAX_RANDMMAP	8	/* Randomize mmap() base */
+/*#define F_PAX_RANDEXEC	16*/	/* Randomize ET_EXEC base */
+#define F_PAX_SEGMEXEC	32	/* Segmentation based non-executable pages */
+
 #if !defined (N_MAGIC)
 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
 #endif
diff -ruNp linux-3.13.11/include/uapi/linux/bcache.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/bcache.h
--- linux-3.13.11/include/uapi/linux/bcache.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/bcache.h	2014-07-09 12:00:15.000000000 +0200
@@ -5,6 +5,7 @@
  * Bcache on disk data structures
  */
 
+#include <linux/compiler.h>
 #include <asm/types.h>
 
 #define BITMASK(name, type, field, offset, size)		\
@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, _
 /* Btree keys - all units are in sectors */
 
 struct bkey {
-	__u64	high;
-	__u64	low;
+	__u64	high __intentional_overflow(-1);
+	__u64	low __intentional_overflow(-1);
 	__u64	ptr[];
 };
 
diff -ruNp linux-3.13.11/include/uapi/linux/byteorder/little_endian.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/byteorder/little_endian.h
--- linux-3.13.11/include/uapi/linux/byteorder/little_endian.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/byteorder/little_endian.h	2014-07-09 12:00:15.000000000 +0200
@@ -42,51 +42,51 @@
 
 static inline __le64 __cpu_to_le64p(const __u64 *p)
 {
-	return (__force __le64)*p;
+	return (__force const __le64)*p;
 }
-static inline __u64 __le64_to_cpup(const __le64 *p)
+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
 {
-	return (__force __u64)*p;
+	return (__force const __u64)*p;
 }
 static inline __le32 __cpu_to_le32p(const __u32 *p)
 {
-	return (__force __le32)*p;
+	return (__force const __le32)*p;
 }
 static inline __u32 __le32_to_cpup(const __le32 *p)
 {
-	return (__force __u32)*p;
+	return (__force const __u32)*p;
 }
 static inline __le16 __cpu_to_le16p(const __u16 *p)
 {
-	return (__force __le16)*p;
+	return (__force const __le16)*p;
 }
 static inline __u16 __le16_to_cpup(const __le16 *p)
 {
-	return (__force __u16)*p;
+	return (__force const __u16)*p;
 }
 static inline __be64 __cpu_to_be64p(const __u64 *p)
 {
-	return (__force __be64)__swab64p(p);
+	return (__force const __be64)__swab64p(p);
 }
 static inline __u64 __be64_to_cpup(const __be64 *p)
 {
-	return __swab64p((__u64 *)p);
+	return __swab64p((const __u64 *)p);
 }
 static inline __be32 __cpu_to_be32p(const __u32 *p)
 {
-	return (__force __be32)__swab32p(p);
+	return (__force const __be32)__swab32p(p);
 }
-static inline __u32 __be32_to_cpup(const __be32 *p)
+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
 {
-	return __swab32p((__u32 *)p);
+	return __swab32p((const __u32 *)p);
 }
 static inline __be16 __cpu_to_be16p(const __u16 *p)
 {
-	return (__force __be16)__swab16p(p);
+	return (__force const __be16)__swab16p(p);
 }
 static inline __u16 __be16_to_cpup(const __be16 *p)
 {
-	return __swab16p((__u16 *)p);
+	return __swab16p((const __u16 *)p);
 }
 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
 #define __le64_to_cpus(x) do { (void)(x); } while (0)
diff -ruNp linux-3.13.11/include/uapi/linux/capability.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/capability.h
--- linux-3.13.11/include/uapi/linux/capability.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/capability.h	2014-07-09 12:00:15.000000000 +0200
@@ -259,6 +259,7 @@ struct vfs_cap_data {
    arbitrary SCSI commands */
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
+/* Allow the selection of a security context */
 
 #define CAP_SYS_ADMIN        21
 
@@ -345,7 +346,12 @@ struct vfs_cap_data {
 
 #define CAP_LAST_CAP         CAP_BLOCK_SUSPEND
 
-#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
+/* Allow context manipulations */
+/* Allow changing context info on files */
+
+#define CAP_CONTEXT	     63
+
+#define cap_valid(x) ((x) >= 0 && ((x) <= CAP_LAST_CAP || (x) == CAP_CONTEXT))
 
 /*
  * Bit location of each capability (used by user-space library and kernel)
diff -ruNp linux-3.13.11/include/uapi/linux/elf.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/elf.h
--- linux-3.13.11/include/uapi/linux/elf.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/elf.h	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,17 @@ typedef __s64	Elf64_Sxword;
 #define PT_GNU_EH_FRAME		0x6474e550
 
 #define PT_GNU_STACK	(PT_LOOS + 0x474e551)
+#define PT_GNU_RELRO	(PT_LOOS + 0x474e552)
+
+#define PT_PAX_FLAGS	(PT_LOOS + 0x5041580)
+
+/* Constants for the e_flags field */
+#define EF_PAX_PAGEEXEC		1	/* Paging based non-executable pages */
+#define EF_PAX_EMUTRAMP		2	/* Emulate trampolines */
+#define EF_PAX_MPROTECT		4	/* Restrict mprotect() */
+#define EF_PAX_RANDMMAP		8	/* Randomize mmap() base */
+/*#define EF_PAX_RANDEXEC		16*/	/* Randomize ET_EXEC base */
+#define EF_PAX_SEGMEXEC		32	/* Segmentation based non-executable pages */
 
 /*
  * Extended Numbering
@@ -94,6 +105,8 @@ typedef __s64	Elf64_Sxword;
 #define DT_DEBUG	21
 #define DT_TEXTREL	22
 #define DT_JMPREL	23
+#define DT_FLAGS	30
+  #define DF_TEXTREL  0x00000004
 #define DT_ENCODING	32
 #define OLD_DT_LOOS	0x60000000
 #define DT_LOOS		0x6000000d
@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
 #define PF_W		0x2
 #define PF_X		0x1
 
+#define PF_PAGEEXEC	(1U << 4)	/* Enable  PAGEEXEC */
+#define PF_NOPAGEEXEC	(1U << 5)	/* Disable PAGEEXEC */
+#define PF_SEGMEXEC	(1U << 6)	/* Enable  SEGMEXEC */
+#define PF_NOSEGMEXEC	(1U << 7)	/* Disable SEGMEXEC */
+#define PF_MPROTECT	(1U << 8)	/* Enable  MPROTECT */
+#define PF_NOMPROTECT	(1U << 9)	/* Disable MPROTECT */
+/*#define PF_RANDEXEC	(1U << 10)*/	/* Enable  RANDEXEC */
+/*#define PF_NORANDEXEC	(1U << 11)*/	/* Disable RANDEXEC */
+#define PF_EMUTRAMP	(1U << 12)	/* Enable  EMUTRAMP */
+#define PF_NOEMUTRAMP	(1U << 13)	/* Disable EMUTRAMP */
+#define PF_RANDMMAP	(1U << 14)	/* Enable  RANDMMAP */
+#define PF_NORANDMMAP	(1U << 15)	/* Disable RANDMMAP */
+
 typedef struct elf32_phdr{
   Elf32_Word	p_type;
   Elf32_Off	p_offset;
@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
 #define	EI_OSABI	7
 #define	EI_PAD		8
 
+#define	EI_PAX		14
+
 #define	ELFMAG0		0x7f		/* EI_MAG */
 #define	ELFMAG1		'E'
 #define	ELFMAG2		'L'
diff -ruNp linux-3.13.11/include/uapi/linux/fs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/fs.h
--- linux-3.13.11/include/uapi/linux/fs.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/fs.h	2014-07-09 12:00:15.000000000 +0200
@@ -86,6 +86,9 @@ struct inodes_stat_t {
 #define MS_KERNMOUNT	(1<<22) /* this is a kern_mount call */
 #define MS_I_VERSION	(1<<23) /* Update inode I_version field */
 #define MS_STRICTATIME	(1<<24) /* Always perform atime updates */
+#define MS_TAGGED	(1<<8)	/* use generic inode tagging */
+#define MS_NOTAGCHECK	(1<<9)	/* don't check tags */
+#define MS_TAGID	(1<<25) /* use specific tag for this mount */
 
 /* These sb flags are internal to the kernel */
 #define MS_NOSEC	(1<<28)
@@ -191,11 +194,14 @@ struct inodes_stat_t {
 #define FS_EXTENT_FL			0x00080000 /* Extents */
 #define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
 #define FS_NOCOW_FL			0x00800000 /* Do not cow file */
+#define FS_IXUNLINK_FL			0x08000000 /* Immutable invert on unlink */
 #define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */
 
-#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
+#define FS_BARRIER_FL			0x04000000 /* Barrier for chroot() */
+#define FS_COW_FL			0x20000000 /* Copy on Write marker */
 
+#define FS_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
+#define FS_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
 
 #define SYNC_FILE_RANGE_WAIT_BEFORE	1
 #define SYNC_FILE_RANGE_WRITE		2
diff -ruNp linux-3.13.11/include/uapi/linux/gfs2_ondisk.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/gfs2_ondisk.h
--- linux-3.13.11/include/uapi/linux/gfs2_ondisk.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/gfs2_ondisk.h	2014-07-09 12:00:15.000000000 +0200
@@ -225,6 +225,9 @@ enum {
 	gfs2fl_Sync		= 8,
 	gfs2fl_System		= 9,
 	gfs2fl_TopLevel		= 10,
+	gfs2fl_IXUnlink         = 16,
+	gfs2fl_Barrier          = 17,
+	gfs2fl_Cow              = 18,
 	gfs2fl_TruncInProg	= 29,
 	gfs2fl_InheritDirectio	= 30,
 	gfs2fl_InheritJdata	= 31,
@@ -242,6 +245,9 @@ enum {
 #define GFS2_DIF_SYNC			0x00000100
 #define GFS2_DIF_SYSTEM			0x00000200 /* New in gfs2 */
 #define GFS2_DIF_TOPDIR			0x00000400 /* New in gfs2 */
+#define GFS2_DIF_IXUNLINK               0x00010000
+#define GFS2_DIF_BARRIER                0x00020000
+#define GFS2_DIF_COW                    0x00040000
 #define GFS2_DIF_TRUNC_IN_PROG		0x20000000 /* New in gfs2 */
 #define GFS2_DIF_INHERIT_DIRECTIO	0x40000000 /* only in gfs1 */
 #define GFS2_DIF_INHERIT_JDATA		0x80000000
diff -ruNp linux-3.13.11/include/uapi/linux/if_tun.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/if_tun.h
--- linux-3.13.11/include/uapi/linux/if_tun.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/if_tun.h	2014-07-09 12:00:15.000000000 +0200
@@ -58,6 +58,7 @@
 #define TUNSETQUEUE  _IOW('T', 217, int)
 #define TUNSETIFINDEX	_IOW('T', 218, unsigned int)
 #define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
+#define TUNSETNID     _IOW('T', 220, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
diff -ruNp linux-3.13.11/include/uapi/linux/major.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/major.h
--- linux-3.13.11/include/uapi/linux/major.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/major.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,7 @@
 #define HD_MAJOR		IDE0_MAJOR
 #define PTY_SLAVE_MAJOR		3
 #define TTY_MAJOR		4
+#define VROOT_MAJOR		4
 #define TTYAUX_MAJOR		5
 #define LP_MAJOR		6
 #define VCS_MAJOR		7
diff -ruNp linux-3.13.11/include/uapi/linux/nfs_mount.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/nfs_mount.h
--- linux-3.13.11/include/uapi/linux/nfs_mount.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/nfs_mount.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,7 +63,8 @@ struct nfs_mount_data {
 #define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 non-text parsed mount data only */
 #define NFS_MOUNT_NORDIRPLUS	0x4000	/* 5 */
 #define NFS_MOUNT_UNSHARED	0x8000	/* 5 */
-#define NFS_MOUNT_FLAGMASK	0xFFFF
+#define NFS_MOUNT_TAGGED	0x10000	/* context tagging */
+#define NFS_MOUNT_FLAGMASK	0x1FFFF
 
 /* The following are for internal use only */
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
diff -ruNp linux-3.13.11/include/uapi/linux/personality.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/personality.h
--- linux-3.13.11/include/uapi/linux/personality.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/personality.h	2014-07-09 12:00:15.000000000 +0200
@@ -30,6 +30,7 @@ enum {
 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC  | \
 			    ADDR_NO_RANDOMIZE  | \
 			    ADDR_COMPAT_LAYOUT | \
+			    ADDR_LIMIT_3GB     | \
 			    MMAP_PAGE_ZERO)
 
 /*
diff -ruNp linux-3.13.11/include/uapi/linux/reboot.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/reboot.h
--- linux-3.13.11/include/uapi/linux/reboot.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/reboot.h	2014-07-09 12:00:15.000000000 +0200
@@ -33,7 +33,7 @@
 #define	LINUX_REBOOT_CMD_RESTART2	0xA1B2C3D4
 #define	LINUX_REBOOT_CMD_SW_SUSPEND	0xD000FCE2
 #define	LINUX_REBOOT_CMD_KEXEC		0x45584543
-
+#define	LINUX_REBOOT_CMD_OOM		0xDEADBEEF
 
 
 #endif /* _UAPI_LINUX_REBOOT_H */
diff -ruNp linux-3.13.11/include/uapi/linux/screen_info.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/screen_info.h
--- linux-3.13.11/include/uapi/linux/screen_info.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/screen_info.h	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,8 @@ struct screen_info {
 	__u16 pages;		/* 0x32 */
 	__u16 vesa_attributes;	/* 0x34 */
 	__u32 capabilities;     /* 0x36 */
-	__u8  _reserved[6];	/* 0x3a */
+	__u16 vesapm_size;	/* 0x3a */
+	__u8  _reserved[4];	/* 0x3c */
 } __attribute__((packed));
 
 #define VIDEO_TYPE_MDA		0x10	/* Monochrome Text Display	*/
diff -ruNp linux-3.13.11/include/uapi/linux/swab.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/swab.h
--- linux-3.13.11/include/uapi/linux/swab.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/swab.h	2014-07-09 12:00:15.000000000 +0200
@@ -43,7 +43,7 @@
  * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
  */
 
-static inline __attribute_const__ __u16 __fswab16(__u16 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP16__
 	return __builtin_bswap16(val);
@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16
 #endif
 }
 
-static inline __attribute_const__ __u32 __fswab32(__u32 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP32__
 	return __builtin_bswap32(val);
@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32
 #endif
 }
 
-static inline __attribute_const__ __u64 __fswab64(__u64 val)
+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
 {
 #ifdef __HAVE_BUILTIN_BSWAP64__
 	return __builtin_bswap64(val);
diff -ruNp linux-3.13.11/include/uapi/linux/sysctl.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/sysctl.h
--- linux-3.13.11/include/uapi/linux/sysctl.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/sysctl.h	2014-07-09 12:00:15.000000000 +0200
@@ -60,6 +60,7 @@ enum
 	CTL_ABI=9,		/* Binary emulation */
 	CTL_CPU=10,		/* CPU stuff (speed scaling, etc) */
 	CTL_ARLAN=254,		/* arlan wireless driver */
+	CTL_VSERVER=4242,	/* Linux-VServer debug */
 	CTL_S390DBF=5677,	/* s390 debug */
 	CTL_SUNRPC=7249,	/* sunrpc debug */
 	CTL_PM=9899,		/* frv power management */
@@ -94,6 +95,7 @@ enum
 
 	KERN_PANIC=15,		/* int: panic timeout */
 	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
+	KERN_VSHELPER=17,	/* string: path to vshelper policy agent */
 
 	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
 	KERN_CTLALTDEL=22,	/* int: allow ctl-alt-del to reboot */
@@ -155,8 +157,6 @@ enum
 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
 };
 
-
-
 /* CTL_VM names: */
 enum
 {
diff -ruNp linux-3.13.11/include/uapi/linux/videodev2.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/videodev2.h
--- linux-3.13.11/include/uapi/linux/videodev2.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/videodev2.h	2014-07-09 12:00:15.000000000 +0200
@@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
 	union {
 		__s32 value;
 		__s64 value64;
-		char *string;
+		char __user *string;
 	};
 } __attribute__ ((packed));
 
diff -ruNp linux-3.13.11/include/uapi/linux/xattr.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/xattr.h
--- linux-3.13.11/include/uapi/linux/xattr.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/linux/xattr.h	2014-07-09 12:00:15.000000000 +0200
@@ -63,5 +63,9 @@
 #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
 
+/* User namespace */
+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
+#define XATTR_PAX_FLAGS_SUFFIX "flags"
+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
 
 #endif /* _UAPI_LINUX_XATTR_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/Kbuild linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/Kbuild
--- linux-3.13.11/include/uapi/vserver/Kbuild	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/Kbuild	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,9 @@
+
+header-y += context_cmd.h network_cmd.h space_cmd.h \
+	cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \
+	inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \
+	debug_cmd.h device_cmd.h
+
+header-y += switch.h context.h network.h monitor.h \
+	limit.h inode.h device.h
+
diff -ruNp linux-3.13.11/include/uapi/vserver/cacct_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/cacct_cmd.h
--- linux-3.13.11/include/uapi/vserver/cacct_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/cacct_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,15 @@
+#ifndef _UAPI_VS_CACCT_CMD_H
+#define _UAPI_VS_CACCT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_sock_stat		VC_CMD(VSTAT, 5, 0)
+
+struct	vcmd_sock_stat_v0 {
+	uint32_t field;
+	uint32_t count[3];
+	uint64_t total[3];
+};
+
+#endif /* _UAPI_VS_CACCT_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/context.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/context.h
--- linux-3.13.11/include/uapi/vserver/context.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/context.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,81 @@
+#ifndef _UAPI_VS_CONTEXT_H
+#define _UAPI_VS_CONTEXT_H
+
+#include <linux/types.h>
+#include <linux/capability.h>
+
+
+/* context flags */
+
+#define VXF_INFO_SCHED		0x00000002
+#define VXF_INFO_NPROC		0x00000004
+#define VXF_INFO_PRIVATE	0x00000008
+
+#define VXF_INFO_INIT		0x00000010
+#define VXF_INFO_HIDE		0x00000020
+#define VXF_INFO_ULIMIT		0x00000040
+#define VXF_INFO_NSPACE		0x00000080
+
+#define VXF_SCHED_HARD		0x00000100
+#define VXF_SCHED_PRIO		0x00000200
+#define VXF_SCHED_PAUSE		0x00000400
+
+#define VXF_VIRT_MEM		0x00010000
+#define VXF_VIRT_UPTIME		0x00020000
+#define VXF_VIRT_CPU		0x00040000
+#define VXF_VIRT_LOAD		0x00080000
+#define VXF_VIRT_TIME		0x00100000
+
+#define VXF_HIDE_MOUNT		0x01000000
+/* was	VXF_HIDE_NETIF		0x02000000 */
+#define VXF_HIDE_VINFO		0x04000000
+
+#define VXF_STATE_SETUP		(1ULL << 32)
+#define VXF_STATE_INIT		(1ULL << 33)
+#define VXF_STATE_ADMIN		(1ULL << 34)
+
+#define VXF_SC_HELPER		(1ULL << 36)
+#define VXF_REBOOT_KILL		(1ULL << 37)
+#define VXF_PERSISTENT		(1ULL << 38)
+
+#define VXF_FORK_RSS		(1ULL << 48)
+#define VXF_PROLIFIC		(1ULL << 49)
+
+#define VXF_IGNEG_NICE		(1ULL << 52)
+
+#define VXF_ONE_TIME		(0x0007ULL << 32)
+
+#define VXF_INIT_SET		(VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN)
+
+
+/* context migration */
+
+#define VXM_SET_INIT		0x00000001
+#define VXM_SET_REAPER		0x00000002
+
+/* context caps */
+
+#define VXC_SET_UTSNAME		0x00000001
+#define VXC_SET_RLIMIT		0x00000002
+#define VXC_FS_SECURITY		0x00000004
+#define VXC_FS_TRUSTED		0x00000008
+#define VXC_TIOCSTI		0x00000010
+
+/* was	VXC_RAW_ICMP		0x00000100 */
+#define VXC_SYSLOG		0x00001000
+#define VXC_OOM_ADJUST		0x00002000
+#define VXC_AUDIT_CONTROL	0x00004000
+
+#define VXC_SECURE_MOUNT	0x00010000
+/* #define VXC_SECURE_REMOUNT	0x00020000 */
+#define VXC_BINARY_MOUNT	0x00040000
+#define VXC_DEV_MOUNT		0x00080000
+
+#define VXC_QUOTA_CTL		0x00100000
+#define VXC_ADMIN_MAPPER	0x00200000
+#define VXC_ADMIN_CLOOP		0x00400000
+
+#define VXC_KTHREAD		0x01000000
+#define VXC_NAMESPACE		0x02000000
+
+#endif /* _UAPI_VS_CONTEXT_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/context_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/context_cmd.h
--- linux-3.13.11/include/uapi/vserver/context_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/context_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,115 @@
+#ifndef _UAPI_VS_CONTEXT_CMD_H
+#define _UAPI_VS_CONTEXT_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_xid		VC_CMD(VINFO, 1, 0)
+
+
+#define VCMD_vx_info		VC_CMD(VINFO, 5, 0)
+
+struct	vcmd_vx_info_v0 {
+	uint32_t xid;
+	uint32_t initpid;
+	/* more to come */
+};
+
+
+#define VCMD_ctx_stat		VC_CMD(VSTAT, 0, 0)
+
+struct	vcmd_ctx_stat_v0 {
+	uint32_t usecnt;
+	uint32_t tasks;
+	/* more to come */
+};
+
+
+/* context commands */
+
+#define VCMD_ctx_create_v0	VC_CMD(VPROC, 1, 0)
+#define VCMD_ctx_create		VC_CMD(VPROC, 1, 1)
+
+struct	vcmd_ctx_create {
+	uint64_t flagword;
+};
+
+#define VCMD_ctx_migrate_v0	VC_CMD(PROCMIG, 1, 0)
+#define VCMD_ctx_migrate	VC_CMD(PROCMIG, 1, 1)
+
+struct	vcmd_ctx_migrate {
+	uint64_t flagword;
+};
+
+
+
+/* flag commands */
+
+#define VCMD_get_cflags		VC_CMD(FLAGS, 1, 0)
+#define VCMD_set_cflags		VC_CMD(FLAGS, 2, 0)
+
+struct	vcmd_ctx_flags_v0 {
+	uint64_t flagword;
+	uint64_t mask;
+};
+
+
+
+/* context caps commands */
+
+#define VCMD_get_ccaps		VC_CMD(FLAGS, 3, 1)
+#define VCMD_set_ccaps		VC_CMD(FLAGS, 4, 1)
+
+struct	vcmd_ctx_caps_v1 {
+	uint64_t ccaps;
+	uint64_t cmask;
+};
+
+
+
+/* bcaps commands */
+
+#define VCMD_get_bcaps		VC_CMD(FLAGS, 9, 0)
+#define VCMD_set_bcaps		VC_CMD(FLAGS, 10, 0)
+
+struct	vcmd_bcaps {
+	uint64_t bcaps;
+	uint64_t bmask;
+};
+
+
+
+/* umask commands */
+
+#define VCMD_get_umask		VC_CMD(FLAGS, 13, 0)
+#define VCMD_set_umask		VC_CMD(FLAGS, 14, 0)
+
+struct	vcmd_umask {
+	uint64_t umask;
+	uint64_t mask;
+};
+
+
+
+/* wmask commands */
+
+#define VCMD_get_wmask		VC_CMD(FLAGS, 15, 0)
+#define VCMD_set_wmask		VC_CMD(FLAGS, 16, 0)
+
+struct	vcmd_wmask {
+	uint64_t wmask;
+	uint64_t mask;
+};
+
+
+
+/* OOM badness */
+
+#define VCMD_get_badness	VC_CMD(MEMCTRL, 5, 0)
+#define VCMD_set_badness	VC_CMD(MEMCTRL, 6, 0)
+
+struct	vcmd_badness_v0 {
+	int64_t bias;
+};
+
+#endif /* _UAPI_VS_CONTEXT_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/cvirt_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/cvirt_cmd.h
--- linux-3.13.11/include/uapi/vserver/cvirt_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/cvirt_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,41 @@
+#ifndef _UAPI_VS_CVIRT_CMD_H
+#define _UAPI_VS_CVIRT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_set_vhi_name	VC_CMD(VHOST, 1, 0)
+#define VCMD_get_vhi_name	VC_CMD(VHOST, 2, 0)
+
+struct	vcmd_vhi_name_v0 {
+	uint32_t field;
+	char name[65];
+};
+
+
+enum vhi_name_field {
+	VHIN_CONTEXT = 0,
+	VHIN_SYSNAME,
+	VHIN_NODENAME,
+	VHIN_RELEASE,
+	VHIN_VERSION,
+	VHIN_MACHINE,
+	VHIN_DOMAINNAME,
+};
+
+
+
+#define VCMD_virt_stat		VC_CMD(VSTAT, 3, 0)
+
+struct	vcmd_virt_stat_v0 {
+	uint64_t offset;
+	uint64_t uptime;
+	uint32_t nr_threads;
+	uint32_t nr_running;
+	uint32_t nr_uninterruptible;
+	uint32_t nr_onhold;
+	uint32_t nr_forks;
+	uint32_t load[3];
+};
+
+#endif /* _UAPI_VS_CVIRT_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/debug_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/debug_cmd.h
--- linux-3.13.11/include/uapi/vserver/debug_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/debug_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,24 @@
+#ifndef _UAPI_VS_DEBUG_CMD_H
+#define _UAPI_VS_DEBUG_CMD_H
+
+
+/* debug commands */
+
+#define VCMD_dump_history	VC_CMD(DEBUG, 1, 0)
+
+#define VCMD_read_history	VC_CMD(DEBUG, 5, 0)
+#define VCMD_read_monitor	VC_CMD(DEBUG, 6, 0)
+
+struct  vcmd_read_history_v0 {
+	uint32_t index;
+	uint32_t count;
+	char __user *data;
+};
+
+struct  vcmd_read_monitor_v0 {
+	uint32_t index;
+	uint32_t count;
+	char __user *data;
+};
+
+#endif /* _UAPI_VS_DEBUG_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/device.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/device.h
--- linux-3.13.11/include/uapi/vserver/device.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/device.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,12 @@
+#ifndef _UAPI_VS_DEVICE_H
+#define _UAPI_VS_DEVICE_H
+
+
+#define DATTR_CREATE	0x00000001
+#define DATTR_OPEN	0x00000002
+
+#define DATTR_REMAP	0x00000010
+
+#define DATTR_MASK	0x00000013
+
+#endif	/* _UAPI_VS_DEVICE_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/device_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/device_cmd.h
--- linux-3.13.11/include/uapi/vserver/device_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/device_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,16 @@
+#ifndef _UAPI_VS_DEVICE_CMD_H
+#define _UAPI_VS_DEVICE_CMD_H
+
+
+/*  device vserver commands */
+
+#define VCMD_set_mapping	VC_CMD(DEVICE, 1, 0)
+#define VCMD_unset_mapping	VC_CMD(DEVICE, 2, 0)
+
+struct	vcmd_set_mapping_v0 {
+	const char __user *device;
+	const char __user *target;
+	uint32_t flags;
+};
+
+#endif /* _UAPI_VS_DEVICE_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/dlimit_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/dlimit_cmd.h
--- linux-3.13.11/include/uapi/vserver/dlimit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/dlimit_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,67 @@
+#ifndef _UAPI_VS_DLIMIT_CMD_H
+#define _UAPI_VS_DLIMIT_CMD_H
+
+
+/*  dlimit vserver commands */
+
+#define VCMD_add_dlimit		VC_CMD(DLIMIT, 1, 0)
+#define VCMD_rem_dlimit		VC_CMD(DLIMIT, 2, 0)
+
+#define VCMD_set_dlimit		VC_CMD(DLIMIT, 5, 0)
+#define VCMD_get_dlimit		VC_CMD(DLIMIT, 6, 0)
+
+struct	vcmd_ctx_dlimit_base_v0 {
+	const char __user *name;
+	uint32_t flags;
+};
+
+struct	vcmd_ctx_dlimit_v0 {
+	const char __user *name;
+	uint32_t space_used;			/* used space in kbytes */
+	uint32_t space_total;			/* maximum space in kbytes */
+	uint32_t inodes_used;			/* used inodes */
+	uint32_t inodes_total;			/* maximum inodes */
+	uint32_t reserved;			/* reserved for root in % */
+	uint32_t flags;
+};
+
+#define CDLIM_UNSET		((uint32_t)0UL)
+#define CDLIM_INFINITY		((uint32_t)~0UL)
+#define CDLIM_KEEP		((uint32_t)~1UL)
+
+#define DLIME_UNIT	0
+#define DLIME_KILO	1
+#define DLIME_MEGA	2
+#define DLIME_GIGA	3
+
+#define DLIMF_SHIFT	0x10
+
+#define DLIMS_USED	0
+#define DLIMS_TOTAL	2
+
+static inline
+uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift)
+{
+	int exp = (flags & DLIMF_SHIFT) ?
+		(flags >> shift) & DLIME_GIGA : DLIME_KILO;
+	return ((uint64_t)val) << (10 * exp);
+}
+
+static inline
+uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift)
+{
+	int exp = 0;
+
+	if (*flags & DLIMF_SHIFT) {
+		while (val > (1LL << 32) && (exp < 3)) {
+			val >>= 10;
+			exp++;
+		}
+		*flags &= ~(DLIME_GIGA << shift);
+		*flags |= exp << shift;
+	} else
+		val >>= 10;
+	return val;
+}
+
+#endif /* _UAPI_VS_DLIMIT_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/inode.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/inode.h
--- linux-3.13.11/include/uapi/vserver/inode.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/inode.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,23 @@
+#ifndef _UAPI_VS_INODE_H
+#define _UAPI_VS_INODE_H
+
+
+#define IATTR_TAG	0x01000000
+
+#define IATTR_ADMIN	0x00000001
+#define IATTR_WATCH	0x00000002
+#define IATTR_HIDE	0x00000004
+#define IATTR_FLAGS	0x00000007
+
+#define IATTR_BARRIER	0x00010000
+#define IATTR_IXUNLINK	0x00020000
+#define IATTR_IMMUTABLE 0x00040000
+#define IATTR_COW	0x00080000
+
+
+/* inode ioctls */
+
+#define FIOC_GETXFLG	_IOR('x', 5, long)
+#define FIOC_SETXFLG	_IOW('x', 6, long)
+
+#endif	/* _UAPI_VS_INODE_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/inode_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/inode_cmd.h
--- linux-3.13.11/include/uapi/vserver/inode_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/inode_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,26 @@
+#ifndef _UAPI_VS_INODE_CMD_H
+#define _UAPI_VS_INODE_CMD_H
+
+
+/*  inode vserver commands */
+
+#define VCMD_get_iattr		VC_CMD(INODE, 1, 1)
+#define VCMD_set_iattr		VC_CMD(INODE, 2, 1)
+
+#define VCMD_fget_iattr		VC_CMD(INODE, 3, 0)
+#define VCMD_fset_iattr		VC_CMD(INODE, 4, 0)
+
+struct	vcmd_ctx_iattr_v1 {
+	const char __user *name;
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+struct	vcmd_ctx_fiattr_v0 {
+	uint32_t tag;
+	uint32_t flags;
+	uint32_t mask;
+};
+
+#endif /* _UAPI_VS_INODE_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/limit.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/limit.h
--- linux-3.13.11/include/uapi/vserver/limit.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/limit.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef _UAPI_VS_LIMIT_H
+#define _UAPI_VS_LIMIT_H
+
+
+#define VLIMIT_NSOCK	16
+#define VLIMIT_OPENFD	17
+#define VLIMIT_ANON	18
+#define VLIMIT_SHMEM	19
+#define VLIMIT_SEMARY	20
+#define VLIMIT_NSEMS	21
+#define VLIMIT_DENTRY	22
+#define VLIMIT_MAPPED	23
+
+#endif /* _UAPI_VS_LIMIT_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/limit_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/limit_cmd.h
--- linux-3.13.11/include/uapi/vserver/limit_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/limit_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,40 @@
+#ifndef _UAPI_VS_LIMIT_CMD_H
+#define _UAPI_VS_LIMIT_CMD_H
+
+
+/*  rlimit vserver commands */
+
+#define VCMD_get_rlimit		VC_CMD(RLIMIT, 1, 0)
+#define VCMD_set_rlimit		VC_CMD(RLIMIT, 2, 0)
+#define VCMD_get_rlimit_mask	VC_CMD(RLIMIT, 3, 0)
+#define VCMD_reset_hits		VC_CMD(RLIMIT, 7, 0)
+#define VCMD_reset_minmax	VC_CMD(RLIMIT, 9, 0)
+
+struct	vcmd_ctx_rlimit_v0 {
+	uint32_t id;
+	uint64_t minimum;
+	uint64_t softlimit;
+	uint64_t maximum;
+};
+
+struct	vcmd_ctx_rlimit_mask_v0 {
+	uint32_t minimum;
+	uint32_t softlimit;
+	uint32_t maximum;
+};
+
+#define VCMD_rlimit_stat	VC_CMD(VSTAT, 1, 0)
+
+struct	vcmd_rlimit_stat_v0 {
+	uint32_t id;
+	uint32_t hits;
+	uint64_t value;
+	uint64_t minimum;
+	uint64_t maximum;
+};
+
+#define CRLIM_UNSET		(0ULL)
+#define CRLIM_INFINITY		(~0ULL)
+#define CRLIM_KEEP		(~1ULL)
+
+#endif /* _UAPI_VS_LIMIT_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/monitor.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/monitor.h
--- linux-3.13.11/include/uapi/vserver/monitor.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/monitor.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,96 @@
+#ifndef _UAPI_VS_MONITOR_H
+#define _UAPI_VS_MONITOR_H
+
+#include <linux/types.h>
+
+
+enum {
+	VXM_UNUSED = 0,
+
+	VXM_SYNC = 0x10,
+
+	VXM_UPDATE = 0x20,
+	VXM_UPDATE_1,
+	VXM_UPDATE_2,
+
+	VXM_RQINFO_1 = 0x24,
+	VXM_RQINFO_2,
+
+	VXM_ACTIVATE = 0x40,
+	VXM_DEACTIVATE,
+	VXM_IDLE,
+
+	VXM_HOLD = 0x44,
+	VXM_UNHOLD,
+
+	VXM_MIGRATE = 0x48,
+	VXM_RESCHED,
+
+	/* all other bits are flags */
+	VXM_SCHED = 0x80,
+};
+
+struct _vxm_update_1 {
+	uint32_t tokens_max;
+	uint32_t fill_rate;
+	uint32_t interval;
+};
+
+struct _vxm_update_2 {
+	uint32_t tokens_min;
+	uint32_t fill_rate;
+	uint32_t interval;
+};
+
+struct _vxm_rqinfo_1 {
+	uint16_t running;
+	uint16_t onhold;
+	uint16_t iowait;
+	uint16_t uintr;
+	uint32_t idle_tokens;
+};
+
+struct _vxm_rqinfo_2 {
+	uint32_t norm_time;
+	uint32_t idle_time;
+	uint32_t idle_skip;
+};
+
+struct _vxm_sched {
+	uint32_t tokens;
+	uint32_t norm_time;
+	uint32_t idle_time;
+};
+
+struct _vxm_task {
+	uint16_t pid;
+	uint16_t state;
+};
+
+struct _vxm_event {
+	uint32_t jif;
+	union {
+		uint32_t seq;
+		uint32_t sec;
+	};
+	union {
+		uint32_t tokens;
+		uint32_t nsec;
+		struct _vxm_task tsk;
+	};
+};
+
+struct _vx_mon_entry {
+	uint16_t type;
+	uint16_t xid;
+	union {
+		struct _vxm_event ev;
+		struct _vxm_sched sd;
+		struct _vxm_update_1 u1;
+		struct _vxm_update_2 u2;
+		struct _vxm_rqinfo_1 q1;
+		struct _vxm_rqinfo_2 q2;
+	};
+};
+
+#endif /* _UAPI_VS_MONITOR_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/network.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/network.h
--- linux-3.13.11/include/uapi/vserver/network.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/network.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,76 @@
+#ifndef _UAPI_VS_NETWORK_H
+#define _UAPI_VS_NETWORK_H
+
+#include <linux/types.h>
+
+
+#define MAX_N_CONTEXT	65535	/* Arbitrary limit */
+
+
+/* network flags */
+
+#define NXF_INFO_PRIVATE	0x00000008
+
+#define NXF_SINGLE_IP		0x00000100
+#define NXF_LBACK_REMAP		0x00000200
+#define NXF_LBACK_ALLOW		0x00000400
+
+#define NXF_HIDE_NETIF		0x02000000
+#define NXF_HIDE_LBACK		0x04000000
+
+#define NXF_STATE_SETUP		(1ULL << 32)
+#define NXF_STATE_ADMIN		(1ULL << 34)
+
+#define NXF_SC_HELPER		(1ULL << 36)
+#define NXF_PERSISTENT		(1ULL << 38)
+
+#define NXF_ONE_TIME		(0x0005ULL << 32)
+
+
+#define	NXF_INIT_SET		(__nxf_init_set())
+
+static inline uint64_t __nxf_init_set(void) {
+	return	  NXF_STATE_ADMIN
+#ifdef	CONFIG_VSERVER_AUTO_LBACK
+		| NXF_LBACK_REMAP
+		| NXF_HIDE_LBACK
+#endif
+#ifdef	CONFIG_VSERVER_AUTO_SINGLE
+		| NXF_SINGLE_IP
+#endif
+		| NXF_HIDE_NETIF;
+}
+
+
+/* network caps */
+
+#define NXC_TUN_CREATE		0x00000001
+
+#define NXC_RAW_ICMP		0x00000100
+
+#define NXC_MULTICAST		0x00001000
+
+
+/* address types */
+
+#define NXA_TYPE_IPV4		0x0001
+#define NXA_TYPE_IPV6		0x0002
+
+#define NXA_TYPE_NONE		0x0000
+#define NXA_TYPE_ANY		0x00FF
+
+#define NXA_TYPE_ADDR		0x0010
+#define NXA_TYPE_MASK		0x0020
+#define NXA_TYPE_RANGE		0x0040
+
+#define NXA_MASK_ALL		(NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE)
+
+#define NXA_MOD_BCAST		0x0100
+#define NXA_MOD_LBACK		0x0200
+
+#define NXA_LOOPBACK		0x1000
+
+#define NXA_MASK_BIND		(NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK)
+#define NXA_MASK_SHOW		(NXA_MASK_ALL | NXA_LOOPBACK)
+
+#endif /* _UAPI_VS_NETWORK_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/network_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/network_cmd.h
--- linux-3.13.11/include/uapi/vserver/network_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/network_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,123 @@
+#ifndef _UAPI_VS_NETWORK_CMD_H
+#define _UAPI_VS_NETWORK_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_nid		VC_CMD(VINFO, 2, 0)
+
+
+#define VCMD_nx_info		VC_CMD(VINFO, 6, 0)
+
+struct	vcmd_nx_info_v0 {
+	uint32_t nid;
+	/* more to come */
+};
+
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define VCMD_net_create_v0	VC_CMD(VNET, 1, 0)
+#define VCMD_net_create		VC_CMD(VNET, 1, 1)
+
+struct  vcmd_net_create {
+	uint64_t flagword;
+};
+
+#define VCMD_net_migrate	VC_CMD(NETMIG, 1, 0)
+
+#define VCMD_net_add		VC_CMD(NETALT, 1, 0)
+#define VCMD_net_remove		VC_CMD(NETALT, 2, 0)
+
+struct	vcmd_net_addr_v0 {
+	uint16_t type;
+	uint16_t count;
+	struct in_addr ip[4];
+	struct in_addr mask[4];
+};
+
+#define VCMD_net_add_ipv4_v1	VC_CMD(NETALT, 1, 1)
+#define VCMD_net_rem_ipv4_v1	VC_CMD(NETALT, 2, 1)
+
+struct	vcmd_net_addr_ipv4_v1 {
+	uint16_t type;
+	uint16_t flags;
+	struct in_addr ip;
+	struct in_addr mask;
+};
+
+#define VCMD_net_add_ipv4	VC_CMD(NETALT, 1, 2)
+#define VCMD_net_rem_ipv4	VC_CMD(NETALT, 2, 2)
+
+struct	vcmd_net_addr_ipv4_v2 {
+	uint16_t type;
+	uint16_t flags;
+	struct in_addr ip;
+	struct in_addr ip2;
+	struct in_addr mask;
+};
+
+#define VCMD_net_add_ipv6	VC_CMD(NETALT, 3, 1)
+#define VCMD_net_remove_ipv6	VC_CMD(NETALT, 4, 1)
+
+struct	vcmd_net_addr_ipv6_v1 {
+	uint16_t type;
+	uint16_t flags;
+	uint32_t prefix;
+	struct in6_addr ip;
+	struct in6_addr mask;
+};
+
+#define VCMD_add_match_ipv4	VC_CMD(NETALT, 5, 0)
+#define VCMD_get_match_ipv4	VC_CMD(NETALT, 6, 0)
+
+struct	vcmd_match_ipv4_v0 {
+	uint16_t type;
+	uint16_t flags;
+	uint16_t parent;
+	uint16_t prefix;
+	struct in_addr ip;
+	struct in_addr ip2;
+	struct in_addr mask;
+};
+
+#define VCMD_add_match_ipv6	VC_CMD(NETALT, 7, 0)
+#define VCMD_get_match_ipv6	VC_CMD(NETALT, 8, 0)
+
+struct	vcmd_match_ipv6_v0 {
+	uint16_t type;
+	uint16_t flags;
+	uint16_t parent;
+	uint16_t prefix;
+	struct in6_addr ip;
+	struct in6_addr ip2;
+	struct in6_addr mask;
+};
+
+
+
+
+/* flag commands */
+
+#define VCMD_get_nflags		VC_CMD(FLAGS, 5, 0)
+#define VCMD_set_nflags		VC_CMD(FLAGS, 6, 0)
+
+struct	vcmd_net_flags_v0 {
+	uint64_t flagword;
+	uint64_t mask;
+};
+
+
+
+/* network caps commands */
+
+#define VCMD_get_ncaps		VC_CMD(FLAGS, 7, 0)
+#define VCMD_set_ncaps		VC_CMD(FLAGS, 8, 0)
+
+struct	vcmd_net_caps_v0 {
+	uint64_t ncaps;
+	uint64_t cmask;
+};
+
+#endif /* _UAPI_VS_NETWORK_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/sched_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/sched_cmd.h
--- linux-3.13.11/include/uapi/vserver/sched_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/sched_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,13 @@
+#ifndef _UAPI_VS_SCHED_CMD_H
+#define _UAPI_VS_SCHED_CMD_H
+
+
+struct	vcmd_prio_bias {
+	int32_t cpu_id;
+	int32_t prio_bias;
+};
+
+#define VCMD_set_prio_bias	VC_CMD(SCHED, 4, 0)
+#define VCMD_get_prio_bias	VC_CMD(SCHED, 5, 0)
+
+#endif /* _UAPI_VS_SCHED_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/signal_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/signal_cmd.h
--- linux-3.13.11/include/uapi/vserver/signal_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/signal_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,31 @@
+#ifndef _UAPI_VS_SIGNAL_CMD_H
+#define _UAPI_VS_SIGNAL_CMD_H
+
+
+/*  signalling vserver commands */
+
+#define VCMD_ctx_kill		VC_CMD(PROCTRL, 1, 0)
+#define VCMD_wait_exit		VC_CMD(EVENT, 99, 0)
+
+struct	vcmd_ctx_kill_v0 {
+	int32_t pid;
+	int32_t sig;
+};
+
+struct	vcmd_wait_exit_v0 {
+	int32_t reboot_cmd;
+	int32_t exit_code;
+};
+
+
+/*  process alteration commands */
+
+#define VCMD_get_pflags		VC_CMD(PROCALT, 5, 0)
+#define VCMD_set_pflags		VC_CMD(PROCALT, 6, 0)
+
+struct	vcmd_pflags_v0 {
+	uint32_t flagword;
+	uint32_t mask;
+};
+
+#endif /* _UAPI_VS_SIGNAL_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/space_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/space_cmd.h
--- linux-3.13.11/include/uapi/vserver/space_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/space_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,28 @@
+#ifndef _UAPI_VS_SPACE_CMD_H
+#define _UAPI_VS_SPACE_CMD_H
+
+
+#define VCMD_enter_space_v0	VC_CMD(PROCALT, 1, 0)
+#define VCMD_enter_space_v1	VC_CMD(PROCALT, 1, 1)
+#define VCMD_enter_space	VC_CMD(PROCALT, 1, 2)
+
+#define VCMD_set_space_v0	VC_CMD(PROCALT, 3, 0)
+#define VCMD_set_space_v1	VC_CMD(PROCALT, 3, 1)
+#define VCMD_set_space		VC_CMD(PROCALT, 3, 2)
+
+#define VCMD_get_space_mask_v0	VC_CMD(PROCALT, 4, 0)
+
+#define VCMD_get_space_mask	VC_CMD(VSPACE, 0, 1)
+#define VCMD_get_space_default	VC_CMD(VSPACE, 1, 0)
+
+
+struct	vcmd_space_mask_v1 {
+	uint64_t mask;
+};
+
+struct	vcmd_space_mask_v2 {
+	uint64_t mask;
+	uint32_t index;
+};
+
+#endif /* _UAPI_VS_SPACE_CMD_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/switch.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/switch.h
--- linux-3.13.11/include/uapi/vserver/switch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/switch.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,90 @@
+#ifndef _UAPI_VS_SWITCH_H
+#define _UAPI_VS_SWITCH_H
+
+#include <linux/types.h>
+
+
+#define VC_CATEGORY(c)		(((c) >> 24) & 0x3F)
+#define VC_COMMAND(c)		(((c) >> 16) & 0xFF)
+#define VC_VERSION(c)		((c) & 0xFFF)
+
+#define VC_CMD(c, i, v)		((((VC_CAT_ ## c) & 0x3F) << 24) \
+				| (((i) & 0xFF) << 16) | ((v) & 0xFFF))
+
+/*
+
+  Syscall Matrix V2.8
+
+	 |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
+	 |STATS  |DESTROY|ALTER  |CHANGE |LIMIT  |TEST   | |       |       |
+	 |INFO   |SETUP  |       |MOVE   |       |       | |       |       |
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  SYSTEM |VERSION|VSETUP |VHOST  |       |       |       | |DEVICE |       |
+  HOST   |     00|     01|     02|     03|     04|     05| |     06|     07|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  CPU    |       |VPROC  |PROCALT|PROCMIG|PROCTRL|       | |SCHED. |       |
+  PROCESS|     08|     09|     10|     11|     12|     13| |     14|     15|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  MEMORY |       |       |       |       |MEMCTRL|       | |SWAP   |       |
+	 |     16|     17|     18|     19|     20|     21| |     22|     23|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  NETWORK|       |VNET   |NETALT |NETMIG |NETCTL |       | |SERIAL |       |
+	 |     24|     25|     26|     27|     28|     29| |     30|     31|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  DISK   |       |       |       |TAGMIG |DLIMIT |       | |INODE  |       |
+  VFS    |     32|     33|     34|     35|     36|     37| |     38|     39|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  OTHER  |VSTAT  |       |       |       |       |       | |VINFO  |       |
+	 |     40|     41|     42|     43|     44|     45| |     46|     47|
+  =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
+  SPECIAL|EVENT  |       |       |       |FLAGS  |       | |VSPACE |       |
+	 |     48|     49|     50|     51|     52|     53| |     54|     55|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+  SPECIAL|DEBUG  |       |       |       |RLIMIT |SYSCALL| |       |COMPAT |
+	 |     56|     57|     58|     59|     60|TEST 61| |     62|     63|
+  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+
+*/
+
+#define VC_CAT_VERSION		0
+
+#define VC_CAT_VSETUP		1
+#define VC_CAT_VHOST		2
+
+#define VC_CAT_DEVICE		6
+
+#define VC_CAT_VPROC		9
+#define VC_CAT_PROCALT		10
+#define VC_CAT_PROCMIG		11
+#define VC_CAT_PROCTRL		12
+
+#define VC_CAT_SCHED		14
+#define VC_CAT_MEMCTRL		20
+
+#define VC_CAT_VNET		25
+#define VC_CAT_NETALT		26
+#define VC_CAT_NETMIG		27
+#define VC_CAT_NETCTRL		28
+
+#define VC_CAT_TAGMIG		35
+#define VC_CAT_DLIMIT		36
+#define VC_CAT_INODE		38
+
+#define VC_CAT_VSTAT		40
+#define VC_CAT_VINFO		46
+#define VC_CAT_EVENT		48
+
+#define VC_CAT_FLAGS		52
+#define VC_CAT_VSPACE		54
+#define VC_CAT_DEBUG		56
+#define VC_CAT_RLIMIT		60
+
+#define VC_CAT_SYSTEST		61
+#define VC_CAT_COMPAT		63
+
+/*  query version */
+
+#define VCMD_get_version	VC_CMD(VERSION, 0, 0)
+#define VCMD_get_vci		VC_CMD(VERSION, 1, 0)
+
+#endif /* _UAPI_VS_SWITCH_H */
diff -ruNp linux-3.13.11/include/uapi/vserver/tag_cmd.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/tag_cmd.h
--- linux-3.13.11/include/uapi/vserver/tag_cmd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/uapi/vserver/tag_cmd.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,14 @@
+#ifndef _UAPI_VS_TAG_CMD_H
+#define _UAPI_VS_TAG_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_tag		VC_CMD(VINFO, 3, 0)
+
+
+/* context commands */
+
+#define VCMD_tag_migrate	VC_CMD(TAGMIG, 1, 0)
+
+#endif /* _UAPI_VS_TAG_CMD_H */
diff -ruNp linux-3.13.11/include/video/udlfb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/video/udlfb.h
--- linux-3.13.11/include/video/udlfb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/video/udlfb.h	2014-07-09 12:00:15.000000000 +0200
@@ -53,10 +53,10 @@ struct dlfb_data {
 	u32 pseudo_palette[256];
 	int blank_mode; /*one of FB_BLANK_ */
 	/* blit-only rendering path metrics, exposed through sysfs */
-	atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
-	atomic_t bytes_identical; /* saved effort with backbuffer comparison */
-	atomic_t bytes_sent; /* to usb, after compression including overhead */
-	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+	atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+	atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
+	atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
+	atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
 };
 
 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
diff -ruNp linux-3.13.11/include/video/uvesafb.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/video/uvesafb.h
--- linux-3.13.11/include/video/uvesafb.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/include/video/uvesafb.h	2014-07-09 12:00:15.000000000 +0200
@@ -122,6 +122,7 @@ struct uvesafb_par {
 	u8 ypan;			/* 0 - nothing, 1 - ypan, 2 - ywrap */
 	u8 pmi_setpal;			/* PMI for palette changes */
 	u16 *pmi_base;			/* protected mode interface location */
+	u8 *pmi_code;			/* protected mode code location */
 	void *pmi_start;
 	void *pmi_pal;
 	u8 *vbe_state_orig;		/*
diff -ruNp linux-3.13.11/init/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/Kconfig
--- linux-3.13.11/init/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -855,6 +855,7 @@ config NUMA_BALANCING
 menuconfig CGROUPS
 	boolean "Control Group support"
 	depends on EVENTFD
+	default y
 	help
 	  This option adds support for grouping sets of processes together, for
 	  use with process control subsystems such as Cpusets, CFS, memory
@@ -1079,6 +1080,7 @@ endif # CGROUPS
 
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
+	depends on !GRKERNSEC
 	default n
 	help
 	  Enables additional kernel features in a sake of checkpoint/restore.
@@ -1117,6 +1119,7 @@ config IPC_NS
 config USER_NS
 	bool "User namespace"
 	select UIDGID_STRICT_TYPE_CHECKS
+	depends on VSERVER_DISABLED
 
 	default n
 	help
@@ -1557,7 +1560,7 @@ config SLUB_DEBUG
 
 config COMPAT_BRK
 	bool "Disable heap randomization"
-	default y
+	default n
 	help
 	  Randomizing heap placement makes heap exploits harder, but it
 	  also breaks ancient binaries (including anything libc5 based).
@@ -1845,7 +1848,7 @@ config INIT_ALL_POSSIBLE
 config STOP_MACHINE
 	bool
 	default y
-	depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
+	depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
 	help
 	  Need stop_machine() primitive.
 
diff -ruNp linux-3.13.11/init/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/Makefile
--- linux-3.13.11/init/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -2,6 +2,9 @@
 # Makefile for the linux kernel.
 #
 
+ccflags-y := $(GCC_PLUGINS_CFLAGS)
+asflags-y := $(GCC_PLUGINS_AFLAGS)
+
 obj-y                          := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
diff -ruNp linux-3.13.11/init/do_mounts.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts.c
--- linux-3.13.11/init/do_mounts.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts.c	2014-07-09 12:00:15.000000000 +0200
@@ -359,11 +359,11 @@ static void __init get_fs_names(char *pa
 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 {
 	struct super_block *s;
-	int err = sys_mount(name, "/root", fs, flags, data);
+	int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
 	if (err)
 		return err;
 
-	sys_chdir("/root");
+	sys_chdir((const char __force_user *)"/root");
 	s = current->fs->pwd.dentry->d_sb;
 	ROOT_DEV = s->s_dev;
 	printk(KERN_INFO
@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...
 	va_start(args, fmt);
 	vsprintf(buf, fmt, args);
 	va_end(args);
-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
+	fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
 	if (fd >= 0) {
 		sys_ioctl(fd, FDEJECT, 0);
 		sys_close(fd);
 	}
 	printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
-	fd = sys_open("/dev/console", O_RDWR, 0);
+	fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
 	if (fd >= 0) {
 		sys_ioctl(fd, TCGETS, (long)&termios);
 		termios.c_lflag &= ~ICANON;
 		sys_ioctl(fd, TCSETSF, (long)&termios);
-		sys_read(fd, &c, 1);
+		sys_read(fd, (char __user *)&c, 1);
 		termios.c_lflag |= ICANON;
 		sys_ioctl(fd, TCSETSF, (long)&termios);
 		sys_close(fd);
@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
 	mount_root();
 out:
 	devtmpfs_mount("dev");
-	sys_mount(".", "/", NULL, MS_MOVE, NULL);
-	sys_chroot(".");
+	sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
+	sys_chroot((const char __force_user *)".");
 }
 
 static bool is_tmpfs;
diff -ruNp linux-3.13.11/init/do_mounts.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts.h
--- linux-3.13.11/init/do_mounts.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts.h	2014-07-09 12:00:15.000000000 +0200
@@ -15,15 +15,15 @@ extern int root_mountflags;
 
 static inline int create_dev(char *name, dev_t dev)
 {
-	sys_unlink(name);
-	return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
+	sys_unlink((char __force_user *)name);
+	return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
 }
 
 #if BITS_PER_LONG == 32
 static inline u32 bstat(char *name)
 {
 	struct stat64 stat;
-	if (sys_stat64(name, &stat) != 0)
+	if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
 		return 0;
 	if (!S_ISBLK(stat.st_mode))
 		return 0;
@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
 static inline u32 bstat(char *name)
 {
 	struct stat stat;
-	if (sys_newstat(name, &stat) != 0)
+	if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
 		return 0;
 	if (!S_ISBLK(stat.st_mode))
 		return 0;
diff -ruNp linux-3.13.11/init/do_mounts_initrd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts_initrd.c
--- linux-3.13.11/init/do_mounts_initrd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts_initrd.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,13 +37,13 @@ static int init_linuxrc(struct subproces
 {
 	sys_unshare(CLONE_FS | CLONE_FILES);
 	/* stdin/stdout/stderr for /linuxrc */
-	sys_open("/dev/console", O_RDWR, 0);
+	sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
 	sys_dup(0);
 	sys_dup(0);
 	/* move initrd over / and chdir/chroot in initrd root */
-	sys_chdir("/root");
-	sys_mount(".", "/", NULL, MS_MOVE, NULL);
-	sys_chroot(".");
+	sys_chdir((const char __force_user *)"/root");
+	sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
+	sys_chroot((const char __force_user *)".");
 	sys_setsid();
 	return 0;
 }
@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
 	create_dev("/dev/root.old", Root_RAM0);
 	/* mount initrd on rootfs' /root */
 	mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
-	sys_mkdir("/old", 0700);
-	sys_chdir("/old");
+	sys_mkdir((const char __force_user *)"/old", 0700);
+	sys_chdir((const char __force_user *)"/old");
 
 	/* try loading default modules from initrd */
 	load_default_modules();
@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
 	current->flags &= ~PF_FREEZER_SKIP;
 
 	/* move initrd to rootfs' /old */
-	sys_mount("..", ".", NULL, MS_MOVE, NULL);
+	sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
 	/* switch root and cwd back to / of rootfs */
-	sys_chroot("..");
+	sys_chroot((const char __force_user *)"..");
 
 	if (new_decode_dev(real_root_dev) == Root_RAM0) {
-		sys_chdir("/old");
+		sys_chdir((const char __force_user *)"/old");
 		return;
 	}
 
-	sys_chdir("/");
+	sys_chdir((const char __force_user *)"/");
 	ROOT_DEV = new_decode_dev(real_root_dev);
 	mount_root();
 
 	printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
-	error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
+	error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
 	if (!error)
 		printk("okay\n");
 	else {
-		int fd = sys_open("/dev/root.old", O_RDWR, 0);
+		int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
 		if (error == -ENOENT)
 			printk("/initrd does not exist. Ignored.\n");
 		else
 			printk("failed\n");
 		printk(KERN_NOTICE "Unmounting old root\n");
-		sys_umount("/old", MNT_DETACH);
+		sys_umount((char __force_user *)"/old", MNT_DETACH);
 		printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
 		if (fd < 0) {
 			error = fd;
@@ -127,11 +127,11 @@ int __init initrd_load(void)
 		 * mounted in the normal path.
 		 */
 		if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
-			sys_unlink("/initrd.image");
+			sys_unlink((const char __force_user *)"/initrd.image");
 			handle_initrd();
 			return 1;
 		}
 	}
-	sys_unlink("/initrd.image");
+	sys_unlink((const char __force_user *)"/initrd.image");
 	return 0;
 }
diff -ruNp linux-3.13.11/init/do_mounts_md.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts_md.c
--- linux-3.13.11/init/do_mounts_md.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/do_mounts_md.c	2014-07-09 12:00:15.000000000 +0200
@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
 			partitioned ? "_d" : "", minor,
 			md_setup_args[ent].device_names);
 
-		fd = sys_open(name, 0, 0);
+		fd = sys_open((char __force_user *)name, 0, 0);
 		if (fd < 0) {
 			printk(KERN_ERR "md: open failed - cannot start "
 					"array %s\n", name);
@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
 			 * array without it
 			 */
 			sys_close(fd);
-			fd = sys_open(name, 0, 0);
+			fd = sys_open((char __force_user *)name, 0, 0);
 			sys_ioctl(fd, BLKRRPART, 0);
 		}
 		sys_close(fd);
@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
 
 	wait_for_device_probe();
 
-	fd = sys_open("/dev/md0", 0, 0);
+	fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
 	if (fd >= 0) {
 		sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
 		sys_close(fd);
diff -ruNp linux-3.13.11/init/init_task.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/init_task.c
--- linux-3.13.11/init/init_task.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/init_task.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
  * Initial thread structure. Alignment of this is handled by a special
  * linker map entry.
  */
+#ifdef CONFIG_X86
+union thread_union init_thread_union __init_task_data;
+#else
 union thread_union init_thread_union __init_task_data =
 	{ INIT_THREAD_INFO(init_task) };
+#endif
diff -ruNp linux-3.13.11/init/initramfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/initramfs.c
--- linux-3.13.11/init/initramfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/initramfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ static void __init free_hash(void)
 	}
 }
 
-static long __init do_utime(char *filename, time_t mtime)
+static long __init do_utime(char __force_user *filename, time_t mtime)
 {
 	struct timespec t[2];
 
@@ -119,7 +119,7 @@ static void __init dir_utime(void)
 	struct dir_entry *de, *tmp;
 	list_for_each_entry_safe(de, tmp, &dir_list, list) {
 		list_del(&de->list);
-		do_utime(de->name, de->mtime);
+		do_utime((char __force_user *)de->name, de->mtime);
 		kfree(de->name);
 		kfree(de);
 	}
@@ -281,7 +281,7 @@ static int __init maybe_link(void)
 	if (nlink >= 2) {
 		char *old = find_link(major, minor, ino, mode, collected);
 		if (old)
-			return (sys_link(old, collected) < 0) ? -1 : 1;
+			return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
 	}
 	return 0;
 }
@@ -290,11 +290,11 @@ static void __init clean_path(char *path
 {
 	struct stat st;
 
-	if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
+	if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
 		if (S_ISDIR(st.st_mode))
-			sys_rmdir(path);
+			sys_rmdir((char __force_user *)path);
 		else
-			sys_unlink(path);
+			sys_unlink((char __force_user *)path);
 	}
 }
 
@@ -315,7 +315,7 @@ static int __init do_name(void)
 			int openflags = O_WRONLY|O_CREAT;
 			if (ml != 1)
 				openflags |= O_TRUNC;
-			wfd = sys_open(collected, openflags, mode);
+			wfd = sys_open((char __force_user *)collected, openflags, mode);
 
 			if (wfd >= 0) {
 				sys_fchown(wfd, uid, gid);
@@ -327,17 +327,17 @@ static int __init do_name(void)
 			}
 		}
 	} else if (S_ISDIR(mode)) {
-		sys_mkdir(collected, mode);
-		sys_chown(collected, uid, gid);
-		sys_chmod(collected, mode);
+		sys_mkdir((char __force_user *)collected, mode);
+		sys_chown((char __force_user *)collected, uid, gid);
+		sys_chmod((char __force_user *)collected, mode);
 		dir_add(collected, mtime);
 	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
 		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
 		if (maybe_link() == 0) {
-			sys_mknod(collected, mode, rdev);
-			sys_chown(collected, uid, gid);
-			sys_chmod(collected, mode);
-			do_utime(collected, mtime);
+			sys_mknod((char __force_user *)collected, mode, rdev);
+			sys_chown((char __force_user *)collected, uid, gid);
+			sys_chmod((char __force_user *)collected, mode);
+			do_utime((char __force_user *)collected, mtime);
 		}
 	}
 	return 0;
@@ -346,15 +346,15 @@ static int __init do_name(void)
 static int __init do_copy(void)
 {
 	if (count >= body_len) {
-		sys_write(wfd, victim, body_len);
+		sys_write(wfd, (char __force_user *)victim, body_len);
 		sys_close(wfd);
-		do_utime(vcollected, mtime);
+		do_utime((char __force_user *)vcollected, mtime);
 		kfree(vcollected);
 		eat(body_len);
 		state = SkipIt;
 		return 0;
 	} else {
-		sys_write(wfd, victim, count);
+		sys_write(wfd, (char __force_user *)victim, count);
 		body_len -= count;
 		eat(count);
 		return 1;
@@ -365,9 +365,9 @@ static int __init do_symlink(void)
 {
 	collected[N_ALIGN(name_len) + body_len] = '\0';
 	clean_path(collected, 0);
-	sys_symlink(collected + N_ALIGN(name_len), collected);
-	sys_lchown(collected, uid, gid);
-	do_utime(collected, mtime);
+	sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
+	sys_lchown((char __force_user *)collected, uid, gid);
+	do_utime((char __force_user *)collected, mtime);
 	state = SkipIt;
 	next_state = Reset;
 	return 0;
@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
 {
 	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
 	if (err)
-		panic(err);	/* Failed to decompress INTERNAL initramfs */
+		panic("%s", err);	/* Failed to decompress INTERNAL initramfs */
 	if (initrd_start) {
 #ifdef CONFIG_BLK_DEV_RAM
 		int fd;
diff -ruNp linux-3.13.11/init/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/main.c
--- linux-3.13.11/init/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/init/main.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,6 +77,7 @@
 #include <linux/sched_clock.h>
 #include <linux/context_tracking.h>
 #include <linux/random.h>
+#include <linux/vserver/percpu.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -103,6 +104,8 @@ static inline void mark_rodata_ro(void)
 extern void tc_init(void);
 #endif
 
+extern void grsecurity_init(void);
+
 /*
  * Debug helper: via this flag we know that we are in 'early bootup code'
  * where only the boot processor is running with IRQ disabled.  This means
@@ -164,6 +167,75 @@ static int __init set_reset_devices(char
 
 __setup("reset_devices", set_reset_devices);
 
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
+static int __init setup_grsec_proc_gid(char *str)
+{
+	grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
+	return 1;
+}
+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
+#endif
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+unsigned long pax_user_shadow_base __read_only;
+EXPORT_SYMBOL(pax_user_shadow_base);
+extern char pax_enter_kernel_user[];
+extern char pax_exit_kernel_user[];
+#endif
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
+static int __init setup_pax_nouderef(char *str)
+{
+#ifdef CONFIG_X86_32
+	unsigned int cpu;
+	struct desc_struct *gdt;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		gdt = get_cpu_gdt_table(cpu);
+		gdt[GDT_ENTRY_KERNEL_DS].type = 3;
+		gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
+		gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
+		gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
+	}
+	loadsegment(ds, __KERNEL_DS);
+	loadsegment(es, __KERNEL_DS);
+	loadsegment(ss, __KERNEL_DS);
+#else
+	memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
+	memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
+	clone_pgd_mask = ~(pgdval_t)0UL;
+	pax_user_shadow_base = 0UL;
+	setup_clear_cpu_cap(X86_FEATURE_PCID);
+	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+#endif
+
+	return 0;
+}
+early_param("pax_nouderef", setup_pax_nouderef);
+
+#ifdef CONFIG_X86_64
+static int __init setup_pax_weakuderef(char *str)
+{
+	if (clone_pgd_mask != ~(pgdval_t)0UL)
+		pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
+	return 1;
+}
+__setup("pax_weakuderef", setup_pax_weakuderef);
+#endif
+#endif
+
+#ifdef CONFIG_PAX_SOFTMODE
+int pax_softmode;
+
+static int __init setup_pax_softmode(char *str)
+{
+	get_option(&str, &pax_softmode);
+	return 1;
+}
+__setup("pax_softmode=", setup_pax_softmode);
+#endif
+
 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
 static const char *panic_later, *panic_param;
@@ -691,25 +763,24 @@ int __init_or_module do_one_initcall(ini
 {
 	int count = preempt_count();
 	int ret;
-	char msgbuf[64];
+	const char *msg1 = "", *msg2 = "";
 
 	if (initcall_debug)
 		ret = do_one_initcall_debug(fn);
 	else
 		ret = fn();
 
-	msgbuf[0] = 0;
-
 	if (preempt_count() != count) {
-		sprintf(msgbuf, "preemption imbalance ");
+		msg1 = " preemption imbalance";
 		preempt_count_set(count);
 	}
 	if (irqs_disabled()) {
-		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
+		msg2 = " disabled interrupts";
 		local_irq_enable();
 	}
-	WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
+	WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
 
+	add_latent_entropy();
 	return ret;
 }
 
@@ -816,8 +887,8 @@ static int run_init_process(const char *
 {
 	argv_init[0] = init_filename;
 	return do_execve(init_filename,
-		(const char __user *const __user *)argv_init,
-		(const char __user *const __user *)envp_init);
+		(const char __user *const __force_user *)argv_init,
+		(const char __user *const __force_user *)envp_init);
 }
 
 static int try_to_run_init_process(const char *init_filename)
@@ -834,6 +905,10 @@ static int try_to_run_init_process(const
 	return ret;
 }
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+extern int gr_init_ran;
+#endif
+
 static noinline void __init kernel_init_freeable(void);
 
 static int __ref kernel_init(void *unused)
@@ -858,6 +933,11 @@ static int __ref kernel_init(void *unuse
 		       ramdisk_execute_command, ret);
 	}
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
+	/* if no initrd was used, be extra sure we enforce chroot restrictions */
+	gr_init_ran = 1;
+#endif
+
 	/*
 	 * We try each of these until one succeeds.
 	 *
@@ -913,7 +993,7 @@ static noinline void __init kernel_init_
 	do_basic_setup();
 
 	/* Open the /dev/console on the rootfs, this should never fail */
-	if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+	if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
 		pr_err("Warning: unable to open an initial console.\n");
 
 	(void) sys_dup(0);
@@ -926,11 +1006,13 @@ static noinline void __init kernel_init_
 	if (!ramdisk_execute_command)
 		ramdisk_execute_command = "/init";
 
-	if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
+	if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
 		ramdisk_execute_command = NULL;
 		prepare_namespace();
 	}
 
+	grsecurity_init();
+
 	/*
 	 * Ok, we have completed the initial bootup, and
 	 * we're essentially up and running. Get rid of the
diff -ruNp linux-3.13.11/ipc/compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/compat.c
--- linux-3.13.11/ipc/compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, i
 			       COMPAT_SHMLBA);
 		if (err < 0)
 			return err;
-		return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
+		return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
 	}
 	case SHMDT:
 		return sys_shmdt(compat_ptr(ptr));
diff -ruNp linux-3.13.11/ipc/ipc_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/ipc_sysctl.c
--- linux-3.13.11/ipc/ipc_sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/ipc_sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
 static int proc_ipc_dointvec(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table ipc_table;
+	ctl_table_no_const ipc_table;
 
 	memcpy(&ipc_table, table, sizeof(ipc_table));
 	ipc_table.data = get_ipc(table);
@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *
 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table ipc_table;
+	ctl_table_no_const ipc_table;
 
 	memcpy(&ipc_table, table, sizeof(ipc_table));
 	ipc_table.data = get_ipc(table);
@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orph
 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table ipc_table;
+	ctl_table_no_const ipc_table;
 	size_t lenp_bef = *lenp;
 	int rc;
 
@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_mi
 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table ipc_table;
+	ctl_table_no_const ipc_table;
 	memcpy(&ipc_table, table, sizeof(ipc_table));
 	ipc_table.data = get_ipc(table);
 
@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table ipc_table;
+	ctl_table_no_const ipc_table;
 	size_t lenp_bef = *lenp;
 	int oldval;
 	int rc;
diff -ruNp linux-3.13.11/ipc/mq_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/mq_sysctl.c
--- linux-3.13.11/ipc/mq_sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/mq_sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
 static int proc_mq_dointvec(ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table mq_table;
+	ctl_table_no_const mq_table;
 	memcpy(&mq_table, table, sizeof(mq_table));
 	mq_table.data = get_mq(table);
 
@@ -35,7 +35,7 @@ static int proc_mq_dointvec(ctl_table *t
 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table mq_table;
+	ctl_table_no_const mq_table;
 	memcpy(&mq_table, table, sizeof(mq_table));
 	mq_table.data = get_mq(table);
 
diff -ruNp linux-3.13.11/ipc/mqueue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/mqueue.c
--- linux-3.13.11/ipc/mqueue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/mqueue.c	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,8 @@
 #include <linux/ipc_namespace.h>
 #include <linux/user_namespace.h>
 #include <linux/slab.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 #include <net/sock.h>
 #include "util.h"
@@ -76,6 +78,7 @@ struct mqueue_inode_info {
 	struct pid* notify_owner;
 	struct user_namespace *notify_user_ns;
 	struct user_struct *user;	/* user who created, for accounting */
+	struct vx_info *vxi;
 	struct sock *notify_sock;
 	struct sk_buff *notify_cookie;
 
@@ -234,6 +237,7 @@ static struct inode *mqueue_get_inode(st
 	if (S_ISREG(mode)) {
 		struct mqueue_inode_info *info;
 		unsigned long mq_bytes, mq_treesize;
+		struct vx_info *vxi = current_vx_info();
 
 		inode->i_fop = &mqueue_file_operations;
 		inode->i_size = FILENT_SIZE;
@@ -247,6 +251,7 @@ static struct inode *mqueue_get_inode(st
 		info->notify_user_ns = NULL;
 		info->qsize = 0;
 		info->user = NULL;	/* set when all is ok */
+		info->vxi = NULL;
 		info->msg_tree = RB_ROOT;
 		info->node_cache = NULL;
 		memset(&info->attr, 0, sizeof(info->attr));
@@ -278,19 +283,23 @@ static struct inode *mqueue_get_inode(st
 		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 					  info->attr.mq_msgsize);
 
+		gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
 		spin_lock(&mq_lock);
 		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
-		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
+		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE) ||
+		    !vx_ipcmsg_avail(vxi, mq_bytes)) {
 			spin_unlock(&mq_lock);
 			/* mqueue_evict_inode() releases info->messages */
 			ret = -EMFILE;
 			goto out_inode;
 		}
 		u->mq_bytes += mq_bytes;
+		vx_ipcmsg_add(vxi, u, mq_bytes);
 		spin_unlock(&mq_lock);
 
 		/* all is ok */
 		info->user = get_uid(u);
+		info->vxi = get_vx_info(vxi);
 	} else if (S_ISDIR(mode)) {
 		inc_nlink(inode);
 		/* Some things misbehave if size == 0 on a directory */
@@ -402,8 +411,11 @@ static void mqueue_evict_inode(struct in
 
 	user = info->user;
 	if (user) {
+		struct vx_info *vxi = info->vxi;
+
 		spin_lock(&mq_lock);
 		user->mq_bytes -= mq_bytes;
+		vx_ipcmsg_sub(vxi, user, mq_bytes);
 		/*
 		 * get_ns_from_inode() ensures that the
 		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
@@ -413,6 +425,7 @@ static void mqueue_evict_inode(struct in
 		if (ipc_ns)
 			ipc_ns->mq_queues_count--;
 		spin_unlock(&mq_lock);
+		put_vx_info(vxi);
 		free_uid(user);
 	}
 	if (ipc_ns)
diff -ruNp linux-3.13.11/ipc/msg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/msg.c
--- linux-3.13.11/ipc/msg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/msg.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,7 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
 
 #include <asm/current.h>
 #include <asm/uaccess.h>
@@ -194,6 +195,7 @@ static int newque(struct ipc_namespace *
 
 	msq->q_perm.mode = msgflg & S_IRWXUGO;
 	msq->q_perm.key = key;
+	msq->q_perm.xid = vx_current_xid();
 
 	msq->q_perm.security = NULL;
 	retval = security_msg_queue_alloc(msq);
@@ -297,18 +299,19 @@ static inline int msg_security(struct ke
 	return security_msg_queue_associate(msq, msgflg);
 }
 
+static struct ipc_ops msg_ops = {
+	.getnew		= newque,
+	.associate	= msg_security,
+	.more_checks	= NULL
+};
+
 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops msg_ops;
 	struct ipc_params msg_params;
 
 	ns = current->nsproxy->ipc_ns;
 
-	msg_ops.getnew = newque;
-	msg_ops.associate = msg_security;
-	msg_ops.more_checks = NULL;
-
 	msg_params.key = key;
 	msg_params.flg = msgflg;
 
diff -ruNp linux-3.13.11/ipc/sem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/sem.c
--- linux-3.13.11/ipc/sem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/sem.c	2014-07-09 12:00:15.000000000 +0200
@@ -86,6 +86,8 @@
 #include <linux/rwsem.h>
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 #include "util.h"
@@ -500,6 +502,7 @@ static int newary(struct ipc_namespace *
 
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
+	sma->sem_perm.xid = vx_current_xid();
 
 	sma->sem_perm.security = NULL;
 	retval = security_sem_alloc(sma);
@@ -514,6 +517,9 @@ static int newary(struct ipc_namespace *
 		return id;
 	}
 	ns->used_sems += nsems;
+	/* FIXME: obsoleted? */
+	vx_semary_inc(sma);
+	vx_nsems_add(sma, nsems);
 
 	sma->sem_base = (struct sem *) &sma[1];
 
@@ -562,10 +568,15 @@ static inline int sem_more_checks(struct
 	return 0;
 }
 
+static struct ipc_ops sem_ops = {
+	.getnew		= newary,
+	.associate	= sem_security,
+	.more_checks	= sem_more_checks
+};
+
 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops sem_ops;
 	struct ipc_params sem_params;
 
 	ns = current->nsproxy->ipc_ns;
@@ -573,10 +584,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
 	if (nsems < 0 || nsems > ns->sc_semmsl)
 		return -EINVAL;
 
-	sem_ops.getnew = newary;
-	sem_ops.associate = sem_security;
-	sem_ops.more_checks = sem_more_checks;
-
 	sem_params.key = key;
 	sem_params.flg = semflg;
 	sem_params.u.nsems = nsems;
@@ -1103,6 +1110,9 @@ static void freeary(struct ipc_namespace
 
 	wake_up_sem_queue_do(&tasks);
 	ns->used_sems -= sma->sem_nsems;
+	/* FIXME: obsoleted? */
+	vx_nsems_sub(sma, sma->sem_nsems);
+	vx_semary_dec(sma);
 	ipc_rcu_putref(sma, sem_rcu_free);
 }
 
diff -ruNp linux-3.13.11/ipc/shm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/shm.c
--- linux-3.13.11/ipc/shm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/shm.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,6 +42,8 @@
 #include <linux/nsproxy.h>
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 #include <asm/uaccess.h>
 
@@ -72,6 +74,14 @@ static void shm_destroy (struct ipc_name
 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
 #endif
 
+#ifdef CONFIG_GRKERNSEC
+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+			   const time_t shm_createtime, const kuid_t cuid,
+			   const int shmid);
+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
+			   const time_t shm_createtime);
+#endif
+
 void shm_init_ns(struct ipc_namespace *ns)
 {
 	ns->shm_ctlmax = SHMMAX;
@@ -209,10 +219,14 @@ static void shm_open(struct vm_area_stru
 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 {
 	struct file *shm_file;
+	struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid);
+	int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	shm_file = shp->shm_file;
 	shp->shm_file = NULL;
-	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	vx_ipcshm_sub(vxi, shp, numpages);
+	ns->shm_tot -= numpages;
+
 	shm_rmid(ns, shp);
 	shm_unlock(shp);
 	if (!is_file_hugepages(shm_file))
@@ -220,6 +234,7 @@ static void shm_destroy(struct ipc_names
 	else if (shp->mlock_user)
 		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
 	fput(shm_file);
+	put_vx_info(vxi);
 	ipc_rcu_putref(shp, shm_rcu_free);
 }
 
@@ -497,11 +512,15 @@ static int newseg(struct ipc_namespace *
 	if (ns->shm_tot + numpages > ns->shm_ctlall)
 		return -ENOSPC;
 
+	if (!vx_ipcshm_avail(current_vx_info(), numpages))
+		return -ENOSPC;
+
 	shp = ipc_rcu_alloc(sizeof(*shp));
 	if (!shp)
 		return -ENOMEM;
 
 	shp->shm_perm.key = key;
+	shp->shm_perm.xid = vx_current_xid();
 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
 	shp->mlock_user = NULL;
 
@@ -554,6 +573,14 @@ static int newseg(struct ipc_namespace *
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
 	shp->shm_ctim = get_seconds();
+#ifdef CONFIG_GRKERNSEC
+	{
+		struct timespec timeval;
+		do_posix_clock_monotonic_gettime(&timeval);
+
+		shp->shm_createtime = timeval.tv_sec;
+	}
+#endif
 	shp->shm_segsz = size;
 	shp->shm_nattch = 0;
 	shp->shm_file = file;
@@ -570,6 +597,7 @@ static int newseg(struct ipc_namespace *
 
 	ipc_unlock_object(&shp->shm_perm);
 	rcu_read_unlock();
+	vx_ipcshm_add(current_vx_info(), key, numpages);
 	return error;
 
 no_id:
@@ -607,18 +635,19 @@ static inline int shm_more_checks(struct
 	return 0;
 }
 
+static struct ipc_ops shm_ops = {
+	.getnew		= newseg,
+	.associate	= shm_security,
+	.more_checks	= shm_more_checks
+};
+
 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops shm_ops;
 	struct ipc_params shm_params;
 
 	ns = current->nsproxy->ipc_ns;
 
-	shm_ops.getnew = newseg;
-	shm_ops.associate = shm_security;
-	shm_ops.more_checks = shm_more_checks;
-
 	shm_params.key = key;
 	shm_params.flg = shmflg;
 	shm_params.u.size = size;
@@ -1089,6 +1118,12 @@ long do_shmat(int shmid, char __user *sh
 		f_mode = FMODE_READ | FMODE_WRITE;
 	}
 	if (shmflg & SHM_EXEC) {
+
+#ifdef CONFIG_PAX_MPROTECT
+		if (current->mm->pax_flags & MF_PAX_MPROTECT)
+			goto out;
+#endif
+
 		prot |= PROT_EXEC;
 		acc_mode |= S_IXUGO;
 	}
@@ -1113,6 +1148,15 @@ long do_shmat(int shmid, char __user *sh
 	if (err)
 		goto out_unlock;
 
+#ifdef CONFIG_GRKERNSEC
+	if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
+			     shp->shm_perm.cuid, shmid) ||
+	    !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
+		err = -EACCES;
+		goto out_unlock;
+	}
+#endif
+
 	ipc_lock_object(&shp->shm_perm);
 
 	/* check if shm_destroy() is tearing down shp */
@@ -1125,6 +1169,9 @@ long do_shmat(int shmid, char __user *sh
 	path = shp->shm_file->f_path;
 	path_get(&path);
 	shp->shm_nattch++;
+#ifdef CONFIG_GRKERNSEC
+	shp->shm_lapid = current->pid;
+#endif
 	size = i_size_read(path.dentry->d_inode);
 	ipc_unlock_object(&shp->shm_perm);
 	rcu_read_unlock();
diff -ruNp linux-3.13.11/ipc/util.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/util.c
--- linux-3.13.11/ipc/util.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/ipc/util.c	2014-07-09 12:00:15.000000000 +0200
@@ -71,6 +71,8 @@ struct ipc_proc_iface {
 	int (*show)(struct seq_file *, void *);
 };
 
+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
+
 static void ipc_memory_notifier(struct work_struct *work)
 {
 	ipcns_notify(IPCNS_MEMCHANGED);
@@ -558,6 +560,10 @@ int ipcperms(struct ipc_namespace *ns, s
 		granted_mode >>= 6;
 	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
 		granted_mode >>= 3;
+
+	if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
+		return -1;
+
 	/* is there some bit set in requested_mode but not in granted_mode? */
 	if ((requested_mode & ~granted_mode & 0007) && 
 	    !ns_capable(ns->user_ns, CAP_IPC_OWNER))
diff -ruNp linux-3.13.11/kernel/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/Makefile
--- linux-3.13.11/kernel/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -25,6 +25,7 @@ obj-y += printk/
 obj-y += cpu/
 obj-y += irq/
 obj-y += rcu/
+obj-y += vserver/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
diff -ruNp linux-3.13.11/kernel/acct.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/acct.c
--- linux-3.13.11/kernel/acct.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/acct.c	2014-07-09 12:00:15.000000000 +0200
@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_a
 	 */
 	flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
-	file->f_op->write(file, (char *)&ac,
+	file->f_op->write(file, (char __force_user *)&ac,
 			       sizeof(acct_t), &file->f_pos);
 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
 	set_fs(fs);
diff -ruNp linux-3.13.11/kernel/audit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/audit.c
--- linux-3.13.11/kernel/audit.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/audit.c	2014-07-09 12:00:15.000000000 +0200
@@ -118,7 +118,7 @@ u32		audit_sig_sid = 0;
    3) suppressed due to audit_rate_limit
    4) suppressed due to audit_backlog_limit
 */
-static atomic_t    audit_lost = ATOMIC_INIT(0);
+static atomic_unchecked_t    audit_lost = ATOMIC_INIT(0);
 
 /* The netlink socket. */
 static struct sock *audit_sock;
@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
 	unsigned long		now;
 	int			print;
 
-	atomic_inc(&audit_lost);
+	atomic_inc_unchecked(&audit_lost);
 
 	print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
 
@@ -270,7 +270,7 @@ void audit_log_lost(const char *message)
 			printk(KERN_WARNING
 				"audit: audit_lost=%d audit_rate_limit=%d "
 				"audit_backlog_limit=%d\n",
-				atomic_read(&audit_lost),
+				atomic_read_unchecked(&audit_lost),
 				audit_rate_limit,
 				audit_backlog_limit);
 		audit_panic(message);
@@ -766,7 +766,7 @@ static int audit_receive_msg(struct sk_b
 		status_set.pid		 = audit_pid;
 		status_set.rate_limit	 = audit_rate_limit;
 		status_set.backlog_limit = audit_backlog_limit;
-		status_set.lost		 = atomic_read(&audit_lost);
+		status_set.lost		 = atomic_read_unchecked(&audit_lost);
 		status_set.backlog	 = skb_queue_len(&audit_skb_queue);
 		audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
 				 &status_set, sizeof(status_set));
@@ -1359,7 +1359,7 @@ void audit_log_n_hex(struct audit_buffer
 	int i, avail, new_len;
 	unsigned char *ptr;
 	struct sk_buff *skb;
-	static const unsigned char *hex = "0123456789ABCDEF";
+	static const unsigned char hex[] = "0123456789ABCDEF";
 
 	if (!ab)
 		return;
diff -ruNp linux-3.13.11/kernel/auditsc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/auditsc.c
--- linux-3.13.11/kernel/auditsc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/auditsc.c	2014-07-09 12:00:15.000000000 +0200
@@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_conte
 }
 
 /* global counter which is incremented every time something logs in */
-static atomic_t session_id = ATOMIC_INIT(0);
+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
 
 static int audit_set_loginuid_perm(kuid_t loginuid)
 {
@@ -1956,7 +1956,7 @@ static int audit_set_loginuid_perm(kuid_
 	if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE))
 		return -EPERM;
 	/* it is set, you need permission */
-	if (!capable(CAP_AUDIT_CONTROL))
+	if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL))
 		return -EPERM;
 	/* reject if this is not an unset and we don't allow that */
 	if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid))
@@ -2011,7 +2011,7 @@ int audit_set_loginuid(kuid_t loginuid)
 
 	/* are we setting or clearing? */
 	if (uid_valid(loginuid))
-		sessionid = atomic_inc_return(&session_id);
+		sessionid = atomic_inc_return_unchecked(&session_id);
 
 	task->sessionid = sessionid;
 	task->loginuid = loginuid;
diff -ruNp linux-3.13.11/kernel/capability.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/capability.c
--- linux-3.13.11/kernel/capability.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/capability.c	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_context.h>
 #include <asm/uaccess.h>
 
 /*
@@ -116,6 +117,7 @@ static int cap_validate_magic(cap_user_h
 	return 0;
 }
 
+
 /*
  * The only thing that can change the capabilities of the current
  * process is the current process. As such, we can't be in this code
@@ -202,6 +204,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
 		 * before modification is attempted and the application
 		 * fails.
 		 */
+		if (tocopy > ARRAY_SIZE(kdata))
+			return -EFAULT;
+
 		if (copy_to_user(dataptr, kdata, tocopy
 				 * sizeof(struct __user_cap_data_struct))) {
 			return -EFAULT;
@@ -303,10 +308,11 @@ bool has_ns_capability(struct task_struc
 	int ret;
 
 	rcu_read_lock();
-	ret = security_capable(__task_cred(t), ns, cap);
+	ret = security_capable(__task_cred(t), ns, cap) == 0 &&
+		gr_task_is_capable(t, __task_cred(t), cap);
 	rcu_read_unlock();
 
-	return (ret == 0);
+	return ret;
 }
 
 /**
@@ -343,12 +349,14 @@ bool has_ns_capability_noaudit(struct ta
 	int ret;
 
 	rcu_read_lock();
-	ret = security_capable_noaudit(__task_cred(t), ns, cap);
+	ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
 	rcu_read_unlock();
 
-	return (ret == 0);
+	return ret;
 }
 
+#include <linux/vserver/base.h>
+
 /**
  * has_capability_noaudit - Does a task have a capability (unaudited) in the
  * initial user ns
@@ -384,7 +392,7 @@ bool ns_capable(struct user_namespace *n
 		BUG();
 	}
 
-	if (security_capable(current_cred(), ns, cap) == 0) {
+	if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
 		current->flags |= PF_SUPERPRIV;
 		return true;
 	}
@@ -392,6 +400,21 @@ bool ns_capable(struct user_namespace *n
 }
 EXPORT_SYMBOL(ns_capable);
 
+bool ns_capable_nolog(struct user_namespace *ns, int cap)
+{
+	if (unlikely(!cap_valid(cap))) {
+		printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
+		BUG();
+	}
+
+	if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
+		current->flags |= PF_SUPERPRIV;
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(ns_capable_nolog);
+
 /**
  * file_ns_capable - Determine if the file's opener had a capability in effect
  * @file:  The file we want to check
@@ -432,6 +455,12 @@ bool capable(int cap)
 }
 EXPORT_SYMBOL(capable);
 
+bool capable_nolog(int cap)
+{
+	return ns_capable_nolog(&init_user_ns, cap);
+}
+EXPORT_SYMBOL(capable_nolog);
+
 /**
  * inode_capable - Check superior capability over inode
  * @inode: The inode in question
@@ -453,3 +482,11 @@ bool inode_capable(const struct inode *i
 	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
 }
 EXPORT_SYMBOL(inode_capable);
+
+bool inode_capable_nolog(const struct inode *inode, int cap)
+{
+	struct user_namespace *ns = current_user_ns();
+
+	return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
+}
+EXPORT_SYMBOL(inode_capable_nolog);
diff -ruNp linux-3.13.11/kernel/cgroup.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/cgroup.c
--- linux-3.13.11/kernel/cgroup.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/cgroup.c	2014-07-09 12:00:15.000000000 +0200
@@ -5609,7 +5609,7 @@ static int cgroup_css_links_read(struct
 		struct css_set *cset = link->cset;
 		struct task_struct *task;
 		int count = 0;
-		seq_printf(seq, "css_set %p\n", cset);
+		seq_printf(seq, "css_set %pK\n", cset);
 		list_for_each_entry(task, &cset->tasks, cg_list) {
 			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
 				seq_puts(seq, "  ...\n");
diff -ruNp linux-3.13.11/kernel/compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/compat.c
--- linux-3.13.11/kernel/compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -13,6 +13,7 @@
 
 #include <linux/linkage.h>
 #include <linux/compat.h>
+#include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/time.h>
 #include <linux/signal.h>
@@ -27,6 +28,7 @@
 #include <linux/times.h>
 #include <linux/ptrace.h>
 #include <linux/gfp.h>
+#include <linux/vs_time.h>
 
 #include <asm/uaccess.h>
 
@@ -220,7 +222,7 @@ static long compat_nanosleep_restart(str
 	mm_segment_t oldfs;
 	long ret;
 
-	restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
+	restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	ret = hrtimer_nanosleep_restart(restart);
@@ -252,7 +254,7 @@ asmlinkage long compat_sys_nanosleep(str
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	ret = hrtimer_nanosleep(&tu,
-				rmtp ? (struct timespec __user *)&rmt : NULL,
+				rmtp ? (struct timespec __force_user *)&rmt : NULL,
 				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
 	set_fs(oldfs);
 
@@ -361,7 +363,7 @@ asmlinkage long compat_sys_sigpending(co
 	mm_segment_t old_fs = get_fs();
 
 	set_fs(KERNEL_DS);
-	ret = sys_sigpending((old_sigset_t __user *) &s);
+	ret = sys_sigpending((old_sigset_t __force_user *) &s);
 	set_fs(old_fs);
 	if (ret == 0)
 		ret = put_user(s, set);
@@ -451,7 +453,7 @@ asmlinkage long compat_sys_old_getrlimit
 	mm_segment_t old_fs = get_fs();
 
 	set_fs(KERNEL_DS);
-	ret = sys_old_getrlimit(resource, &r);
+	ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
 	set_fs(old_fs);
 
 	if (!ret) {
@@ -533,8 +535,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
 		set_fs (KERNEL_DS);
 		ret = sys_wait4(pid,
 				(stat_addr ?
-				 (unsigned int __user *) &status : NULL),
-				options, (struct rusage __user *) &r);
+				 (unsigned int __force_user *) &status : NULL),
+				options, (struct rusage __force_user *) &r);
 		set_fs (old_fs);
 
 		if (ret > 0) {
@@ -560,8 +562,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 	memset(&info, 0, sizeof(info));
 
 	set_fs(KERNEL_DS);
-	ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
-			 uru ? (struct rusage __user *)&ru : NULL);
+	ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
+			 uru ? (struct rusage __force_user *)&ru : NULL);
 	set_fs(old_fs);
 
 	if ((ret < 0) || (info.si_signo == 0))
@@ -695,8 +697,8 @@ long compat_sys_timer_settime(timer_t ti
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_timer_settime(timer_id, flags,
-				(struct itimerspec __user *) &newts,
-				(struct itimerspec __user *) &oldts);
+				(struct itimerspec __force_user *) &newts,
+				(struct itimerspec __force_user *) &oldts);
 	set_fs(oldfs);
 	if (!err && old && put_compat_itimerspec(old, &oldts))
 		return -EFAULT;
@@ -713,7 +715,7 @@ long compat_sys_timer_gettime(timer_t ti
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_timer_gettime(timer_id,
-				(struct itimerspec __user *) &ts);
+				(struct itimerspec __force_user *) &ts);
 	set_fs(oldfs);
 	if (!err && put_compat_itimerspec(setting, &ts))
 		return -EFAULT;
@@ -732,7 +734,7 @@ long compat_sys_clock_settime(clockid_t
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_clock_settime(which_clock,
-				(struct timespec __user *) &ts);
+				(struct timespec __force_user *) &ts);
 	set_fs(oldfs);
 	return err;
 }
@@ -747,7 +749,7 @@ long compat_sys_clock_gettime(clockid_t
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_clock_gettime(which_clock,
-				(struct timespec __user *) &ts);
+				(struct timespec __force_user *) &ts);
 	set_fs(oldfs);
 	if (!err && put_compat_timespec(&ts, tp))
 		return -EFAULT;
@@ -767,7 +769,7 @@ long compat_sys_clock_adjtime(clockid_t
 
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
-	ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
+	ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
 	set_fs(oldfs);
 
 	err = compat_put_timex(utp, &txc);
@@ -787,7 +789,7 @@ long compat_sys_clock_getres(clockid_t w
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_clock_getres(which_clock,
-			       (struct timespec __user *) &ts);
+			       (struct timespec __force_user *) &ts);
 	set_fs(oldfs);
 	if (!err && tp && put_compat_timespec(&ts, tp))
 		return -EFAULT;
@@ -799,9 +801,9 @@ static long compat_clock_nanosleep_resta
 	long err;
 	mm_segment_t oldfs;
 	struct timespec tu;
-	struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
+	struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
 
-	restart->nanosleep.rmtp = (struct timespec __user *) &tu;
+	restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = clock_nanosleep_restart(restart);
@@ -833,8 +835,8 @@ long compat_sys_clock_nanosleep(clockid_
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_clock_nanosleep(which_clock, flags,
-				  (struct timespec __user *) &in,
-				  (struct timespec __user *) &out);
+				  (struct timespec __force_user *) &in,
+				  (struct timespec __force_user *) &out);
 	set_fs(oldfs);
 
 	if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
@@ -1040,7 +1042,7 @@ asmlinkage long compat_sys_stime(compat_
 	if (err)
 		return err;
 
-	do_settimeofday(&tv);
+	vx_settimeofday(&tv);
 	return 0;
 }
 
@@ -1128,7 +1130,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_inte
 	mm_segment_t old_fs = get_fs();
 
 	set_fs(KERNEL_DS);
-	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+	ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
 	set_fs(old_fs);
 	if (put_compat_timespec(&t, interval))
 		return -EFAULT;
diff -ruNp linux-3.13.11/kernel/configs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/configs.c
--- linux-3.13.11/kernel/configs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/configs.c	2014-07-09 12:00:15.000000000 +0200
@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
 	struct proc_dir_entry *entry;
 
 	/* create the current config file */
+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
+	entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
+			    &ikconfig_file_ops);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
+			    &ikconfig_file_ops);
+#endif
+#else
 	entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
 			    &ikconfig_file_ops);
+#endif
+
 	if (!entry)
 		return -ENOMEM;
 
diff -ruNp linux-3.13.11/kernel/cred.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/cred.c
--- linux-3.13.11/kernel/cred.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/cred.c	2014-07-09 12:00:15.000000000 +0200
@@ -56,31 +56,6 @@ struct cred init_cred = {
 	.group_info		= &init_groups,
 };
 
-static inline void set_cred_subscribers(struct cred *cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	atomic_set(&cred->subscribers, n);
-#endif
-}
-
-static inline int read_cred_subscribers(const struct cred *cred)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	return atomic_read(&cred->subscribers);
-#else
-	return 0;
-#endif
-}
-
-static inline void alter_cred_subscribers(const struct cred *_cred, int n)
-{
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	struct cred *cred = (struct cred *) _cred;
-
-	atomic_add(n, &cred->subscribers);
-#endif
-}
-
 /*
  * The RCU callback to actually dispose of a set of credentials
  */
@@ -164,6 +139,16 @@ void exit_creds(struct task_struct *tsk)
 	validate_creds(cred);
 	alter_cred_subscribers(cred, -1);
 	put_cred(cred);
+
+#ifdef CONFIG_GRKERNSEC_SETXID
+	cred = (struct cred *) tsk->delayed_cred;
+	if (cred != NULL) {
+		tsk->delayed_cred = NULL;
+		validate_creds(cred);
+		alter_cred_subscribers(cred, -1);
+		put_cred(cred);
+	}
+#endif
 }
 
 /**
@@ -232,21 +217,16 @@ error:
  *
  * Call commit_creds() or abort_creds() to clean up.
  */
-struct cred *prepare_creds(void)
+struct cred *__prepare_creds(const struct cred *old)
 {
-	struct task_struct *task = current;
-	const struct cred *old;
 	struct cred *new;
 
-	validate_process_creds();
-
 	new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
 	if (!new)
 		return NULL;
 
 	kdebug("prepare_creds() alloc %p", new);
 
-	old = task->cred;
 	memcpy(new, old, sizeof(struct cred));
 
 	atomic_set(&new->usage, 1);
@@ -275,6 +255,13 @@ error:
 	abort_creds(new);
 	return NULL;
 }
+
+struct cred *prepare_creds(void)
+{
+	validate_process_creds();
+
+	return __prepare_creds(current->cred);
+}
 EXPORT_SYMBOL(prepare_creds);
 
 /*
@@ -411,7 +398,7 @@ static bool cred_cap_issubset(const stru
  * Always returns 0 thus allowing this function to be tail-called at the end
  * of, say, sys_setgid().
  */
-int commit_creds(struct cred *new)
+static int __commit_creds(struct cred *new)
 {
 	struct task_struct *task = current;
 	const struct cred *old = task->real_cred;
@@ -430,6 +417,8 @@ int commit_creds(struct cred *new)
 
 	get_cred(new); /* we will require a ref for the subj creds too */
 
+	gr_set_role_label(task, new->uid, new->gid);
+
 	/* dumpability changes */
 	if (!uid_eq(old->euid, new->euid) ||
 	    !gid_eq(old->egid, new->egid) ||
@@ -479,6 +468,102 @@ int commit_creds(struct cred *new)
 	put_cred(old);
 	return 0;
 }
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern int set_user(struct cred *new);
+
+void gr_delayed_cred_worker(void)
+{
+	const struct cred *new = current->delayed_cred;
+	struct cred *ncred;
+
+	current->delayed_cred = NULL;
+
+	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
+		// from doing get_cred on it when queueing this
+		put_cred(new);
+		return;
+	} else if (new == NULL)
+		return;
+
+	ncred = prepare_creds();
+	if (!ncred)
+		goto die;
+	// uids
+	ncred->uid = new->uid;
+	ncred->euid = new->euid;
+	ncred->suid = new->suid;
+	ncred->fsuid = new->fsuid;
+	// gids
+	ncred->gid = new->gid;
+	ncred->egid = new->egid;
+	ncred->sgid = new->sgid;
+	ncred->fsgid = new->fsgid;
+	// groups
+	if (set_groups(ncred, new->group_info) < 0) {
+		abort_creds(ncred);
+		goto die;
+	}
+	// caps
+	ncred->securebits = new->securebits;
+	ncred->cap_inheritable = new->cap_inheritable;
+	ncred->cap_permitted = new->cap_permitted;
+	ncred->cap_effective = new->cap_effective;
+	ncred->cap_bset = new->cap_bset;
+
+	if (set_user(ncred)) {
+		abort_creds(ncred);
+		goto die;
+	}
+
+	// from doing get_cred on it when queueing this
+	put_cred(new);
+
+	__commit_creds(ncred);
+	return;
+die:
+	// from doing get_cred on it when queueing this
+	put_cred(new);
+	do_group_exit(SIGKILL);
+}
+#endif
+
+int commit_creds(struct cred *new)
+{
+#ifdef CONFIG_GRKERNSEC_SETXID
+	int ret;
+	int schedule_it = 0;
+	struct task_struct *t;
+
+	/* we won't get called with tasklist_lock held for writing
+	   and interrupts disabled as the cred struct in that case is
+	   init_cred
+	*/
+	if (grsec_enable_setxid && !current_is_single_threaded() &&
+	    uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
+	    !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
+		schedule_it = 1;
+	}
+	ret = __commit_creds(new);
+	if (schedule_it) {
+		rcu_read_lock();
+		read_lock(&tasklist_lock);
+		for (t = next_thread(current); t != current;
+		     t = next_thread(t)) {
+			if (t->delayed_cred == NULL) {
+				t->delayed_cred = get_cred(new);
+				set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
+				set_tsk_need_resched(t);
+			}
+		}
+		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
+	}
+	return ret;
+#else
+	return __commit_creds(new);
+#endif
+}
+
 EXPORT_SYMBOL(commit_creds);
 
 /**
diff -ruNp linux-3.13.11/kernel/debug/debug_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/debug/debug_core.c
--- linux-3.13.11/kernel/debug/debug_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/debug/debug_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
  */
 static atomic_t			masters_in_kgdb;
 static atomic_t			slaves_in_kgdb;
-static atomic_t			kgdb_break_tasklet_var;
+static atomic_unchecked_t	kgdb_break_tasklet_var;
 atomic_t			kgdb_setting_breakpoint;
 
 struct task_struct		*kgdb_usethread;
@@ -133,7 +133,7 @@ int				kgdb_single_step;
 static pid_t			kgdb_sstep_pid;
 
 /* to keep track of the CPU which is doing the single stepping*/
-atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+atomic_unchecked_t		kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
 
 /*
  * If you are debugging a problem where roundup (the collection of
@@ -541,7 +541,7 @@ return_normal:
 	 * kernel will only try for the value of sstep_tries before
 	 * giving up and continuing on.
 	 */
-	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+	if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
@@ -639,8 +639,8 @@ cpu_master_loop:
 	}
 
 kgdb_restore:
-	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
-		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+	if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
+		int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
 		if (kgdb_info[sstep_cpu].task)
 			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
 		else
@@ -916,18 +916,18 @@ static void kgdb_unregister_callbacks(vo
 static void kgdb_tasklet_bpt(unsigned long ing)
 {
 	kgdb_breakpoint();
-	atomic_set(&kgdb_break_tasklet_var, 0);
+	atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
 }
 
 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
 
 void kgdb_schedule_breakpoint(void)
 {
-	if (atomic_read(&kgdb_break_tasklet_var) ||
+	if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
 		atomic_read(&kgdb_active) != -1 ||
 		atomic_read(&kgdb_setting_breakpoint))
 		return;
-	atomic_inc(&kgdb_break_tasklet_var);
+	atomic_inc_unchecked(&kgdb_break_tasklet_var);
 	tasklet_schedule(&kgdb_tasklet_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
diff -ruNp linux-3.13.11/kernel/debug/kdb/kdb_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/debug/kdb/kdb_main.c
--- linux-3.13.11/kernel/debug/kdb/kdb_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/debug/kdb/kdb_main.c	2014-07-09 12:00:15.000000000 +0200
@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const cha
 			continue;
 
 		kdb_printf("%-20s%8u  0x%p ", mod->name,
-			   mod->core_size, (void *)mod);
+			   mod->core_size_rx + mod->core_size_rw, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
 		kdb_printf("%4ld ", module_refcount(mod));
 #endif
@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const cha
 			kdb_printf(" (Loading)");
 		else
 			kdb_printf(" (Live)");
-		kdb_printf(" 0x%p", mod->module_core);
+		kdb_printf(" 0x%p 0x%p", mod->module_core_rx,  mod->module_core_rw);
 
 #ifdef CONFIG_MODULE_UNLOAD
 		{
diff -ruNp linux-3.13.11/kernel/events/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/core.c
--- linux-3.13.11/kernel/events/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
+#else
+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -184,7 +191,7 @@ void update_perf_cpu_limits(void)
 
 	tmp *= sysctl_perf_cpu_time_max_percent;
 	do_div(tmp, 100);
-	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
+	ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
 }
 
 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
@@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_l
 	update_perf_cpu_limits();
 }
 
-static atomic64_t perf_event_id;
+static atomic64_unchecked_t perf_event_id;
 
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			      enum event_type_t event_type);
@@ -2985,7 +2992,7 @@ static void __perf_event_read(void *info
 
 static inline u64 perf_event_count(struct perf_event *event)
 {
-	return local64_read(&event->count) + atomic64_read(&event->child_count);
+	return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -3353,9 +3360,9 @@ u64 perf_event_read_value(struct perf_ev
 	mutex_lock(&event->child_mutex);
 	total += perf_event_read(event);
 	*enabled += event->total_time_enabled +
-			atomic64_read(&event->child_total_time_enabled);
+			atomic64_read_unchecked(&event->child_total_time_enabled);
 	*running += event->total_time_running +
-			atomic64_read(&event->child_total_time_running);
+			atomic64_read_unchecked(&event->child_total_time_running);
 
 	list_for_each_entry(child, &event->child_list, child_list) {
 		total += perf_event_read(child);
@@ -3770,10 +3777,10 @@ void perf_event_update_userpage(struct p
 		userpg->offset -= local64_read(&event->hw.prev_count);
 
 	userpg->time_enabled = enabled +
-			atomic64_read(&event->child_total_time_enabled);
+			atomic64_read_unchecked(&event->child_total_time_enabled);
 
 	userpg->time_running = running +
-			atomic64_read(&event->child_total_time_running);
+			atomic64_read_unchecked(&event->child_total_time_running);
 
 	arch_perf_update_userpage(userpg, now);
 
@@ -4324,7 +4331,7 @@ perf_output_sample_ustack(struct perf_ou
 
 		/* Data. */
 		sp = perf_user_stack_pointer(regs);
-		rem = __output_copy_user(handle, (void *) sp, dump_size);
+		rem = __output_copy_user(handle, (void __user *) sp, dump_size);
 		dyn_size = dump_size - rem;
 
 		perf_output_skip(handle, rem);
@@ -4415,11 +4422,11 @@ static void perf_output_read_one(struct
 	values[n++] = perf_event_count(event);
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
 		values[n++] = enabled +
-			atomic64_read(&event->child_total_time_enabled);
+			atomic64_read_unchecked(&event->child_total_time_enabled);
 	}
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
 		values[n++] = running +
-			atomic64_read(&event->child_total_time_running);
+			atomic64_read_unchecked(&event->child_total_time_running);
 	}
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
@@ -6686,7 +6693,7 @@ perf_event_alloc(struct perf_event_attr
 	event->parent		= parent_event;
 
 	event->ns		= get_pid_ns(task_active_pid_ns(current));
-	event->id		= atomic64_inc_return(&perf_event_id);
+	event->id		= atomic64_inc_return_unchecked(&perf_event_id);
 
 	event->state		= PERF_EVENT_STATE_INACTIVE;
 
@@ -6985,6 +6992,11 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+#endif
+
 	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
@@ -7316,10 +7328,10 @@ static void sync_child_event(struct perf
 	/*
 	 * Add back the child's count to the parent's count:
 	 */
-	atomic64_add(child_val, &parent_event->child_count);
-	atomic64_add(child_event->total_time_enabled,
+	atomic64_add_unchecked(child_val, &parent_event->child_count);
+	atomic64_add_unchecked(child_event->total_time_enabled,
 		     &parent_event->child_total_time_enabled);
-	atomic64_add(child_event->total_time_running,
+	atomic64_add_unchecked(child_event->total_time_running,
 		     &parent_event->child_total_time_running);
 
 	/*
diff -ruNp linux-3.13.11/kernel/events/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/internal.h
--- linux-3.13.11/kernel/events/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -81,10 +81,10 @@ static inline unsigned long perf_data_si
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user)		\
 static inline unsigned long						\
 func_name(struct perf_output_handle *handle,				\
-	  const void *buf, unsigned long len)				\
+	  const void user *buf, unsigned long len)			\
 {									\
 	unsigned long size, written;					\
 									\
@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src
 	return 0;
 }
 
-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
 
 static inline unsigned long
 memcpy_skip(void *dst, const void *src, unsigned long n)
@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src,
 	return 0;
 }
 
-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
 
 #ifndef arch_perf_out_copy_user
 #define arch_perf_out_copy_user arch_perf_out_copy_user
@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const
 }
 #endif
 
-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
 
 /* Callchain handling */
 extern struct perf_callchain_entry *
diff -ruNp linux-3.13.11/kernel/events/uprobes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/uprobes.c
--- linux-3.13.11/kernel/events/uprobes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/events/uprobes.c	2014-07-09 12:00:15.000000000 +0200
@@ -1640,7 +1640,7 @@ static int is_trap_at_addr(struct mm_str
 {
 	struct page *page;
 	uprobe_opcode_t opcode;
-	int result;
+	long result;
 
 	pagefault_disable();
 	result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
diff -ruNp linux-3.13.11/kernel/exit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/exit.c
--- linux-3.13.11/kernel/exit.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/exit.c	2014-07-09 12:00:15.000000000 +0200
@@ -48,6 +48,10 @@
 #include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <linux/perf_event.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/oom.h>
@@ -172,6 +176,10 @@ void release_task(struct task_struct * p
 	struct task_struct *leader;
 	int zap_leader;
 repeat:
+#ifdef CONFIG_NET
+	gr_del_task_from_ip_table(p);
+#endif
+
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
 	rcu_read_lock();
@@ -329,7 +337,7 @@ int allow_signal(int sig)
 	 * know it'll be handled, so that they don't get converted to
 	 * SIGKILL or just silently dropped.
 	 */
-	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
+	current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 	return 0;
@@ -503,15 +511,25 @@ static struct task_struct *find_new_reap
 	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
-	struct task_struct *thread;
+	struct vx_info *vxi = task_get_vx_info(father);
+	struct task_struct *thread = father;
+	struct task_struct *reaper;
 
-	thread = father;
 	while_each_thread(father, thread) {
 		if (thread->flags & PF_EXITING)
 			continue;
 		if (unlikely(pid_ns->child_reaper == father))
 			pid_ns->child_reaper = thread;
-		return thread;
+		reaper = thread;
+		goto out_put;
+	}
+
+	reaper = pid_ns->child_reaper;
+	if (vxi) {
+		BUG_ON(!vxi->vx_reaper);
+		if (vxi->vx_reaper != init_pid_ns.child_reaper &&
+		    vxi->vx_reaper != father)
+			reaper = vxi->vx_reaper;
 	}
 
 	if (unlikely(pid_ns->child_reaper == father)) {
@@ -549,7 +567,9 @@ static struct task_struct *find_new_reap
 		}
 	}
 
-	return pid_ns->child_reaper;
+out_put:
+	put_vx_info(vxi);
+	return reaper;
 }
 
 /*
@@ -607,10 +627,15 @@ static void forget_original_parent(struc
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
 		struct task_struct *t = p;
 		do {
-			t->real_parent = reaper;
+			struct task_struct *new_parent = reaper;
+
+			if (unlikely(p == reaper))
+				new_parent = task_active_pid_ns(p)->child_reaper;
+
+			t->real_parent = new_parent;
 			if (t->parent == father) {
 				BUG_ON(t->ptrace);
-				t->parent = t->real_parent;
+				t->parent = new_parent;
 			}
 			if (t->pdeath_signal)
 				group_send_sig_info(t->pdeath_signal,
@@ -705,6 +730,8 @@ void do_exit(long code)
 	struct task_struct *tsk = current;
 	int group_dead;
 
+	set_fs(USER_DS);
+
 	profile_task_exit(tsk);
 
 	WARN_ON(blk_needs_flush_plug(tsk));
@@ -721,7 +748,6 @@ void do_exit(long code)
 	 * mm_release()->clear_child_tid() from writing to a user-controlled
 	 * kernel address.
 	 */
-	set_fs(USER_DS);
 
 	ptrace_event(PTRACE_EVENT_EXIT, code);
 
@@ -780,6 +806,9 @@ void do_exit(long code)
 	tsk->exit_code = code;
 	taskstats_exit(tsk, group_dead);
 
+	gr_acl_handle_psacct(tsk, code);
+	gr_acl_handle_exit();
+
 	exit_mm(tsk);
 
 	if (group_dead)
@@ -815,6 +844,9 @@ void do_exit(long code)
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
 
+	/* needs to stay before exit_notify() */
+	exit_vx_info_early(tsk, code);
+
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
 	task_lock(tsk);
@@ -868,10 +900,15 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
+	/* needs to stay after exit_notify() */
+	exit_vx_info(tsk, code);
+	exit_nx_info(tsk);
+
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
+	printk("bad task: %p [%lx]\n", current, current->state);
 	BUG();
 	/* Avoid "noreturn function does return".  */
 	for (;;)
@@ -899,7 +936,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
  * Take down every thread in the group.  This is called by fatal signals
  * as well as by sys_exit_group (below).
  */
-void
+__noreturn void
 do_group_exit(int exit_code)
 {
 	struct signal_struct *sig = current->signal;
diff -ruNp linux-3.13.11/kernel/fork.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/fork.c
--- linux-3.13.11/kernel/fork.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/fork.c	2014-07-09 12:00:15.000000000 +0200
@@ -71,6 +71,9 @@
 #include <linux/signalfd.h>
 #include <linux/uprobes.h>
 #include <linux/aio.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -211,6 +214,8 @@ void free_task(struct task_struct *tsk)
 	arch_release_thread_info(tsk->stack);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
+	clr_vx_info(&tsk->vx_info);
+	clr_nx_info(&tsk->nx_info);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
 	arch_release_task_struct(tsk);
@@ -319,7 +324,7 @@ static struct task_struct *dup_task_stru
 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-	tsk->stack_canary = get_random_int();
+	tsk->stack_canary = pax_get_random_long();
 #endif
 
 	/*
@@ -345,12 +350,80 @@ free_tsk:
 }
 
 #ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
+{
+	struct vm_area_struct *tmp;
+	unsigned long charge;
+	struct file *file;
+	int retval;
+
+	charge = 0;
+	if (mpnt->vm_flags & VM_ACCOUNT) {
+		unsigned long len = vma_pages(mpnt);
+
+		if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
+			goto fail_nomem;
+		charge = len;
+	}
+	tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	if (!tmp)
+		goto fail_nomem;
+	*tmp = *mpnt;
+	tmp->vm_mm = mm;
+	INIT_LIST_HEAD(&tmp->anon_vma_chain);
+	retval = vma_dup_policy(mpnt, tmp);
+	if (retval)
+		goto fail_nomem_policy;
+	if (anon_vma_fork(tmp, mpnt))
+		goto fail_nomem_anon_vma_fork;
+	tmp->vm_flags &= ~VM_LOCKED;
+	tmp->vm_next = tmp->vm_prev = NULL;
+	tmp->vm_mirror = NULL;
+	file = tmp->vm_file;
+	if (file) {
+		struct inode *inode = file_inode(file);
+		struct address_space *mapping = file->f_mapping;
+
+		get_file(file);
+		if (tmp->vm_flags & VM_DENYWRITE)
+			atomic_dec(&inode->i_writecount);
+		mutex_lock(&mapping->i_mmap_mutex);
+		if (tmp->vm_flags & VM_SHARED)
+			mapping->i_mmap_writable++;
+		flush_dcache_mmap_lock(mapping);
+		/* insert tmp into the share list, just after mpnt */
+		if (unlikely(tmp->vm_flags & VM_NONLINEAR))
+			vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
+		else
+			vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
+		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
+	}
+
+	/*
+	 * Clear hugetlb-related page reserves for children. This only
+	 * affects MAP_PRIVATE mappings. Faults generated by the child
+	 * are not guaranteed to succeed, even if read-only
+	 */
+	if (is_vm_hugetlb_page(tmp))
+		reset_vma_resv_huge_pages(tmp);
+
+	return tmp;
+
+fail_nomem_anon_vma_fork:
+	mpol_put(vma_policy(tmp));
+fail_nomem_policy:
+	kmem_cache_free(vm_area_cachep, tmp);
+fail_nomem:
+	vm_unacct_memory(charge);
+	return NULL;
+}
+
+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
 	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
-	unsigned long charge;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -379,55 +452,15 @@ static int dup_mmap(struct mm_struct *mm
 
 	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
-		struct file *file;
-
 		if (mpnt->vm_flags & VM_DONTCOPY) {
 			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
 							-vma_pages(mpnt));
 			continue;
 		}
-		charge = 0;
-		if (mpnt->vm_flags & VM_ACCOUNT) {
-			unsigned long len = vma_pages(mpnt);
-
-			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
-				goto fail_nomem;
-			charge = len;
-		}
-		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-		if (!tmp)
-			goto fail_nomem;
-		*tmp = *mpnt;
-		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		retval = vma_dup_policy(mpnt, tmp);
-		if (retval)
-			goto fail_nomem_policy;
-		tmp->vm_mm = mm;
-		if (anon_vma_fork(tmp, mpnt))
-			goto fail_nomem_anon_vma_fork;
-		tmp->vm_flags &= ~VM_LOCKED;
-		tmp->vm_next = tmp->vm_prev = NULL;
-		file = tmp->vm_file;
-		if (file) {
-			struct inode *inode = file_inode(file);
-			struct address_space *mapping = file->f_mapping;
-
-			get_file(file);
-			if (tmp->vm_flags & VM_DENYWRITE)
-				atomic_dec(&inode->i_writecount);
-			mutex_lock(&mapping->i_mmap_mutex);
-			if (tmp->vm_flags & VM_SHARED)
-				mapping->i_mmap_writable++;
-			flush_dcache_mmap_lock(mapping);
-			/* insert tmp into the share list, just after mpnt */
-			if (unlikely(tmp->vm_flags & VM_NONLINEAR))
-				vma_nonlinear_insert(tmp,
-						&mapping->i_mmap_nonlinear);
-			else
-				vma_interval_tree_insert_after(tmp, mpnt,
-							&mapping->i_mmap);
-			flush_dcache_mmap_unlock(mapping);
-			mutex_unlock(&mapping->i_mmap_mutex);
+		tmp = dup_vma(mm, oldmm, mpnt);
+		if (!tmp) {
+			retval = -ENOMEM;
+			goto out;
 		}
 
 		/*
@@ -459,6 +492,31 @@ static int dup_mmap(struct mm_struct *mm
 		if (retval)
 			goto out;
 	}
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
+		struct vm_area_struct *mpnt_m;
+
+		for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
+			BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
+
+			if (!mpnt->vm_mirror)
+				continue;
+
+			if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
+				BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
+				mpnt->vm_mirror = mpnt_m;
+			} else {
+				BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
+				mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
+				mpnt_m->vm_mirror->vm_mirror = mpnt_m;
+				mpnt->vm_mirror->vm_mirror = mpnt;
+			}
+		}
+		BUG_ON(mpnt_m);
+	}
+#endif
+
 	/* a new mm has just been created */
 	arch_dup_mmap(oldmm, mm);
 	retval = 0;
@@ -468,14 +526,6 @@ out:
 	up_write(&oldmm->mmap_sem);
 	uprobe_end_dup_mmap();
 	return retval;
-fail_nomem_anon_vma_fork:
-	mpol_put(vma_policy(tmp));
-fail_nomem_policy:
-	kmem_cache_free(vm_area_cachep, tmp);
-fail_nomem:
-	retval = -ENOMEM;
-	vm_unacct_memory(charge);
-	goto out;
 }
 
 static inline int mm_alloc_pgd(struct mm_struct *mm)
@@ -542,6 +592,7 @@ static struct mm_struct *mm_init(struct
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
 		mmu_notifier_mm_init(mm);
+		set_vx_info(&mm->mm_vx_info, p->vx_info);
 		return mm;
 	}
 
@@ -594,6 +645,7 @@ void __mmdrop(struct mm_struct *mm)
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
 	check_mm(mm);
+	clr_vx_info(&mm->mm_vx_info);
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -689,8 +741,8 @@ struct mm_struct *mm_access(struct task_
 		return ERR_PTR(err);
 
 	mm = get_task_mm(task);
-	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
+	if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
+		  (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
@@ -813,6 +865,7 @@ struct mm_struct *dup_mm(struct task_str
 		goto fail_nomem;
 
 	memcpy(mm, oldmm, sizeof(*mm));
+	mm->mm_vx_info = NULL;
 	mm_init_cpumask(mm);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
@@ -851,6 +904,7 @@ fail_nocontext:
 	 * If init_new_context() failed, we cannot use mmput() to free the mm
 	 * because it calls destroy_context()
 	 */
+	clr_vx_info(&mm->mm_vx_info);
 	mm_free_pgd(mm);
 	free_mm(mm);
 	return NULL;
@@ -909,13 +963,20 @@ static int copy_fs(unsigned long clone_f
 			spin_unlock(&fs->lock);
 			return -EAGAIN;
 		}
-		fs->users++;
+		atomic_inc(&fs->users);
 		spin_unlock(&fs->lock);
 		return 0;
 	}
 	tsk->fs = copy_fs_struct(fs);
 	if (!tsk->fs)
 		return -ENOMEM;
+	/* Carry through gr_chroot_dentry and is_chrooted instead
+	   of recomputing it here.  Already copied when the task struct
+	   is duplicated.  This allows pivot_root to not be treated as
+	   a chroot
+	*/
+	//gr_set_chroot_entries(tsk, &tsk->fs->root);
+
 	return 0;
 }
 
@@ -1126,7 +1187,7 @@ init_task_pid(struct task_struct *task,
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
 					unsigned long stack_start,
 					unsigned long stack_size,
 					int __user *child_tidptr,
@@ -1135,6 +1196,8 @@ static struct task_struct *copy_process(
 {
 	int retval;
 	struct task_struct *p;
+	struct vx_info *vxi;
+	struct nx_info *nxi;
 
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
@@ -1197,7 +1260,16 @@ static struct task_struct *copy_process(
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
+	init_vx_info(&p->vx_info, current_vx_info());
+	init_nx_info(&p->nx_info, current_nx_info());
+
 	retval = -EAGAIN;
+
+	if (!vx_nproc_avail(1))
+		goto bad_fork_free;
+
+	gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
+
 	if (atomic_read(&p->real_cred->user->processes) >=
 			task_rlimit(p, RLIMIT_NPROC)) {
 		if (p->real_cred->user != INIT_USER &&
@@ -1446,6 +1518,11 @@ static struct task_struct *copy_process(
 		goto bad_fork_free_pid;
 	}
 
+	/* synchronizes with gr_set_acls()
+	   we need to call this past the point of no return for fork()
+	*/
+	gr_copy_label(p);
+
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
@@ -1479,6 +1556,18 @@ static struct task_struct *copy_process(
 
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
+
+	/* p is copy of current */
+	vxi = p->vx_info;
+	if (vxi) {
+		claim_vx_info(vxi, p);
+		atomic_inc(&vxi->cvirt.nr_threads);
+		atomic_inc(&vxi->cvirt.total_forks);
+		vx_nproc_inc(p);
+	}
+	nxi = p->nx_info;
+	if (nxi)
+		claim_nx_info(nxi, p);
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
@@ -1532,6 +1621,8 @@ bad_fork_cleanup_count:
 bad_fork_free:
 	free_task(p);
 fork_out:
+	gr_log_forkfail(retval);
+
 	return ERR_PTR(retval);
 }
 
@@ -1593,6 +1684,7 @@ long do_fork(unsigned long clone_flags,
 
 	p = copy_process(clone_flags, stack_start, stack_size,
 			 child_tidptr, NULL, trace);
+	add_latent_entropy();
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
@@ -1607,6 +1699,8 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_PARENT_SETTID)
 			put_user(nr, parent_tidptr);
 
+		gr_handle_brute_check();
+
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
@@ -1723,7 +1817,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
-	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
 	mmap_init();
 	nsproxy_cache_init();
 }
@@ -1763,7 +1857,7 @@ static int unshare_fs(unsigned long unsh
 		return 0;
 
 	/* don't need lock here; in the worst case we'll do useless copy */
-	if (fs->users == 1)
+	if (atomic_read(&fs->users) == 1)
 		return 0;
 
 	*new_fsp = copy_fs_struct(fs);
@@ -1870,7 +1964,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
 			fs = current->fs;
 			spin_lock(&fs->lock);
 			current->fs = new_fs;
-			if (--fs->users)
+			gr_set_chroot_entries(current, &current->fs->root);
+			if (atomic_dec_return(&fs->users))
 				new_fs = NULL;
 			else
 				new_fs = fs;
diff -ruNp linux-3.13.11/kernel/futex.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/futex.c
--- linux-3.13.11/kernel/futex.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/futex.c	2014-07-09 12:00:15.000000000 +0200
@@ -54,6 +54,7 @@
 #include <linux/mount.h>
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
+#include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/export.h>
 #include <linux/magic.h>
@@ -245,6 +246,11 @@ get_futex_key(u32 __user *uaddr, int fsh
 	struct page *page, *page_head;
 	int err, ro = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
+		return -EFAULT;
+#endif
+
 	/*
 	 * The futex address must be "naturally" aligned.
 	 */
@@ -444,7 +450,7 @@ static int cmpxchg_futex_value_locked(u3
 
 static int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
-	int ret;
+	unsigned long ret;
 
 	pagefault_disable();
 	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
@@ -2737,6 +2743,7 @@ static void __init futex_detect_cmpxchg(
 {
 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 	u32 curval;
+	mm_segment_t oldfs;
 
 	/*
 	 * This will fail and we want it. Some arch implementations do
@@ -2748,8 +2755,11 @@ static void __init futex_detect_cmpxchg(
 	 * implementation, the non-functional ones will return
 	 * -ENOSYS.
 	 */
+	oldfs = get_fs();
+	set_fs(USER_DS);
 	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
 		futex_cmpxchg_enabled = 1;
+	set_fs(oldfs);
 #endif
 }
 
diff -ruNp linux-3.13.11/kernel/futex_compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/futex_compat.c
--- linux-3.13.11/kernel/futex_compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/futex_compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry
 	return 0;
 }
 
-static void __user *futex_uaddr(struct robust_list __user *entry,
+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
 				compat_long_t futex_offset)
 {
 	compat_uptr_t base = ptr_to_compat(entry);
diff -ruNp linux-3.13.11/kernel/gcov/base.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/gcov/base.c
--- linux-3.13.11/kernel/gcov/base.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/gcov/base.c	2014-07-09 12:00:15.000000000 +0200
@@ -108,11 +108,6 @@ void gcov_enable_events(void)
 }
 
 #ifdef CONFIG_MODULES
-static inline int within(void *addr, void *start, unsigned long size)
-{
-	return ((addr >= start) && (addr < start + size));
-}
-
 /* Update list and generate events when modules are unloaded. */
 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
 				void *data)
@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct n
 
 	/* Remove entries located in module from linked list. */
 	while ((info = gcov_info_next(info))) {
-		if (within(info, mod->module_core, mod->core_size)) {
+		if (within_module_core_rw((unsigned long)info, mod)) {
 			gcov_info_unlink(prev, info);
 			if (gcov_events_enabled)
 				gcov_event(GCOV_REMOVE, info);
diff -ruNp linux-3.13.11/kernel/hrtimer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/hrtimer.c
--- linux-3.13.11/kernel/hrtimer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/hrtimer.c	2014-07-09 12:00:15.000000000 +0200
@@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
 	local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
+static __latent_entropy void run_hrtimer_softirq(void)
 {
 	hrtimer_peek_ahead_timers();
 }
diff -ruNp linux-3.13.11/kernel/irq_work.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/irq_work.c
--- linux-3.13.11/kernel/irq_work.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/irq_work.c	2014-07-09 12:00:15.000000000 +0200
@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct no
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpu_notify;
+static struct notifier_block cpu_notify = {
+	.notifier_call = irq_work_cpu_notify,
+	.priority = 0,
+};
 
 static __init int irq_work_init_cpu_notifier(void)
 {
-	cpu_notify.notifier_call = irq_work_cpu_notify;
-	cpu_notify.priority = 0;
 	register_cpu_notifier(&cpu_notify);
 	return 0;
 }
diff -ruNp linux-3.13.11/kernel/jump_label.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/jump_label.c
--- linux-3.13.11/kernel/jump_label.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/jump_label.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/static_key.h>
 #include <linux/jump_label_ratelimit.h>
+#include <linux/mm.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entr
 
 	size = (((unsigned long)stop - (unsigned long)start)
 					/ sizeof(struct jump_entry));
+	pax_open_kernel();
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+	pax_close_kernel();
 }
 
 static void jump_label_update(struct static_key *key, int enable);
@@ -363,10 +366,12 @@ static void jump_label_invalidate_module
 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
 
+	pax_open_kernel();
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		if (within_module_init(iter->code, mod))
 			iter->code = 0;
 	}
+	pax_close_kernel();
 }
 
 static int
diff -ruNp linux-3.13.11/kernel/kallsyms.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kallsyms.c
--- linux-3.13.11/kernel/kallsyms.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kallsyms.c	2014-07-09 12:00:15.000000000 +0200
@@ -11,6 +11,9 @@
  *      Changed the compression method from stem compression to "table lookup"
  *      compression (see scripts/kallsyms.c for a more complete description)
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kallsyms.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
+	if (system_state != SYSTEM_BOOTING)
+		return 0;
+
 	if (addr >= (unsigned long)_sinittext
 	    && addr <= (unsigned long)_einittext)
 		return 1;
 	return 0;
 }
 
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#ifdef CONFIG_MODULES
+static inline int is_module_text(unsigned long addr)
+{
+	if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
+		return 1;
+
+	addr = ktla_ktva(addr);
+	return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
+}
+#else
+static inline int is_module_text(unsigned long addr)
+{
+	return 0;
+}
+#endif
+#endif
+
 static inline int is_kernel_text(unsigned long addr)
 {
 	if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
 
 static inline int is_kernel(unsigned long addr)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	if (is_kernel_text(addr) || is_kernel_inittext(addr))
+		return 1;
+
+	if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
+#else
 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+#endif
+
 		return 1;
 	return in_gate_area_no_mm(addr);
 }
 
 static int is_ksym_addr(unsigned long addr)
 {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	if (is_module_text(addr))
+		return 0;
+#endif
+
 	if (all_var)
 		return is_kernel(addr);
 
@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(st
 
 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
 {
-	iter->name[0] = '\0';
 	iter->nameoff = get_symbol_offset(new_pos);
 	iter->pos = new_pos;
 }
@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, vo
 {
 	struct kallsym_iter *iter = m->private;
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
+		return 0;
+#endif
+
 	/* Some debugging symbols have no name.  Ignore them. */
 	if (!iter->name[0])
 		return 0;
@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, vo
 		 */
 		type = iter->exported ? toupper(iter->type) :
 					tolower(iter->type);
+
 		seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
 			   type, iter->name, iter->module_name);
 	} else
@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *i
 	struct kallsym_iter *iter;
 	int ret;
 
-	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
 		return -ENOMEM;
 	reset_iter(iter, 0);
diff -ruNp linux-3.13.11/kernel/kcmp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kcmp.c
--- linux-3.13.11/kernel/kcmp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kcmp.c	2014-07-09 12:00:15.000000000 +0200
@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t
 	struct task_struct *task1, *task2;
 	int ret;
 
+#ifdef CONFIG_GRKERNSEC
+	return -ENOSYS;
+#endif
+
 	rcu_read_lock();
 
 	/*
diff -ruNp linux-3.13.11/kernel/kexec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kexec.c
--- linux-3.13.11/kernel/kexec.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kexec.c	2014-07-09 12:00:15.000000000 +0200
@@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(un
 				unsigned long flags)
 {
 	struct compat_kexec_segment in;
-	struct kexec_segment out, __user *ksegments;
+	struct kexec_segment out;
+	struct kexec_segment __user *ksegments;
 	unsigned long i, result;
 
 	/* Don't allow clients that don't understand the native
diff -ruNp linux-3.13.11/kernel/kmod.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kmod.c
--- linux-3.13.11/kernel/kmod.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kmod.c	2014-07-09 12:00:15.000000000 +0200
@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct su
 	kfree(info->argv);
 }
 
-static int call_modprobe(char *module_name, int wait)
+static int call_modprobe(char *module_name, char *module_param, int wait)
 {
 	struct subprocess_info *info;
 	static char *envp[] = {
@@ -85,7 +85,7 @@ static int call_modprobe(char *module_na
 		NULL
 	};
 
-	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
+	char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
 	if (!argv)
 		goto out;
 
@@ -97,7 +97,8 @@ static int call_modprobe(char *module_na
 	argv[1] = "-q";
 	argv[2] = "--";
 	argv[3] = module_name;	/* check free_modprobe_argv() */
-	argv[4] = NULL;
+	argv[4] = module_param;
+	argv[5] = NULL;
 
 	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
 					 NULL, free_modprobe_argv, NULL);
@@ -129,9 +130,8 @@ out:
  * If module auto-loading support is disabled then this function
  * becomes a no-operation.
  */
-int __request_module(bool wait, const char *fmt, ...)
+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
 {
-	va_list args;
 	char module_name[MODULE_NAME_LEN];
 	unsigned int max_modprobes;
 	int ret;
@@ -150,9 +150,7 @@ int __request_module(bool wait, const ch
 	if (!modprobe_path[0])
 		return 0;
 
-	va_start(args, fmt);
-	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
-	va_end(args);
+	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
 	if (ret >= MODULE_NAME_LEN)
 		return -ENAMETOOLONG;
 
@@ -160,6 +158,20 @@ int __request_module(bool wait, const ch
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
+		/* hack to workaround consolekit/udisks stupidity */
+		read_lock(&tasklist_lock);
+		if (!strcmp(current->comm, "mount") &&
+		    current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
+			read_unlock(&tasklist_lock);
+			printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
+			return -EPERM;
+		}
+		read_unlock(&tasklist_lock);
+	}
+#endif
+
 	/* If modprobe needs a service that is in a module, we get a recursive
 	 * loop.  Limit the number of running kmod threads to max_threads/2 or
 	 * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
@@ -188,11 +200,52 @@ int __request_module(bool wait, const ch
 
 	trace_module_request(module_name, wait, _RET_IP_);
 
-	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+	ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
 
 	atomic_dec(&kmod_concurrent);
 	return ret;
 }
+
+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
+{
+	va_list args;
+	int ret;
+
+	va_start(args, fmt);
+	ret = ____request_module(wait, module_param, fmt, args);
+	va_end(args);
+
+	return ret;
+}
+
+int __request_module(bool wait, const char *fmt, ...)
+{
+	va_list args;
+	int ret;
+
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
+		char module_param[MODULE_NAME_LEN];
+
+		memset(module_param, 0, sizeof(module_param));
+
+		snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
+
+		va_start(args, fmt);
+		ret = ____request_module(wait, module_param, fmt, args);
+		va_end(args);
+
+		return ret;
+	}
+#endif
+
+	va_start(args, fmt);
+	ret = ____request_module(wait, NULL, fmt, args);
+	va_end(args);
+
+	return ret;
+}
+
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void
 	 */
 	set_user_nice(current, 0);
 
+#ifdef CONFIG_GRKERNSEC
+	/* this is race-free as far as userland is concerned as we copied
+	   out the path to be used prior to this point and are now operating
+	   on that copy
+	*/
+	if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
+	     strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
+	     strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
+		printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
+		retval = -EPERM;
+		goto fail;
+	}
+#endif
+
 	retval = -ENOMEM;
 	new = prepare_kernel_cred(current);
 	if (!new)
@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void
 	commit_creds(new);
 
 	retval = do_execve(sub_info->path,
-			   (const char __user *const __user *)sub_info->argv,
-			   (const char __user *const __user *)sub_info->envp);
+			   (const char __user *const __force_user *)sub_info->argv,
+			   (const char __user *const __force_user *)sub_info->envp);
 	if (!retval)
 		return 0;
 
@@ -260,6 +327,10 @@ static int call_helper(void *data)
 
 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
+#ifdef CONFIG_GRKERNSEC
+	kfree(info->path);
+	info->path = info->origpath;
+#endif
 	if (info->cleanup)
 		(*info->cleanup)(info);
 	kfree(info);
@@ -303,7 +374,7 @@ static int wait_for_helper(void *data)
 		 *
 		 * Thus the __user pointer cast is valid here.
 		 */
-		sys_wait4(pid, (int __user *)&ret, 0, NULL);
+		sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
 
 		/*
 		 * If ret is 0, either ____call_usermodehelper failed and the
@@ -542,7 +613,12 @@ struct subprocess_info *call_usermodehel
 		goto out;
 
 	INIT_WORK(&sub_info->work, __call_usermodehelper);
+#ifdef CONFIG_GRKERNSEC
+	sub_info->origpath = path;
+	sub_info->path = kstrdup(path, gfp_mask);
+#else
 	sub_info->path = path;
+#endif
 	sub_info->argv = argv;
 	sub_info->envp = envp;
 
@@ -650,7 +726,7 @@ EXPORT_SYMBOL(call_usermodehelper);
 static int proc_cap_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table t;
+	ctl_table_no_const t;
 	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
 	kernel_cap_t new_cap;
 	int err, i;
diff -ruNp linux-3.13.11/kernel/kprobes.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kprobes.c
--- linux-3.13.11/kernel/kprobes.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kprobes.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,9 @@
  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  *		<prasanna@in.ibm.com> added function-return probes.
  */
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <linux/kprobes.h>
 #include <linux/hash.h>
 #include <linux/init.h>
@@ -135,12 +138,12 @@ enum kprobe_slot_state {
 
 static void *alloc_insn_page(void)
 {
-	return module_alloc(PAGE_SIZE);
+	return module_alloc_exec(PAGE_SIZE);
 }
 
 static void free_insn_page(void *page)
 {
-	module_free(NULL, page);
+	module_free_exec(NULL, page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struc
 		kprobe_type = "k";
 
 	if (sym)
-		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
+		seq_printf(pi, "%pK  %s  %s+0x%x  %s ",
 			p->addr, kprobe_type, sym, offset,
 			(modname ? modname : " "));
 	else
-		seq_printf(pi, "%p  %s  %p ",
+		seq_printf(pi, "%pK  %s  %pK ",
 			p->addr, kprobe_type, p->addr);
 
 	if (!pp)
diff -ruNp linux-3.13.11/kernel/ksysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/ksysfs.c
--- linux-3.13.11/kernel/ksysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/ksysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struc
 {
 	if (count+1 > UEVENT_HELPER_PATH_LEN)
 		return -ENOENT;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	memcpy(uevent_helper, buf, count);
 	uevent_helper[count] = '\0';
 	if (count && uevent_helper[count-1] == '\n')
@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *f
 	return count;
 }
 
-static struct bin_attribute notes_attr = {
+static bin_attribute_no_const notes_attr __read_only = {
 	.attr = {
 		.name = "notes",
 		.mode = S_IRUGO,
diff -ruNp linux-3.13.11/kernel/kthread.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kthread.c
--- linux-3.13.11/kernel/kthread.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/kthread.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/vs_pid.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
diff -ruNp linux-3.13.11/kernel/locking/lockdep.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/lockdep.c
--- linux-3.13.11/kernel/locking/lockdep.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/lockdep.c	2014-07-09 12:00:15.000000000 +0200
@@ -596,6 +596,10 @@ static int static_obj(void *obj)
 		      end   = (unsigned long) &_end,
 		      addr  = (unsigned long) obj;
 
+#ifdef CONFIG_PAX_KERNEXEC
+	start = ktla_ktva(start);
+#endif
+
 	/*
 	 * static variable?
 	 */
@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *
 	if (!static_obj(lock->key)) {
 		debug_locks_off();
 		printk("INFO: trying to register non-static key.\n");
+		printk("lock:%pS key:%pS.\n", lock, lock->key);
 		printk("the code is fine but needs lockdep annotation.\n");
 		printk("turning off the locking correctness validator.\n");
 		dump_stack();
@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep
 		if (!class)
 			return 0;
 	}
-	atomic_inc((atomic_t *)&class->ops);
+	atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
 	if (very_verbose(class)) {
 		printk("\nacquire class [%p] %s", class->key, class->name);
 		if (class->name_version > 1)
diff -ruNp linux-3.13.11/kernel/locking/lockdep_proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/lockdep_proc.c
--- linux-3.13.11/kernel/locking/lockdep_proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/lockdep_proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, vo
 		return 0;
 	}
 
-	seq_printf(m, "%p", class->key);
+	seq_printf(m, "%pK", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, vo
 
 	list_for_each_entry(entry, &class->locks_after, entry) {
 		if (entry->distance == 1) {
-			seq_printf(m, " -> [%p] ", entry->class->key);
+			seq_printf(m, " -> [%pK] ", entry->class->key);
 			print_name(m, entry->class);
 			seq_puts(m, "\n");
 		}
@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, v
 		if (!class->key)
 			continue;
 
-		seq_printf(m, "[%p] ", class->key);
+		seq_printf(m, "[%pK] ", class->key);
 		print_name(m, class);
 		seq_puts(m, "\n");
 	}
@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m
 		if (!i)
 			seq_line(m, '-', 40-namelen, namelen);
 
-		snprintf(ip, sizeof(ip), "[<%p>]",
+		snprintf(ip, sizeof(ip), "[<%pK>]",
 				(void *)class->contention_point[i]);
 		seq_printf(m, "%40s %14lu %29s %pS\n",
 			   name, stats->contention_point[i],
@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m
 		if (!i)
 			seq_line(m, '-', 40-namelen, namelen);
 
-		snprintf(ip, sizeof(ip), "[<%p>]",
+		snprintf(ip, sizeof(ip), "[<%pK>]",
 				(void *)class->contending_point[i]);
 		seq_printf(m, "%40s %14lu %29s %pS\n",
 			   name, stats->contending_point[i],
diff -ruNp linux-3.13.11/kernel/locking/mutex-debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex-debug.c
--- linux-3.13.11/kernel/locking/mutex-debug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex-debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			    struct thread_info *ti)
+			    struct task_struct *task)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
 	/* Mark the current thread as blocked on the lock: */
-	ti->task->blocked_on = waiter;
+	task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			 struct thread_info *ti)
+			 struct task_struct *task)
 {
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-	DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-	DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-	ti->task->blocked_on = NULL;
+	DEBUG_LOCKS_WARN_ON(waiter->task != task);
+	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+	task->blocked_on = NULL;
 
 	list_del_init(&waiter->list);
 	waiter->task = NULL;
diff -ruNp linux-3.13.11/kernel/locking/mutex-debug.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex-debug.h
--- linux-3.13.11/kernel/locking/mutex-debug.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex-debug.h	2014-07-09 12:00:15.000000000 +0200
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(stru
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
-				   struct thread_info *ti);
+				   struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-				struct thread_info *ti);
+				struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
diff -ruNp linux-3.13.11/kernel/locking/mutex.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex.c
--- linux-3.13.11/kernel/locking/mutex.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/mutex.c	2014-07-09 12:00:15.000000000 +0200
@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock
 		node->locked = 1;
 		return;
 	}
-	ACCESS_ONCE(prev->next) = node;
+	ACCESS_ONCE_RW(prev->next) = node;
 	smp_wmb();
 	/* Wait until the lock holder passes the lock down */
 	while (!ACCESS_ONCE(node->locked))
@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_no
 		while (!(next = ACCESS_ONCE(node->next)))
 			arch_mutex_cpu_relax();
 	}
-	ACCESS_ONCE(next->locked) = 1;
+	ACCESS_ONCE_RW(next->locked) = 1;
 	smp_wmb();
 }
 
@@ -520,7 +520,7 @@ slowpath:
 		goto skip_wait;
 
 	debug_mutex_lock_common(lock, &waiter);
-	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_add_waiter(lock, &waiter, task);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -564,7 +564,7 @@ slowpath:
 		schedule_preempt_disabled();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
-	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_remove_waiter(lock, &waiter, task);
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
@@ -601,7 +601,7 @@ skip_wait:
 	return 0;
 
 err:
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, task);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, 1, ip);
diff -ruNp linux-3.13.11/kernel/locking/rtmutex-tester.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/rtmutex-tester.c
--- linux-3.13.11/kernel/locking/rtmutex-tester.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/locking/rtmutex-tester.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,7 +22,7 @@
 #define MAX_RT_TEST_MUTEXES	8
 
 static spinlock_t rttest_lock;
-static atomic_t rttest_event;
+static atomic_unchecked_t rttest_event;
 
 struct test_thread_data {
 	int			opcode;
@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_
 
 	case RTTEST_LOCKCONT:
 		td->mutexes[td->opdata] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		return 0;
 
 	case RTTEST_RESET:
@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_
 		return 0;
 
 	case RTTEST_RESETEVENT:
-		atomic_set(&rttest_event, 0);
+		atomic_set_unchecked(&rttest_event, 0);
 		return 0;
 
 	default:
@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_
 			return ret;
 
 		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		rt_mutex_lock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		td->mutexes[id] = 4;
 		return 0;
 
@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_
 			return ret;
 
 		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		td->mutexes[id] = ret ? 0 : 4;
 		return ret ? -EINTR : 0;
 
@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_
 		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
 			return ret;
 
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		rt_mutex_unlock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		td->mutexes[id] = 0;
 		return 0;
 
@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mu
 			break;
 
 		td->mutexes[dat] = 2;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		break;
 
 	default:
@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mu
 			return;
 
 		td->mutexes[dat] = 3;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		break;
 
 	case RTTEST_LOCKNOWAIT:
@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mu
 			return;
 
 		td->mutexes[dat] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
+		td->event = atomic_add_return_unchecked(1, &rttest_event);
 		return;
 
 	default:
diff -ruNp linux-3.13.11/kernel/module.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/module.c
--- linux-3.13.11/kernel/module.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/module.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,6 +61,7 @@
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
 #include <linux/fips.h>
+#include <linux/grsecurity.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
 
@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
 
 /* Bounds of module allocation, for speeding __module_address.
  * Protected by module_mutex. */
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
 
 int register_module_notifier(struct notifier_block * nb)
 {
@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(cons
 		return true;
 
 	list_for_each_entry_rcu(mod, &modules, list) {
-		struct symsearch arr[] = {
+		struct symsearch modarr[] = {
 			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 			  NOT_GPL_ONLY, false },
 			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(cons
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
 
-		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
+		if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
 			return true;
 	}
 	return false;
@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module
 	if (!pcpusec->sh_size)
 		return 0;
 
-	if (align > PAGE_SIZE) {
+	if (align-1 >= PAGE_SIZE) {
 		pr_warn("%s: per-cpu alignment %li > %li\n",
 			mod->name, align, PAGE_SIZE);
 		align = PAGE_SIZE;
@@ -1064,7 +1066,7 @@ struct module_attribute module_uevent =
 static ssize_t show_coresize(struct module_attribute *mattr,
 			     struct module_kobject *mk, char *buffer)
 {
-	return sprintf(buffer, "%u\n", mk->mod->core_size);
+	return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
 }
 
 static struct module_attribute modinfo_coresize =
@@ -1073,7 +1075,7 @@ static struct module_attribute modinfo_c
 static ssize_t show_initsize(struct module_attribute *mattr,
 			     struct module_kobject *mk, char *buffer)
 {
-	return sprintf(buffer, "%u\n", mk->mod->init_size);
+	return sprintf(buffer, "%u\n", mk->mod->init_size_rx +  mk->mod->init_size_rw);
 }
 
 static struct module_attribute modinfo_initsize =
@@ -1165,12 +1167,29 @@ static int check_version(Elf_Shdr *sechd
 		goto bad_version;
 	}
 
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+	/*
+	 * avoid potentially printing gibberish on attempted load
+	 * of a module randomized with a different seed
+	 */
+	pr_warn("no symbol version for %s\n", symname);
+#else
 	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
+#endif
 	return 0;
 
 bad_version:
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+	/*
+	 * avoid potentially printing gibberish on attempted load
+	 * of a module randomized with a different seed
+	 */
+	printk("attempted module disagrees about version of symbol %s\n",
+	       symname);
+#else
 	printk("%s: disagrees about version of symbol %s\n",
 	       mod->name, symname);
+#endif
 	return 0;
 }
 
@@ -1286,7 +1305,7 @@ resolve_symbol_wait(struct module *mod,
  */
 #ifdef CONFIG_SYSFS
 
-#ifdef CONFIG_KALLSYMS
+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
 static inline bool sect_empty(const Elf_Shdr *sect)
 {
 	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
@@ -1426,7 +1445,7 @@ static void add_notes_attrs(struct modul
 {
 	unsigned int notes, loaded, i;
 	struct module_notes_attrs *notes_attrs;
-	struct bin_attribute *nattr;
+	bin_attribute_no_const *nattr;
 
 	/* failed to create section attributes, so can't create notes */
 	if (!mod->sect_attrs)
@@ -1538,7 +1557,7 @@ static void del_usage_links(struct modul
 static int module_add_modinfo_attrs(struct module *mod)
 {
 	struct module_attribute *attr;
-	struct module_attribute *temp_attr;
+	module_attribute_no_const *temp_attr;
 	int error = 0;
 	int i;
 
@@ -1759,21 +1778,21 @@ static void set_section_ro_nx(void *base
 
 static void unset_module_core_ro_nx(struct module *mod)
 {
-	set_page_attributes(mod->module_core + mod->core_text_size,
-		mod->module_core + mod->core_size,
+	set_page_attributes(mod->module_core_rw,
+		mod->module_core_rw + mod->core_size_rw,
 		set_memory_x);
-	set_page_attributes(mod->module_core,
-		mod->module_core + mod->core_ro_size,
+	set_page_attributes(mod->module_core_rx,
+		mod->module_core_rx + mod->core_size_rx,
 		set_memory_rw);
 }
 
 static void unset_module_init_ro_nx(struct module *mod)
 {
-	set_page_attributes(mod->module_init + mod->init_text_size,
-		mod->module_init + mod->init_size,
+	set_page_attributes(mod->module_init_rw,
+		mod->module_init_rw + mod->init_size_rw,
 		set_memory_x);
-	set_page_attributes(mod->module_init,
-		mod->module_init + mod->init_ro_size,
+	set_page_attributes(mod->module_init_rx,
+		mod->module_init_rx + mod->init_size_rx,
 		set_memory_rw);
 }
 
@@ -1786,14 +1805,14 @@ void set_all_modules_text_rw(void)
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		if ((mod->module_core) && (mod->core_text_size)) {
-			set_page_attributes(mod->module_core,
-						mod->module_core + mod->core_text_size,
+		if ((mod->module_core_rx) && (mod->core_size_rx)) {
+			set_page_attributes(mod->module_core_rx,
+						mod->module_core_rx + mod->core_size_rx,
 						set_memory_rw);
 		}
-		if ((mod->module_init) && (mod->init_text_size)) {
-			set_page_attributes(mod->module_init,
-						mod->module_init + mod->init_text_size,
+		if ((mod->module_init_rx) && (mod->init_size_rx)) {
+			set_page_attributes(mod->module_init_rx,
+						mod->module_init_rx + mod->init_size_rx,
 						set_memory_rw);
 		}
 	}
@@ -1809,14 +1828,14 @@ void set_all_modules_text_ro(void)
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		if ((mod->module_core) && (mod->core_text_size)) {
-			set_page_attributes(mod->module_core,
-						mod->module_core + mod->core_text_size,
+		if ((mod->module_core_rx) && (mod->core_size_rx)) {
+			set_page_attributes(mod->module_core_rx,
+						mod->module_core_rx + mod->core_size_rx,
 						set_memory_ro);
 		}
-		if ((mod->module_init) && (mod->init_text_size)) {
-			set_page_attributes(mod->module_init,
-						mod->module_init + mod->init_text_size,
+		if ((mod->module_init_rx) && (mod->init_size_rx)) {
+			set_page_attributes(mod->module_init_rx,
+						mod->module_init_rx + mod->init_size_rx,
 						set_memory_ro);
 		}
 	}
@@ -1867,16 +1886,19 @@ static void free_module(struct module *m
 
 	/* This may be NULL, but that's OK */
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
+	module_free(mod, mod->module_init_rw);
+	module_free_exec(mod, mod->module_init_rx);
 	kfree(mod->args);
 	percpu_modfree(mod);
 
 	/* Free lock-classes: */
-	lockdep_free_key_range(mod->module_core, mod->core_size);
+	lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
+	lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
 
 	/* Finally, free the core (containing the module structure) */
 	unset_module_core_ro_nx(mod);
-	module_free(mod, mod->module_core);
+	module_free_exec(mod, mod->module_core_rx);
+	module_free(mod, mod->module_core_rw);
 
 #ifdef CONFIG_MPU
 	update_protections(current->mm);
@@ -1945,9 +1967,31 @@ static int simplify_symbols(struct modul
 	int ret = 0;
 	const struct kernel_symbol *ksym;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	int is_fs_load = 0;
+	int register_filesystem_found = 0;
+	char *p;
+
+	p = strstr(mod->args, "grsec_modharden_fs");
+	if (p) {
+		char *endptr = p + sizeof("grsec_modharden_fs") - 1;
+		/* copy \0 as well */
+		memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
+		is_fs_load = 1;
+	}
+#endif
+
 	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
 		const char *name = info->strtab + sym[i].st_name;
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+		/* it's a real shame this will never get ripped and copied
+		   upstream! ;(
+		*/
+		if (is_fs_load && !strcmp(name, "register_filesystem"))
+			register_filesystem_found = 1;
+#endif
+
 		switch (sym[i].st_shndx) {
 		case SHN_COMMON:
 			/* We compiled with -fno-common.  These are not
@@ -1968,7 +2012,9 @@ static int simplify_symbols(struct modul
 			ksym = resolve_symbol_wait(mod, info, name);
 			/* Ok if resolved.  */
 			if (ksym && !IS_ERR(ksym)) {
+				pax_open_kernel();
 				sym[i].st_value = ksym->value;
+				pax_close_kernel();
 				break;
 			}
 
@@ -1987,11 +2033,20 @@ static int simplify_symbols(struct modul
 				secbase = (unsigned long)mod_percpu(mod);
 			else
 				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
+			pax_open_kernel();
 			sym[i].st_value += secbase;
+			pax_close_kernel();
 			break;
 		}
 	}
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	if (is_fs_load && !register_filesystem_found) {
+		printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
+		ret = -EPERM;
+	}
+#endif
+
 	return ret;
 }
 
@@ -2075,22 +2130,12 @@ static void layout_sections(struct modul
 			    || s->sh_entsize != ~0UL
 			    || strstarts(sname, ".init"))
 				continue;
-			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
+			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
+				s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
+			else
+				s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
 			pr_debug("\t%s\n", sname);
 		}
-		switch (m) {
-		case 0: /* executable */
-			mod->core_size = debug_align(mod->core_size);
-			mod->core_text_size = mod->core_size;
-			break;
-		case 1: /* RO: text and ro-data */
-			mod->core_size = debug_align(mod->core_size);
-			mod->core_ro_size = mod->core_size;
-			break;
-		case 3: /* whole core */
-			mod->core_size = debug_align(mod->core_size);
-			break;
-		}
 	}
 
 	pr_debug("Init section allocation order:\n");
@@ -2104,23 +2149,13 @@ static void layout_sections(struct modul
 			    || s->sh_entsize != ~0UL
 			    || !strstarts(sname, ".init"))
 				continue;
-			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
-					 | INIT_OFFSET_MASK);
+			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
+				s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
+			else
+				s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
+			s->sh_entsize |= INIT_OFFSET_MASK;
 			pr_debug("\t%s\n", sname);
 		}
-		switch (m) {
-		case 0: /* executable */
-			mod->init_size = debug_align(mod->init_size);
-			mod->init_text_size = mod->init_size;
-			break;
-		case 1: /* RO: text and ro-data */
-			mod->init_size = debug_align(mod->init_size);
-			mod->init_ro_size = mod->init_size;
-			break;
-		case 3: /* whole init */
-			mod->init_size = debug_align(mod->init_size);
-			break;
-		}
 	}
 }
 
@@ -2293,7 +2328,7 @@ static void layout_symtab(struct module
 
 	/* Put symbol section at end of init part of module. */
 	symsect->sh_flags |= SHF_ALLOC;
-	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
+	symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
 					 info->index.sym) | INIT_OFFSET_MASK;
 	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
 
@@ -2310,13 +2345,13 @@ static void layout_symtab(struct module
 	}
 
 	/* Append room for core symbols at end of core part. */
-	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
-	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
-	mod->core_size += strtab_size;
+	info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
+	info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
+	mod->core_size_rx += strtab_size;
 
 	/* Put string table section at end of init part of module. */
 	strsect->sh_flags |= SHF_ALLOC;
-	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+	strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
 					 info->index.str) | INIT_OFFSET_MASK;
 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
 }
@@ -2334,12 +2369,14 @@ static void add_kallsyms(struct module *
 	/* Make sure we get permanent strtab: don't use info->strtab. */
 	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
+	pax_open_kernel();
+
 	/* Set types up while we still have access to sections. */
 	for (i = 0; i < mod->num_symtab; i++)
 		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
 
-	mod->core_symtab = dst = mod->module_core + info->symoffs;
-	mod->core_strtab = s = mod->module_core + info->stroffs;
+	mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
+	mod->core_strtab = s = mod->module_core_rx + info->stroffs;
 	src = mod->symtab;
 	for (ndst = i = 0; i < mod->num_symtab; i++) {
 		if (i == 0 ||
@@ -2351,6 +2388,8 @@ static void add_kallsyms(struct module *
 		}
 	}
 	mod->core_num_syms = ndst;
+
+	pax_close_kernel();
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -2384,17 +2423,33 @@ void * __weak module_alloc(unsigned long
 	return vmalloc_exec(size);
 }
 
-static void *module_alloc_update_bounds(unsigned long size)
+static void *module_alloc_update_bounds_rw(unsigned long size)
 {
 	void *ret = module_alloc(size);
 
 	if (ret) {
 		mutex_lock(&module_mutex);
 		/* Update module bounds. */
-		if ((unsigned long)ret < module_addr_min)
-			module_addr_min = (unsigned long)ret;
-		if ((unsigned long)ret + size > module_addr_max)
-			module_addr_max = (unsigned long)ret + size;
+		if ((unsigned long)ret < module_addr_min_rw)
+			module_addr_min_rw = (unsigned long)ret;
+		if ((unsigned long)ret + size > module_addr_max_rw)
+			module_addr_max_rw = (unsigned long)ret + size;
+		mutex_unlock(&module_mutex);
+	}
+	return ret;
+}
+
+static void *module_alloc_update_bounds_rx(unsigned long size)
+{
+	void *ret = module_alloc_exec(size);
+
+	if (ret) {
+		mutex_lock(&module_mutex);
+		/* Update module bounds. */
+		if ((unsigned long)ret < module_addr_min_rx)
+			module_addr_min_rx = (unsigned long)ret;
+		if ((unsigned long)ret + size > module_addr_max_rx)
+			module_addr_max_rx = (unsigned long)ret + size;
 		mutex_unlock(&module_mutex);
 	}
 	return ret;
@@ -2651,7 +2706,15 @@ static struct module *setup_load_info(st
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 
 	if (info->index.sym == 0) {
+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+		/*
+		 * avoid potentially printing gibberish on attempted load
+		 * of a module randomized with a different seed
+		 */
+		pr_warn("module has no symbols (stripped?)\n");
+#else
 		pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
+#endif
 		return ERR_PTR(-ENOEXEC);
 	}
 
@@ -2667,8 +2730,14 @@ static struct module *setup_load_info(st
 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 {
 	const char *modmagic = get_modinfo(info, "vermagic");
+	const char *license = get_modinfo(info, "license");
 	int err;
 
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+	if (!license || !license_is_gpl_compatible(license))
+		return -ENOEXEC;
+#endif
+
 	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
 		modmagic = NULL;
 
@@ -2693,7 +2762,7 @@ static int check_modinfo(struct module *
 	}
 
 	/* Set up license info based on the info section */
-	set_license(mod, get_modinfo(info, "license"));
+	set_license(mod, license);
 
 	return 0;
 }
@@ -2787,7 +2856,7 @@ static int move_module(struct module *mo
 	void *ptr;
 
 	/* Do the allocs. */
-	ptr = module_alloc_update_bounds(mod->core_size);
+	ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
 	/*
 	 * The pointer to this block is stored in the module structure
 	 * which is inside the block. Just mark it as not being a
@@ -2797,11 +2866,11 @@ static int move_module(struct module *mo
 	if (!ptr)
 		return -ENOMEM;
 
-	memset(ptr, 0, mod->core_size);
-	mod->module_core = ptr;
+	memset(ptr, 0, mod->core_size_rw);
+	mod->module_core_rw = ptr;
 
-	if (mod->init_size) {
-		ptr = module_alloc_update_bounds(mod->init_size);
+	if (mod->init_size_rw) {
+		ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
 		/*
 		 * The pointer to this block is stored in the module structure
 		 * which is inside the block. This block doesn't need to be
@@ -2810,13 +2879,45 @@ static int move_module(struct module *mo
 		 */
 		kmemleak_ignore(ptr);
 		if (!ptr) {
-			module_free(mod, mod->module_core);
+			module_free(mod, mod->module_core_rw);
+			return -ENOMEM;
+		}
+		memset(ptr, 0, mod->init_size_rw);
+		mod->module_init_rw = ptr;
+	} else
+		mod->module_init_rw = NULL;
+
+	ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
+	kmemleak_not_leak(ptr);
+	if (!ptr) {
+		if (mod->module_init_rw)
+			module_free(mod, mod->module_init_rw);
+		module_free(mod, mod->module_core_rw);
+		return -ENOMEM;
+	}
+
+	pax_open_kernel();
+	memset(ptr, 0, mod->core_size_rx);
+	pax_close_kernel();
+	mod->module_core_rx = ptr;
+
+	if (mod->init_size_rx) {
+		ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
+		kmemleak_ignore(ptr);
+		if (!ptr && mod->init_size_rx) {
+			module_free_exec(mod, mod->module_core_rx);
+			if (mod->module_init_rw)
+				module_free(mod, mod->module_init_rw);
+			module_free(mod, mod->module_core_rw);
 			return -ENOMEM;
 		}
-		memset(ptr, 0, mod->init_size);
-		mod->module_init = ptr;
+
+		pax_open_kernel();
+		memset(ptr, 0, mod->init_size_rx);
+		pax_close_kernel();
+		mod->module_init_rx = ptr;
 	} else
-		mod->module_init = NULL;
+		mod->module_init_rx = NULL;
 
 	/* Transfer each section which specifies SHF_ALLOC */
 	pr_debug("final section addresses:\n");
@@ -2827,16 +2928,45 @@ static int move_module(struct module *mo
 		if (!(shdr->sh_flags & SHF_ALLOC))
 			continue;
 
-		if (shdr->sh_entsize & INIT_OFFSET_MASK)
-			dest = mod->module_init
-				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
-		else
-			dest = mod->module_core + shdr->sh_entsize;
+		if (shdr->sh_entsize & INIT_OFFSET_MASK) {
+			if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
+				dest = mod->module_init_rw
+					+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+			else
+				dest = mod->module_init_rx
+					+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
+		} else {
+			if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
+				dest = mod->module_core_rw + shdr->sh_entsize;
+			else
+				dest = mod->module_core_rx + shdr->sh_entsize;
+		}
+
+		if (shdr->sh_type != SHT_NOBITS) {
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_X86_64
+			if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
+				set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
+#endif
+			if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
+				pax_open_kernel();
+				memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
+				pax_close_kernel();
+			} else
+#endif
 
-		if (shdr->sh_type != SHT_NOBITS)
 			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
+		}
 		/* Update sh_addr to point to copy in image. */
-		shdr->sh_addr = (unsigned long)dest;
+
+#ifdef CONFIG_PAX_KERNEXEC
+		if (shdr->sh_flags & SHF_EXECINSTR)
+			shdr->sh_addr = ktva_ktla((unsigned long)dest);
+		else
+#endif
+
+			shdr->sh_addr = (unsigned long)dest;
 		pr_debug("\t0x%lx %s\n",
 			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
 	}
@@ -2893,12 +3023,12 @@ static void flush_module_icache(const st
 	 * Do it before processing of module parameters, so the module
 	 * can provide parameter accessor functions of its own.
 	 */
-	if (mod->module_init)
-		flush_icache_range((unsigned long)mod->module_init,
-				   (unsigned long)mod->module_init
-				   + mod->init_size);
-	flush_icache_range((unsigned long)mod->module_core,
-			   (unsigned long)mod->module_core + mod->core_size);
+	if (mod->module_init_rx)
+		flush_icache_range((unsigned long)mod->module_init_rx,
+				   (unsigned long)mod->module_init_rx
+				   + mod->init_size_rx);
+	flush_icache_range((unsigned long)mod->module_core_rx,
+			   (unsigned long)mod->module_core_rx + mod->core_size_rx);
 
 	set_fs(old_fs);
 }
@@ -2955,8 +3085,10 @@ static struct module *layout_and_allocat
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
 	percpu_modfree(mod);
-	module_free(mod, mod->module_init);
-	module_free(mod, mod->module_core);
+	module_free_exec(mod, mod->module_init_rx);
+	module_free_exec(mod, mod->module_core_rx);
+	module_free(mod, mod->module_init_rw);
+	module_free(mod, mod->module_core_rw);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2969,7 +3101,9 @@ int __weak module_finalize(const Elf_Ehd
 static int post_relocation(struct module *mod, const struct load_info *info)
 {
 	/* Sort exception table now relocations are done. */
+	pax_open_kernel();
 	sort_extable(mod->extable, mod->extable + mod->num_exentries);
+	pax_close_kernel();
 
 	/* Copy relocated percpu area over. */
 	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
@@ -3023,16 +3157,16 @@ static int do_init_module(struct module
 			MODULE_STATE_COMING, mod);
 
 	/* Set RO and NX regions for core */
-	set_section_ro_nx(mod->module_core,
-				mod->core_text_size,
-				mod->core_ro_size,
-				mod->core_size);
+	set_section_ro_nx(mod->module_core_rx,
+				mod->core_size_rx,
+				mod->core_size_rx,
+				mod->core_size_rx);
 
 	/* Set RO and NX regions for init */
-	set_section_ro_nx(mod->module_init,
-				mod->init_text_size,
-				mod->init_ro_size,
-				mod->init_size);
+	set_section_ro_nx(mod->module_init_rx,
+				mod->init_size_rx,
+				mod->init_size_rx,
+				mod->init_size_rx);
 
 	do_mod_ctors(mod);
 	/* Start the module */
@@ -3093,11 +3227,12 @@ static int do_init_module(struct module
 	mod->strtab = mod->core_strtab;
 #endif
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
-	mod->module_init = NULL;
-	mod->init_size = 0;
-	mod->init_ro_size = 0;
-	mod->init_text_size = 0;
+	module_free(mod, mod->module_init_rw);
+	module_free_exec(mod, mod->module_init_rx);
+	mod->module_init_rw = NULL;
+	mod->module_init_rx = NULL;
+	mod->init_size_rw = 0;
+	mod->init_size_rx = 0;
 	mutex_unlock(&module_mutex);
 	wake_up_all(&module_wq);
 
@@ -3240,9 +3375,38 @@ static int load_module(struct load_info
 	if (err)
 		goto free_unload;
 
+	/* Now copy in args */
+	mod->args = strndup_user(uargs, ~0UL >> 1);
+	if (IS_ERR(mod->args)) {
+		err = PTR_ERR(mod->args);
+		goto free_unload;
+	}
+
 	/* Set up MODINFO_ATTR fields */
 	setup_modinfo(mod, info);
 
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+	{
+		char *p, *p2;
+
+		if (strstr(mod->args, "grsec_modharden_netdev")) {
+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
+			err = -EPERM;
+			goto free_modinfo;
+		} else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
+			p += sizeof("grsec_modharden_normal") - 1;
+			p2 = strstr(p, "_");
+			if (p2) {
+				*p2 = '\0';
+				printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
+				*p2 = '_';
+			}
+			err = -EPERM;
+			goto free_modinfo;
+		}
+	}
+#endif
+
 	/* Fix up syms, so that st_value is a pointer to location. */
 	err = simplify_symbols(mod, info);
 	if (err < 0)
@@ -3258,13 +3422,6 @@ static int load_module(struct load_info
 
 	flush_module_icache(mod);
 
-	/* Now copy in args */
-	mod->args = strndup_user(uargs, ~0UL >> 1);
-	if (IS_ERR(mod->args)) {
-		err = PTR_ERR(mod->args);
-		goto free_arch_cleanup;
-	}
-
 	dynamic_debug_setup(info->debug, info->num_debug);
 
 	/* Finally it's fully formed, ready to start executing. */
@@ -3299,11 +3456,10 @@ static int load_module(struct load_info
  ddebug_cleanup:
 	dynamic_debug_remove(info->debug);
 	synchronize_sched();
-	kfree(mod->args);
- free_arch_cleanup:
 	module_arch_cleanup(mod);
  free_modinfo:
 	free_modinfo(mod);
+	kfree(mod->args);
  free_unload:
 	module_unload_free(mod);
  unlink_mod:
@@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct mo
 	unsigned long nextval;
 
 	/* At worse, next value is at end of module */
-	if (within_module_init(addr, mod))
-		nextval = (unsigned long)mod->module_init+mod->init_text_size;
+	if (within_module_init_rx(addr, mod))
+		nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
+	else if (within_module_init_rw(addr, mod))
+		nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
+	else if (within_module_core_rx(addr, mod))
+		nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
+	else if (within_module_core_rw(addr, mod))
+		nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
 	else
-		nextval = (unsigned long)mod->module_core+mod->core_text_size;
+		return NULL;
 
 	/* Scan for closest preceding symbol, and next symbol. (ELF
 	   starts real symbols at 1). */
@@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, vo
 		return 0;
 
 	seq_printf(m, "%s %u",
-		   mod->name, mod->init_size + mod->core_size);
+		   mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
 	print_unload_info(m, mod);
 
 	/* Informative for users. */
@@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, vo
 		   mod->state == MODULE_STATE_COMING ? "Loading":
 		   "Live");
 	/* Used by oprofile and other similar tools. */
-	seq_printf(m, " 0x%pK", mod->module_core);
+	seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
 
 	/* Taints info */
 	if (mod->taints)
@@ -3685,7 +3847,17 @@ static const struct file_operations proc
 
 static int __init proc_modules_init(void)
 {
+#ifndef CONFIG_GRKERNSEC_HIDESYM
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
+#else
 	proc_create("modules", 0, NULL, &proc_modules_operations);
+#endif
+#else
+	proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
+#endif
 	return 0;
 }
 module_init(proc_modules_init);
@@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned
 {
 	struct module *mod;
 
-	if (addr < module_addr_min || addr > module_addr_max)
+	if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
+	    (addr < module_addr_min_rw || addr > module_addr_max_rw))
 		return NULL;
 
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
 			continue;
-		if (within_module_core(addr, mod)
-		    || within_module_init(addr, mod))
+		if (within_module_init(addr, mod) || within_module_core(addr, mod))
 			return mod;
 	}
 	return NULL;
@@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned lon
  */
 struct module *__module_text_address(unsigned long addr)
 {
-	struct module *mod = __module_address(addr);
+	struct module *mod;
+
+#ifdef CONFIG_X86_32
+	addr = ktla_ktva(addr);
+#endif
+
+	if (addr < module_addr_min_rx || addr > module_addr_max_rx)
+		return NULL;
+
+	mod = __module_address(addr);
+
 	if (mod) {
 		/* Make sure it's within the text section. */
-		if (!within(addr, mod->module_init, mod->init_text_size)
-		    && !within(addr, mod->module_core, mod->core_text_size))
+		if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
 			mod = NULL;
 	}
 	return mod;
diff -ruNp linux-3.13.11/kernel/notifier.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/notifier.c
--- linux-3.13.11/kernel/notifier.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/notifier.c	2014-07-09 12:00:15.000000000 +0200
@@ -5,6 +5,7 @@
 #include <linux/rcupdate.h>
 #include <linux/vmalloc.h>
 #include <linux/reboot.h>
+#include <linux/mm.h>
 
 /*
  *	Notifier list for kernel code which wants to be called
@@ -24,10 +25,12 @@ static int notifier_chain_register(struc
 	while ((*nl) != NULL) {
 		if (n->priority > (*nl)->priority)
 			break;
-		nl = &((*nl)->next);
+		nl = (struct notifier_block **)&((*nl)->next);
 	}
-	n->next = *nl;
+	pax_open_kernel();
+	*(const void **)&n->next = *nl;
 	rcu_assign_pointer(*nl, n);
+	pax_close_kernel();
 	return 0;
 }
 
@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(
 			return 0;
 		if (n->priority > (*nl)->priority)
 			break;
-		nl = &((*nl)->next);
+		nl = (struct notifier_block **)&((*nl)->next);
 	}
-	n->next = *nl;
+	pax_open_kernel();
+	*(const void **)&n->next = *nl;
 	rcu_assign_pointer(*nl, n);
+	pax_close_kernel();
 	return 0;
 }
 
@@ -51,10 +56,12 @@ static int notifier_chain_unregister(str
 {
 	while ((*nl) != NULL) {
 		if ((*nl) == n) {
+			pax_open_kernel();
 			rcu_assign_pointer(*nl, n->next);
+			pax_close_kernel();
 			return 0;
 		}
-		nl = &((*nl)->next);
+		nl = (struct notifier_block **)&((*nl)->next);
 	}
 	return -ENOENT;
 }
diff -ruNp linux-3.13.11/kernel/nsproxy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/nsproxy.c
--- linux-3.13.11/kernel/nsproxy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/nsproxy.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,11 +20,14 @@
 #include <linux/mnt_namespace.h>
 #include <linux/utsname.h>
 #include <linux/pid_namespace.h>
+#include <linux/vserver/global.h>
+#include <linux/vserver/debug.h>
 #include <net/net_namespace.h>
 #include <linux/ipc_namespace.h>
 #include <linux/proc_ns.h>
 #include <linux/file.h>
 #include <linux/syscalls.h>
+#include "../fs/mount.h"
 
 static struct kmem_cache *nsproxy_cachep;
 
@@ -46,8 +49,11 @@ static inline struct nsproxy *create_nsp
 	struct nsproxy *nsproxy;
 
 	nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
-	if (nsproxy)
+	if (nsproxy) {
 		atomic_set(&nsproxy->count, 1);
+		atomic_inc(&vs_global_nsproxy);
+	}
+	vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy);
 	return nsproxy;
 }
 
@@ -56,9 +62,12 @@ static inline struct nsproxy *create_nsp
  * Return the newly created nsproxy.  Do not attach this to the task,
  * leave it to the caller to do proper locking and attach it to task.
  */
-static struct nsproxy *create_new_namespaces(unsigned long flags,
-	struct task_struct *tsk, struct user_namespace *user_ns,
-	struct fs_struct *new_fs)
+static struct nsproxy *unshare_namespaces(
+	unsigned long flags,
+	struct nsproxy *orig,
+	struct fs_struct *new_fs,
+	struct user_namespace *new_user,
+	struct pid_namespace *new_pid)
 {
 	struct nsproxy *new_nsp;
 	int err;
@@ -67,32 +76,31 @@ static struct nsproxy *create_new_namesp
 	if (!new_nsp)
 		return ERR_PTR(-ENOMEM);
 
-	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);
+	new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_user, new_fs);
 	if (IS_ERR(new_nsp->mnt_ns)) {
 		err = PTR_ERR(new_nsp->mnt_ns);
 		goto out_ns;
 	}
 
-	new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);
+	new_nsp->uts_ns = copy_utsname(flags, new_user, orig->uts_ns);
 	if (IS_ERR(new_nsp->uts_ns)) {
 		err = PTR_ERR(new_nsp->uts_ns);
 		goto out_uts;
 	}
 
-	new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);
+	new_nsp->ipc_ns = copy_ipcs(flags, new_user, orig->ipc_ns);
 	if (IS_ERR(new_nsp->ipc_ns)) {
 		err = PTR_ERR(new_nsp->ipc_ns);
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns_for_children =
-		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+	new_nsp->pid_ns_for_children = copy_pid_ns(flags, new_user, new_pid);
 	if (IS_ERR(new_nsp->pid_ns_for_children)) {
 		err = PTR_ERR(new_nsp->pid_ns_for_children);
 		goto out_pid;
 	}
 
-	new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);
+	new_nsp->net_ns = copy_net_ns(flags, new_user, orig->net_ns);
 	if (IS_ERR(new_nsp->net_ns)) {
 		err = PTR_ERR(new_nsp->net_ns);
 		goto out_net;
@@ -117,6 +125,41 @@ out_ns:
 	return ERR_PTR(err);
 }
 
+static struct nsproxy *create_new_namespaces(unsigned long flags,
+	struct task_struct *tsk, struct user_namespace *user_ns,
+	struct fs_struct *new_fs)
+
+{
+	return unshare_namespaces(flags, tsk->nsproxy,
+		new_fs, user_ns, task_active_pid_ns(tsk));
+}
+
+/*
+ * copies the nsproxy, setting refcount to 1, and grabbing a
+ * reference to all contained namespaces.
+ */
+struct nsproxy *copy_nsproxy(struct nsproxy *orig)
+{
+	struct nsproxy *ns = create_nsproxy();
+
+	if (ns) {
+		memcpy(ns, orig, sizeof(struct nsproxy));
+		atomic_set(&ns->count, 1);
+
+		if (ns->mnt_ns)
+			get_mnt_ns(ns->mnt_ns);
+		if (ns->uts_ns)
+			get_uts_ns(ns->uts_ns);
+		if (ns->ipc_ns)
+			get_ipc_ns(ns->ipc_ns);
+		if (ns->pid_ns_for_children)
+			get_pid_ns(ns->pid_ns_for_children);
+		if (ns->net_ns)
+			get_net(ns->net_ns);
+	}
+	return ns;
+}
+
 /*
  * called from clone.  This now handles copy for nsproxy and all
  * namespaces therein.
@@ -125,7 +168,10 @@ int copy_namespaces(unsigned long flags,
 {
 	struct nsproxy *old_ns = tsk->nsproxy;
 	struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
-	struct nsproxy *new_ns;
+	struct nsproxy *new_ns = NULL;
+
+	vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])",
+		flags, tsk, old_ns);
 
 	if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
 			      CLONE_NEWPID | CLONE_NEWNET)))) {
@@ -133,7 +179,7 @@ int copy_namespaces(unsigned long flags,
 		return 0;
 	}
 
-	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+	if (!vx_ns_can_unshare(user_ns, CAP_SYS_ADMIN, flags))
 		return -EPERM;
 
 	/*
@@ -152,6 +198,9 @@ int copy_namespaces(unsigned long flags,
 		return  PTR_ERR(new_ns);
 
 	tsk->nsproxy = new_ns;
+	vxdprintk(VXD_CBIT(space, 3),
+		"copy_namespaces(0x%08lx,%p[%p]) = [%p]",
+		flags, tsk, old_ns, new_ns);
 	return 0;
 }
 
@@ -165,7 +214,9 @@ void free_nsproxy(struct nsproxy *ns)
 		put_ipc_ns(ns->ipc_ns);
 	if (ns->pid_ns_for_children)
 		put_pid_ns(ns->pid_ns_for_children);
-	put_net(ns->net_ns);
+	if (ns->net_ns)
+		put_net(ns->net_ns);
+	atomic_dec(&vs_global_nsproxy);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
 
@@ -179,12 +230,16 @@ int unshare_nsproxy_namespaces(unsigned
 	struct user_namespace *user_ns;
 	int err = 0;
 
+	vxdprintk(VXD_CBIT(space, 4),
+		"unshare_nsproxy_namespaces(0x%08lx,[%p])",
+		unshare_flags, current->nsproxy);
+
 	if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
 			       CLONE_NEWNET | CLONE_NEWPID)))
 		return 0;
 
 	user_ns = new_cred ? new_cred->user_ns : current_user_ns();
-	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+	if (!vx_ns_can_unshare(user_ns, CAP_SYS_ADMIN, unshare_flags))
 		return -EPERM;
 
 	*new_nsp = create_new_namespaces(unshare_flags, current, user_ns,
diff -ruNp linux-3.13.11/kernel/padata.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/padata.c
--- linux-3.13.11/kernel/padata.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/padata.c	2014-07-09 12:00:15.000000000 +0200
@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parall
 	 * seq_nr mod. number of cpus in use.
 	 */
 
-	seq_nr = atomic_inc_return(&pd->seq_nr);
+	seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
 	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
 
 	return padata_index_to_cpu(pd, cpu_index);
@@ -428,7 +428,7 @@ static struct parallel_data *padata_allo
 	padata_init_pqueues(pd);
 	padata_init_squeues(pd);
 	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
-	atomic_set(&pd->seq_nr, -1);
+	atomic_set_unchecked(&pd->seq_nr, -1);
 	atomic_set(&pd->reorder_objects, 0);
 	atomic_set(&pd->refcnt, 0);
 	pd->pinst = pinst;
diff -ruNp linux-3.13.11/kernel/panic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/panic.c
--- linux-3.13.11/kernel/panic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/panic.c	2014-07-09 12:00:15.000000000 +0200
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(panic_blink);
 /*
  * Stop ourself in panic -- architecture code may override this
  */
-void __weak panic_smp_self_stop(void)
+void __weak __noreturn panic_smp_self_stop(void)
 {
 	while (1)
 		cpu_relax();
@@ -407,7 +407,7 @@ static void warn_slowpath_common(const c
 	disable_trace_on_warning();
 
 	pr_warn("------------[ cut here ]------------\n");
-	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
+	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
 		raw_smp_processor_id(), current->pid, file, line, caller);
 
 	if (args)
@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
  */
 void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	dump_stack();
+	panic("stack-protector: Kernel stack is corrupted in: %pA\n",
 		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
diff -ruNp linux-3.13.11/kernel/pid.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/pid.c
--- linux-3.13.11/kernel/pid.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/pid.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,11 +33,13 @@
 #include <linux/rculist.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
+#include <linux/security.h>
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
 #include <linux/proc_ns.h>
 #include <linux/proc_fs.h>
+#include <linux/vs_pid.h>
 
 #define pid_hashfn(nr, ns)	\
 	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -47,7 +49,7 @@ struct pid init_struct_pid = INIT_STRUCT
 
 int pid_max = PID_MAX_DEFAULT;
 
-#define RESERVED_PIDS		300
+#define RESERVED_PIDS		500
 
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
@@ -373,7 +375,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
 
 struct pid *find_vpid(int nr)
 {
-	return find_pid_ns(nr, task_active_pid_ns(current));
+	return find_pid_ns(vx_rmap_pid(nr), task_active_pid_ns(current));
 }
 EXPORT_SYMBOL_GPL(find_vpid);
 
@@ -429,6 +431,9 @@ void transfer_pid(struct task_struct *ol
 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result = NULL;
+
+	if (type == PIDTYPE_REALPID)
+		type = PIDTYPE_PID;
 	if (pid) {
 		struct hlist_node *first;
 		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
@@ -445,10 +450,18 @@ EXPORT_SYMBOL(pid_task);
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
+	struct task_struct *task;
+
 	rcu_lockdep_assert(rcu_read_lock_held(),
 			   "find_task_by_pid_ns() needs rcu_read_lock()"
 			   " protection");
-	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
+
+	task = pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
+
+	if (gr_pid_is_chrooted(task))
+		return NULL;
+
+	return task;
 }
 
 struct task_struct *find_task_by_vpid(pid_t vnr)
@@ -456,6 +469,14 @@ struct task_struct *find_task_by_vpid(pi
 	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
 }
 
+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
+{
+	rcu_lockdep_assert(rcu_read_lock_held(),
+			   "find_task_by_pid_ns() needs rcu_read_lock()"
+			   " protection");
+	return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
+}
+
 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid *pid;
@@ -492,7 +513,7 @@ struct pid *find_get_pid(pid_t nr)
 }
 EXPORT_SYMBOL_GPL(find_get_pid);
 
-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns)
 {
 	struct upid *upid;
 	pid_t nr = 0;
@@ -506,6 +527,11 @@ pid_t pid_nr_ns(struct pid *pid, struct
 }
 EXPORT_SYMBOL_GPL(pid_nr_ns);
 
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+{
+	return vx_map_pid(pid_unmapped_nr_ns(pid, ns));
+}
+
 pid_t pid_vnr(struct pid *pid)
 {
 	return pid_nr_ns(pid, task_active_pid_ns(current));
diff -ruNp linux-3.13.11/kernel/pid_namespace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/pid_namespace.c
--- linux-3.13.11/kernel/pid_namespace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/pid_namespace.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,6 +18,7 @@
 #include <linux/proc_ns.h>
 #include <linux/reboot.h>
 #include <linux/export.h>
+#include <linux/vserver/global.h>
 
 struct pid_cache {
 	int nr_ids;
@@ -110,6 +111,7 @@ static struct pid_namespace *create_pid_
 		goto out_free_map;
 
 	kref_init(&ns->kref);
+	atomic_inc(&vs_global_pid_ns);
 	ns->level = level;
 	ns->parent = get_pid_ns(parent_pid_ns);
 	ns->user_ns = get_user_ns(user_ns);
@@ -127,6 +129,7 @@ static struct pid_namespace *create_pid_
 out_free_map:
 	kfree(ns->pidmap[0].page);
 out_free:
+	atomic_dec(&vs_global_pid_ns);
 	kmem_cache_free(pid_ns_cachep, ns);
 out:
 	return ERR_PTR(err);
@@ -253,7 +256,7 @@ static int pid_ns_ctl_handler(struct ctl
 		void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(current);
-	struct ctl_table tmp = *table;
+	ctl_table_no_const tmp = *table;
 
 	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
diff -ruNp linux-3.13.11/kernel/posix-cpu-timers.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/posix-cpu-timers.c
--- linux-3.13.11/kernel/posix-cpu-timers.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/posix-cpu-timers.c	2014-07-09 12:00:15.000000000 +0200
@@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
 
 static __init int init_posix_cpu_timers(void)
 {
-	struct k_clock process = {
+	static struct k_clock process = {
 		.clock_getres	= process_cpu_clock_getres,
 		.clock_get	= process_cpu_clock_get,
 		.timer_create	= process_cpu_timer_create,
 		.nsleep		= process_cpu_nsleep,
 		.nsleep_restart	= process_cpu_nsleep_restart,
 	};
-	struct k_clock thread = {
+	static struct k_clock thread = {
 		.clock_getres	= thread_cpu_clock_getres,
 		.clock_get	= thread_cpu_clock_get,
 		.timer_create	= thread_cpu_timer_create,
diff -ruNp linux-3.13.11/kernel/posix-timers.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/posix-timers.c
--- linux-3.13.11/kernel/posix-timers.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/posix-timers.c	2014-07-09 12:00:15.000000000 +0200
@@ -43,11 +43,13 @@
 #include <linux/hash.h>
 #include <linux/posix-clock.h>
 #include <linux/posix-timers.h>
+#include <linux/grsecurity.h>
 #include <linux/syscalls.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/export.h>
 #include <linux/hashtable.h>
+#include <linux/vs_context.h>
 
 /*
  * Management arrays for POSIX timers. Timers are now kept in static hash table
@@ -122,7 +124,7 @@ static DEFINE_SPINLOCK(hash_lock);
  *	    which we beg off on and pass to do_sys_settimeofday().
  */
 
-static struct k_clock posix_clocks[MAX_CLOCKS];
+static struct k_clock *posix_clocks[MAX_CLOCKS];
 
 /*
  * These ones are defined below.
@@ -275,7 +277,7 @@ static int posix_get_tai(clockid_t which
  */
 static __init int init_posix_timers(void)
 {
-	struct k_clock clock_realtime = {
+	static struct k_clock clock_realtime = {
 		.clock_getres	= hrtimer_get_res,
 		.clock_get	= posix_clock_realtime_get,
 		.clock_set	= posix_clock_realtime_set,
@@ -287,7 +289,7 @@ static __init int init_posix_timers(void
 		.timer_get	= common_timer_get,
 		.timer_del	= common_timer_del,
 	};
-	struct k_clock clock_monotonic = {
+	static struct k_clock clock_monotonic = {
 		.clock_getres	= hrtimer_get_res,
 		.clock_get	= posix_ktime_get_ts,
 		.nsleep		= common_nsleep,
@@ -297,19 +299,19 @@ static __init int init_posix_timers(void
 		.timer_get	= common_timer_get,
 		.timer_del	= common_timer_del,
 	};
-	struct k_clock clock_monotonic_raw = {
+	static struct k_clock clock_monotonic_raw = {
 		.clock_getres	= hrtimer_get_res,
 		.clock_get	= posix_get_monotonic_raw,
 	};
-	struct k_clock clock_realtime_coarse = {
+	static struct k_clock clock_realtime_coarse = {
 		.clock_getres	= posix_get_coarse_res,
 		.clock_get	= posix_get_realtime_coarse,
 	};
-	struct k_clock clock_monotonic_coarse = {
+	static struct k_clock clock_monotonic_coarse = {
 		.clock_getres	= posix_get_coarse_res,
 		.clock_get	= posix_get_monotonic_coarse,
 	};
-	struct k_clock clock_tai = {
+	static struct k_clock clock_tai = {
 		.clock_getres	= hrtimer_get_res,
 		.clock_get	= posix_get_tai,
 		.nsleep		= common_nsleep,
@@ -319,7 +321,7 @@ static __init int init_posix_timers(void
 		.timer_get	= common_timer_get,
 		.timer_del	= common_timer_del,
 	};
-	struct k_clock clock_boottime = {
+	static struct k_clock clock_boottime = {
 		.clock_getres	= hrtimer_get_res,
 		.clock_get	= posix_get_boottime,
 		.nsleep		= common_nsleep,
@@ -398,6 +400,7 @@ int posix_timer_event(struct k_itimer *t
 {
 	struct task_struct *task;
 	int shared, ret = -1;
+
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -414,10 +417,18 @@ int posix_timer_event(struct k_itimer *t
 	rcu_read_lock();
 	task = pid_task(timr->it_pid, PIDTYPE_PID);
 	if (task) {
+		struct vx_info_save vxis;
+		struct vx_info *vxi;
+
+		vxi = get_vx_info(task->vx_info);
+		enter_vx_info(vxi, &vxis);
 		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
 		ret = send_sigqueue(timr->sigq, task, shared);
+		leave_vx_info(&vxis);
+		put_vx_info(vxi);
 	}
 	rcu_read_unlock();
+
 	/* If we failed to send the signal the timer stops. */
 	return ret > 0;
 }
@@ -531,7 +542,7 @@ void posix_timers_register_clock(const c
 		return;
 	}
 
-	posix_clocks[clock_id] = *new_clock;
+	posix_clocks[clock_id] = new_clock;
 }
 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
 
@@ -577,9 +588,9 @@ static struct k_clock *clockid_to_kclock
 		return (id & CLOCKFD_MASK) == CLOCKFD ?
 			&clock_posix_dynamic : &clock_posix_cpu;
 
-	if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
+	if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
 		return NULL;
-	return &posix_clocks[id];
+	return posix_clocks[id];
 }
 
 static int common_timer_create(struct k_itimer *new_timer)
@@ -597,7 +608,7 @@ SYSCALL_DEFINE3(timer_create, const cloc
 	struct k_clock *kc = clockid_to_kclock(which_clock);
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
-	sigevent_t event;
+	sigevent_t event = { };
 	int it_id_set = IT_ID_NOT_SET;
 
 	if (!kc)
@@ -1011,6 +1022,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
 	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
 		return -EFAULT;
 
+	/* only the CLOCK_REALTIME clock can be set, all other clocks
+	   have their clock_set fptr set to a nosettime dummy function
+	   CLOCK_REALTIME has a NULL clock_set fptr which causes it to
+	   call common_clock_set, which calls do_sys_settimeofday, which
+	   we hook
+	*/
+
 	return kc->clock_set(which_clock, &new_tp);
 }
 
diff -ruNp linux-3.13.11/kernel/power/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/power/Kconfig
--- linux-3.13.11/kernel/power/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/power/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+	depends on !GRKERNSEC_KMEM
+	depends on !PAX_MEMORY_SANITIZE
 	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
diff -ruNp linux-3.13.11/kernel/power/process.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/power/process.c
--- linux-3.13.11/kernel/power/process.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/power/process.c	2014-07-09 12:00:15.000000000 +0200
@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user
 	unsigned int elapsed_msecs;
 	bool wakeup = false;
 	int sleep_usecs = USEC_PER_MSEC;
+	bool timedout = false;
 
 	do_gettimeofday(&start);
 
@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user
 
 	while (true) {
 		todo = 0;
+		if (time_after(jiffies, end_time))
+			timedout = true;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
 			if (p == current || !freeze_task(p))
 				continue;
 
-			if (!freezer_should_skip(p))
+			if (!freezer_should_skip(p)) {
 				todo++;
+				if (timedout) {
+					printk(KERN_ERR "Task refusing to freeze:\n");
+					sched_show_task(p);
+				}
+			}
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user
 			todo += wq_busy;
 		}
 
-		if (!todo || time_after(jiffies, end_time))
+		if (!todo || timedout)
 			break;
 
 		if (pm_wakeup_pending()) {
diff -ruNp linux-3.13.11/kernel/printk/printk.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/printk/printk.c
--- linux-3.13.11/kernel/printk/printk.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/printk/printk.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,6 +45,7 @@
 #include <linux/poll.h>
 #include <linux/irq_work.h>
 #include <linux/utsname.h>
+#include <linux/vs_cvirt.h>
 
 #include <asm/uaccess.h>
 
@@ -385,8 +386,13 @@ static int check_syslog_permissions(int
 	if (from_file && type != SYSLOG_ACTION_OPEN)
 		return 0;
 
+#ifdef CONFIG_GRKERNSEC_DMESG
+	if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
+		return -EPERM;
+#endif
+
 	if (syslog_action_restricted(type)) {
-		if (capable(CAP_SYSLOG))
+		if (vx_capable(CAP_SYSLOG, VXC_SYSLOG))
 			return 0;
 		/*
 		 * For historical reasons, accept CAP_SYS_ADMIN too, with
@@ -1134,12 +1140,9 @@ int do_syslog(int type, char __user *buf
 	if (error)
 		return error;
 
-	switch (type) {
-	case SYSLOG_ACTION_CLOSE:	/* Close log */
-		break;
-	case SYSLOG_ACTION_OPEN:	/* Open log */
-		break;
-	case SYSLOG_ACTION_READ:	/* Read from log */
+	if ((type == SYSLOG_ACTION_READ) ||
+	    (type == SYSLOG_ACTION_READ_ALL) ||
+	    (type == SYSLOG_ACTION_READ_CLEAR)) {
 		error = -EINVAL;
 		if (!buf || len < 0)
 			goto out;
@@ -1150,6 +1153,16 @@ int do_syslog(int type, char __user *buf
 			error = -EFAULT;
 			goto out;
 		}
+	}
+	if (!vx_check(0, VS_ADMIN|VS_WATCH))
+		return vx_do_syslog(type, buf, len);
+
+	switch (type) {
+	case SYSLOG_ACTION_CLOSE:	/* Close log */
+		break;
+	case SYSLOG_ACTION_OPEN:	/* Open log */
+		break;
+	case SYSLOG_ACTION_READ:	/* Read from log */
 		error = wait_event_interruptible(log_wait,
 						 syslog_seq != log_next_seq);
 		if (error)
@@ -1162,16 +1175,6 @@ int do_syslog(int type, char __user *buf
 		/* FALL THRU */
 	/* Read last kernel messages */
 	case SYSLOG_ACTION_READ_ALL:
-		error = -EINVAL;
-		if (!buf || len < 0)
-			goto out;
-		error = 0;
-		if (!len)
-			goto out;
-		if (!access_ok(VERIFY_WRITE, buf, len)) {
-			error = -EFAULT;
-			goto out;
-		}
 		error = syslog_print_all(buf, len, clear);
 		break;
 	/* Clear ring buffer */
diff -ruNp linux-3.13.11/kernel/profile.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/profile.c
--- linux-3.13.11/kernel/profile.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/profile.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,7 +37,7 @@ struct profile_hit {
 #define NR_PROFILE_HIT		(PAGE_SIZE/sizeof(struct profile_hit))
 #define NR_PROFILE_GRP		(NR_PROFILE_HIT/PROFILE_GRPSZ)
 
-static atomic_t *prof_buffer;
+static atomic_unchecked_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
 
 int prof_on __read_mostly;
@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
 					hits[i].pc = 0;
 				continue;
 			}
-			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+			atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
 			hits[i].hits = hits[i].pc = 0;
 		}
 	}
@@ -321,9 +321,9 @@ static void do_profile_hits(int type, vo
 	 * Add the current hit(s) and flush the write-queue out
 	 * to the global buffer:
 	 */
-	atomic_add(nr_hits, &prof_buffer[pc]);
+	atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
 	for (i = 0; i < NR_PROFILE_HIT; ++i) {
-		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
+		atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
 		hits[i].pc = hits[i].hits = 0;
 	}
 out:
@@ -398,7 +398,7 @@ static void do_profile_hits(int type, vo
 {
 	unsigned long pc;
 	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
+	atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
 
@@ -494,7 +494,7 @@ read_profile(struct file *file, char __u
 			return -EFAULT;
 		buf++; p++; count--; read++;
 	}
-	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
+	pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
 	if (copy_to_user(buf, (void *)pnt, count))
 		return -EFAULT;
 	read += count;
@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file
 	}
 #endif
 	profile_discard_flip_buffers();
-	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
+	memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
 	return count;
 }
 
diff -ruNp linux-3.13.11/kernel/ptrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/ptrace.c
--- linux-3.13.11/kernel/ptrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/ptrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/regset.h>
+#include <linux/vs_context.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
@@ -264,6 +265,11 @@ ok:
 	}
 	rcu_read_unlock();
 
+	if (!vx_check(task->xid, VS_ADMIN_P|VS_WATCH_P|VS_IDENT))
+		return -EPERM;
+	if (!vx_check(task->xid, VS_IDENT) &&
+		!task_vx_flags(task, VXF_STATE_ADMIN, 0))
+		return -EACCES;
 	return security_ptrace_access_check(task, mode);
 }
 
@@ -327,7 +333,7 @@ static int ptrace_attach(struct task_str
 	if (seize)
 		flags |= PT_SEIZED;
 	rcu_read_lock();
-	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
+	if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
 		flags |= PT_PTRACE_CAP;
 	rcu_read_unlock();
 	task->ptrace = flags;
@@ -538,7 +544,7 @@ int ptrace_readdata(struct task_struct *
 				break;
 			return -EIO;
 		}
-		if (copy_to_user(dst, buf, retval))
+		if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
 			return -EFAULT;
 		copied += retval;
 		src += retval;
@@ -806,7 +812,7 @@ int ptrace_request(struct task_struct *c
 	bool seized = child->ptrace & PT_SEIZED;
 	int ret = -EIO;
 	siginfo_t siginfo, *si;
-	void __user *datavp = (void __user *) data;
+	void __user *datavp = (__force void __user *) data;
 	unsigned long __user *datalp = datavp;
 	unsigned long flags;
 
@@ -1052,14 +1058,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
 		goto out;
 	}
 
+	if (gr_handle_ptrace(child, request)) {
+		ret = -EPERM;
+		goto out_put_task_struct;
+	}
+
 	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
 		ret = ptrace_attach(child, request, addr, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
 		 */
-		if (!ret)
+		if (!ret) {
 			arch_ptrace_attach(child);
+			gr_audit_ptrace(child);
+		}
 		goto out_put_task_struct;
 	}
 
@@ -1087,7 +1100,7 @@ int generic_ptrace_peekdata(struct task_
 	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
 	if (copied != sizeof(tmp))
 		return -EIO;
-	return put_user(tmp, (unsigned long __user *)data);
+	return put_user(tmp, (__force unsigned long __user *)data);
 }
 
 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
@@ -1181,7 +1194,7 @@ int compat_ptrace_request(struct task_st
 }
 
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-				  compat_long_t addr, compat_long_t data)
+				  compat_ulong_t addr, compat_ulong_t data)
 {
 	struct task_struct *child;
 	long ret;
@@ -1197,14 +1210,21 @@ asmlinkage long compat_sys_ptrace(compat
 		goto out;
 	}
 
+	if (gr_handle_ptrace(child, request)) {
+		ret = -EPERM;
+		goto out_put_task_struct;
+	}
+
 	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
 		ret = ptrace_attach(child, request, addr, data);
 		/*
 		 * Some architectures need to do book-keeping after
 		 * a ptrace attach.
 		 */
-		if (!ret)
+		if (!ret) {
 			arch_ptrace_attach(child);
+			gr_audit_ptrace(child);
+		}
 		goto out_put_task_struct;
 	}
 
diff -ruNp linux-3.13.11/kernel/rcu/srcu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/srcu.c
--- linux-3.13.11/kernel/rcu/srcu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/srcu.c	2014-07-09 12:00:15.000000000 +0200
@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct
 
 	idx = ACCESS_ONCE(sp->completed) & 0x1;
 	preempt_disable();
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+	ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+	ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
 	preempt_enable();
 	return idx;
 }
diff -ruNp linux-3.13.11/kernel/rcu/tiny.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tiny.c
--- linux-3.13.11/kernel/rcu/tiny.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tiny.c	2014-07-09 12:00:15.000000000 +0200
@@ -46,7 +46,7 @@
 /* Forward declarations for tiny_plugin.h. */
 struct rcu_ctrlblk;
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static void rcu_process_callbacks(struct softirq_action *unused);
+static void rcu_process_callbacks(void);
 static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(stru
 				      false));
 }
 
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(void)
 {
 	__rcu_process_callbacks(&rcu_sched_ctrlblk);
 	__rcu_process_callbacks(&rcu_bh_ctrlblk);
diff -ruNp linux-3.13.11/kernel/rcu/torture.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/torture.c
--- linux-3.13.11/kernel/rcu/torture.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/torture.c	2014-07-09 12:00:15.000000000 +0200
@@ -176,12 +176,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
 	{ 0 };
 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
 	{ 0 };
-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
-static atomic_t n_rcu_torture_alloc;
-static atomic_t n_rcu_torture_alloc_fail;
-static atomic_t n_rcu_torture_free;
-static atomic_t n_rcu_torture_mberror;
-static atomic_t n_rcu_torture_error;
+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
+static atomic_unchecked_t n_rcu_torture_alloc;
+static atomic_unchecked_t n_rcu_torture_alloc_fail;
+static atomic_unchecked_t n_rcu_torture_free;
+static atomic_unchecked_t n_rcu_torture_mberror;
+static atomic_unchecked_t n_rcu_torture_error;
 static long n_rcu_torture_barrier_error;
 static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
@@ -299,11 +299,11 @@ rcu_torture_alloc(void)
 
 	spin_lock_bh(&rcu_torture_lock);
 	if (list_empty(&rcu_torture_freelist)) {
-		atomic_inc(&n_rcu_torture_alloc_fail);
+		atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
 		spin_unlock_bh(&rcu_torture_lock);
 		return NULL;
 	}
-	atomic_inc(&n_rcu_torture_alloc);
+	atomic_inc_unchecked(&n_rcu_torture_alloc);
 	p = rcu_torture_freelist.next;
 	list_del_init(p);
 	spin_unlock_bh(&rcu_torture_lock);
@@ -316,7 +316,7 @@ rcu_torture_alloc(void)
 static void
 rcu_torture_free(struct rcu_torture *p)
 {
-	atomic_inc(&n_rcu_torture_free);
+	atomic_inc_unchecked(&n_rcu_torture_free);
 	spin_lock_bh(&rcu_torture_lock);
 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
 	spin_unlock_bh(&rcu_torture_lock);
@@ -437,7 +437,7 @@ rcu_torture_cb(struct rcu_head *p)
 	i = rp->rtort_pipe_count;
 	if (i > RCU_TORTURE_PIPE_LEN)
 		i = RCU_TORTURE_PIPE_LEN;
-	atomic_inc(&rcu_torture_wcount[i]);
+	atomic_inc_unchecked(&rcu_torture_wcount[i]);
 	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
 		rp->rtort_mbtest = 0;
 		rcu_torture_free(rp);
@@ -827,7 +827,7 @@ rcu_torture_writer(void *arg)
 			i = old_rp->rtort_pipe_count;
 			if (i > RCU_TORTURE_PIPE_LEN)
 				i = RCU_TORTURE_PIPE_LEN;
-			atomic_inc(&rcu_torture_wcount[i]);
+			atomic_inc_unchecked(&rcu_torture_wcount[i]);
 			old_rp->rtort_pipe_count++;
 			if (gp_normal == gp_exp)
 				exp = !!(rcu_random(&rand) & 0x80);
@@ -845,7 +845,7 @@ rcu_torture_writer(void *arg)
 					i = rp->rtort_pipe_count;
 					if (i > RCU_TORTURE_PIPE_LEN)
 						i = RCU_TORTURE_PIPE_LEN;
-					atomic_inc(&rcu_torture_wcount[i]);
+					atomic_inc_unchecked(&rcu_torture_wcount[i]);
 					if (++rp->rtort_pipe_count >=
 					    RCU_TORTURE_PIPE_LEN) {
 						rp->rtort_mbtest = 0;
@@ -944,7 +944,7 @@ static void rcu_torture_timer(unsigned l
 		return;
 	}
 	if (p->rtort_mbtest == 0)
-		atomic_inc(&n_rcu_torture_mberror);
+		atomic_inc_unchecked(&n_rcu_torture_mberror);
 	spin_lock(&rand_lock);
 	cur_ops->read_delay(&rand);
 	n_rcu_torture_timers++;
@@ -1014,7 +1014,7 @@ rcu_torture_reader(void *arg)
 			continue;
 		}
 		if (p->rtort_mbtest == 0)
-			atomic_inc(&n_rcu_torture_mberror);
+			atomic_inc_unchecked(&n_rcu_torture_mberror);
 		cur_ops->read_delay(&rand);
 		preempt_disable();
 		pipe_count = p->rtort_pipe_count;
@@ -1077,11 +1077,11 @@ rcu_torture_printk(char *page)
 		       rcu_torture_current,
 		       rcu_torture_current_version,
 		       list_empty(&rcu_torture_freelist),
-		       atomic_read(&n_rcu_torture_alloc),
-		       atomic_read(&n_rcu_torture_alloc_fail),
-		       atomic_read(&n_rcu_torture_free));
+		       atomic_read_unchecked(&n_rcu_torture_alloc),
+		       atomic_read_unchecked(&n_rcu_torture_alloc_fail),
+		       atomic_read_unchecked(&n_rcu_torture_free));
 	cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
-		       atomic_read(&n_rcu_torture_mberror),
+		       atomic_read_unchecked(&n_rcu_torture_mberror),
 		       n_rcu_torture_boost_ktrerror,
 		       n_rcu_torture_boost_rterror);
 	cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
@@ -1100,14 +1100,14 @@ rcu_torture_printk(char *page)
 		       n_barrier_attempts,
 		       n_rcu_torture_barrier_error);
 	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
-	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
+	if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
 	    n_rcu_torture_barrier_error != 0 ||
 	    n_rcu_torture_boost_ktrerror != 0 ||
 	    n_rcu_torture_boost_rterror != 0 ||
 	    n_rcu_torture_boost_failure != 0 ||
 	    i > 1) {
 		cnt += sprintf(&page[cnt], "!!! ");
-		atomic_inc(&n_rcu_torture_error);
+		atomic_inc_unchecked(&n_rcu_torture_error);
 		WARN_ON_ONCE(1);
 	}
 	cnt += sprintf(&page[cnt], "Reader Pipe: ");
@@ -1121,7 +1121,7 @@ rcu_torture_printk(char *page)
 	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 		cnt += sprintf(&page[cnt], " %d",
-			       atomic_read(&rcu_torture_wcount[i]));
+			       atomic_read_unchecked(&rcu_torture_wcount[i]));
 	}
 	cnt += sprintf(&page[cnt], "\n");
 	if (cur_ops->stats)
@@ -1836,7 +1836,7 @@ rcu_torture_cleanup(void)
 
 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 
-	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
+	if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
 	else if (n_online_successes != n_online_attempts ||
 		 n_offline_successes != n_offline_attempts)
@@ -1958,18 +1958,18 @@ rcu_torture_init(void)
 
 	rcu_torture_current = NULL;
 	rcu_torture_current_version = 0;
-	atomic_set(&n_rcu_torture_alloc, 0);
-	atomic_set(&n_rcu_torture_alloc_fail, 0);
-	atomic_set(&n_rcu_torture_free, 0);
-	atomic_set(&n_rcu_torture_mberror, 0);
-	atomic_set(&n_rcu_torture_error, 0);
+	atomic_set_unchecked(&n_rcu_torture_alloc, 0);
+	atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
+	atomic_set_unchecked(&n_rcu_torture_free, 0);
+	atomic_set_unchecked(&n_rcu_torture_mberror, 0);
+	atomic_set_unchecked(&n_rcu_torture_error, 0);
 	n_rcu_torture_barrier_error = 0;
 	n_rcu_torture_boost_ktrerror = 0;
 	n_rcu_torture_boost_rterror = 0;
 	n_rcu_torture_boost_failure = 0;
 	n_rcu_torture_boosts = 0;
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
-		atomic_set(&rcu_torture_wcount[i], 0);
+		atomic_set_unchecked(&rcu_torture_wcount[i], 0);
 	for_each_possible_cpu(cpu) {
 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 			per_cpu(rcu_torture_count, cpu)[i] = 0;
diff -ruNp linux-3.13.11/kernel/rcu/tree.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree.c
--- linux-3.13.11/kernel/rcu/tree.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree.c	2014-07-09 12:00:15.000000000 +0200
@@ -383,9 +383,9 @@ static void rcu_eqs_enter_common(struct
 	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
-	atomic_inc(&rdtp->dynticks);
+	atomic_inc_unchecked(&rdtp->dynticks);
 	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
-	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+	WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
 
 	/*
 	 * It is illegal to enter an extended quiescent state while
@@ -502,10 +502,10 @@ static void rcu_eqs_exit_common(struct r
 			       int user)
 {
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
-	atomic_inc(&rdtp->dynticks);
+	atomic_inc_unchecked(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
@@ -625,14 +625,14 @@ void rcu_nmi_enter(void)
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 &&
-	    (atomic_read(&rdtp->dynticks) & 0x1))
+	    (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
 		return;
 	rdtp->dynticks_nmi_nesting++;
 	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
-	atomic_inc(&rdtp->dynticks);
+	atomic_inc_unchecked(&rdtp->dynticks);
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
 }
 
 /**
@@ -651,9 +651,9 @@ void rcu_nmi_exit(void)
 		return;
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
-	atomic_inc(&rdtp->dynticks);
+	atomic_inc_unchecked(&rdtp->dynticks);
 	smp_mb__after_atomic_inc();  /* Force delay to next write. */
-	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+	WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
 }
 
 /**
@@ -666,7 +666,7 @@ void rcu_nmi_exit(void)
  */
 bool notrace __rcu_is_watching(void)
 {
-	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+	return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
 
 /**
@@ -749,7 +749,7 @@ static int rcu_is_cpu_rrupt_from_idle(vo
 static int dyntick_save_progress_counter(struct rcu_data *rdp,
 					 bool *isidle, unsigned long *maxj)
 {
-	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+	rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	return (rdp->dynticks_snap & 0x1) == 0;
 }
@@ -766,7 +766,7 @@ static int rcu_implicit_dynticks_qs(stru
 	unsigned int curr;
 	unsigned int snap;
 
-	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+	curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
 	snap = (unsigned int)rdp->dynticks_snap;
 
 	/*
@@ -1412,9 +1412,9 @@ static int rcu_gp_init(struct rcu_state
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+		ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
 		WARN_ON_ONCE(rnp->completed != rsp->completed);
-		ACCESS_ONCE(rnp->completed) = rsp->completed;
+		ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
@@ -1505,7 +1505,7 @@ static void rcu_gp_cleanup(struct rcu_st
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq(&rnp->lock);
-		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+		ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			__note_gp_changes(rsp, rnp, rdp);
@@ -1865,7 +1865,7 @@ rcu_send_cbs_to_orphanage(int cpu, struc
 		rsp->qlen += rdp->qlen;
 		rdp->n_cbs_orphaned += rdp->qlen;
 		rdp->qlen_lazy = 0;
-		ACCESS_ONCE(rdp->qlen) = 0;
+		ACCESS_ONCE_RW(rdp->qlen) = 0;
 	}
 
 	/*
@@ -2111,7 +2111,7 @@ static void rcu_do_batch(struct rcu_stat
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	ACCESS_ONCE(rdp->qlen) -= count;
+	ACCESS_ONCE_RW(rdp->qlen) -= count;
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
@@ -2308,7 +2308,7 @@ __rcu_process_callbacks(struct rcu_state
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 	struct rcu_state *rsp;
 
@@ -2415,7 +2415,7 @@ __call_rcu(struct rcu_head *head, void (
 	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
-		ACCESS_ONCE(head->func) = rcu_leak_callback;
+		ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
 		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
 		return;
 	}
@@ -2443,7 +2443,7 @@ __call_rcu(struct rcu_head *head, void (
 		local_irq_restore(flags);
 		return;
 	}
-	ACCESS_ONCE(rdp->qlen)++;
+	ACCESS_ONCE_RW(rdp->qlen)++;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -2652,11 +2652,11 @@ void synchronize_sched_expedited(void)
 	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
 	 * course be required on a 64-bit system.
 	 */
-	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+	if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
 			 (ulong)atomic_long_read(&rsp->expedited_done) +
 			 ULONG_MAX / 8)) {
 		synchronize_sched();
-		atomic_long_inc(&rsp->expedited_wrap);
+		atomic_long_inc_unchecked(&rsp->expedited_wrap);
 		return;
 	}
 
@@ -2664,7 +2664,7 @@ void synchronize_sched_expedited(void)
 	 * Take a ticket.  Note that atomic_inc_return() implies a
 	 * full memory barrier.
 	 */
-	snap = atomic_long_inc_return(&rsp->expedited_start);
+	snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
 	firstsnap = snap;
 	get_online_cpus();
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2677,14 +2677,14 @@ void synchronize_sched_expedited(void)
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
-		atomic_long_inc(&rsp->expedited_tryfail);
+		atomic_long_inc_unchecked(&rsp->expedited_tryfail);
 
 		/* Check to see if someone else did our work for us. */
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic_inc(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_workdone1);
+			atomic_long_inc_unchecked(&rsp->expedited_workdone1);
 			return;
 		}
 
@@ -2693,7 +2693,7 @@ void synchronize_sched_expedited(void)
 			udelay(trycount * num_online_cpus());
 		} else {
 			wait_rcu_gp(call_rcu_sched);
-			atomic_long_inc(&rsp->expedited_normal);
+			atomic_long_inc_unchecked(&rsp->expedited_normal);
 			return;
 		}
 
@@ -2702,7 +2702,7 @@ void synchronize_sched_expedited(void)
 		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic_inc(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_workdone2);
+			atomic_long_inc_unchecked(&rsp->expedited_workdone2);
 			return;
 		}
 
@@ -2714,10 +2714,10 @@ void synchronize_sched_expedited(void)
 		 * period works for us.
 		 */
 		get_online_cpus();
-		snap = atomic_long_read(&rsp->expedited_start);
+		snap = atomic_long_read_unchecked(&rsp->expedited_start);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
-	atomic_long_inc(&rsp->expedited_stoppedcpus);
+	atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
 
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
@@ -2726,16 +2726,16 @@ void synchronize_sched_expedited(void)
 	 * than we did already did their update.
 	 */
 	do {
-		atomic_long_inc(&rsp->expedited_done_tries);
+		atomic_long_inc_unchecked(&rsp->expedited_done_tries);
 		s = atomic_long_read(&rsp->expedited_done);
 		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
 			/* ensure test happens before caller kfree */
 			smp_mb__before_atomic_inc(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_done_lost);
+			atomic_long_inc_unchecked(&rsp->expedited_done_lost);
 			break;
 		}
 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
-	atomic_long_inc(&rsp->expedited_done_exit);
+	atomic_long_inc_unchecked(&rsp->expedited_done_exit);
 
 	put_online_cpus();
 }
@@ -2931,7 +2931,7 @@ static void _rcu_barrier(struct rcu_stat
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE_RW(rsp->n_barrier_done)++;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -2981,7 +2981,7 @@ static void _rcu_barrier(struct rcu_stat
 
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-	ACCESS_ONCE(rsp->n_barrier_done)++;
+	ACCESS_ONCE_RW(rsp->n_barrier_done)++;
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -3026,10 +3026,10 @@ rcu_boot_init_percpu_data(int cpu, struc
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 	init_callback_list(rdp);
 	rdp->qlen_lazy = 0;
-	ACCESS_ONCE(rdp->qlen) = 0;
+	ACCESS_ONCE_RW(rdp->qlen) = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
-	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+	WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
@@ -3063,8 +3063,8 @@ rcu_init_percpu_data(int cpu, struct rcu
 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 	rcu_sysidle_init_percpu_data(rdp->dynticks);
-	atomic_set(&rdp->dynticks->dynticks,
-		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+	atomic_set_unchecked(&rdp->dynticks->dynticks,
+		   (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
 	/* Add CPU to rcu_node bitmasks. */
diff -ruNp linux-3.13.11/kernel/rcu/tree.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree.h
--- linux-3.13.11/kernel/rcu/tree.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree.h	2014-07-09 12:00:15.000000000 +0200
@@ -87,11 +87,11 @@ struct rcu_dynticks {
 	long long dynticks_nesting; /* Track irq/process nesting level. */
 				    /* Process level is worth LLONG_MAX/2. */
 	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
-	atomic_t dynticks;	    /* Even value for idle, else odd. */
+	atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 	long long dynticks_idle_nesting;
 				    /* irq/process nesting level from idle. */
-	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
+	atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
 				    /*  "Idle" excludes userspace execution. */
 	unsigned long dynticks_idle_jiffies;
 				    /* End of last non-NMI non-idle period. */
@@ -429,17 +429,17 @@ struct rcu_state {
 						/*  _rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
-	atomic_long_t expedited_start;		/* Starting ticket. */
-	atomic_long_t expedited_done;		/* Done ticket. */
-	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
-	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
-	atomic_long_t expedited_workdone1;	/* # done by others #1. */
-	atomic_long_t expedited_workdone2;	/* # done by others #2. */
-	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
-	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
-	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
-	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
-	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */
+	atomic_long_unchecked_t expedited_start;	/* Starting ticket. */
+	atomic_long_t expedited_done;			/* Done ticket. */
+	atomic_long_unchecked_t expedited_wrap;		/* # near-wrap incidents. */
+	atomic_long_unchecked_t expedited_tryfail;	/* # acquisition failures. */
+	atomic_long_unchecked_t expedited_workdone1;	/* # done by others #1. */
+	atomic_long_unchecked_t expedited_workdone2;	/* # done by others #2. */
+	atomic_long_unchecked_t expedited_normal;	/* # fallbacks to normal. */
+	atomic_long_unchecked_t expedited_stoppedcpus;	/* # successful stop_cpus. */
+	atomic_long_unchecked_t expedited_done_tries;	/* # tries to update _done. */
+	atomic_long_unchecked_t expedited_done_lost;	/* # times beaten to _done. */
+	atomic_long_unchecked_t expedited_done_exit;	/* # times exited _done loop. */
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
diff -ruNp linux-3.13.11/kernel/rcu/tree_plugin.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree_plugin.h
--- linux-3.13.11/kernel/rcu/tree_plugin.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree_plugin.h	2014-07-09 12:00:15.000000000 +0200
@@ -749,7 +749,7 @@ static int rcu_preempted_readers_exp(str
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
 	return !rcu_preempted_readers_exp(rnp) &&
-	       ACCESS_ONCE(rnp->expmask) == 0;
+	       ACCESS_ONCE_RW(rnp->expmask) == 0;
 }
 
 /*
@@ -905,7 +905,7 @@ void synchronize_rcu_expedited(void)
 
 	/* Clean up and exit. */
 	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
+	ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
 unlock_mb_ret:
 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -1479,7 +1479,7 @@ static void rcu_boost_kthread_setaffinit
 	free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
 	.store			= &rcu_cpu_kthread_task,
 	.thread_should_run	= rcu_cpu_kthread_should_run,
 	.thread_fn		= rcu_cpu_kthread,
@@ -1946,7 +1946,7 @@ static void print_cpu_stall_info(struct
 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
 	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
 	       cpu, ticks_value, ticks_title,
-	       atomic_read(&rdtp->dynticks) & 0xfff,
+	       atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 	       fast_no_hz);
@@ -2109,7 +2109,7 @@ static void __call_rcu_nocb_enqueue(stru
 
 	/* Enqueue the callback on the nocb list and update counts. */
 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
-	ACCESS_ONCE(*old_rhpp) = rhp;
+	ACCESS_ONCE_RW(*old_rhpp) = rhp;
 	atomic_long_add(rhcount, &rdp->nocb_q_count);
 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
 
@@ -2272,12 +2272,12 @@ static int rcu_nocb_kthread(void *arg)
 		 * Extract queued callbacks, update counts, and wait
 		 * for a grace period to elapse.
 		 */
-		ACCESS_ONCE(rdp->nocb_head) = NULL;
+		ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
 		tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
 		c = atomic_long_xchg(&rdp->nocb_q_count, 0);
 		cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
-		ACCESS_ONCE(rdp->nocb_p_count) += c;
-		ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
+		ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
+		ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
 		rcu_nocb_wait_gp(rdp);
 
 		/* Each pass through the following loop invokes a callback. */
@@ -2303,8 +2303,8 @@ static int rcu_nocb_kthread(void *arg)
 			list = next;
 		}
 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-		ACCESS_ONCE(rdp->nocb_p_count) -= c;
-		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
+		ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
+		ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
 		rdp->n_nocbs_invoked += c;
 	}
 	return 0;
@@ -2331,7 +2331,7 @@ static void __init rcu_spawn_nocb_kthrea
 		t = kthread_run(rcu_nocb_kthread, rdp,
 				"rcuo%c/%d", rsp->abbr, cpu);
 		BUG_ON(IS_ERR(t));
-		ACCESS_ONCE(rdp->nocb_kthread) = t;
+		ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
 	}
 }
 
@@ -2457,11 +2457,11 @@ static void rcu_sysidle_enter(struct rcu
 
 	/* Record start of fully idle period. */
 	j = jiffies;
-	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+	ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
 	smp_mb__before_atomic_inc();
-	atomic_inc(&rdtp->dynticks_idle);
+	atomic_inc_unchecked(&rdtp->dynticks_idle);
 	smp_mb__after_atomic_inc();
-	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
+	WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
 }
 
 /*
@@ -2526,9 +2526,9 @@ static void rcu_sysidle_exit(struct rcu_
 
 	/* Record end of idle period. */
 	smp_mb__before_atomic_inc();
-	atomic_inc(&rdtp->dynticks_idle);
+	atomic_inc_unchecked(&rdtp->dynticks_idle);
 	smp_mb__after_atomic_inc();
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
+	WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
 
 	/*
 	 * If we are the timekeeping CPU, we are permitted to be non-idle
@@ -2569,7 +2569,7 @@ static void rcu_sysidle_check_cpu(struct
 		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
 
 	/* Pick up current idle and NMI-nesting counter and check. */
-	cur = atomic_read(&rdtp->dynticks_idle);
+	cur = atomic_read_unchecked(&rdtp->dynticks_idle);
 	if (cur & 0x1) {
 		*isidle = false; /* We are not idle! */
 		return;
@@ -2632,7 +2632,7 @@ static void rcu_sysidle(unsigned long j)
 	case RCU_SYSIDLE_NOT:
 
 		/* First time all are idle, so note a short idle period. */
-		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+		ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
 		break;
 
 	case RCU_SYSIDLE_SHORT:
@@ -2669,7 +2669,7 @@ static void rcu_sysidle(unsigned long j)
 static void rcu_sysidle_cancel(void)
 {
 	smp_mb();
-	ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+	ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
 }
 
 /*
@@ -2717,7 +2717,7 @@ static void rcu_sysidle_cb(struct rcu_he
 	smp_mb();  /* grace period precedes setting inuse. */
 
 	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
-	ACCESS_ONCE(rshp->inuse) = 0;
+	ACCESS_ONCE_RW(rshp->inuse) = 0;
 }
 
 /*
diff -ruNp linux-3.13.11/kernel/rcu/tree_trace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree_trace.c
--- linux-3.13.11/kernel/rcu/tree_trace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/tree_trace.c	2014-07-09 12:00:15.000000000 +0200
@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct se
 		   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
 		   rdp->passed_quiesce, rdp->qs_pending);
 	seq_printf(m, " dt=%d/%llx/%d df=%lu",
-		   atomic_read(&rdp->dynticks->dynticks),
+		   atomic_read_unchecked(&rdp->dynticks->dynticks),
 		   rdp->dynticks->dynticks_nesting,
 		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
 
 	seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu
dx=%lu\n",
-		   atomic_long_read(&rsp->expedited_start),
+		   atomic_long_read_unchecked(&rsp->expedited_start),
 		   atomic_long_read(&rsp->expedited_done),
-		   atomic_long_read(&rsp->expedited_wrap),
-		   atomic_long_read(&rsp->expedited_tryfail),
-		   atomic_long_read(&rsp->expedited_workdone1),
-		   atomic_long_read(&rsp->expedited_workdone2),
-		   atomic_long_read(&rsp->expedited_normal),
-		   atomic_long_read(&rsp->expedited_stoppedcpus),
-		   atomic_long_read(&rsp->expedited_done_tries),
-		   atomic_long_read(&rsp->expedited_done_lost),
-		   atomic_long_read(&rsp->expedited_done_exit));
+		   atomic_long_read_unchecked(&rsp->expedited_wrap),
+		   atomic_long_read_unchecked(&rsp->expedited_tryfail),
+		   atomic_long_read_unchecked(&rsp->expedited_workdone1),
+		   atomic_long_read_unchecked(&rsp->expedited_workdone2),
+		   atomic_long_read_unchecked(&rsp->expedited_normal),
+		   atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
+		   atomic_long_read_unchecked(&rsp->expedited_done_tries),
+		   atomic_long_read_unchecked(&rsp->expedited_done_lost),
+		   atomic_long_read_unchecked(&rsp->expedited_done_exit));
 	return 0;
 }
 
diff -ruNp linux-3.13.11/kernel/rcu/update.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/update.c
--- linux-3.13.11/kernel/rcu/update.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/rcu/update.c	2014-07-09 12:00:15.000000000 +0200
@@ -318,10 +318,10 @@ int rcu_jiffies_till_stall_check(void)
 	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
 	 */
 	if (till_stall_check < 3) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+		ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
 		till_stall_check = 3;
 	} else if (till_stall_check > 300) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+		ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
 		till_stall_check = 300;
 	}
 	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
diff -ruNp linux-3.13.11/kernel/reboot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/reboot.c
--- linux-3.13.11/kernel/reboot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/reboot.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/syscore_ops.h>
 #include <linux/uaccess.h>
+#include <linux/vs_pid.h>
 
 /*
  * this indicates whether you can reboot with ctrl-alt-del: the default is yes
@@ -188,6 +189,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off);
 
 static DEFINE_MUTEX(reboot_mutex);
 
+long vs_reboot(unsigned int, void __user *);
+
 /*
  * Reboot system call: for obvious reasons only root may call it,
  * and even root needs to set up some magic numbers in the registers
@@ -230,6 +233,9 @@ SYSCALL_DEFINE4(reboot, int, magic1, int
 	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
 		cmd = LINUX_REBOOT_CMD_HALT;
 
+	if (!vx_check(0, VS_ADMIN|VS_WATCH))
+		return vs_reboot(cmd, arg);
+
 	mutex_lock(&reboot_mutex);
 	switch (cmd) {
 	case LINUX_REBOOT_CMD_RESTART:
diff -ruNp linux-3.13.11/kernel/resource.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/resource.c
--- linux-3.13.11/kernel/resource.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/resource.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,8 +152,18 @@ static const struct file_operations proc
 
 static int __init ioresources_init(void)
 {
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#ifdef CONFIG_GRKERNSEC_PROC_USER
+	proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
+	proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+	proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
+	proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
+#endif
+#else
 	proc_create("ioports", 0, NULL, &proc_ioports_operations);
 	proc_create("iomem", 0, NULL, &proc_iomem_operations);
+#endif
 	return 0;
 }
 __initcall(ioresources_init);
diff -ruNp linux-3.13.11/kernel/sched/auto_group.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/auto_group.c
--- linux-3.13.11/kernel/sched/auto_group.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/auto_group.c	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,7 @@
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
-static atomic_t autogroup_seq_nr;
+static atomic_unchecked_t autogroup_seq_nr;
 
 void __init autogroup_init(struct task_struct *init_task)
 {
@@ -79,7 +79,7 @@ static inline struct autogroup *autogrou
 
 	kref_init(&ag->kref);
 	init_rwsem(&ag->lock);
-	ag->id = atomic_inc_return(&autogroup_seq_nr);
+	ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
 	ag->tg = tg;
 #ifdef CONFIG_RT_GROUP_SCHED
 	/*
diff -ruNp linux-3.13.11/kernel/sched/completion.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/completion.c
--- linux-3.13.11/kernel/sched/completion.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/completion.c	2014-07-09 12:00:15.000000000 +0200
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interr
  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
  * or number of jiffies left till timeout) if completed.
  */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_interruptible_timeout(struct completion *x,
 					  unsigned long timeout)
 {
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interr
  *
  * Return: -ERESTARTSYS if interrupted, 0 if completed.
  */
-int __sched wait_for_completion_killable(struct completion *x)
+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
 {
 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
 	if (t == -ERESTARTSYS)
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killab
  * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
  * or number of jiffies left till timeout) if completed.
  */
-long __sched
+long __sched __intentional_overflow(-1)
 wait_for_completion_killable_timeout(struct completion *x,
 				     unsigned long timeout)
 {
diff -ruNp linux-3.13.11/kernel/sched/core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/core.c
--- linux-3.13.11/kernel/sched/core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/core.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,6 +73,8 @@
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
+#include <linux/vs_sched.h>
+#include <linux/vs_cvirt.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -1768,7 +1770,7 @@ void set_numabalancing_state(bool enable
 int sysctl_numa_balancing(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table t;
+	ctl_table_no_const t;
 	int err;
 	int state = numabalancing_enabled;
 
@@ -2893,6 +2895,8 @@ int can_nice(const struct task_struct *p
 	/* convert nice value [19,-20] to rlimit style value [1,40] */
 	int nice_rlim = 20 - nice;
 
+	gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
+
 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
@@ -2926,8 +2930,9 @@ SYSCALL_DEFINE1(nice, int, increment)
 	if (nice > 19)
 		nice = 19;
 
-	if (increment < 0 && !can_nice(current, nice))
-		return -EPERM;
+	if (increment < 0 && (!can_nice(current, nice) ||
+			      gr_handle_chroot_nice()))
+		return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
 
 	retval = security_task_setnice(current, nice);
 	if (retval)
@@ -3088,6 +3093,7 @@ recheck:
 			unsigned long rlim_rtprio =
 					task_rlimit(p, RLIMIT_RTPRIO);
 
+			 gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
 			/* can't set/change the rt policy */
 			if (policy != p->policy && !rlim_rtprio)
 				return -EPERM;
@@ -4254,7 +4260,7 @@ static void migrate_tasks(unsigned int d
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 
-static struct ctl_table sd_ctl_dir[] = {
+static ctl_table_no_const sd_ctl_dir[] __read_only = {
 	{
 		.procname	= "sched_domain",
 		.mode		= 0555,
@@ -4271,17 +4277,17 @@ static struct ctl_table sd_ctl_root[] =
 	{}
 };
 
-static struct ctl_table *sd_alloc_ctl_entry(int n)
+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
 {
-	struct ctl_table *entry =
+	ctl_table_no_const *entry =
 		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 
 	return entry;
 }
 
-static void sd_free_ctl_entry(struct ctl_table **tablep)
+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
 {
-	struct ctl_table *entry;
+	ctl_table_no_const *entry;
 
 	/*
 	 * In the intermediate directories, both the child directory and
@@ -4289,22 +4295,25 @@ static void sd_free_ctl_entry(struct ctl
 	 * will always be set. In the lowest directory the names are
 	 * static strings and all have proc handlers.
 	 */
-	for (entry = *tablep; entry->mode; entry++) {
-		if (entry->child)
-			sd_free_ctl_entry(&entry->child);
+	for (entry = tablep; entry->mode; entry++) {
+		if (entry->child) {
+			sd_free_ctl_entry(entry->child);
+			pax_open_kernel();
+			entry->child = NULL;
+			pax_close_kernel();
+		}
 		if (entry->proc_handler == NULL)
 			kfree(entry->procname);
 	}
 
-	kfree(*tablep);
-	*tablep = NULL;
+	kfree(tablep);
 }
 
 static int min_load_idx = 0;
 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
-set_table_entry(struct ctl_table *entry,
+set_table_entry(ctl_table_no_const *entry,
 		const char *procname, void *data, int maxlen,
 		umode_t mode, proc_handler *proc_handler,
 		bool load_idx)
@@ -4324,7 +4333,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-	struct ctl_table *table = sd_alloc_ctl_entry(13);
+	ctl_table_no_const *table = sd_alloc_ctl_entry(13);
 
 	if (table == NULL)
 		return NULL;
@@ -4359,9 +4368,9 @@ sd_alloc_ctl_domain_table(struct sched_d
 	return table;
 }
 
-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
 {
-	struct ctl_table *entry, *table;
+	ctl_table_no_const *entry, *table;
 	struct sched_domain *sd;
 	int domain_num = 0, i;
 	char buf[32];
@@ -4388,11 +4397,13 @@ static struct ctl_table_header *sd_sysct
 static void register_sched_domain_sysctl(void)
 {
 	int i, cpu_num = num_possible_cpus();
-	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+	ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
 	WARN_ON(sd_ctl_dir[0].child);
+	pax_open_kernel();
 	sd_ctl_dir[0].child = entry;
+	pax_close_kernel();
 
 	if (entry == NULL)
 		return;
@@ -4415,8 +4426,12 @@ static void unregister_sched_domain_sysc
 	if (sd_sysctl_header)
 		unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
-	if (sd_ctl_dir[0].child)
-		sd_free_ctl_entry(&sd_ctl_dir[0].child);
+	if (sd_ctl_dir[0].child) {
+		sd_free_ctl_entry(sd_ctl_dir[0].child);
+		pax_open_kernel();
+		sd_ctl_dir[0].child = NULL;
+		pax_close_kernel();
+	}
 }
 #else
 static void register_sched_domain_sysctl(void)
diff -ruNp linux-3.13.11/kernel/sched/cputime.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/cputime.c
--- linux-3.13.11/kernel/sched/cputime.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/cputime.c	2014-07-09 12:00:15.000000000 +0200
@@ -4,6 +4,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
+#include <linux/vs_sched.h>
 #include "sched.h"
 
 
@@ -135,14 +136,17 @@ static inline void task_group_account_fi
 void account_user_time(struct task_struct *p, cputime_t cputime,
 		       cputime_t cputime_scaled)
 {
+	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
+	int nice = (TASK_NICE(p) > 0);
 	int index;
 
 	/* Add user time to process. */
 	p->utime += cputime;
 	p->utimescaled += cputime_scaled;
+	vx_account_user(vxi, cputime, nice);
 	account_group_user_time(p, cputime);
 
-	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+	index = (nice) ? CPUTIME_NICE : CPUTIME_USER;
 
 	/* Add user time to cpustat. */
 	task_group_account_field(p, index, (__force u64) cputime);
@@ -189,9 +193,12 @@ static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime,
 			cputime_t cputime_scaled, int index)
 {
+	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
+
 	/* Add system time to process. */
 	p->stime += cputime;
 	p->stimescaled += cputime_scaled;
+	vx_account_system(vxi, cputime, 0 /* do we have idle time? */);
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
diff -ruNp linux-3.13.11/kernel/sched/fair.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/fair.c
--- linux-3.13.11/kernel/sched/fair.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/fair.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,6 +29,7 @@
 #include <linux/mempolicy.h>
 #include <linux/migrate.h>
 #include <linux/task_work.h>
+#include <linux/vs_cvirt.h>
 
 #include <trace/events/sched.h>
 
@@ -1652,7 +1653,7 @@ void task_numa_fault(int last_cpupid, in
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-	ACCESS_ONCE(p->mm->numa_scan_seq)++;
+	ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
 	p->mm->numa_scan_offset = 0;
 }
 
@@ -2577,6 +2578,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
+	if (entity_is_task(se))
+		vx_activate_task(task_of(se));
 	if (cfs_rq->nr_running == 1) {
 		list_add_leaf_cfs_rq(cfs_rq);
 		check_enqueue_throttle(cfs_rq);
@@ -2658,6 +2661,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
+	if (entity_is_task(se))
+		vx_deactivate_task(task_of(se));
 	account_entity_dequeue(cfs_rq, se);
 
 	/*
@@ -6863,7 +6868,7 @@ static void nohz_idle_balance(int this_c
  * run_rebalance_domains is triggered when needed from the scheduler tick.
  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
  */
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(void)
 {
 	int this_cpu = smp_processor_id();
 	struct rq *this_rq = cpu_rq(this_cpu);
diff -ruNp linux-3.13.11/kernel/sched/proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/proc.c
--- linux-3.13.11/kernel/sched/proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -78,9 +78,17 @@ EXPORT_SYMBOL(avenrun); /* should be rem
  */
 void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
 {
-	loads[0] = (avenrun[0] + offset) << shift;
-	loads[1] = (avenrun[1] + offset) << shift;
-	loads[2] = (avenrun[2] + offset) << shift;
+	if (vx_flags(VXF_VIRT_LOAD, 0)) {
+		struct vx_info *vxi = current_vx_info();
+
+		loads[0] = (vxi->cvirt.load[0] + offset) << shift;
+		loads[1] = (vxi->cvirt.load[1] + offset) << shift;
+		loads[2] = (vxi->cvirt.load[2] + offset) << shift;
+	} else {
+		loads[0] = (avenrun[0] + offset) << shift;
+		loads[1] = (avenrun[1] + offset) << shift;
+		loads[2] = (avenrun[2] + offset) << shift;
+	}
 }
 
 long calc_load_fold_active(struct rq *this_rq)
diff -ruNp linux-3.13.11/kernel/sched/sched.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/sched.h
--- linux-3.13.11/kernel/sched/sched.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sched/sched.h	2014-07-09 12:00:15.000000000 +0200
@@ -1035,7 +1035,7 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
-};
+} __do_const;
 
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
diff -ruNp linux-3.13.11/kernel/signal.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/signal.c
--- linux-3.13.11/kernel/signal.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/signal.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,6 +33,8 @@
 #include <linux/uprobes.h>
 #include <linux/compat.h>
 #include <linux/cn_proc.h>
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
 
@@ -51,12 +53,12 @@ static struct kmem_cache *sigqueue_cache
 
 int print_fatal_signals __read_mostly;
 
-static void __user *sig_handler(struct task_struct *t, int sig)
+static __sighandler_t sig_handler(struct task_struct *t, int sig)
 {
 	return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static int sig_handler_ignored(__sighandler_t handler, int sig)
 {
 	/* Is it explicitly or implicitly ignored? */
 	return handler == SIG_IGN ||
@@ -65,7 +67,7 @@ static int sig_handler_ignored(void __us
 
 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
-	void __user *handler;
+	__sighandler_t handler;
 
 	handler = sig_handler(t, sig);
 
@@ -369,6 +371,9 @@ __sigqueue_alloc(int sig, struct task_st
 	atomic_inc(&user->sigpending);
 	rcu_read_unlock();
 
+	if (!override_rlimit)
+		gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
+
 	if (override_rlimit ||
 	    atomic_read(&user->sigpending) <=
 			task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -496,7 +501,7 @@ flush_signal_handlers(struct task_struct
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
-	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
+	__sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
 	if (is_global_init(tsk))
 		return 1;
 	if (handler != SIG_IGN && handler != SIG_DFL)
@@ -790,9 +795,18 @@ static int check_kill_permission(int sig
 	struct pid *sid;
 	int error;
 
+	vxdprintk(VXD_CBIT(misc, 7),
+		"check_kill_permission(%d,%p,%p[#%u,%u])",
+		sig, info, t, vx_task_xid(t), t->pid);
+
 	if (!valid_signal(sig))
 		return -EINVAL;
 
+/*	FIXME: needed? if so, why?
+	if ((info != SEND_SIG_NOINFO) &&
+		(is_si_special(info) || !si_fromuser(info)))
+		goto skip;	*/
+
 	if (!si_fromuser(info))
 		return 0;
 
@@ -816,6 +830,27 @@ static int check_kill_permission(int sig
 		}
 	}
 
+	/* allow glibc communication via tgkill to other threads in our
+	   thread group */
+	if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
+	     sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
+	    && gr_handle_signal(t, sig))
+		return -EPERM;
+
+	error = -EPERM;
+	if (t->pid == 1 && current->xid)
+		return error;
+
+	error = -ESRCH;
+	/* FIXME: we shouldn't return ESRCH ever, to avoid
+		  loops, maybe ENOENT or EACCES? */
+	if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) {
+		vxdprintk(current->xid || VXD_CBIT(misc, 7),
+			"signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
+			sig, info, t, vx_task_xid(t), t->pid, current->xid);
+		return error;
+	}
+/* skip: */
 	return security_task_kill(t, info, sig, 0);
 }
 
@@ -1199,7 +1234,7 @@ __group_send_sig_info(int sig, struct si
 	return send_signal(sig, info, p, 1);
 }
 
-static int
+int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	return send_signal(sig, info, t, 0);
@@ -1236,6 +1271,7 @@ force_sig_info(int sig, struct siginfo *
 	unsigned long int flags;
 	int ret, blocked, ignored;
 	struct k_sigaction *action;
+	int is_unhandled = 0;
 
 	spin_lock_irqsave(&t->sighand->siglock, flags);
 	action = &t->sighand->action[sig-1];
@@ -1250,9 +1286,18 @@ force_sig_info(int sig, struct siginfo *
 	}
 	if (action->sa.sa_handler == SIG_DFL)
 		t->signal->flags &= ~SIGNAL_UNKILLABLE;
+	if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
+		is_unhandled = 1;
 	ret = specific_send_sig_info(sig, info, t);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
+	/* only deal with unhandled signals, java etc trigger SIGSEGV during
+	   normal operation */
+	if (is_unhandled) {
+		gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
+		gr_handle_crash(t, sig);
+	}
+
 	return ret;
 }
 
@@ -1319,8 +1364,11 @@ int group_send_sig_info(int sig, struct
 	ret = check_kill_permission(sig, info, p);
 	rcu_read_unlock();
 
-	if (!ret && sig)
+	if (!ret && sig) {
 		ret = do_send_sig_info(sig, info, p, true);
+		if (!ret)
+			gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
+	}
 
 	return ret;
 }
@@ -1353,7 +1401,7 @@ int kill_pid_info(int sig, struct siginf
 	rcu_read_lock();
 retry:
 	p = pid_task(pid, PIDTYPE_PID);
-	if (p) {
+	if (p && vx_check(vx_task_xid(p), VS_IDENT)) {
 		error = group_send_sig_info(sig, info, p);
 		if (unlikely(error == -ESRCH))
 			/*
@@ -1401,7 +1449,7 @@ int kill_pid_info_as_cred(int sig, struc
 
 	rcu_read_lock();
 	p = pid_task(pid, PIDTYPE_PID);
-	if (!p) {
+	if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) {
 		ret = -ESRCH;
 		goto out_unlock;
 	}
@@ -1453,8 +1501,10 @@ static int kill_something_info(int sig,
 		struct task_struct * p;
 
 		for_each_process(p) {
-			if (task_pid_vnr(p) > 1 &&
-					!same_thread_group(p, current)) {
+			if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) &&
+				task_pid_vnr(p) > 1 &&
+				!same_thread_group(p, current) &&
+				!vx_current_initpid(p->pid)) {
 				int err = group_send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
@@ -2308,6 +2358,11 @@ relock:
 				!sig_kernel_only(signr))
 			continue;
 
+		/* virtual init is protected against user signals */
+		if ((info->si_code == SI_USER) &&
+			vx_current_initpid(current->pid))
+			continue;
+
 		if (sig_kernel_stop(signr)) {
 			/*
 			 * The default action is to stop all threads in
@@ -2926,7 +2981,15 @@ do_send_specific(pid_t tgid, pid_t pid,
 	int error = -ESRCH;
 
 	rcu_read_lock();
-	p = find_task_by_vpid(pid);
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
+	/* allow glibc communication via tgkill to other threads in our
+	   thread group */
+	if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
+	    sig == (SIGRTMIN+1) && tgid == info->si_pid)	    
+		p = find_task_by_vpid_unrestricted(pid);
+	else
+#endif
+		p = find_task_by_vpid(pid);
 	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
 		error = check_kill_permission(sig, info, p);
 		/*
@@ -3240,8 +3303,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
 	}
 	seg = get_fs();
 	set_fs(KERNEL_DS);
-	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
-			     (stack_t __force __user *) &uoss,
+	ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
+			     (stack_t __force_user *) &uoss,
 			     compat_user_stack_pointer());
 	set_fs(seg);
 	if (ret >= 0 && uoss_ptr)  {
diff -ruNp linux-3.13.11/kernel/smpboot.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/smpboot.c
--- linux-3.13.11/kernel/smpboot.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/smpboot.c	2014-07-09 12:00:15.000000000 +0200
@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struc
 		}
 		smpboot_unpark_thread(plug_thread, cpu);
 	}
-	list_add(&plug_thread->list, &hotplug_threads);
+	pax_list_add(&plug_thread->list, &hotplug_threads);
 out:
 	mutex_unlock(&smpboot_threads_lock);
 	return ret;
@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(st
 {
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
-	list_del(&plug_thread->list);
+	pax_list_del(&plug_thread->list);
 	smpboot_destroy_threads(plug_thread);
 	mutex_unlock(&smpboot_threads_lock);
 	put_online_cpus();
diff -ruNp linux-3.13.11/kernel/softirq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/softirq.c
--- linux-3.13.11/kernel/softirq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/softirq.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,6 +23,7 @@
 #include <linux/smp.h>
 #include <linux/smpboot.h>
 #include <linux/tick.h>
+#include <linux/vs_context.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -50,11 +51,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cach
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
@@ -250,7 +251,7 @@ restart:
 			kstat_incr_softirqs_this_cpu(vec_nr);
 
 			trace_softirq_entry(vec_nr);
-			h->action(h);
+			h->action();
 			trace_softirq_exit(vec_nr);
 			if (unlikely(prev_count != preempt_count())) {
 				printk(KERN_ERR "huh, entered softirq %u %s %p"
@@ -419,7 +420,7 @@ void __raise_softirq_irqoff(unsigned int
 	or_softirq_pending(1UL << nr);
 }
 
-void open_softirq(int nr, void (*action)(struct softirq_action *))
+void __init open_softirq(int nr, void (*action)(void))
 {
 	softirq_vec[nr].action = action;
 }
@@ -475,7 +476,7 @@ void __tasklet_hi_schedule_first(struct
 
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(void)
 {
 	struct tasklet_struct *list;
 
@@ -510,7 +511,7 @@ static void tasklet_action(struct softir
 	}
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(void)
 {
 	struct tasklet_struct *list;
 
@@ -740,7 +741,7 @@ static struct notifier_block cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-static struct smp_hotplug_thread softirq_threads = {
+static struct smp_hotplug_thread softirq_threads __read_only = {
 	.store			= &ksoftirqd,
 	.thread_should_run	= ksoftirqd_should_run,
 	.thread_fn		= run_ksoftirqd,
diff -ruNp linux-3.13.11/kernel/sys.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sys.c
--- linux-3.13.11/kernel/sys.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sys.c	2014-07-09 12:00:15.000000000 +0200
@@ -54,6 +54,7 @@
 #include <linux/cred.h>
 
 #include <linux/kmsg_dump.h>
+#include <linux/vs_pid.h>
 /* Move somewhere else to avoid recompiling? */
 #include <generated/utsrelease.h>
 
@@ -145,9 +146,18 @@ static int set_one_prio(struct task_stru
 		goto out;
 	}
 	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
+		if (vx_flags(VXF_IGNEG_NICE, 0))
+			error = 0;
+		else
+			error = -EACCES;
+		goto out;
+	}
+
+	if (gr_handle_chroot_setpriority(p, niceval)) {
 		error = -EACCES;
 		goto out;
 	}
+
 	no_nice = security_task_setnice(p, niceval);
 	if (no_nice) {
 		error = no_nice;
@@ -196,6 +206,8 @@ SYSCALL_DEFINE3(setpriority, int, which,
 			else
 				pgrp = task_pgrp(current);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				error = set_one_prio(p, niceval, error);
 			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 			break;
@@ -261,6 +273,8 @@ SYSCALL_DEFINE2(getpriority, int, which,
 			else
 				pgrp = task_pgrp(current);
 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
+				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+					continue;
 				niceval = 20 - task_nice(p);
 				if (niceval > retval)
 					retval = niceval;
@@ -351,6 +365,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
 			goto error;
 	}
 
+	if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
+		goto error;
+
 	if (rgid != (gid_t) -1 ||
 	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
 		new->sgid = new->egid;
@@ -386,6 +403,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
 	old = current_cred();
 
 	retval = -EPERM;
+
+	if (gr_check_group_change(kgid, kgid, kgid))
+		goto error;
+
 	if (ns_capable(old->user_ns, CAP_SETGID))
 		new->gid = new->egid = new->sgid = new->fsgid = kgid;
 	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
@@ -403,7 +424,7 @@ error:
 /*
  * change the user struct in a credentials set to match the new UID
  */
-static int set_user(struct cred *new)
+int set_user(struct cred *new)
 {
 	struct user_struct *new_user;
 
@@ -483,6 +504,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
 			goto error;
 	}
 
+	if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
+		goto error;
+
 	if (!uid_eq(new->uid, old->uid)) {
 		retval = set_user(new);
 		if (retval < 0)
@@ -533,6 +557,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
 	old = current_cred();
 
 	retval = -EPERM;
+
+	if (gr_check_crash_uid(kuid))
+		goto error;
+	if (gr_check_user_change(kuid, kuid, kuid))
+		goto error;
+
 	if (ns_capable(old->user_ns, CAP_SETUID)) {
 		new->suid = new->uid = kuid;
 		if (!uid_eq(kuid, old->uid)) {
@@ -602,6 +632,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
 			goto error;
 	}
 
+	if (gr_check_user_change(kruid, keuid, INVALID_UID))
+		goto error;
+
 	if (ruid != (uid_t) -1) {
 		new->uid = kruid;
 		if (!uid_eq(kruid, old->uid)) {
@@ -684,6 +717,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
 			goto error;
 	}
 
+	if (gr_check_group_change(krgid, kegid, INVALID_GID))
+		goto error;
+
 	if (rgid != (gid_t) -1)
 		new->gid = krgid;
 	if (egid != (gid_t) -1)
@@ -745,12 +781,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
 	    ns_capable(old->user_ns, CAP_SETUID)) {
 		if (!uid_eq(kuid, old->fsuid)) {
+			if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
+				goto error;
+
 			new->fsuid = kuid;
 			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 				goto change_okay;
 		}
 	}
 
+error:
 	abort_creds(new);
 	return old_fsuid;
 
@@ -783,12 +823,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
 	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
 	    ns_capable(old->user_ns, CAP_SETGID)) {
+		if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
+			goto error;
+
 		if (!gid_eq(kgid, old->fsgid)) {
 			new->fsgid = kgid;
 			goto change_okay;
 		}
 	}
 
+error:
 	abort_creds(new);
 	return old_fsgid;
 
@@ -1168,19 +1212,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_
 		return -EFAULT;
 
 	down_read(&uts_sem);
-	error = __copy_to_user(&name->sysname, &utsname()->sysname,
+	error = __copy_to_user(name->sysname, &utsname()->sysname,
 			       __OLD_UTS_LEN);
 	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+	error |= __copy_to_user(name->nodename, &utsname()->nodename,
 				__OLD_UTS_LEN);
 	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->release, &utsname()->release,
+	error |= __copy_to_user(name->release, &utsname()->release,
 				__OLD_UTS_LEN);
 	error |= __put_user(0, name->release + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->version, &utsname()->version,
+	error |= __copy_to_user(name->version, &utsname()->version,
 				__OLD_UTS_LEN);
 	error |= __put_user(0, name->version + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->machine, &utsname()->machine,
+	error |= __copy_to_user(name->machine, &utsname()->machine,
 				__OLD_UTS_LEN);
 	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
 	up_read(&uts_sem);
@@ -1198,7 +1242,8 @@ SYSCALL_DEFINE2(sethostname, char __user
 	int errno;
 	char tmp[__NEW_UTS_LEN];
 
-	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
+	if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns,
+		CAP_SYS_ADMIN, VXC_SET_UTSNAME))
 		return -EPERM;
 
 	if (len < 0 || len > __NEW_UTS_LEN)
@@ -1249,7 +1294,8 @@ SYSCALL_DEFINE2(setdomainname, char __us
 	int errno;
 	char tmp[__NEW_UTS_LEN];
 
-	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
+	if (!vx_ns_capable(current->nsproxy->uts_ns->user_ns,
+		CAP_SYS_ADMIN, VXC_SET_UTSNAME))
 		return -EPERM;
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
@@ -1368,7 +1414,7 @@ int do_prlimit(struct task_struct *tsk,
 		/* Keep the capable check against init_user_ns until
 		   cgroups can contain all limits */
 		if (new_rlim->rlim_max > rlim->rlim_max &&
-				!capable(CAP_SYS_RESOURCE))
+			!vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
 			retval = -EPERM;
 		if (!retval)
 			retval = security_task_setrlimit(tsk->group_leader,
@@ -1382,6 +1428,13 @@ int do_prlimit(struct task_struct *tsk,
 			 */
 			new_rlim->rlim_cur = 1;
 		}
+		/* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
+		   is changed to a lower value.  Since tasks can be created by the same
+		   user in between this limit change and an execve by this task, force
+		   a recheck only for this task by setting PF_NPROC_EXCEEDED
+		*/
+		if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
+			tsk->flags |= PF_NPROC_EXCEEDED;
 	}
 	if (!retval) {
 		if (old_rlim)
@@ -1421,7 +1474,8 @@ static int check_prlimit_permission(stru
 	    gid_eq(cred->gid, tcred->sgid) &&
 	    gid_eq(cred->gid, tcred->gid))
 		return 0;
-	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
+	if (vx_ns_capable(tcred->user_ns,
+		CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
 		return 0;
 
 	return -EPERM;
diff -ruNp linux-3.13.11/kernel/sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sysctl.c
--- linux-3.13.11/kernel/sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -83,6 +83,7 @@
 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT)
 #include <linux/lockdep.h>
 #endif
+extern char vshelper_path[];
 #ifdef CONFIG_CHR_DEV_SG
 #include <scsi/sg.h>
 #endif
@@ -93,7 +94,6 @@
 
 
 #if defined(CONFIG_SYSCTL)
-
 /* External variables not in a header file. */
 extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
@@ -119,17 +119,18 @@ extern int blk_iopoll_enabled;
 
 /* Constants used for minimum and  maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
-static int sixty = 60;
+static int sixty __read_only = 60;
 #endif
 
-static int zero;
-static int __maybe_unused one = 1;
-static int __maybe_unused two = 2;
-static int __maybe_unused three = 3;
-static unsigned long one_ul = 1;
-static int one_hundred = 100;
+static int neg_one __read_only = -1;
+static int zero __read_only = 0;
+static int __maybe_unused one __read_only = 1;
+static int __maybe_unused two __read_only = 2;
+static int __maybe_unused three __read_only = 3;
+static unsigned long one_ul __read_only = 1;
+static int one_hundred __read_only = 100;
 #ifdef CONFIG_PRINTK
-static int ten_thousand = 10000;
+static int ten_thousand __read_only = 10000;
 #endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
@@ -176,10 +177,8 @@ static int proc_taint(struct ctl_table *
 			       void __user *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
-#ifdef CONFIG_PRINTK
 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
 
 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -210,6 +209,8 @@ static int sysrq_sysctl_handler(ctl_tabl
 
 #endif
 
+extern struct ctl_table grsecurity_table[];
+
 static struct ctl_table kern_table[];
 static struct ctl_table vm_table[];
 static struct ctl_table fs_table[];
@@ -224,6 +225,20 @@ extern struct ctl_table epoll_table[];
 int sysctl_legacy_va_layout;
 #endif
 
+#ifdef CONFIG_PAX_SOFTMODE
+static ctl_table pax_table[] = {
+	{
+		.procname	= "softmode",
+		.data		= &pax_softmode,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+
+	{ }
+};
+#endif
+
 /* The default sysctl tables: */
 
 static struct ctl_table sysctl_base_table[] = {
@@ -272,6 +287,22 @@ static int max_extfrag_threshold = 1000;
 #endif
 
 static struct ctl_table kern_table[] = {
+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
+	{
+		.procname	= "grsecurity",
+		.mode		= 0500,
+		.child		= grsecurity_table,
+	},
+#endif
+
+#ifdef CONFIG_PAX_SOFTMODE
+	{
+		.procname	= "pax",
+		.mode		= 0500,
+		.child		= pax_table,
+	},
+#endif
+
 	{
 		.procname	= "sched_child_runs_first",
 		.data		= &sysctl_sched_child_runs_first,
@@ -629,7 +660,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &modprobe_path,
 		.maxlen		= KMOD_PATH_LEN,
 		.mode		= 0644,
-		.proc_handler	= proc_dostring,
+		.proc_handler	= proc_dostring_modpriv,
 	},
 	{
 		.procname	= "modules_disabled",
@@ -650,6 +681,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dostring,
 	},
+	{
+		.procname	= "vshelper",
+		.data		= &vshelper_path,
+		.maxlen		= 256,
+		.mode		= 0644,
+		.proc_handler	= &proc_dostring,
+	},
 
 #ifdef CONFIG_CHR_DEV_SG
 	{
@@ -796,16 +834,20 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+#endif
 	{
 		.procname	= "kptr_restrict",
 		.data		= &kptr_restrict,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax_sysadmin,
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+		.extra1		= &two,
+#else
 		.extra1		= &zero,
+#endif
 		.extra2		= &two,
 	},
-#endif
 	{
 		.procname	= "ngroups_max",
 		.data		= &ngroups_max,
@@ -1048,10 +1090,17 @@ static struct ctl_table kern_table[] = {
 	 */
 	{
 		.procname	= "perf_event_paranoid",
-		.data		= &sysctl_perf_event_paranoid,
-		.maxlen		= sizeof(sysctl_perf_event_paranoid),
+		.data		= &sysctl_perf_event_legitimately_concerned,
+		.maxlen		= sizeof(sysctl_perf_event_legitimately_concerned),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		/* go ahead, be a hero */
+		.proc_handler	= proc_dointvec_minmax_sysadmin,
+		.extra1		= &neg_one,
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+		.extra2		= &three,
+#else
+		.extra2		= &two,
+#endif
 	},
 	{
 		.procname	= "perf_event_mlock_kb",
@@ -1315,6 +1364,13 @@ static struct ctl_table vm_table[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
 	},
+	{
+		.procname	= "heap_stack_gap",
+		.data		= &sysctl_heap_stack_gap,
+		.maxlen		= sizeof(sysctl_heap_stack_gap),
+		.mode		= 0644,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
 #else
 	{
 		.procname	= "nr_trim_pages",
@@ -1779,6 +1835,16 @@ int proc_dostring(struct ctl_table *tabl
 			       buffer, lenp, ppos);
 }
 
+int proc_dostring_modpriv(struct ctl_table *table, int write,
+		  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	if (write && !capable(CAP_SYS_MODULE))
+		return -EPERM;
+
+	return _proc_do_string(table->data, table->maxlen, write,
+			       buffer, lenp, ppos);
+}
+
 static size_t proc_skip_spaces(char **buf)
 {
 	size_t ret;
@@ -1884,6 +1950,8 @@ static int proc_put_long(void __user **b
 	len = strlen(tmp);
 	if (len > *size)
 		len = *size;
+	if (len > sizeof(tmp))
+		len = sizeof(tmp);
 	if (copy_to_user(*buf, tmp, len))
 		return -EFAULT;
 	*size -= len;
@@ -2048,7 +2116,7 @@ int proc_dointvec(struct ctl_table *tabl
 static int proc_taint(struct ctl_table *table, int write,
 			       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table t;
+	ctl_table_no_const t;
 	unsigned long tmptaint = get_taint();
 	int err;
 
@@ -2076,7 +2144,6 @@ static int proc_taint(struct ctl_table *
 	return err;
 }
 
-#ifdef CONFIG_PRINTK
 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2085,7 +2152,6 @@ static int proc_dointvec_minmax_sysadmin
 
 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-#endif
 
 struct do_proc_dointvec_minmax_conv_param {
 	int *min;
@@ -2632,6 +2698,12 @@ int proc_dostring(struct ctl_table *tabl
 	return -ENOSYS;
 }
 
+int proc_dostring_modpriv(struct ctl_table *table, int write,
+		  void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+
 int proc_dointvec(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -2688,5 +2760,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
 EXPORT_SYMBOL(proc_dostring);
+EXPORT_SYMBOL(proc_dostring_modpriv);
 EXPORT_SYMBOL(proc_doulongvec_minmax);
 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
diff -ruNp linux-3.13.11/kernel/sysctl_binary.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sysctl_binary.c
--- linux-3.13.11/kernel/sysctl_binary.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/sysctl_binary.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,6 +73,7 @@ static const struct bin_table bin_kern_t
 
 	{ CTL_INT,	KERN_PANIC,			"panic" },
 	{ CTL_INT,	KERN_REALROOTDEV,		"real-root-dev" },
+	{ CTL_STR,	KERN_VSHELPER,			"vshelper" },
 
 	{ CTL_STR,	KERN_SPARC_REBOOT,		"reboot-cmd" },
 	{ CTL_INT,	KERN_CTLALTDEL,			"ctrl-alt-del" },
diff -ruNp linux-3.13.11/kernel/taskstats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/taskstats.c
--- linux-3.13.11/kernel/taskstats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/taskstats.c	2014-07-09 12:00:15.000000000 +0200
@@ -28,9 +28,12 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pid_namespace.h>
+#include <linux/grsecurity.h>
 #include <net/genetlink.h>
 #include <linux/atomic.h>
 
+extern int gr_is_taskstats_denied(int pid);
+
 /*
  * Maximum length of a cpumask that can be specified in
  * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
@@ -576,6 +579,9 @@ err:
 
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
+	if (gr_is_taskstats_denied(current->pid))
+		return -EACCES;
+
 	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
 		return cmd_attr_register_cpumask(info);
 	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
diff -ruNp linux-3.13.11/kernel/time/alarmtimer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/alarmtimer.c
--- linux-3.13.11/kernel/time/alarmtimer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/alarmtimer.c	2014-07-09 12:00:15.000000000 +0200
@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
 	struct platform_device *pdev;
 	int error = 0;
 	int i;
-	struct k_clock alarm_clock = {
+	static struct k_clock alarm_clock = {
 		.clock_getres	= alarm_clock_getres,
 		.clock_get	= alarm_clock_get,
 		.timer_create	= alarm_timer_create,
diff -ruNp linux-3.13.11/kernel/time/timekeeping.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timekeeping.c
--- linux-3.13.11/kernel/time/timekeeping.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timekeeping.c	2014-07-09 12:00:15.000000000 +0200
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/grsecurity.h>
 #include <linux/syscore_ops.h>
 #include <linux/clocksource.h>
 #include <linux/jiffies.h>
@@ -22,6 +23,7 @@
 #include <linux/tick.h>
 #include <linux/stop_machine.h>
 #include <linux/pvclock_gtod.h>
+#include <linux/vs_time.h>
 
 #include "tick-internal.h"
 #include "ntp_internal.h"
@@ -500,6 +502,8 @@ int do_settimeofday(const struct timespe
 	if (!timespec_valid_strict(tv))
 		return -EINVAL;
 
+	gr_log_timechange();
+
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&timekeeper_seq);
 
@@ -709,6 +713,7 @@ void getrawmonotonic(struct timespec *ts
 	} while (read_seqcount_retry(&timekeeper_seq, seq));
 
 	timespec_add_ns(ts, nsecs);
+	vx_adjust_timespec(ts);
 }
 EXPORT_SYMBOL(getrawmonotonic);
 
diff -ruNp linux-3.13.11/kernel/time/timer_list.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timer_list.c
--- linux-3.13.11/kernel/time/timer_list.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timer_list.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	SEQ_printf(m, "<%p>", NULL);
+#else
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
 		SEQ_printf(m, "<%pK>", sym);
 	else
 		SEQ_printf(m, "%s", symname);
+#endif
 }
 
 static void
@@ -119,7 +123,11 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	SEQ_printf(m, "  .base:       %p\n", NULL);
+#else
 	SEQ_printf(m, "  .base:       %pK\n", base);
+#endif
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs
 {
 	struct proc_dir_entry *pe;
 
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
+#else
 	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+#endif
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff -ruNp linux-3.13.11/kernel/time/timer_stats.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timer_stats.c
--- linux-3.13.11/kernel/time/timer_stats.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time/timer_stats.c	2014-07-09 12:00:15.000000000 +0200
@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
 static unsigned long nr_entries;
 static struct entry entries[MAX_ENTRIES];
 
-static atomic_t overflow_count;
+static atomic_unchecked_t overflow_count;
 
 /*
  * The entries are in a hash-table, for fast lookup:
@@ -140,7 +140,7 @@ static void reset_entries(void)
 	nr_entries = 0;
 	memset(entries, 0, sizeof(entries));
 	memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
-	atomic_set(&overflow_count, 0);
+	atomic_set_unchecked(&overflow_count, 0);
 }
 
 static struct entry *alloc_entry(void)
@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
 	if (likely(entry))
 		entry->count++;
 	else
-		atomic_inc(&overflow_count);
+		atomic_inc_unchecked(&overflow_count);
 
  out_unlock:
 	raw_spin_unlock_irqrestore(lock, flags);
@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
 
 static void print_name_offset(struct seq_file *m, unsigned long addr)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	seq_printf(m, "<%p>", NULL);
+#else
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name(addr, symname) < 0)
-		seq_printf(m, "<%p>", (void *)addr);
+		seq_printf(m, "<%pK>", (void *)addr);
 	else
 		seq_printf(m, "%s", symname);
+#endif
 }
 
 static int tstats_show(struct seq_file *m, void *v)
@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *
 
 	seq_puts(m, "Timer Stats Version: v0.3\n");
 	seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
-	if (atomic_read(&overflow_count))
-		seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
+	if (atomic_read_unchecked(&overflow_count))
+		seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
 	seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
 
 	for (i = 0; i < nr_entries; i++) {
@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
 {
 	struct proc_dir_entry *pe;
 
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+	pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
+#else
 	pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
+#endif
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff -ruNp linux-3.13.11/kernel/time.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time.c
--- linux-3.13.11/kernel/time.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/time.c	2014-07-09 12:00:15.000000000 +0200
@@ -37,6 +37,7 @@
 #include <linux/fs.h>
 #include <linux/math64.h>
 #include <linux/ptrace.h>
+#include <linux/vs_time.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -92,7 +93,7 @@ SYSCALL_DEFINE1(stime, time_t __user *,
 	if (err)
 		return err;
 
-	do_settimeofday(&tv);
+	vx_settimeofday(&tv);
 	return 0;
 }
 
@@ -172,6 +173,11 @@ int do_sys_settimeofday(const struct tim
 		return error;
 
 	if (tz) {
+		/* we log in do_settimeofday called below, so don't log twice
+		*/
+		if (!tv)
+			gr_log_timechange();
+
 		sys_tz = *tz;
 		update_vsyscall_tz();
 		if (firsttime) {
@@ -181,7 +187,7 @@ int do_sys_settimeofday(const struct tim
 		}
 	}
 	if (tv)
-		return do_settimeofday(tv);
+		return vx_settimeofday(tv);
 	return 0;
 }
 
diff -ruNp linux-3.13.11/kernel/timer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/timer.c
--- linux-3.13.11/kernel/timer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/timer.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,6 +42,10 @@
 #include <linux/sched/sysctl.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/vs_base.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1366,7 +1370,7 @@ void update_process_times(int user_tick)
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(void)
 {
 	struct tvec_base *base = __this_cpu_read(tvec_bases);
 
@@ -1429,7 +1433,7 @@ static void process_timeout(unsigned lon
  *
  * In all cases the return value is guaranteed to be non-negative.
  */
-signed long __sched schedule_timeout(signed long timeout)
+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
 {
 	struct timer_list timer;
 	unsigned long expire;
diff -ruNp linux-3.13.11/kernel/trace/blktrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/blktrace.c
--- linux-3.13.11/kernel/trace/blktrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/blktrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct f
 	struct blk_trace *bt = filp->private_data;
 	char buf[16];
 
-	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+	snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
 
 	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
 }
@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
 		return 1;
 
 	bt = buf->chan->private_data;
-	atomic_inc(&bt->dropped);
+	atomic_inc_unchecked(&bt->dropped);
 	return 0;
 }
 
@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
 
 	bt->dir = dir;
 	bt->dev = dev;
-	atomic_set(&bt->dropped, 0);
+	atomic_set_unchecked(&bt->dropped, 0);
 	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
diff -ruNp linux-3.13.11/kernel/trace/ftrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/ftrace.c
--- linux-3.13.11/kernel/trace/ftrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/ftrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -1978,12 +1978,17 @@ ftrace_code_disable(struct module *mod,
 	if (unlikely(ftrace_disabled))
 		return 0;
 
+	ret = ftrace_arch_code_modify_prepare();
+	FTRACE_WARN_ON(ret);
+	if (ret)
+		return 0;
+
 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
 	if (ret) {
 		ftrace_bug(ret, ip);
-		return 0;
 	}
-	return 1;
+	return ret ? 0 : 1;
 }
 
 /*
@@ -4190,8 +4195,10 @@ static int ftrace_process_locs(struct mo
 	if (!count)
 		return 0;
 
+	pax_open_kernel();
 	sort(start, count, sizeof(*start),
 	     ftrace_cmp_ips, ftrace_swap_ips);
+	pax_close_kernel();
 
 	start_pg = ftrace_allocate_pages(count);
 	if (!start_pg)
@@ -4922,8 +4929,6 @@ ftrace_enable_sysctl(struct ctl_table *t
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static int ftrace_graph_active;
-static struct notifier_block ftrace_suspend_notifier;
-
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 {
 	return 0;
@@ -5099,6 +5104,10 @@ static void update_function_graph_func(v
 		ftrace_graph_entry = ftrace_graph_entry_test;
 }
 
+static struct notifier_block ftrace_suspend_notifier = {
+	.notifier_call = ftrace_suspend_notifier_call
+};
+
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 			trace_func_graph_ent_t entryfunc)
 {
@@ -5112,7 +5121,6 @@ int register_ftrace_graph(trace_func_gra
 		goto out;
 	}
 
-	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
 	register_pm_notifier(&ftrace_suspend_notifier);
 
 	ftrace_graph_active++;
diff -ruNp linux-3.13.11/kernel/trace/ring_buffer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/ring_buffer.c
--- linux-3.13.11/kernel/trace/ring_buffer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/ring_buffer.c	2014-07-09 12:00:15.000000000 +0200
@@ -352,9 +352,9 @@ struct buffer_data_page {
  */
 struct buffer_page {
 	struct list_head list;		/* list of buffer pages */
-	local_t		 write;		/* index for next write */
+	local_unchecked_t	 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
-	local_t		 entries;	/* entries on this page */
+	local_unchecked_t	 entries;	/* entries on this page */
 	unsigned long	 real_end;	/* real end of data */
 	struct buffer_data_page *page;	/* Actual data page */
 };
@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
 	unsigned long			last_overrun;
 	local_t				entries_bytes;
 	local_t				entries;
-	local_t				overrun;
-	local_t				commit_overrun;
+	local_unchecked_t		overrun;
+	local_unchecked_t		commit_overrun;
 	local_t				dropped_events;
 	local_t				committing;
 	local_t				commits;
@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ri
 	 *
 	 * We add a counter to the write field to denote this.
 	 */
-	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
-	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
+	old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
+	old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
 
 	/*
 	 * Just make sure we have seen our old_write and synchronize
@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ri
 		 * cmpxchg to only update if an interrupt did not already
 		 * do it for us. If the cmpxchg fails, we don't care.
 		 */
-		(void)local_cmpxchg(&next_page->write, old_write, val);
-		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
+		(void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
+		(void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
 
 		/*
 		 * No need to worry about races with clearing out the commit.
@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buf
 
 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
 {
-	return local_read(&bpage->entries) & RB_WRITE_MASK;
+	return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
 }
 
 static inline unsigned long rb_page_write(struct buffer_page *bpage)
 {
-	return local_read(&bpage->write) & RB_WRITE_MASK;
+	return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
 }
 
 static int
@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_c
 			 * bytes consumed in ring buffer from here.
 			 * Increment overrun to account for the lost events.
 			 */
-			local_add(page_entries, &cpu_buffer->overrun);
+			local_add_unchecked(page_entries, &cpu_buffer->overrun);
 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 		}
 
@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_p
 		 * it is our responsibility to update
 		 * the counters.
 		 */
-		local_add(entries, &cpu_buffer->overrun);
+		local_add_unchecked(entries, &cpu_buffer->overrun);
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
 		/*
@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu
 		if (tail == BUF_PAGE_SIZE)
 			tail_page->real_end = 0;
 
-		local_sub(length, &tail_page->write);
+		local_sub_unchecked(length, &tail_page->write);
 		return;
 	}
 
@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu
 		rb_event_set_padding(event);
 
 		/* Set the write back to the previous setting */
-		local_sub(length, &tail_page->write);
+		local_sub_unchecked(length, &tail_page->write);
 		return;
 	}
 
@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu
 
 	/* Set write to end of buffer */
 	length = (tail + length) - BUF_PAGE_SIZE;
-	local_sub(length, &tail_page->write);
+	local_sub_unchecked(length, &tail_page->write);
 }
 
 /*
@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu
 	 * about it.
 	 */
 	if (unlikely(next_page == commit_page)) {
-		local_inc(&cpu_buffer->commit_overrun);
+		local_inc_unchecked(&cpu_buffer->commit_overrun);
 		goto out_reset;
 	}
 
@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu
 				      cpu_buffer->tail_page) &&
 				     (cpu_buffer->commit_page ==
 				      cpu_buffer->reader_page))) {
-				local_inc(&cpu_buffer->commit_overrun);
+				local_inc_unchecked(&cpu_buffer->commit_overrun);
 				goto out_reset;
 			}
 		}
@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per
 		length += RB_LEN_TIME_EXTEND;
 
 	tail_page = cpu_buffer->tail_page;
-	write = local_add_return(length, &tail_page->write);
+	write = local_add_return_unchecked(length, &tail_page->write);
 
 	/* set write to only the index of the write */
 	write &= RB_WRITE_MASK;
@@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per
 	kmemcheck_annotate_bitfield(event, bitfield);
 	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
 
-	local_inc(&tail_page->entries);
+	local_inc_unchecked(&tail_page->entries);
 
 	/*
 	 * If this is the first commit on the page, then update
@@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per
 
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
-			local_read(&bpage->write) & ~RB_WRITE_MASK;
+			local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
 		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
@@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per
 		 */
 		old_index += write_mask;
 		new_index += write_mask;
-		index = local_cmpxchg(&bpage->write, old_index, new_index);
+		index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
 		if (index == old_index) {
 			/* update counters */
 			local_sub(event_length, &cpu_buffer->entries_bytes);
@@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_pe
 
 	/* Do the likely case first */
 	if (likely(bpage->page == (void *)addr)) {
-		local_dec(&bpage->entries);
+		local_dec_unchecked(&bpage->entries);
 		return;
 	}
 
@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_pe
 	start = bpage;
 	do {
 		if (bpage->page == (void *)addr) {
-			local_dec(&bpage->entries);
+			local_dec_unchecked(&bpage->entries);
 			return;
 		}
 		rb_inc_page(cpu_buffer, &bpage);
@@ -3146,7 +3146,7 @@ static inline unsigned long
 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return local_read(&cpu_buffer->entries) -
-		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+		(local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
 }
 
 /**
@@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(st
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = local_read(&cpu_buffer->overrun);
+	ret = local_read_unchecked(&cpu_buffer->overrun);
 
 	return ret;
 }
@@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ri
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = local_read(&cpu_buffer->commit_overrun);
+	ret = local_read_unchecked(&cpu_buffer->commit_overrun);
 
 	return ret;
 }
@@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struc
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		overruns += local_read(&cpu_buffer->overrun);
+		overruns += local_read_unchecked(&cpu_buffer->overrun);
 	}
 
 	return overruns;
@@ -3519,8 +3519,8 @@ rb_get_reader_page(struct ring_buffer_pe
 	/*
 	 * Reset the reader page to size zero.
 	 */
-	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->entries, 0);
+	local_set_unchecked(&cpu_buffer->reader_page->write, 0);
+	local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->real_end = 0;
 
@@ -3554,7 +3554,7 @@ rb_get_reader_page(struct ring_buffer_pe
 	 * want to compare with the last_overrun.
 	 */
 	smp_mb();
-	overwrite = local_read(&(cpu_buffer->overrun));
+	overwrite = local_read_unchecked(&(cpu_buffer->overrun));
 
 	/*
 	 * Here's the tricky part.
@@ -4124,8 +4124,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu
 
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
-	local_set(&cpu_buffer->head_page->write, 0);
-	local_set(&cpu_buffer->head_page->entries, 0);
+	local_set_unchecked(&cpu_buffer->head_page->write, 0);
+	local_set_unchecked(&cpu_buffer->head_page->entries, 0);
 	local_set(&cpu_buffer->head_page->page->commit, 0);
 
 	cpu_buffer->head_page->read = 0;
@@ -4135,14 +4135,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
-	local_set(&cpu_buffer->reader_page->write, 0);
-	local_set(&cpu_buffer->reader_page->entries, 0);
+	local_set_unchecked(&cpu_buffer->reader_page->write, 0);
+	local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->read = 0;
 
 	local_set(&cpu_buffer->entries_bytes, 0);
-	local_set(&cpu_buffer->overrun, 0);
-	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set_unchecked(&cpu_buffer->overrun, 0);
+	local_set_unchecked(&cpu_buffer->commit_overrun, 0);
 	local_set(&cpu_buffer->dropped_events, 0);
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
@@ -4547,8 +4547,8 @@ int ring_buffer_read_page(struct ring_bu
 		rb_init_page(bpage);
 		bpage = reader->page;
 		reader->page = *data_page;
-		local_set(&reader->write, 0);
-		local_set(&reader->entries, 0);
+		local_set_unchecked(&reader->write, 0);
+		local_set_unchecked(&reader->entries, 0);
 		reader->read = 0;
 		*data_page = bpage;
 
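The ring-buffer conversions above switch counters that are expected to wrap or to carry extra state in their upper bits (write, entries, overrun, commit_overrun) to the *_unchecked twins, so the PaX REFCOUNT overflow detection does not trip on them; the same pattern recurs below for atomic64_unchecked_t in trace_clock.c and atomic_unchecked_t in trace_mmiotrace.c. A minimal userspace sketch of the idea, with stand-in types (assumptions for illustration; the real definitions are per-CPU atomic operations in the PaX headers):

#include <stdio.h>

/* Stand-ins: the "checked" counter would trap on overflow under the
 * (assumed) REFCOUNT feature; the unchecked twin is allowed to wrap. */
typedef struct { long counter; } local_t;
typedef struct { long counter; } local_unchecked_t;

static long local_add_return_unchecked(long i, local_unchecked_t *l)
{
	l->counter += i;	/* no overflow check: wrapping is intended */
	return l->counter;
}

static long local_read_unchecked(local_unchecked_t *l)
{
	return l->counter;
}

int main(void)
{
	local_unchecked_t write = { 0 };
	long mask = (1L << 20) - 1;	/* illustrative index mask, standing in for RB_WRITE_MASK */

	/* mirrors rb_tail_page_update(): stuff an interrupt counter into the
	 * bits above the index mask without tripping an overflow detector */
	local_add_return_unchecked(mask + 1, &write);
	printf("write index = %ld\n", local_read_unchecked(&write) & mask);
	return 0;
}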
diff -ruNp linux-3.13.11/kernel/trace/trace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace.c
--- linux-3.13.11/kernel/trace/trace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace.c	2014-07-09 12:00:15.000000000 +0200
@@ -3352,7 +3352,7 @@ int trace_keep_overwrite(struct tracer *
 	return 0;
 }
 
-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
diff -ruNp linux-3.13.11/kernel/trace/trace.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace.h
--- linux-3.13.11/kernel/trace/trace.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace.h	2014-07-09 12:00:15.000000000 +0200
@@ -1040,7 +1040,7 @@ extern const char *__stop___tracepoint_s
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
 
 /*
  * Normal trace_printk() and friends allocates special buffers
diff -ruNp linux-3.13.11/kernel/trace/trace_clock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_clock.c
--- linux-3.13.11/kernel/trace/trace_clock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_clock.c	2014-07-09 12:00:15.000000000 +0200
@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
 	return now;
 }
 
-static atomic64_t trace_counter;
+static atomic64_unchecked_t trace_counter;
 
 /*
  * trace_clock_counter(): simply an atomic counter.
@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
  */
 u64 notrace trace_clock_counter(void)
 {
-	return atomic64_add_return(1, &trace_counter);
+	return atomic64_inc_return_unchecked(&trace_counter);
 }
diff -ruNp linux-3.13.11/kernel/trace/trace_events.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_events.c
--- linux-3.13.11/kernel/trace/trace_events.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_events.c	2014-07-09 12:00:15.000000000 +0200
@@ -1675,7 +1675,6 @@ __trace_early_add_new_event(struct ftrac
 	return 0;
 }
 
-struct ftrace_module_file_ops;
 static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
diff -ruNp linux-3.13.11/kernel/trace/trace_mmiotrace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_mmiotrace.c
--- linux-3.13.11/kernel/trace/trace_mmiotrace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_mmiotrace.c	2014-07-09 12:00:15.000000000 +0200
@@ -24,7 +24,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
-static atomic_t dropped_count;
+static atomic_unchecked_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = atomic_xchg(&dropped_count, 0);
+	unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
 
 	if (over > prev_overruns)
@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
-		atomic_inc(&dropped_count);
+		atomic_inc_unchecked(&dropped_count);
 		return;
 	}
 	entry	= ring_buffer_event_data(event);
@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
 	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
 					  sizeof(*entry), 0, pc);
 	if (!event) {
-		atomic_inc(&dropped_count);
+		atomic_inc_unchecked(&dropped_count);
 		return;
 	}
 	entry	= ring_buffer_event_data(event);
diff -ruNp linux-3.13.11/kernel/trace/trace_output.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_output.c
--- linux-3.13.11/kernel/trace/trace_output.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_output.c	2014-07-09 12:00:15.000000000 +0200
@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s,
 
 	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
 	if (!IS_ERR(p)) {
-		p = mangle_path(s->buffer + s->len, p, "\n");
+		p = mangle_path(s->buffer + s->len, p, "\n\\");
 		if (p) {
 			s->len = p - s->buffer;
 			return 1;
@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_e
 			goto out;
 	}
 
+	pax_open_kernel();
 	if (event->funcs->trace == NULL)
-		event->funcs->trace = trace_nop_print;
+		*(void **)&event->funcs->trace = trace_nop_print;
 	if (event->funcs->raw == NULL)
-		event->funcs->raw = trace_nop_print;
+		*(void **)&event->funcs->raw = trace_nop_print;
 	if (event->funcs->hex == NULL)
-		event->funcs->hex = trace_nop_print;
+		*(void **)&event->funcs->hex = trace_nop_print;
 	if (event->funcs->binary == NULL)
-		event->funcs->binary = trace_nop_print;
+		*(void **)&event->funcs->binary = trace_nop_print;
+	pax_close_kernel();
 
 	key = event->type & (EVENT_HASHSIZE - 1);
 
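register_ftrace_event() above now installs the trace_nop_print defaults by taking the address of each callback and writing through a cast, inside a pax_open_kernel()/pax_close_kernel() window, because the funcs structure is treated as read-only (constified) data. A userspace sketch of the write-through idiom (the struct below is a made-up stand-in; in the kernel case the open/close pair is what actually makes the memory writable):

#include <stdio.h>

struct event_functions {
	int (* const trace)(void);	/* const stands in for constification */
};

static int trace_nop_print(void) { return 0; }

int main(void)
{
	struct event_functions funcs = { .trace = NULL };

	/* bypass the const qualifier; this works here only because the
	 * object itself is writable, just as the pax_open_kernel() window
	 * makes the kernel structure writable in the hunk above */
	if (funcs.trace == NULL)
		*(void **)&funcs.trace = trace_nop_print;

	printf("trace() -> %d\n", funcs.trace());
	return 0;
}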
diff -ruNp linux-3.13.11/kernel/trace/trace_stack.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_stack.c
--- linux-3.13.11/kernel/trace/trace_stack.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/trace/trace_stack.c	2014-07-09 12:00:15.000000000 +0200
@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned l
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(stack))
+	if (!object_starts_on_stack(stack))
 		return;
 
 	local_irq_save(flags);
diff -ruNp linux-3.13.11/kernel/user_namespace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/user_namespace.c
--- linux-3.13.11/kernel/user_namespace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/user_namespace.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,7 @@
 #include <linux/ctype.h>
 #include <linux/projid.h>
 #include <linux/fs_struct.h>
+#include <linux/vserver/global.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
@@ -82,6 +83,21 @@ int create_user_ns(struct cred *new)
 	    !kgid_has_mapping(parent_ns, group))
 		return -EPERM;
 
+#ifdef CONFIG_GRKERNSEC
+	/*
+	 * This doesn't really inspire confidence:
+	 * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
+	 * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
+	 * Increases kernel attack surface in areas developers
+	 * previously cared little about ("low importance due
+	 * to requiring "root" capability")
+	 * To be removed when this code receives *proper* review
+	 */
+	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
+			!capable(CAP_SETGID))
+		return -EPERM;
+#endif
+
 	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
 	if (!ns)
 		return -ENOMEM;
@@ -94,6 +110,7 @@ int create_user_ns(struct cred *new)
 
 	atomic_set(&ns->count, 1);
 	/* Leave the new->user_ns reference with the new user namespace. */
+	atomic_inc(&vs_global_user_ns);
 	ns->parent = parent_ns;
 	ns->level = parent_ns->level + 1;
 	ns->owner = owner;
@@ -847,6 +864,8 @@ static void *userns_get(struct task_stru
 
 static void userns_put(void *ns)
 {
+	/* FIXME: maybe move into destroyer? */
+	atomic_dec(&vs_global_user_ns);
 	put_user_ns(ns);
 }
 
@@ -865,7 +884,7 @@ static int userns_install(struct nsproxy
 	if (atomic_read(&current->mm->mm_users) > 1)
 		return -EINVAL;
 
-	if (current->fs->users != 1)
+	if (atomic_read(&current->fs->users) != 1)
 		return -EINVAL;
 
 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
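With the CONFIG_GRKERNSEC hunk above compiled in, create_user_ns() refuses callers that lack CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, so unprivileged user-namespace creation is disabled. A quick userspace check (plain C; it assumes a kernel with CONFIG_USER_NS and the behaviour described by the hunk, on which the unshare() below is expected to fail with EPERM for a regular user):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	/* on a patched kernel, non-root should see EPERM here */
	if (unshare(CLONE_NEWUSER) == -1)
		printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
	else
		printf("user namespace created\n");
	return 0;
}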
diff -ruNp linux-3.13.11/kernel/utsname.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/utsname.c
--- linux-3.13.11/kernel/utsname.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/utsname.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,14 +16,17 @@
 #include <linux/slab.h>
 #include <linux/user_namespace.h>
 #include <linux/proc_ns.h>
+#include <linux/vserver/global.h>
 
 static struct uts_namespace *create_uts_ns(void)
 {
 	struct uts_namespace *uts_ns;
 
 	uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
-	if (uts_ns)
+	if (uts_ns) {
 		kref_init(&uts_ns->kref);
+		atomic_inc(&vs_global_uts_ns);
+	}
 	return uts_ns;
 }
 
@@ -85,6 +88,7 @@ void free_uts_ns(struct kref *kref)
 	ns = container_of(kref, struct uts_namespace, kref);
 	put_user_ns(ns->user_ns);
 	proc_free_inum(ns->proc_inum);
+	atomic_dec(&vs_global_uts_ns);
 	kfree(ns);
 }
 
diff -ruNp linux-3.13.11/kernel/utsname_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/utsname_sysctl.c
--- linux-3.13.11/kernel/utsname_sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/utsname_sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, in
 static int proc_do_uts_string(ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct ctl_table uts_table;
+	ctl_table_no_const uts_table;
 	int r;
 	memcpy(&uts_table, table, sizeof(uts_table));
 	uts_table.data = get_uts(table, write);
diff -ruNp linux-3.13.11/kernel/vserver/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/Kconfig
--- linux-3.13.11/kernel/vserver/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,230 @@
+#
+# Linux VServer configuration
+#
+
+menu "Linux VServer"
+
+config	VSERVER_AUTO_LBACK
+	bool    "Automatically Assign Loopback IP"
+	default y
+	help
+	  Automatically assign a guest specific loopback
+	  IP and add it to the kernel network stack on
+	  startup.
+
+config	VSERVER_AUTO_SINGLE
+	bool	"Automatic Single IP Special Casing"
+	default n
+	help
+	  This allows network contexts with a single IP to
+	  automatically remap 0.0.0.0 bindings to that IP,
+	  avoiding further network checks and improving
+	  performance.
+
+	  (note: such guests do not allow changing the IP
+	   on the fly and do not show loopback addresses)
+
+config	VSERVER_COWBL
+	bool	"Enable COW Immutable Link Breaking"
+	default y
+	help
+	  This enables the COW (Copy-On-Write) link break code.
+	  It allows you to treat unified files like normal files
+	  when writing to them (which will implicitly break the
+	  link and create a copy of the unified file)
+
+config	VSERVER_VTIME
+	bool	"Enable Virtualized Guest Time (EXPERIMENTAL)"
+	default n
+	help
+	  This enables per guest time offsets to allow for
+	  adjusting the system clock individually per guest.
+	  This adds some overhead to the time functions and
+	  therefore should not be enabled without good reason.
+
+config	VSERVER_DEVICE
+	bool	"Enable Guest Device Mapping (EXPERIMENTAL)"
+	default n
+	help
+	  This enables generic device remapping.
+
+config	VSERVER_PROC_SECURE
+	bool	"Enable Proc Security"
+	depends on PROC_FS
+	default y
+	help
+	  This configures ProcFS security to initially hide
+	  non-process entries for all contexts except the main and
+	  spectator context (i.e. for all guests), which is a secure
+	  default.
+
+	  (note: on 1.2x the entries were visible by default)
+
+choice
+	prompt	"Persistent Inode Tagging"
+	default	TAGGING_ID24
+	help
+	  This adds persistent context information to filesystems
+	  mounted with the tagxid option. Tagging is a requirement
+	  for per-context disk limits and per-context quota.
+
+
+config	TAGGING_NONE
+	bool	"Disabled"
+	help
+	  do not store per-context information in inodes.
+
+config	TAGGING_UID16
+	bool	"UID16/GID32"
+	help
+	  reduces UID to 16 bit, but leaves GID at 32 bit.
+
+config	TAGGING_GID16
+	bool	"UID32/GID16"
+	help
+	  reduces GID to 16 bit, but leaves UID at 32 bit.
+
+config	TAGGING_ID24
+	bool	"UID24/GID24"
+	help
+	  uses the upper 8 bits of UID and GID for XID tagging,
+	  which leaves 24 bits for UID/GID each and should be
+	  more than sufficient for normal use.
+
+config	TAGGING_INTERN
+	bool	"UID32/GID32"
+	help
+	  this uses otherwise reserved inode fields in the on
+	  disk representation, which limits the use to a few
+	  filesystems (currently ext2 and ext3)
+
+endchoice
+
+config	TAG_NFSD
+	bool	"Tag NFSD User Auth and Files"
+	default n
+	help
+	  Enable this if you do want the in-kernel NFS
+	  Server to use the tagging specified above.
+	  (will require patched clients too)
+
+config	VSERVER_PRIVACY
+	bool	"Honor Privacy Aspects of Guests"
+	default n
+	help
+	  When enabled, most context checks will disallow
+	  access to structures assigned to a specific context,
+	  like ptys or loop devices.
+
+config	VSERVER_CONTEXTS
+	int	"Maximum number of Contexts (1-65533)"	if EMBEDDED
+	range 1 65533
+	default "768"	if 64BIT
+	default "256"
+	help
+	  This setting will optimize certain data structures
+	  and memory allocations according to the expected
+	  maximum.
+
+	  note: this is not a strict upper limit.
+
+config	VSERVER_WARN
+	bool	"VServer Warnings"
+	default y
+	help
+	  This enables various runtime warnings, which will
+	  notify about potential manipulation attempts or
+	  resource shortage. It is generally considered to
+	  be a good idea to have that enabled.
+
+config	VSERVER_WARN_DEVPTS
+	bool	"VServer DevPTS Warnings"
+	depends on VSERVER_WARN
+	default y
+	help
+	  This enables DevPTS related warnings, issued when a
+	  process inside a context tries to lookup or access
+	  a dynamic pts from the host or a different context.
+
+config	VSERVER_DEBUG
+	bool	"VServer Debugging Code"
+	default n
+	help
+	  Set this to yes if you want to be able to activate
+	  debugging output at runtime. It adds a very small
+	  overhead to all vserver related functions and
+	  increases the kernel size by about 20k.
+
+config	VSERVER_HISTORY
+	bool	"VServer History Tracing"
+	depends on VSERVER_DEBUG
+	default n
+	help
+	  Set this to yes if you want to record the history of
+	  linux-vserver activities, so they can be replayed in
+	  the event of a kernel panic or oops.
+
+config	VSERVER_HISTORY_SIZE
+	int	"Per-CPU History Size (32-65536)"
+	depends on VSERVER_HISTORY
+	range 32 65536
+	default 64
+	help
+	  This allows you to specify the number of entries in
+	  the per-CPU history buffer.
+
+config	VSERVER_EXTRA_MNT_CHECK
+	bool	"Extra Checks for Reachability"
+	default n
+	help
+	  Set this to yes if you want to do extra checks for
+	  vfsmount reachability in the proc filesystem code.
+	  This shouldn't be required on any setup utilizing
+	  mnt namespaces.
+
+choice
+	prompt	"Quotes used in debug and warn messages"
+	default	QUOTES_ISO8859
+
+config	QUOTES_ISO8859
+	bool	"Extended ASCII (ISO 8859) angle quotes"
+	help
+	  This uses the extended ASCII characters \xbb
+	  and \xab for quoting file and process names.
+
+config	QUOTES_UTF8
+	bool	"UTF-8 angle quotes"
+	help
+	  This uses the UTF-8 sequences for angle
+	  quotes to quote file and process names.
+
+config	QUOTES_ASCII
+	bool	"ASCII single quotes"
+	help
+	  This uses the ASCII single quote character
+	  (\x27) to quote file and process names.
+
+endchoice
+
+endmenu
+
+
+config	VSERVER
+	bool
+	default y
+	select NAMESPACES
+	select UTS_NS
+	select IPC_NS
+#	select USER_NS
+	select SYSVIPC
+
+config	VSERVER_SECURITY
+	bool
+	depends on SECURITY
+	default y
+	select SECURITY_CAPABILITIES
+
+config	VSERVER_DISABLED
+	bool
+	default n
+
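The TAGGING_ID24 option above stores the context tag in the upper 8 bits of the on-disk UID and GID, leaving 24 bits for the IDs themselves. A small illustration of that packing (the helper names and the exact bit split are assumptions for illustration; the real layout is defined by the tag macros in the vserver headers):

#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0x00FFFFFFu	/* lower 24 bits: the real uid/gid */

/* put xid bits 0-7 into the uid's top byte and bits 8-15 into the gid's */
static void tag_pair(uint32_t *uid, uint32_t *gid, uint32_t xid)
{
	*uid = (*uid & ID_MASK) | ((xid & 0xFF) << 24);
	*gid = (*gid & ID_MASK) | (((xid >> 8) & 0xFF) << 24);
}

static uint32_t untag_id(uint32_t id)
{
	return id & ID_MASK;
}

static uint32_t xid_of(uint32_t uid, uint32_t gid)
{
	return (uid >> 24) | ((gid >> 24) << 8);
}

int main(void)
{
	uint32_t uid = 1000, gid = 1000;

	tag_pair(&uid, &gid, 42);	/* uid/gid 1000 owned by context 42 */
	printf("on-disk uid=%#x gid=%#x -> uid=%u gid=%u xid=%u\n",
	       uid, gid, untag_id(uid), untag_id(gid), xid_of(uid, gid));
	return 0;
}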
diff -ruNp linux-3.13.11/kernel/vserver/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/Makefile
--- linux-3.13.11/kernel/vserver/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux vserver routines.
+#
+
+
+obj-y		+= vserver.o
+
+vserver-y	:= switch.o context.o space.o sched.o network.o inode.o \
+		   limit.o cvirt.o cacct.o signal.o helper.o init.o \
+		   dlimit.o tag.o
+
+vserver-$(CONFIG_INET) += inet.o
+vserver-$(CONFIG_PROC_FS) += proc.o
+vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o
+vserver-$(CONFIG_VSERVER_HISTORY) += history.o
+vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o
+vserver-$(CONFIG_VSERVER_DEVICE) += device.o
+
diff -ruNp linux-3.13.11/kernel/vserver/cacct.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct.c
--- linux-3.13.11/kernel/vserver/cacct.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,42 @@
+/*
+ *  linux/kernel/vserver/cacct.c
+ *
+ *  Virtual Server: Context Accounting
+ *
+ *  Copyright (C) 2006-2007 Herbert Pötzl
+ *
+ *  V0.01  added accounting stats
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/vs_context.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/cacct_int.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+
+int vc_sock_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_sock_stat_v0 vc_data;
+	int j, field;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	field = vc_data.field;
+	if ((field < 0) || (field >= VXA_SOCK_SIZE))
+		return -EINVAL;
+
+	for (j = 0; j < 3; j++) {
+		vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j);
+		vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/cacct_init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct_init.h
--- linux-3.13.11/kernel/vserver/cacct_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct_init.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,25 @@
+
+
+static inline void vx_info_init_cacct(struct _vx_cacct *cacct)
+{
+	int i, j;
+
+
+	for (i = 0; i < VXA_SOCK_SIZE; i++) {
+		for (j = 0; j < 3; j++) {
+			atomic_long_set(&cacct->sock[i][j].count, 0);
+			atomic_long_set(&cacct->sock[i][j].total, 0);
+		}
+	}
+	for (i = 0; i < 8; i++)
+		atomic_set(&cacct->slab[i], 0);
+	for (i = 0; i < 5; i++)
+		for (j = 0; j < 4; j++)
+			atomic_set(&cacct->page[i][j], 0);
+}
+
+static inline void vx_info_exit_cacct(struct _vx_cacct *cacct)
+{
+	return;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/cacct_proc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct_proc.h
--- linux-3.13.11/kernel/vserver/cacct_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cacct_proc.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,53 @@
+#ifndef _VX_CACCT_PROC_H
+#define _VX_CACCT_PROC_H
+
+#include <linux/vserver/cacct_int.h>
+
+
+#define VX_SOCKA_TOP	\
+	"Type\t    recv #/bytes\t\t   send #/bytes\t\t    fail #/bytes\n"
+
+static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer)
+{
+	int i, j, length = 0;
+	static char *type[VXA_SOCK_SIZE] = {
+		"UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER"
+	};
+
+	length += sprintf(buffer + length, VX_SOCKA_TOP);
+	for (i = 0; i < VXA_SOCK_SIZE; i++) {
+		length += sprintf(buffer + length, "%s:", type[i]);
+		for (j = 0; j < 3; j++) {
+			length += sprintf(buffer + length,
+				"\t%10lu/%-10lu",
+				vx_sock_count(cacct, i, j),
+				vx_sock_total(cacct, i, j));
+		}
+		buffer[length++] = '\n';
+	}
+
+	length += sprintf(buffer + length, "\n");
+	length += sprintf(buffer + length,
+		"slab:\t %8u %8u %8u %8u\n",
+		atomic_read(&cacct->slab[1]),
+		atomic_read(&cacct->slab[4]),
+		atomic_read(&cacct->slab[0]),
+		atomic_read(&cacct->slab[2]));
+
+	length += sprintf(buffer + length, "\n");
+	for (i = 0; i < 5; i++) {
+		length += sprintf(buffer + length,
+			"page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i,
+			atomic_read(&cacct->page[i][0]),
+			atomic_read(&cacct->page[i][1]),
+			atomic_read(&cacct->page[i][2]),
+			atomic_read(&cacct->page[i][3]),
+			atomic_read(&cacct->page[i][4]),
+			atomic_read(&cacct->page[i][5]),
+			atomic_read(&cacct->page[i][6]),
+			atomic_read(&cacct->page[i][7]));
+	}
+	return length;
+}
+
+#endif	/* _VX_CACCT_PROC_H */
diff -ruNp linux-3.13.11/kernel/vserver/context.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/context.c
--- linux-3.13.11/kernel/vserver/context.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/context.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,1119 @@
+/*
+ *  linux/kernel/vserver/context.c
+ *
+ *  Virtual Server: Context Support
+ *
+ *  Copyright (C) 2003-2011  Herbert Pötzl
+ *
+ *  V0.01  context helper
+ *  V0.02  vx_ctx_kill syscall command
+ *  V0.03  replaced context_info calls
+ *  V0.04  redesign of struct (de)alloc
+ *  V0.05  rlimit basic implementation
+ *  V0.06  task_xid and info commands
+ *  V0.07  context flags and caps
+ *  V0.08  switch to RCU based hash
+ *  V0.09  revert to non RCU for now
+ *  V0.10  and back to working RCU hash
+ *  V0.11  and back to locking again
+ *  V0.12  referenced context store
+ *  V0.13  separate per cpu data
+ *  V0.14  changed vcmds to vxi arg
+ *  V0.15  added context stat
+ *  V0.16  have __create claim() the vxi
+ *  V0.17  removed older and legacy stuff
+ *  V0.18  added user credentials
+ *  V0.19  added warn mask
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/security.h>
+#include <linux/pid_namespace.h>
+#include <linux/capability.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/network.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_int.h>
+#include <linux/vserver/space.h>
+#include <linux/init_task.h>
+#include <linux/fs_struct.h>
+#include <linux/cred.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/context_cmd.h>
+
+#include "cvirt_init.h"
+#include "cacct_init.h"
+#include "limit_init.h"
+#include "sched_init.h"
+
+
+atomic_t vx_global_ctotal	= ATOMIC_INIT(0);
+atomic_t vx_global_cactive	= ATOMIC_INIT(0);
+
+
+/*	now inactive context structures */
+
+static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
+
+static DEFINE_SPINLOCK(vx_info_inactive_lock);
+
+
+/*	__alloc_vx_info()
+
+	* allocate an initialized vx_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct vx_info *__alloc_vx_info(vxid_t xid)
+{
+	struct vx_info *new = NULL;
+	int cpu, index;
+
+	vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct vx_info));
+#ifdef CONFIG_SMP
+	new->ptr_pc = alloc_percpu(struct _vx_info_pc);
+	if (!new->ptr_pc)
+		goto error;
+#endif
+	new->vx_id = xid;
+	INIT_HLIST_NODE(&new->vx_hlist);
+	atomic_set(&new->vx_usecnt, 0);
+	atomic_set(&new->vx_tasks, 0);
+	new->vx_parent = NULL;
+	new->vx_state = 0;
+	init_waitqueue_head(&new->vx_wait);
+
+	/* prepare reaper */
+	get_task_struct(init_pid_ns.child_reaper);
+	new->vx_reaper = init_pid_ns.child_reaper;
+	new->vx_badness_bias = 0;
+
+	/* rest of init goes here */
+	vx_info_init_limit(&new->limit);
+	vx_info_init_sched(&new->sched);
+	vx_info_init_cvirt(&new->cvirt);
+	vx_info_init_cacct(&new->cacct);
+
+	/* per cpu data structures */
+	for_each_possible_cpu(cpu) {
+		vx_info_init_sched_pc(
+			&vx_per_cpu(new, sched_pc, cpu), cpu);
+		vx_info_init_cvirt_pc(
+			&vx_per_cpu(new, cvirt_pc, cpu), cpu);
+	}
+
+	new->vx_flags = VXF_INIT_SET;
+	new->vx_bcaps = CAP_FULL_SET;	// maybe ~CAP_SETPCAP
+	new->vx_ccaps = 0;
+	new->vx_umask = 0;
+	new->vx_wmask = 0;
+
+	new->reboot_cmd = 0;
+	new->exit_code = 0;
+
+	// preconfig spaces
+	for (index = 0; index < VX_SPACES; index++) {
+		struct _vx_space *space = &new->space[index];
+
+		// filesystem
+		spin_lock(&init_fs.lock);
+		atomic_inc(&init_fs.users);
+		spin_unlock(&init_fs.lock);
+		space->vx_fs = &init_fs;
+
+		/* FIXME: do we want defaults? */
+		// space->vx_real_cred = 0;
+		// space->vx_cred = 0;
+	}
+
+
+	vxdprintk(VXD_CBIT(xid, 0),
+		"alloc_vx_info(%d) = %p", xid, new);
+	vxh_alloc_vx_info(new);
+	atomic_inc(&vx_global_ctotal);
+	return new;
+#ifdef CONFIG_SMP
+error:
+	kfree(new);
+	return 0;
+#endif
+}
+
+/*	__dealloc_vx_info()
+
+	* final disposal of vx_info				*/
+
+static void __dealloc_vx_info(struct vx_info *vxi)
+{
+#ifdef	CONFIG_VSERVER_WARN
+	struct vx_info_save vxis;
+	int cpu;
+#endif
+	vxdprintk(VXD_CBIT(xid, 0),
+		"dealloc_vx_info(%p)", vxi);
+	vxh_dealloc_vx_info(vxi);
+
+#ifdef	CONFIG_VSERVER_WARN
+	enter_vx_info(vxi, &vxis);
+	vx_info_exit_limit(&vxi->limit);
+	vx_info_exit_sched(&vxi->sched);
+	vx_info_exit_cvirt(&vxi->cvirt);
+	vx_info_exit_cacct(&vxi->cacct);
+
+	for_each_possible_cpu(cpu) {
+		vx_info_exit_sched_pc(
+			&vx_per_cpu(vxi, sched_pc, cpu), cpu);
+		vx_info_exit_cvirt_pc(
+			&vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
+	}
+	leave_vx_info(&vxis);
+#endif
+
+	vxi->vx_id = -1;
+	vxi->vx_state |= VXS_RELEASED;
+
+#ifdef CONFIG_SMP
+	free_percpu(vxi->ptr_pc);
+#endif
+	kfree(vxi);
+	atomic_dec(&vx_global_ctotal);
+}
+
+static void __shutdown_vx_info(struct vx_info *vxi)
+{
+	struct nsproxy *nsproxy;
+	struct fs_struct *fs;
+	struct cred *cred;
+	int index, kill;
+
+	might_sleep();
+
+	vxi->vx_state |= VXS_SHUTDOWN;
+	vs_state_change(vxi, VSC_SHUTDOWN);
+
+	for (index = 0; index < VX_SPACES; index++) {
+		struct _vx_space *space = &vxi->space[index];
+
+		nsproxy = xchg(&space->vx_nsproxy, NULL);
+		if (nsproxy)
+			put_nsproxy(nsproxy);
+
+		fs = xchg(&space->vx_fs, NULL);
+		spin_lock(&fs->lock);
+		kill = !atomic_dec_return(&fs->users);
+		spin_unlock(&fs->lock);
+		if (kill)
+			free_fs_struct(fs);
+
+		cred = (struct cred *)xchg(&space->vx_cred, NULL);
+		if (cred)
+			abort_creds(cred);
+	}
+}
+
+/* exported stuff */
+
+void free_vx_info(struct vx_info *vxi)
+{
+	unsigned long flags;
+	unsigned index;
+
+	/* check for reference counts first */
+	BUG_ON(atomic_read(&vxi->vx_usecnt));
+	BUG_ON(atomic_read(&vxi->vx_tasks));
+
+	/* context must not be hashed */
+	BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+	/* context shutdown is mandatory */
+	BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
+
+	/* spaces check */
+	for (index = 0; index < VX_SPACES; index++) {
+		struct _vx_space *space = &vxi->space[index];
+
+		BUG_ON(space->vx_nsproxy);
+		BUG_ON(space->vx_fs);
+		// BUG_ON(space->vx_real_cred);
+		// BUG_ON(space->vx_cred);
+	}
+
+	spin_lock_irqsave(&vx_info_inactive_lock, flags);
+	hlist_del(&vxi->vx_hlist);
+	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+
+	__dealloc_vx_info(vxi);
+}
+
+
+/*	hash table for vx_info hash */
+
+#define VX_HASH_SIZE	13
+
+static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
+	{ [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
+
+static DEFINE_SPINLOCK(vx_info_hash_lock);
+
+
+static inline unsigned int __hashval(vxid_t xid)
+{
+	return (xid % VX_HASH_SIZE);
+}
+
+
+
+/*	__hash_vx_info()
+
+	* add the vxi to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_vx_info(struct vx_info *vxi)
+{
+	struct hlist_head *head;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	vxdprintk(VXD_CBIT(xid, 4),
+		"__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+	vxh_hash_vx_info(vxi);
+
+	/* context must not be hashed */
+	BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+	vxi->vx_state |= VXS_HASHED;
+	head = &vx_info_hash[__hashval(vxi->vx_id)];
+	hlist_add_head(&vxi->vx_hlist, head);
+	atomic_inc(&vx_global_cactive);
+}
+
+/*	__unhash_vx_info()
+
+	* remove the vxi from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_vx_info(struct vx_info *vxi)
+{
+	unsigned long flags;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	vxdprintk(VXD_CBIT(xid, 4),
+		"__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
+		atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
+	vxh_unhash_vx_info(vxi);
+
+	/* context must be hashed */
+	BUG_ON(!vx_info_state(vxi, VXS_HASHED));
+	/* but without tasks */
+	BUG_ON(atomic_read(&vxi->vx_tasks));
+
+	vxi->vx_state &= ~VXS_HASHED;
+	hlist_del_init(&vxi->vx_hlist);
+	spin_lock_irqsave(&vx_info_inactive_lock, flags);
+	hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
+	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+	atomic_dec(&vx_global_cactive);
+}
+
+
+/*	__lookup_vx_info()
+
+	* requires the hash_lock to be held
+	* doesn't increment the vx_refcnt			*/
+
+static inline struct vx_info *__lookup_vx_info(vxid_t xid)
+{
+	struct hlist_head *head = &vx_info_hash[__hashval(xid)];
+	struct hlist_node *pos;
+	struct vx_info *vxi;
+
+	vxd_assert_lock(&vx_info_hash_lock);
+	hlist_for_each(pos, head) {
+		vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+
+		if (vxi->vx_id == xid)
+			goto found;
+	}
+	vxi = NULL;
+found:
+	vxdprintk(VXD_CBIT(xid, 0),
+		"__lookup_vx_info(#%u): %p[#%u]",
+		xid, vxi, vxi ? vxi->vx_id : 0);
+	vxh_lookup_vx_info(vxi, xid);
+	return vxi;
+}
+
+
+/*	__create_vx_info()
+
+	* create the requested context
+	* get(), claim() and hash it				*/
+
+static struct vx_info *__create_vx_info(int id)
+{
+	struct vx_info *new, *vxi = NULL;
+
+	vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
+
+	if (!(new = __alloc_vx_info(id)))
+		return ERR_PTR(-ENOMEM);
+
+	/* required to make dynamic xids unique */
+	spin_lock(&vx_info_hash_lock);
+
+	/* static context requested */
+	if ((vxi = __lookup_vx_info(id))) {
+		vxdprintk(VXD_CBIT(xid, 0),
+			"create_vx_info(%d) = %p (already there)", id, vxi);
+		if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+			vxi = ERR_PTR(-EBUSY);
+		else
+			vxi = ERR_PTR(-EEXIST);
+		goto out_unlock;
+	}
+	/* new context */
+	vxdprintk(VXD_CBIT(xid, 0),
+		"create_vx_info(%d) = %p (new)", id, new);
+	claim_vx_info(new, NULL);
+	__hash_vx_info(get_vx_info(new));
+	vxi = new, new = NULL;
+
+out_unlock:
+	spin_unlock(&vx_info_hash_lock);
+	vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id);
+	if (new)
+		__dealloc_vx_info(new);
+	return vxi;
+}
+
+
+/*	exported stuff						*/
+
+
+void unhash_vx_info(struct vx_info *vxi)
+{
+	spin_lock(&vx_info_hash_lock);
+	__unhash_vx_info(vxi);
+	spin_unlock(&vx_info_hash_lock);
+	__shutdown_vx_info(vxi);
+	__wakeup_vx_info(vxi);
+}
+
+
+/*	lookup_vx_info()
+
+	* search for a vx_info and get() it
+	* negative id means current				*/
+
+struct vx_info *lookup_vx_info(int id)
+{
+	struct vx_info *vxi = NULL;
+
+	if (id < 0) {
+		vxi = get_vx_info(current_vx_info());
+	} else if (id > 1) {
+		spin_lock(&vx_info_hash_lock);
+		vxi = get_vx_info(__lookup_vx_info(id));
+		spin_unlock(&vx_info_hash_lock);
+	}
+	return vxi;
+}
+
+/*	xid_is_hashed()
+
+	* verify that xid is still hashed			*/
+
+int xid_is_hashed(vxid_t xid)
+{
+	int hashed;
+
+	spin_lock(&vx_info_hash_lock);
+	hashed = (__lookup_vx_info(xid) != NULL);
+	spin_unlock(&vx_info_hash_lock);
+	return hashed;
+}
+
+#ifdef	CONFIG_PROC_FS
+
+/*	get_xid_list()
+
+	* get a subset of hashed xids for proc
+	* assumes size is at least one				*/
+
+int get_xid_list(int index, unsigned int *xids, int size)
+{
+	int hindex, nr_xids = 0;
+
+	/* only show current and children */
+	if (!vx_check(0, VS_ADMIN | VS_WATCH)) {
+		if (index > 0)
+			return 0;
+		xids[nr_xids] = vx_current_xid();
+		return 1;
+	}
+
+	for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
+		struct hlist_head *head = &vx_info_hash[hindex];
+		struct hlist_node *pos;
+
+		spin_lock(&vx_info_hash_lock);
+		hlist_for_each(pos, head) {
+			struct vx_info *vxi;
+
+			if (--index > 0)
+				continue;
+
+			vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+			xids[nr_xids] = vxi->vx_id;
+			if (++nr_xids >= size) {
+				spin_unlock(&vx_info_hash_lock);
+				goto out;
+			}
+		}
+		/* keep the lock time short */
+		spin_unlock(&vx_info_hash_lock);
+	}
+out:
+	return nr_xids;
+}
+#endif
+
+#ifdef	CONFIG_VSERVER_DEBUG
+
+void	dump_vx_info_inactive(int level)
+{
+	struct hlist_node *entry, *next;
+
+	hlist_for_each_safe(entry, next, &vx_info_inactive) {
+		struct vx_info *vxi =
+			list_entry(entry, struct vx_info, vx_hlist);
+
+		dump_vx_info(vxi, level);
+	}
+}
+
+#endif
+
+#if 0
+int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
+{
+	struct user_struct *new_user, *old_user;
+
+	if (!p || !vxi)
+		BUG();
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+		return -EACCES;
+
+	new_user = alloc_uid(vxi->vx_id, p->uid);
+	if (!new_user)
+		return -ENOMEM;
+
+	old_user = p->user;
+	if (new_user != old_user) {
+		atomic_inc(&new_user->processes);
+		atomic_dec(&old_user->processes);
+		p->user = new_user;
+	}
+	free_uid(old_user);
+	return 0;
+}
+#endif
+
+#if 0
+void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
+{
+	// p->cap_effective &= vxi->vx_cap_bset;
+	p->cap_effective =
+		cap_intersect(p->cap_effective, vxi->cap_bset);
+	// p->cap_inheritable &= vxi->vx_cap_bset;
+	p->cap_inheritable =
+		cap_intersect(p->cap_inheritable, vxi->cap_bset);
+	// p->cap_permitted &= vxi->vx_cap_bset;
+	p->cap_permitted =
+		cap_intersect(p->cap_permitted, vxi->cap_bset);
+}
+#endif
+
+
+#include <linux/file.h>
+#include <linux/fdtable.h>
+
+static int vx_openfd_task(struct task_struct *tsk)
+{
+	struct files_struct *files = tsk->files;
+	struct fdtable *fdt;
+	const unsigned long *bptr;
+	int count, total;
+
+	/* no rcu_read_lock() because of spin_lock() */
+	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
+	bptr = fdt->open_fds;
+	count = fdt->max_fds / (sizeof(unsigned long) * 8);
+	for (total = 0; count > 0; count--) {
+		if (*bptr)
+			total += hweight_long(*bptr);
+		bptr++;
+	}
+	spin_unlock(&files->file_lock);
+	return total;
+}
+
+
+/*	for *space compatibility */
+
+asmlinkage long sys_unshare(unsigned long);
+
+/*
+ *	migrate task to new context
+ *	gets vxi, puts old_vxi on change
+ *	optionally unshares namespaces (hack)
+ */
+
+int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
+{
+	struct vx_info *old_vxi;
+	int ret = 0;
+
+	if (!p || !vxi)
+		BUG();
+
+	vxdprintk(VXD_CBIT(xid, 5),
+		"vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
+		vxi->vx_id, atomic_read(&vxi->vx_usecnt));
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
+		!vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+		return -EACCES;
+
+	if (vx_info_state(vxi, VXS_SHUTDOWN))
+		return -EFAULT;
+
+	old_vxi = task_get_vx_info(p);
+	if (old_vxi == vxi)
+		goto out;
+
+//	if (!(ret = vx_migrate_user(p, vxi))) {
+	{
+		int openfd;
+
+		task_lock(p);
+		openfd = vx_openfd_task(p);
+
+		if (old_vxi) {
+			atomic_dec(&old_vxi->cvirt.nr_threads);
+			atomic_dec(&old_vxi->cvirt.nr_running);
+			__rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
+			/* FIXME: what about the struct files here? */
+			__rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
+			/* account for the executable */
+			__rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
+		}
+		atomic_inc(&vxi->cvirt.nr_threads);
+		atomic_inc(&vxi->cvirt.nr_running);
+		__rlim_inc(&vxi->limit, RLIMIT_NPROC);
+		/* FIXME: what about the struct files here? */
+		__rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
+		/* account for the executable */
+		__rlim_inc(&vxi->limit, VLIMIT_DENTRY);
+
+		if (old_vxi) {
+			release_vx_info(old_vxi, p);
+			clr_vx_info(&p->vx_info);
+		}
+		claim_vx_info(vxi, p);
+		set_vx_info(&p->vx_info, vxi);
+		p->xid = vxi->vx_id;
+
+		vxdprintk(VXD_CBIT(xid, 5),
+			"moved task %p into vxi:%p[#%d]",
+			p, vxi, vxi->vx_id);
+
+		// vx_mask_cap_bset(vxi, p);
+		task_unlock(p);
+
+		/* hack for *spaces to provide compatibility */
+		if (unshare) {
+			struct nsproxy *old_nsp, *new_nsp;
+
+			ret = unshare_nsproxy_namespaces(
+				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER,
+				&new_nsp, NULL, NULL);
+			if (ret)
+				goto out;
+
+			old_nsp = xchg(&p->nsproxy, new_nsp);
+			vx_set_space(vxi,
+				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0);
+			put_nsproxy(old_nsp);
+		}
+	}
+out:
+	put_vx_info(old_vxi);
+	return ret;
+}
+
+int vx_set_reaper(struct vx_info *vxi, struct task_struct *p)
+{
+	struct task_struct *old_reaper;
+	struct vx_info *reaper_vxi;
+
+	if (!vxi)
+		return -EINVAL;
+
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_reaper(%p[#%d],%p[#%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid);
+
+	old_reaper = vxi->vx_reaper;
+	if (old_reaper == p)
+		return 0;
+
+	reaper_vxi = task_get_vx_info(p);
+	if (reaper_vxi && reaper_vxi != vxi) {
+		vxwprintk(1,
+			"Unsuitable reaper [" VS_Q("%s") ",%u:#%u] "
+			"for [xid #%u]",
+			p->comm, p->pid, p->xid, vx_current_xid());
+		goto out;
+	}
+
+	/* set new child reaper */
+	get_task_struct(p);
+	vxi->vx_reaper = p;
+	put_task_struct(old_reaper);
+out:
+	put_vx_info(reaper_vxi);
+	return 0;
+}
+
+int vx_set_init(struct vx_info *vxi, struct task_struct *p)
+{
+	if (!vxi)
+		return -EINVAL;
+
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+	vxi->vx_flags &= ~VXF_STATE_INIT;
+	// vxi->vx_initpid = p->tgid;
+	vxi->vx_initpid = p->pid;
+	return 0;
+}
+
+void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_exit_init(%p[#%d],%p[#%d,%d,%d])",
+		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+	vxi->exit_code = code;
+	vxi->vx_initpid = 0;
+}
+
+
+void vx_set_persistent(struct vx_info *vxi)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+	get_vx_info(vxi);
+	claim_vx_info(vxi, NULL);
+}
+
+void vx_clear_persistent(struct vx_info *vxi)
+{
+	vxdprintk(VXD_CBIT(xid, 6),
+		"vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+	release_vx_info(vxi, NULL);
+	put_vx_info(vxi);
+}
+
+void vx_update_persistent(struct vx_info *vxi)
+{
+	if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
+		vx_set_persistent(vxi);
+	else
+		vx_clear_persistent(vxi);
+}
+
+
+/*	task must be current or locked		*/
+
+void	exit_vx_info(struct task_struct *p, int code)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi) {
+		atomic_dec(&vxi->cvirt.nr_threads);
+		vx_nproc_dec(p);
+
+		vxi->exit_code = code;
+		release_vx_info(vxi, p);
+	}
+}
+
+void	exit_vx_info_early(struct task_struct *p, int code)
+{
+	struct vx_info *vxi = p->vx_info;
+
+	if (vxi) {
+		if (vxi->vx_initpid == p->pid)
+			vx_exit_init(vxi, p, code);
+		if (vxi->vx_reaper == p)
+			vx_set_reaper(vxi, init_pid_ns.child_reaper);
+	}
+}
+
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_xid(uint32_t id)
+{
+	vxid_t xid;
+
+	if (id) {
+		struct task_struct *tsk;
+
+		rcu_read_lock();
+		tsk = find_task_by_real_pid(id);
+		xid = (tsk) ? tsk->xid : -ESRCH;
+		rcu_read_unlock();
+	} else
+		xid = vx_current_xid();
+	return xid;
+}
+
+
+int vc_vx_info(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vx_info_v0 vc_data;
+
+	vc_data.xid = vxi->vx_id;
+	vc_data.initpid = vxi->vx_initpid;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+int vc_ctx_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_stat_v0 vc_data;
+
+	vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
+	vc_data.tasks = atomic_read(&vxi->vx_tasks);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+/* context functions */
+
+int vc_ctx_create(uint32_t xid, void __user *data)
+{
+	struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
+	struct vx_info *new_vxi;
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if ((xid > MAX_S_CONTEXT) || (xid < 2))
+		return -EINVAL;
+
+	new_vxi = __create_vx_info(xid);
+	if (IS_ERR(new_vxi))
+		return PTR_ERR(new_vxi);
+
+	/* initial flags */
+	new_vxi->vx_flags = vc_data.flagword;
+
+	ret = -ENOEXEC;
+	if (vs_state_change(new_vxi, VSC_STARTUP))
+		goto out;
+
+	ret = vx_migrate_task(current, new_vxi, (!data));
+	if (ret)
+		goto out;
+
+	/* return context id on success */
+	ret = new_vxi->vx_id;
+
+	/* get a reference for persistent contexts */
+	if ((vc_data.flagword & VXF_PERSISTENT))
+		vx_set_persistent(new_vxi);
+out:
+	release_vx_info(new_vxi, NULL);
+	put_vx_info(new_vxi);
+	return ret;
+}
+
+
+int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = vx_migrate_task(current, vxi, 0);
+	if (ret)
+		return ret;
+	if (vc_data.flagword & VXM_SET_INIT)
+		ret = vx_set_init(vxi, current);
+	if (ret)
+		return ret;
+	if (vc_data.flagword & VXM_SET_REAPER)
+		ret = vx_set_reaper(vxi, current);
+	return ret;
+}
+
+
+int vc_get_cflags(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_flags_v0 vc_data;
+
+	vc_data.flagword = vxi->vx_flags;
+
+	/* special STATE flag handling */
+	vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_cflags(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_flags_v0 vc_data;
+	uint64_t mask, trigger;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special STATE flag handling */
+	mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
+	trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
+
+	if (vxi == current_vx_info()) {
+		/* if (trigger & VXF_STATE_SETUP)
+			vx_mask_cap_bset(vxi, current); */
+		if (trigger & VXF_STATE_INIT) {
+			int ret;
+
+			ret = vx_set_init(vxi, current);
+			if (ret)
+				return ret;
+			ret = vx_set_reaper(vxi, current);
+			if (ret)
+				return ret;
+		}
+	}
+
+	vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
+		vc_data.flagword, mask);
+	if (trigger & VXF_PERSISTENT)
+		vx_update_persistent(vxi);
+
+	return 0;
+}
+
+
+static inline uint64_t caps_from_cap_t(kernel_cap_t c)
+{
+	uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32);
+
+	// printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v);
+	return v;
+}
+
+static inline kernel_cap_t cap_t_from_caps(uint64_t v)
+{
+	kernel_cap_t c = __cap_empty_set;
+
+	c.cap[0] = v & 0xFFFFFFFF;
+	c.cap[1] = (v >> 32) & 0xFFFFFFFF;
+
+	// printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]);
+	return c;
+}
+
+
+static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
+{
+	if (bcaps)
+		*bcaps = caps_from_cap_t(vxi->vx_bcaps);
+	if (ccaps)
+		*ccaps = vxi->vx_ccaps;
+
+	return 0;
+}
+
+int vc_get_ccaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_caps_v1 vc_data;
+	int ret;
+
+	ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
+	if (ret)
+		return ret;
+	vc_data.cmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+static int do_set_caps(struct vx_info *vxi,
+	uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
+{
+	uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps);
+
+#if 0
+	printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n",
+		bcaps, bmask, ccaps, cmask);
+#endif
+	vxi->vx_bcaps = cap_t_from_caps(
+		vs_mask_flags(bcold, bcaps, bmask));
+	vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
+
+	return 0;
+}
+
+int vc_set_ccaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_caps_v1 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
+}
+
+int vc_get_bcaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_bcaps vc_data;
+	int ret;
+
+	ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
+	if (ret)
+		return ret;
+	vc_data.bmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_bcaps(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_bcaps vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
+}
+
+
+int vc_get_umask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_umask vc_data;
+
+	vc_data.umask = vxi->vx_umask;
+	vc_data.mask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_umask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_umask vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi->vx_umask = vs_mask_flags(vxi->vx_umask,
+		vc_data.umask, vc_data.mask);
+	return 0;
+}
+
+
+int vc_get_wmask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_wmask vc_data;
+
+	vc_data.wmask = vxi->vx_wmask;
+	vc_data.mask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_wmask(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_wmask vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi->vx_wmask = vs_mask_flags(vxi->vx_wmask,
+		vc_data.wmask, vc_data.mask);
+	return 0;
+}
+
+
+int vc_get_badness(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_badness_v0 vc_data;
+
+	vc_data.bias = vxi->vx_badness_bias;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_badness(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_badness_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	vxi->vx_badness_bias = vc_data.bias;
+	return 0;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(free_vx_info);
+
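caps_from_cap_t() and cap_t_from_caps() above flatten the two 32-bit words of a kernel_cap_t into a single 64-bit value and back, which is how vc_get_bcaps()/vc_set_bcaps() exchange capability sets with userspace. A userspace restatement of the round trip (the two-word struct is a stand-in for kernel_cap_t, matching the cap[0]/cap[1] accesses in the helpers above):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct kcap { uint32_t cap[2]; };	/* stand-in for kernel_cap_t */

static uint64_t caps_from_cap_t(struct kcap c)
{
	return c.cap[0] | ((uint64_t)c.cap[1] << 32);
}

static struct kcap cap_t_from_caps(uint64_t v)
{
	struct kcap c = { { (uint32_t)(v & 0xFFFFFFFF),
			    (uint32_t)((v >> 32) & 0xFFFFFFFF) } };
	return c;
}

int main(void)
{
	struct kcap set = { { 0xFFFFFFFF, 0x000001FF } };	/* example bitmap */
	uint64_t packed = caps_from_cap_t(set);
	struct kcap back = cap_t_from_caps(packed);

	assert(back.cap[0] == set.cap[0] && back.cap[1] == set.cap[1]);
	printf("packed caps: %#llx\n", (unsigned long long)packed);
	return 0;
}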
diff -ruNp linux-3.13.11/kernel/vserver/cvirt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt.c
--- linux-3.13.11/kernel/vserver/cvirt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,313 @@
+/*
+ *  linux/kernel/vserver/cvirt.c
+ *
+ *  Virtual Server: Context Virtualization
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from limit.c
+ *  V0.02  added utsname stuff
+ *  V0.03  changed vcmds to vxi arg
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vserver/switch.h>
+#include <linux/vserver/cvirt_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+void vx_vsi_boottime(struct timespec *boottime)
+{
+	struct vx_info *vxi = current_vx_info();
+
+	set_normalized_timespec(boottime,
+		boottime->tv_sec + vxi->cvirt.bias_uptime.tv_sec,
+		boottime->tv_nsec + vxi->cvirt.bias_uptime.tv_nsec);
+	return;
+}
+
+void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
+{
+	struct vx_info *vxi = current_vx_info();
+
+	set_normalized_timespec(uptime,
+		uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec,
+		uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec);
+	if (!idle)
+		return;
+	set_normalized_timespec(idle,
+		idle->tv_sec - vxi->cvirt.bias_idle.tv_sec,
+		idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec);
+	return;
+}
+
+uint64_t vx_idle_jiffies(void)
+{
+	return init_task.utime + init_task.stime;
+}
+
+
+
+static inline uint32_t __update_loadavg(uint32_t load,
+	int wsize, int delta, int n)
+{
+	unsigned long long calc, prev;
+
+	/* just set it to n */
+	if (unlikely(delta >= wsize))
+		return (n << FSHIFT);
+
+	calc = delta * n;
+	calc <<= FSHIFT;
+	prev = (wsize - delta);
+	prev *= load;
+	calc += prev;
+	do_div(calc, wsize);
+	return calc;
+}
+
+
+void vx_update_load(struct vx_info *vxi)
+{
+	uint32_t now, last, delta;
+	unsigned int nr_running, nr_uninterruptible;
+	unsigned int total;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
+
+	now = jiffies;
+	last = vxi->cvirt.load_last;
+	delta = now - last;
+
+	if (delta < 5*HZ)
+		goto out;
+
+	nr_running = atomic_read(&vxi->cvirt.nr_running);
+	nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible);
+	total = nr_running + nr_uninterruptible;
+
+	vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0],
+		60*HZ, delta, total);
+	vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1],
+		5*60*HZ, delta, total);
+	vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2],
+		15*60*HZ, delta, total);
+
+	vxi->cvirt.load_last = now;
+out:
+	atomic_inc(&vxi->cvirt.load_updates);
+	spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
+}
+
+
+/*
+ * Commands to do_syslog:
+ *
+ *      0 -- Close the log.  Currently a NOP.
+ *      1 -- Open the log. Currently a NOP.
+ *      2 -- Read from the log.
+ *      3 -- Read all messages remaining in the ring buffer.
+ *      4 -- Read and clear all messages remaining in the ring buffer
+ *      5 -- Clear ring buffer.
+ *      6 -- Disable printk's to console
+ *      7 -- Enable printk's to console
+ *      8 -- Set level of messages printed to console
+ *      9 -- Return number of unread characters in the log buffer
+ *     10 -- Return size of the log buffer
+ */
+int vx_do_syslog(int type, char __user *buf, int len)
+{
+	int error = 0;
+	int do_clear = 0;
+	struct vx_info *vxi = current_vx_info();
+	struct _vx_syslog *log;
+
+	if (!vxi)
+		return -EINVAL;
+	log = &vxi->cvirt.syslog;
+
+	switch (type) {
+	case 0:		/* Close log */
+	case 1:		/* Open log */
+		break;
+	case 2:		/* Read from log */
+		error = wait_event_interruptible(log->log_wait,
+			(log->log_start - log->log_end));
+		if (error)
+			break;
+		spin_lock_irq(&log->logbuf_lock);
+		spin_unlock_irq(&log->logbuf_lock);
+		break;
+	case 4:		/* Read/clear last kernel messages */
+		do_clear = 1;
+		/* fall through */
+	case 3:		/* Read last kernel messages */
+		return 0;
+
+	case 5:		/* Clear ring buffer */
+		return 0;
+
+	case 6:		/* Disable logging to console */
+	case 7:		/* Enable logging to console */
+	case 8:		/* Set level of messages printed to console */
+		break;
+
+	case 9:		/* Number of chars in the log buffer */
+		return 0;
+	case 10:	/* Size of the log buffer */
+		return 0;
+	default:
+		error = -EINVAL;
+		break;
+	}
+	return error;
+}
+
+
+/* virtual host info names */
+
+static char *vx_vhi_name(struct vx_info *vxi, int id)
+{
+	struct nsproxy *nsproxy;
+	struct uts_namespace *uts;
+
+	if (id == VHIN_CONTEXT)
+		return vxi->vx_name;
+
+	nsproxy = vxi->space[0].vx_nsproxy;
+	if (!nsproxy)
+		return NULL;
+
+	uts = nsproxy->uts_ns;
+	if (!uts)
+		return NULL;
+
+	switch (id) {
+	case VHIN_SYSNAME:
+		return uts->name.sysname;
+	case VHIN_NODENAME:
+		return uts->name.nodename;
+	case VHIN_RELEASE:
+		return uts->name.release;
+	case VHIN_VERSION:
+		return uts->name.version;
+	case VHIN_MACHINE:
+		return uts->name.machine;
+	case VHIN_DOMAINNAME:
+		return uts->name.domainname;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+int vc_set_vhi_name(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vhi_name_v0 vc_data;
+	char *name;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	name = vx_vhi_name(vxi, vc_data.field);
+	if (!name)
+		return -EINVAL;
+
+	memcpy(name, vc_data.name, 65);
+	return 0;
+}
+
+int vc_get_vhi_name(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_vhi_name_v0 vc_data;
+	char *name;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	name = vx_vhi_name(vxi, vc_data.field);
+	if (!name)
+		return -EINVAL;
+
+	memcpy(vc_data.name, name, 65);
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+int vc_virt_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_virt_stat_v0 vc_data;
+	struct _vx_cvirt *cvirt = &vxi->cvirt;
+	struct timespec uptime;
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	set_normalized_timespec(&uptime,
+		uptime.tv_sec - cvirt->bias_uptime.tv_sec,
+		uptime.tv_nsec - cvirt->bias_uptime.tv_nsec);
+
+	vc_data.offset = timespec_to_ns(&cvirt->bias_ts);
+	vc_data.uptime = timespec_to_ns(&uptime);
+	vc_data.nr_threads = atomic_read(&cvirt->nr_threads);
+	vc_data.nr_running = atomic_read(&cvirt->nr_running);
+	vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible);
+	vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold);
+	vc_data.nr_forks = atomic_read(&cvirt->total_forks);
+	vc_data.load[0] = cvirt->load[0];
+	vc_data.load[1] = cvirt->load[1];
+	vc_data.load[2] = cvirt->load[2];
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+#ifdef CONFIG_VSERVER_VTIME
+
+/* virtualized time base */
+
+void vx_adjust_timespec(struct timespec *ts)
+{
+	struct vx_info *vxi;
+
+	if (!vx_flags(VXF_VIRT_TIME, 0))
+		return;
+
+	vxi = current_vx_info();
+	ts->tv_sec += vxi->cvirt.bias_ts.tv_sec;
+	ts->tv_nsec += vxi->cvirt.bias_ts.tv_nsec;
+
+	if (ts->tv_nsec >= NSEC_PER_SEC) {
+		ts->tv_sec++;
+		ts->tv_nsec -= NSEC_PER_SEC;
+	} else if (ts->tv_nsec < 0) {
+		ts->tv_sec--;
+		ts->tv_nsec += NSEC_PER_SEC;
+	}
+}
+
+int vx_settimeofday(const struct timespec *ts)
+{
+	struct timespec ats, delta;
+	struct vx_info *vxi;
+
+	if (!vx_flags(VXF_VIRT_TIME, 0))
+		return do_settimeofday(ts);
+
+	getnstimeofday(&ats);
+	delta = timespec_sub(*ts, ats);
+
+	vxi = current_vx_info();
+	vxi->cvirt.bias_ts = timespec_add(vxi->cvirt.bias_ts, delta);
+	return 0;
+}
+
+#endif
+
diff -ruNp linux-3.13.11/kernel/vserver/cvirt_init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt_init.h
--- linux-3.13.11/kernel/vserver/cvirt_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt_init.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,70 @@
+
+
+extern uint64_t vx_idle_jiffies(void);
+
+static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
+{
+	uint64_t idle_jiffies = vx_idle_jiffies();
+	uint64_t nsuptime;
+
+	do_posix_clock_monotonic_gettime(&cvirt->bias_uptime);
+	nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec
+		* NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec;
+	cvirt->bias_clock = nsec_to_clock_t(nsuptime);
+	cvirt->bias_ts.tv_sec = 0;
+	cvirt->bias_ts.tv_nsec = 0;
+
+	jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
+	atomic_set(&cvirt->nr_threads, 0);
+	atomic_set(&cvirt->nr_running, 0);
+	atomic_set(&cvirt->nr_uninterruptible, 0);
+	atomic_set(&cvirt->nr_onhold, 0);
+
+	spin_lock_init(&cvirt->load_lock);
+	cvirt->load_last = jiffies;
+	atomic_set(&cvirt->load_updates, 0);
+	cvirt->load[0] = 0;
+	cvirt->load[1] = 0;
+	cvirt->load[2] = 0;
+	atomic_set(&cvirt->total_forks, 0);
+
+	spin_lock_init(&cvirt->syslog.logbuf_lock);
+	init_waitqueue_head(&cvirt->syslog.log_wait);
+	cvirt->syslog.log_start = 0;
+	cvirt->syslog.log_end = 0;
+	cvirt->syslog.con_start = 0;
+	cvirt->syslog.logged_chars = 0;
+}
+
+static inline
+void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+	// cvirt_pc->cpustat = { 0 };
+}
+
+static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt)
+{
+#ifdef	CONFIG_VSERVER_WARN
+	int value;
+#endif
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)),
+		"!!! cvirt: %p[nr_threads] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_running)),
+		"!!! cvirt: %p[nr_running] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)),
+		"!!! cvirt: %p[nr_uninterruptible] = %d on exit.",
+		cvirt, value);
+	vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)),
+		"!!! cvirt: %p[nr_onhold] = %d on exit.",
+		cvirt, value);
+	return;
+}
+
+static inline
+void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+	return;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/cvirt_proc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt_proc.h
--- linux-3.13.11/kernel/vserver/cvirt_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/cvirt_proc.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,123 @@
+#ifndef _VX_CVIRT_PROC_H
+#define _VX_CVIRT_PROC_H
+
+#include <linux/nsproxy.h>
+#include <linux/mnt_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+
+extern int vx_info_mnt_namespace(struct mnt_namespace *, char *);
+
+static inline
+int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer)
+{
+	struct mnt_namespace *ns;
+	struct uts_namespace *uts;
+	struct ipc_namespace *ipc;
+	int length = 0;
+
+	if (!nsproxy)
+		goto out;
+
+	length += sprintf(buffer + length,
+		"NSProxy:\t%p [%p,%p,%p]\n",
+		nsproxy, nsproxy->mnt_ns,
+		nsproxy->uts_ns, nsproxy->ipc_ns);
+
+	ns = nsproxy->mnt_ns;
+	if (!ns)
+		goto skip_ns;
+
+	length += vx_info_mnt_namespace(ns, buffer + length);
+
+skip_ns:
+
+	uts = nsproxy->uts_ns;
+	if (!uts)
+		goto skip_uts;
+
+	length += sprintf(buffer + length,
+		"SysName:\t%.*s\n"
+		"NodeName:\t%.*s\n"
+		"Release:\t%.*s\n"
+		"Version:\t%.*s\n"
+		"Machine:\t%.*s\n"
+		"DomainName:\t%.*s\n",
+		__NEW_UTS_LEN, uts->name.sysname,
+		__NEW_UTS_LEN, uts->name.nodename,
+		__NEW_UTS_LEN, uts->name.release,
+		__NEW_UTS_LEN, uts->name.version,
+		__NEW_UTS_LEN, uts->name.machine,
+		__NEW_UTS_LEN, uts->name.domainname);
+skip_uts:
+
+	ipc = nsproxy->ipc_ns;
+	if (!ipc)
+		goto skip_ipc;
+
+	length += sprintf(buffer + length,
+		"SEMS:\t\t%d %d %d %d  %d\n"
+		"MSG:\t\t%d %d %d\n"
+		"SHM:\t\t%lu %lu  %d %ld\n",
+		ipc->sem_ctls[0], ipc->sem_ctls[1],
+		ipc->sem_ctls[2], ipc->sem_ctls[3],
+		ipc->used_sems,
+		ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni,
+		(unsigned long)ipc->shm_ctlmax,
+		(unsigned long)ipc->shm_ctlall,
+		ipc->shm_ctlmni, ipc->shm_tot);
+skip_ipc:
+out:
+	return length;
+}
+
+
+#include <linux/sched.h>
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
+
+static inline
+int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
+{
+	int length = 0;
+	int a, b, c;
+
+	length += sprintf(buffer + length,
+		"BiasUptime:\t%lu.%02lu\n",
+		(unsigned long)cvirt->bias_uptime.tv_sec,
+		(cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100)));
+
+	a = cvirt->load[0] + (FIXED_1 / 200);
+	b = cvirt->load[1] + (FIXED_1 / 200);
+	c = cvirt->load[2] + (FIXED_1 / 200);
+	length += sprintf(buffer + length,
+		"nr_threads:\t%d\n"
+		"nr_running:\t%d\n"
+		"nr_unintr:\t%d\n"
+		"nr_onhold:\t%d\n"
+		"load_updates:\t%d\n"
+		"loadavg:\t%d.%02d %d.%02d %d.%02d\n"
+		"total_forks:\t%d\n",
+		atomic_read(&cvirt->nr_threads),
+		atomic_read(&cvirt->nr_running),
+		atomic_read(&cvirt->nr_uninterruptible),
+		atomic_read(&cvirt->nr_onhold),
+		atomic_read(&cvirt->load_updates),
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		atomic_read(&cvirt->total_forks));
+	return length;
+}
+
+static inline
+int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc,
+	char *buffer, int cpu)
+{
+	int length = 0;
+	return length;
+}
+
+#endif	/* _VX_CVIRT_PROC_H */
diff -ruNp linux-3.13.11/kernel/vserver/debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/debug.c
--- linux-3.13.11/kernel/vserver/debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,32 @@
+/*
+ *  kernel/vserver/debug.c
+ *
+ *  Copyright (C) 2005-2007 Herbert Pötzl
+ *
+ *  V0.01  vx_info dump support
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/vserver/context.h>
+
+
+void	dump_vx_info(struct vx_info *vxi, int level)
+{
+	printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id,
+		atomic_read(&vxi->vx_usecnt),
+		atomic_read(&vxi->vx_tasks),
+		vxi->vx_state);
+	if (level > 0) {
+		__dump_vx_limit(&vxi->limit);
+		__dump_vx_sched(&vxi->sched);
+		__dump_vx_cvirt(&vxi->cvirt);
+		__dump_vx_cacct(&vxi->cacct);
+	}
+	printk("---\n");
+}
+
+
+EXPORT_SYMBOL_GPL(dump_vx_info);
+
diff -ruNp linux-3.13.11/kernel/vserver/device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/device.c
--- linux-3.13.11/kernel/vserver/device.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/device.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,443 @@
+/*
+ *  linux/kernel/vserver/device.c
+ *
+ *  Linux-VServer: Device Support
+ *
+ *  Copyright (C) 2006  Herbert Pötzl
+ *  Copyright (C) 2007  Daniel Hokka Zakrisson
+ *
+ *  V0.01  device mapping basics
+ *  V0.02  added defaults
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/hash.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+#include <linux/vserver/base.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/device.h>
+#include <linux/vserver/device_cmd.h>
+
+
+#define DMAP_HASH_BITS	4
+
+
+struct vs_mapping {
+	union {
+		struct hlist_node hlist;
+		struct list_head list;
+	} u;
+#define dm_hlist	u.hlist
+#define dm_list		u.list
+	vxid_t xid;
+	dev_t device;
+	struct vx_dmap_target target;
+};
+
+
+static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS];
+
+static DEFINE_SPINLOCK(dmap_main_hash_lock);
+
+static struct vx_dmap_target dmap_defaults[2] = {
+	{ .flags = DATTR_OPEN },
+	{ .flags = DATTR_OPEN },
+};
+
+
+struct kmem_cache *dmap_cachep __read_mostly;
+
+int __init dmap_cache_init(void)
+{
+	dmap_cachep = kmem_cache_create("dmap_cache",
+		sizeof(struct vs_mapping), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	return 0;
+}
+
+__initcall(dmap_cache_init);
+
+
+static inline unsigned int __hashval(dev_t dev, int bits)
+{
+	return hash_long((unsigned long)dev, bits);
+}
+
+
+/*	__hash_mapping()
+ *	add the mapping to the hash table
+ */
+static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct hlist_head *head, *hash = dmap_main_hash;
+	int device = vdm->device;
+
+	spin_lock(hash_lock);
+	vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x",
+		vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target);
+
+	head = &hash[__hashval(device, DMAP_HASH_BITS)];
+	hlist_add_head(&vdm->dm_hlist, head);
+	spin_unlock(hash_lock);
+}
+
+
+static inline int __mode_to_default(umode_t mode)
+{
+	switch (mode) {
+	case S_IFBLK:
+		return 0;
+	case S_IFCHR:
+		return 1;
+	default:
+		BUG();
+	}
+}
+
+
+/*	__set_default()
+ *	set a default
+ */
+static inline void __set_default(struct vx_info *vxi, umode_t mode,
+	struct vx_dmap_target *vdmt)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	spin_lock(hash_lock);
+
+	if (vxi)
+		vxi->dmap.targets[__mode_to_default(mode)] = *vdmt;
+	else
+		dmap_defaults[__mode_to_default(mode)] = *vdmt;
+
+
+	spin_unlock(hash_lock);
+
+	vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x",
+		  vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags);
+}
+
+
+/*	__remove_default()
+ *	remove a default
+ */
+static inline int __remove_default(struct vx_info *vxi, umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	spin_lock(hash_lock);
+
+	if (vxi)
+		vxi->dmap.targets[__mode_to_default(mode)].flags = 0;
+	else	/* remove == reset */
+		dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode;
+
+	spin_unlock(hash_lock);
+	return 0;
+}
+
+
+/*	__find_mapping()
+ *	find a mapping in the hash table
+ *
+ *	caller must hold hash_lock
+ */
+static inline int __find_mapping(vxid_t xid, dev_t device, umode_t mode,
+	struct vs_mapping **local, struct vs_mapping **global)
+{
+	struct hlist_head *hash = dmap_main_hash;
+	struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)];
+	struct hlist_node *pos;
+	struct vs_mapping *vdm;
+
+	*local = NULL;
+	if (global)
+		*global = NULL;
+
+	hlist_for_each(pos, head) {
+		vdm = hlist_entry(pos, struct vs_mapping, dm_hlist);
+
+		if ((vdm->device == device) &&
+			!((vdm->target.flags ^ mode) & S_IFMT)) {
+			if (vdm->xid == xid) {
+				*local = vdm;
+				return 1;
+			} else if (global && vdm->xid == 0)
+				*global = vdm;
+		}
+	}
+
+	if (global && *global)
+		return 0;
+	else
+		return -ENOENT;
+}
+
+
+/*	__lookup_mapping()
+ *	find a mapping and store the result in target and flags
+ */
+static inline int __lookup_mapping(struct vx_info *vxi,
+	dev_t device, dev_t *target, int *flags, umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct vs_mapping *vdm, *global;
+	struct vx_dmap_target *vdmt;
+	int ret = 0;
+	vxid_t xid = vxi->vx_id;
+	int index;
+
+	spin_lock(hash_lock);
+	if (__find_mapping(xid, device, mode, &vdm, &global) > 0) {
+		ret = 1;
+		vdmt = &vdm->target;
+		goto found;
+	}
+
+	index = __mode_to_default(mode);
+	if (vxi && vxi->dmap.targets[index].flags) {
+		ret = 2;
+		vdmt = &vxi->dmap.targets[index];
+	} else if (global) {
+		ret = 3;
+		vdmt = &global->target;
+		goto found;
+	} else {
+		ret = 4;
+		vdmt = &dmap_defaults[index];
+	}
+
+found:
+	if (target && (vdmt->flags & DATTR_REMAP))
+		*target = vdmt->target;
+	else if (target)
+		*target = device;
+	if (flags)
+		*flags = vdmt->flags;
+
+	spin_unlock(hash_lock);
+
+	return ret;
+}
+
+
+/*	__remove_mapping()
+ *	remove a mapping from the hash table
+ */
+static inline int __remove_mapping(struct vx_info *vxi, dev_t device,
+	umode_t mode)
+{
+	spinlock_t *hash_lock = &dmap_main_hash_lock;
+	struct vs_mapping *vdm = NULL;
+	int ret = 0;
+
+	spin_lock(hash_lock);
+
+	ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm,
+		NULL);
+	vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x",
+		vxi, vxi ? vxi->vx_id : 0, device, mode);
+	if (ret < 0)
+		goto out;
+	hlist_del(&vdm->dm_hlist);
+
+out:
+	spin_unlock(hash_lock);
+	if (vdm)
+		kmem_cache_free(dmap_cachep, vdm);
+	return ret;
+}
+
+
+
+int vs_map_device(struct vx_info *vxi,
+	dev_t device, dev_t *target, umode_t mode)
+{
+	int ret, flags = DATTR_MASK;
+
+	if (!vxi) {
+		if (target)
+			*target = device;
+		goto out;
+	}
+	ret = __lookup_mapping(vxi, device, target, &flags, mode);
+	vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x
mapped=%d",
+		device, target ? *target : 0, flags, mode, ret);
+out:
+	return (flags & DATTR_MASK);
+}
+
+
+
+static int do_set_mapping(struct vx_info *vxi,
+	dev_t device, dev_t target, int flags, umode_t mode)
+{
+	if (device) {
+		struct vs_mapping *new;
+
+		new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL);
+		if (!new)
+			return -ENOMEM;
+
+		INIT_HLIST_NODE(&new->dm_hlist);
+		new->device = device;
+		new->target.target = target;
+		new->target.flags = flags | mode;
+		new->xid = (vxi ? vxi->vx_id : 0);
+
+		vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device,
target, flags);
+		__hash_mapping(vxi, new);
+	} else {
+		struct vx_dmap_target new = {
+			.target = target,
+			.flags = flags | mode,
+		};
+		__set_default(vxi, mode, &new);
+	}
+	return 0;
+}
+
+
+static int do_unset_mapping(struct vx_info *vxi,
+	dev_t device, dev_t target, int flags, umode_t mode)
+{
+	int ret = -EINVAL;
+
+	if (device) {
+		ret = __remove_mapping(vxi, device, mode);
+		if (ret < 0)
+			goto out;
+	} else {
+		ret = __remove_default(vxi, mode);
+		if (ret < 0)
+			goto out;
+	}
+
+out:
+	return ret;
+}
+
+
+static inline int __user_device(const char __user *name, dev_t *dev,
+	umode_t *mode)
+{
+	struct nameidata nd;
+	int ret;
+
+	if (!name) {
+		*dev = 0;
+		return 0;
+	}
+	ret = user_lpath(name, &nd.path);
+	if (ret)
+		return ret;
+	if (nd.path.dentry->d_inode) {
+		*dev = nd.path.dentry->d_inode->i_rdev;
+		*mode = nd.path.dentry->d_inode->i_mode;
+	}
+	path_put(&nd.path);
+	return 0;
+}
+
+static inline int __mapping_mode(dev_t device, dev_t target,
+	umode_t device_mode, umode_t target_mode, umode_t *mode)
+{
+	if (device)
+		*mode = device_mode & S_IFMT;
+	else if (target)
+		*mode = target_mode & S_IFMT;
+	else
+		return -EINVAL;
+
+	/* if both given, device and target mode have to match */
+	if (device && target &&
+		((device_mode ^ target_mode) & S_IFMT))
+		return -EINVAL;
+	return 0;
+}
+
+
+static inline int do_mapping(struct vx_info *vxi, const char __user *device_path,
+	const char __user *target_path, int flags, int set)
+{
+	dev_t device = ~0, target = ~0;
+	umode_t device_mode = 0, target_mode = 0, mode;
+	int ret;
+
+	ret = __user_device(device_path, &device, &device_mode);
+	if (ret)
+		return ret;
+	ret = __user_device(target_path, &target, &target_mode);
+	if (ret)
+		return ret;
+
+	ret = __mapping_mode(device, target,
+		device_mode, target_mode, &mode);
+	if (ret)
+		return ret;
+
+	if (set)
+		return do_set_mapping(vxi, device, target,
+			flags, mode);
+	else
+		return do_unset_mapping(vxi, device, target,
+			flags, mode);
+}
+
+
+int vc_set_mapping(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, vc_data.device, vc_data.target,
+		vc_data.flags, 1);
+}
+
+int vc_unset_mapping(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, vc_data.device, vc_data.target,
+		vc_data.flags, 0);
+}
+
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+		compat_ptr(vc_data.target_ptr), vc_data.flags, 1);
+}
+
+int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_set_mapping_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+		compat_ptr(vc_data.target_ptr), vc_data.flags, 0);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
diff -ruNp linux-3.13.11/kernel/vserver/dlimit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/dlimit.c
--- linux-3.13.11/kernel/vserver/dlimit.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/dlimit.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,528 @@
+/*
+ *  linux/kernel/vserver/dlimit.c
+ *
+ *  Virtual Server: Context Disk Limits
+ *
+ *  Copyright (C) 2004-2009  Herbert Pötzl
+ *
+ *  V0.01  initial version
+ *  V0.02  compat32 splitup
+ *  V0.03  extended interface
+ *
+ */
+
+#include <linux/statfs.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vserver/dlimit_cmd.h>
+#include <linux/slab.h>
+// #include <linux/gfp.h>
+
+#include <asm/uaccess.h>
+
+/*	__alloc_dl_info()
+
+	* allocate an initialized dl_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct dl_info *__alloc_dl_info(struct super_block *sb, vtag_t tag)
+{
+	struct dl_info *new = NULL;
+
+	vxdprintk(VXD_CBIT(dlim, 5),
+		"alloc_dl_info(%p,%d)*", sb, tag);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct dl_info));
+	new->dl_tag = tag;
+	new->dl_sb = sb;
+	// INIT_RCU_HEAD(&new->dl_rcu);
+	INIT_HLIST_NODE(&new->dl_hlist);
+	spin_lock_init(&new->dl_lock);
+	atomic_set(&new->dl_refcnt, 0);
+	atomic_set(&new->dl_usecnt, 0);
+
+	/* rest of init goes here */
+
+	vxdprintk(VXD_CBIT(dlim, 4),
+		"alloc_dl_info(%p,%d) = %p", sb, tag, new);
+	return new;
+}
+
+/*	__dealloc_dl_info()
+
+	* final disposal of dl_info				*/
+
+static void __dealloc_dl_info(struct dl_info *dli)
+{
+	vxdprintk(VXD_CBIT(dlim, 4),
+		"dealloc_dl_info(%p)", dli);
+
+	dli->dl_hlist.next = LIST_POISON1;
+	dli->dl_tag = -1;
+	dli->dl_sb = 0;
+
+	BUG_ON(atomic_read(&dli->dl_usecnt));
+	BUG_ON(atomic_read(&dli->dl_refcnt));
+
+	kfree(dli);
+}
+
+
+/*	hash table for dl_info hash */
+
+#define DL_HASH_SIZE	13
+
+struct hlist_head dl_info_hash[DL_HASH_SIZE];
+
+static DEFINE_SPINLOCK(dl_info_hash_lock);
+
+
+static inline unsigned int __hashval(struct super_block *sb, vtag_t tag)
+{
+	return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
+}
+
+
+
+/*	__hash_dl_info()
+
+	* add the dli to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_dl_info(struct dl_info *dli)
+{
+	struct hlist_head *head;
+
+	vxdprintk(VXD_CBIT(dlim, 6),
+		"__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
+	get_dl_info(dli);
+	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
+	hlist_add_head_rcu(&dli->dl_hlist, head);
+}
+
+/*	__unhash_dl_info()
+
+	* remove the dli from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_dl_info(struct dl_info *dli)
+{
+	vxdprintk(VXD_CBIT(dlim, 6),
+		"__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
+	hlist_del_rcu(&dli->dl_hlist);
+	put_dl_info(dli);
+}
+
+
+/*	__lookup_dl_info()
+
+	* requires the rcu_read_lock()
+	* doesn't increment the dl_refcnt			*/
+
+static inline struct dl_info *__lookup_dl_info(struct super_block *sb, vtag_t tag)
+{
+	struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
+	struct dl_info *dli;
+
+	hlist_for_each_entry_rcu(dli, head, dl_hlist) {
+		if (dli->dl_tag == tag && dli->dl_sb == sb)
+			return dli;
+	}
+	return NULL;
+}
+
+
+struct dl_info *locate_dl_info(struct super_block *sb, vtag_t tag)
+{
+	struct dl_info *dli;
+
+	rcu_read_lock();
+	dli = get_dl_info(__lookup_dl_info(sb, tag));
+	vxdprintk(VXD_CBIT(dlim, 7),
+		"locate_dl_info(%p,#%d) = %p", sb, tag, dli);
+	rcu_read_unlock();
+	return dli;
+}
+
+void rcu_free_dl_info(struct rcu_head *head)
+{
+	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
+	int usecnt, refcnt;
+
+	BUG_ON(!dli || !head);
+
+	usecnt = atomic_read(&dli->dl_usecnt);
+	BUG_ON(usecnt < 0);
+
+	refcnt = atomic_read(&dli->dl_refcnt);
+	BUG_ON(refcnt < 0);
+
+	vxdprintk(VXD_CBIT(dlim, 3),
+		"rcu_free_dl_info(%p)", dli);
+	if (!usecnt)
+		__dealloc_dl_info(dli);
+	else
+		printk("!!! rcu didn't free\n");
+}
+
+
+
+
+static int do_addrem_dlimit(uint32_t id, const char __user *name,
+	uint32_t flags, int add)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		if (add) {
+			dli = __alloc_dl_info(sb, id);
+			spin_lock(&dl_info_hash_lock);
+
+			ret = -EEXIST;
+			if (__lookup_dl_info(sb, id))
+				goto out_unlock;
+			__hash_dl_info(dli);
+			dli = NULL;
+		} else {
+			spin_lock(&dl_info_hash_lock);
+			dli = __lookup_dl_info(sb, id);
+
+			ret = -ESRCH;
+			if (!dli)
+				goto out_unlock;
+			__unhash_dl_info(dli);
+		}
+		ret = 0;
+	out_unlock:
+		spin_unlock(&dl_info_hash_lock);
+		if (add && dli)
+			__dealloc_dl_info(dli);
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+int vc_add_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
+}
+
+int vc_rem_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_add_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id,
+		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
+}
+
+int vc_rem_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_addrem_dlimit(id,
+		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+static inline
+int do_set_dlimit(uint32_t id, const char __user *name,
+	uint32_t space_used, uint32_t space_total,
+	uint32_t inodes_used, uint32_t inodes_total,
+	uint32_t reserved, uint32_t flags)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		/* sanity checks */
+		if ((reserved != CDLIM_KEEP &&
+			reserved > 100) ||
+			(inodes_used != CDLIM_KEEP &&
+			inodes_used > inodes_total) ||
+			(space_used != CDLIM_KEEP &&
+			space_used > space_total))
+			goto out_release;
+
+		ret = -ESRCH;
+		dli = locate_dl_info(sb, id);
+		if (!dli)
+			goto out_release;
+
+		spin_lock(&dli->dl_lock);
+
+		if (inodes_used != CDLIM_KEEP)
+			dli->dl_inodes_used = inodes_used;
+		if (inodes_total != CDLIM_KEEP)
+			dli->dl_inodes_total = inodes_total;
+		if (space_used != CDLIM_KEEP)
+			dli->dl_space_used = dlimit_space_32to64(
+				space_used, flags, DLIMS_USED);
+
+		if (space_total == CDLIM_INFINITY)
+			dli->dl_space_total = DLIM_INFINITY;
+		else if (space_total != CDLIM_KEEP)
+			dli->dl_space_total = dlimit_space_32to64(
+				space_total, flags, DLIMS_TOTAL);
+
+		if (reserved != CDLIM_KEEP)
+			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
+
+		spin_unlock(&dli->dl_lock);
+
+		put_dl_info(dli);
+		ret = 0;
+
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+int vc_set_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_dlimit(id, vc_data.name,
+		vc_data.space_used, vc_data.space_total,
+		vc_data.inodes_used, vc_data.inodes_total,
+		vc_data.reserved, vc_data.flags);
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
+		vc_data.space_used, vc_data.space_total,
+		vc_data.inodes_used, vc_data.inodes_total,
+		vc_data.reserved, vc_data.flags);
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+static inline
+int do_get_dlimit(uint32_t id, const char __user *name,
+	uint32_t *space_used, uint32_t *space_total,
+	uint32_t *inodes_used, uint32_t *inodes_total,
+	uint32_t *reserved, uint32_t *flags)
+{
+	struct path path;
+	int ret;
+
+	ret = user_lpath(name, &path);
+	if (!ret) {
+		struct super_block *sb;
+		struct dl_info *dli;
+
+		ret = -EINVAL;
+		if (!path.dentry->d_inode)
+			goto out_release;
+		if (!(sb = path.dentry->d_inode->i_sb))
+			goto out_release;
+
+		ret = -ESRCH;
+		dli = locate_dl_info(sb, id);
+		if (!dli)
+			goto out_release;
+
+		spin_lock(&dli->dl_lock);
+		*inodes_used = dli->dl_inodes_used;
+		*inodes_total = dli->dl_inodes_total;
+
+		*space_used = dlimit_space_64to32(
+			dli->dl_space_used, flags, DLIMS_USED);
+
+		if (dli->dl_space_total == DLIM_INFINITY)
+			*space_total = CDLIM_INFINITY;
+		else
+			*space_total = dlimit_space_64to32(
+				dli->dl_space_total, flags, DLIMS_TOTAL);
+
+		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
+		spin_unlock(&dli->dl_lock);
+
+		put_dl_info(dli);
+		ret = -EFAULT;
+
+		ret = 0;
+	out_release:
+		path_put(&path);
+	}
+	return ret;
+}
+
+
+int vc_get_dlimit(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_dlimit(id, vc_data.name,
+		&vc_data.space_used, &vc_data.space_total,
+		&vc_data.inodes_used, &vc_data.inodes_total,
+		&vc_data.reserved, &vc_data.flags);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_get_dlimit_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_ctx_dlimit_v0_x32 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
+		&vc_data.space_used, &vc_data.space_total,
+		&vc_data.inodes_used, &vc_data.inodes_total,
+		&vc_data.reserved, &vc_data.flags);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+	struct dl_info *dli;
+	__u64 blimit, bfree, bavail;
+	__u32 ifree;
+
+	dli = locate_dl_info(sb, dx_current_tag());
+	if (!dli)
+		return;
+
+	spin_lock(&dli->dl_lock);
+	if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
+		goto no_ilim;
+
+	/* reduce max inodes available to limit */
+	if (buf->f_files > dli->dl_inodes_total)
+		buf->f_files = dli->dl_inodes_total;
+
+	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
+	/* reduce free inodes to min */
+	if (ifree < buf->f_ffree)
+		buf->f_ffree = ifree;
+
+no_ilim:
+	if (dli->dl_space_total == DLIM_INFINITY)
+		goto no_blim;
+
+	blimit = dli->dl_space_total >> sb->s_blocksize_bits;
+
+	if (dli->dl_space_total < dli->dl_space_used)
+		bfree = 0;
+	else
+		bfree = (dli->dl_space_total - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+
+	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
+	if (bavail < dli->dl_space_used)
+		bavail = 0;
+	else
+		bavail = (bavail - dli->dl_space_used)
+			>> sb->s_blocksize_bits;
+
+	/* reduce max space available to limit */
+	if (buf->f_blocks > blimit)
+		buf->f_blocks = blimit;
+
+	/* reduce free space to min */
+	if (bfree < buf->f_bfree)
+		buf->f_bfree = bfree;
+
+	/* reduce avail space to min */
+	if (bavail < buf->f_bavail)
+		buf->f_bavail = bavail;
+
+no_blim:
+	spin_unlock(&dli->dl_lock);
+	put_dl_info(dli);
+
+	return;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(locate_dl_info);
+EXPORT_SYMBOL_GPL(rcu_free_dl_info);
+
diff -ruNp linux-3.13.11/kernel/vserver/helper.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/helper.c
--- linux-3.13.11/kernel/vserver/helper.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/helper.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,242 @@
+/*
+ *  linux/kernel/vserver/helper.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic helper
+ *
+ */
+
+#include <linux/kmod.h>
+#include <linux/reboot.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/signal.h>
+
+
+char vshelper_path[255] = "/sbin/vshelper";
+
+static int vshelper_init(struct subprocess_info *info, struct cred *new_cred)
+{
+	current->flags &= ~PF_NO_SETAFFINITY;
+	return 0;
+}
+
+static int vs_call_usermodehelper(char *path, char **argv, char **envp, int wait)
+{
+	struct subprocess_info *info;
+	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+
+	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
+					 vshelper_init, NULL, NULL);
+	if (info == NULL)
+		return -ENOMEM;
+
+	return call_usermodehelper_exec(info, wait);
+}
+
+static int do_vshelper(char *name, char *argv[], char *envp[], int sync)
+{
+	int ret;
+
+	if ((ret = vs_call_usermodehelper(name, argv, envp,
+		sync ? UMH_WAIT_PROC : UMH_WAIT_EXEC))) {
+		printk(KERN_WARNING "%s: (%s %s) returned %s with %d\n",
+			name, argv[1], argv[2],
+			sync ? "sync" : "async", ret);
+	}
+	vxdprintk(VXD_CBIT(switch, 4),
+		"%s: (%s %s) returned %s with %d",
+		name, argv[1], argv[2], sync ? "sync" : "async", ret);
+	return ret;
+}
+
+/*
+ *      vshelper path is set via /proc/sys
+ *      invoked by vserver sys_reboot(), with
+ *      the following arguments
+ *
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "restart", "halt", "poweroff", ...
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg)
+{
+	char id_buf[8], cmd_buf[16];
+	char uid_buf[16], pid_buf[16];
+	int ret;
+
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+			uid_buf, pid_buf, cmd_buf, 0};
+
+	if (vx_info_state(vxi, VXS_HELPER))
+		return -EAGAIN;
+	vxi->vx_state |= VXS_HELPER;
+
+	snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id);
+
+	snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
+	snprintf(uid_buf, sizeof(uid_buf), "VS_UID=%d",
+		from_kuid(&init_user_ns, current_uid()));
+	snprintf(pid_buf, sizeof(pid_buf), "VS_PID=%d", current->pid);
+
+	switch (cmd) {
+	case LINUX_REBOOT_CMD_RESTART:
+		argv[1] = "restart";
+		break;
+
+	case LINUX_REBOOT_CMD_HALT:
+		argv[1] = "halt";
+		break;
+
+	case LINUX_REBOOT_CMD_POWER_OFF:
+		argv[1] = "poweroff";
+		break;
+
+	case LINUX_REBOOT_CMD_SW_SUSPEND:
+		argv[1] = "swsusp";
+		break;
+
+	case LINUX_REBOOT_CMD_OOM:
+		argv[1] = "oom";
+		break;
+
+	default:
+		vxi->vx_state &= ~VXS_HELPER;
+		return 0;
+	}
+
+	ret = do_vshelper(vshelper_path, argv, envp, 0);
+	vxi->vx_state &= ~VXS_HELPER;
+	__wakeup_vx_info(vxi);
+	return (ret) ? -EPERM : 0;
+}
+
+
+long vs_reboot(unsigned int cmd, void __user *arg)
+{
+	struct vx_info *vxi = current_vx_info();
+	long ret = 0;
+
+	vxdprintk(VXD_CBIT(misc, 5),
+		"vs_reboot(%p[#%d],%u)",
+		vxi, vxi ? vxi->vx_id : 0, cmd);
+
+	ret = vs_reboot_helper(vxi, cmd, arg);
+	if (ret)
+		return ret;
+
+	vxi->reboot_cmd = cmd;
+	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
+		switch (cmd) {
+		case LINUX_REBOOT_CMD_RESTART:
+		case LINUX_REBOOT_CMD_HALT:
+		case LINUX_REBOOT_CMD_POWER_OFF:
+			vx_info_kill(vxi, 0, SIGKILL);
+			vx_info_kill(vxi, 1, SIGKILL);
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
+long vs_oom_action(unsigned int cmd)
+{
+	struct vx_info *vxi = current_vx_info();
+	long ret = 0;
+
+	vxdprintk(VXD_CBIT(misc, 5),
+		"vs_oom_action(%p[#%d],%u)",
+		vxi, vxi ? vxi->vx_id : 0, cmd);
+
+	ret = vs_reboot_helper(vxi, cmd, NULL);
+	if (ret)
+		return ret;
+
+	vxi->reboot_cmd = cmd;
+	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
+		vx_info_kill(vxi, 0, SIGKILL);
+		vx_info_kill(vxi, 1, SIGKILL);
+	}
+	return 0;
+}
+
+/*
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "startup", "shutdown"
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_state_change(struct vx_info *vxi, unsigned int cmd)
+{
+	char id_buf[8], cmd_buf[16];
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+	if (!vx_info_flags(vxi, VXF_SC_HELPER, 0))
+		return 0;
+
+	snprintf(id_buf, sizeof(id_buf), "%d", vxi->vx_id);
+	snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
+
+	switch (cmd) {
+	case VSC_STARTUP:
+		argv[1] = "startup";
+		break;
+	case VSC_SHUTDOWN:
+		argv[1] = "shutdown";
+		break;
+	default:
+		return 0;
+	}
+
+	return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
+
+/*
+ *      argv [0] = vshelper_path;
+ *      argv [1] = action: "netup", "netdown"
+ *      argv [2] = context identifier
+ *
+ *      envp [*] = type-specific parameters
+ */
+
+long vs_net_change(struct nx_info *nxi, unsigned int cmd)
+{
+	char id_buf[8], cmd_buf[16];
+	char *argv[] = {vshelper_path, NULL, id_buf, 0};
+	char *envp[] = {"HOME=/", "TERM=linux",
+			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+	if (!nx_info_flags(nxi, NXF_SC_HELPER, 0))
+		return 0;
+
+	snprintf(id_buf, sizeof(id_buf), "%d", nxi->nx_id);
+	snprintf(cmd_buf, sizeof(cmd_buf), "VS_CMD=%08x", cmd);
+
+	switch (cmd) {
+	case VSC_NETUP:
+		argv[1] = "netup";
+		break;
+	case VSC_NETDOWN:
+		argv[1] = "netdown";
+		break;
+	default:
+		return 0;
+	}
+
+	return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/history.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/history.c
--- linux-3.13.11/kernel/vserver/history.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/history.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,258 @@
+/*
+ *  kernel/vserver/history.c
+ *
+ *  Virtual Context History Backtrace
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *  V0.02  hash/unhash and trace
+ *  V0.03  preemption fixes
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/history.h>
+
+
+#ifdef	CONFIG_VSERVER_HISTORY
+#define VXH_SIZE	CONFIG_VSERVER_HISTORY_SIZE
+#else
+#define VXH_SIZE	64
+#endif
+
+struct _vx_history {
+	unsigned int counter;
+
+	struct _vx_hist_entry entry[VXH_SIZE + 1];
+};
+
+
+DEFINE_PER_CPU(struct _vx_history, vx_history_buffer);
+
+unsigned volatile int vxh_active = 1;
+
+static atomic_t sequence = ATOMIC_INIT(0);
+
+
+/*	vxh_advance()
+
+	* requires disabled preemption				*/
+
+struct _vx_hist_entry *vxh_advance(void *loc)
+{
+	unsigned int cpu = smp_processor_id();
+	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+	struct _vx_hist_entry *entry;
+	unsigned int index;
+
+	index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE;
+	entry = &hist->entry[index];
+
+	entry->seq = atomic_inc_return(&sequence);
+	entry->loc = loc;
+	return entry;
+}
+
+EXPORT_SYMBOL_GPL(vxh_advance);
+
+
+#define VXH_LOC_FMTS	"(#%04x,*%d):%p"
+
+#define VXH_LOC_ARGS(e)	(e)->seq, cpu, (e)->loc
+
+
+#define VXH_VXI_FMTS	"%p[#%d,%d.%d]"
+
+#define VXH_VXI_ARGS(e)	(e)->vxi.ptr,				\
+			(e)->vxi.ptr ? (e)->vxi.xid : 0,	\
+			(e)->vxi.ptr ? (e)->vxi.usecnt : 0,	\
+			(e)->vxi.ptr ? (e)->vxi.tasks : 0
+
+void	vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu)
+{
+	switch (e->type) {
+	case VXH_THROW_OOPS:
+		printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e));
+		break;
+
+	case VXH_GET_VX_INFO:
+	case VXH_PUT_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_GET_VX_INFO) ? "get" : "put",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_INIT_VX_INFO:
+	case VXH_SET_VX_INFO:
+	case VXH_CLR_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_INIT_VX_INFO) ? "init" :
+			((e->type == VXH_SET_VX_INFO) ? "set" : "clr"),
+			VXH_VXI_ARGS(e), e->sc.data);
+		break;
+
+	case VXH_CLAIM_VX_INFO:
+	case VXH_RELEASE_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release",
+			VXH_VXI_ARGS(e), e->sc.data);
+		break;
+
+	case VXH_ALLOC_VX_INFO:
+	case VXH_DEALLOC_VX_INFO:
+		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_HASH_VX_INFO:
+	case VXH_UNHASH_VX_INFO:
+		printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash",
+			VXH_VXI_ARGS(e));
+		break;
+
+	case VXH_LOC_VX_INFO:
+	case VXH_LOOKUP_VX_INFO:
+	case VXH_CREATE_VX_INFO:
+		printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n",
+			VXH_LOC_ARGS(e),
+			(e->type == VXH_CREATE_VX_INFO) ? "create" :
+			((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"),
+			e->ll.arg, VXH_VXI_ARGS(e));
+		break;
+	}
+}
+
+static void __vxh_dump_history(void)
+{
+	unsigned int i, cpu;
+
+	printk("History:\tSEQ: %8x\tNR_CPUS: %d\n",
+		atomic_read(&sequence), NR_CPUS);
+
+	for (i = 0; i < VXH_SIZE; i++) {
+		for_each_online_cpu(cpu) {
+			struct _vx_history *hist =
+				&per_cpu(vx_history_buffer, cpu);
+			unsigned int index = (hist->counter - i) % VXH_SIZE;
+			struct _vx_hist_entry *entry = &hist->entry[index];
+
+			vxh_dump_entry(entry, cpu);
+		}
+	}
+}
+
+void	vxh_dump_history(void)
+{
+	vxh_active = 0;
+#ifdef CONFIG_SMP
+	local_irq_enable();
+	smp_send_stop();
+	local_irq_disable();
+#endif
+	__vxh_dump_history();
+}
+
+
+/* vserver syscall commands below here */
+
+
+int vc_dump_history(uint32_t id)
+{
+	vxh_active = 0;
+	__vxh_dump_history();
+	vxh_active = 1;
+
+	return 0;
+}
+
+
+int do_read_history(struct __user _vx_hist_entry *data,
+	int cpu, uint32_t *index, uint32_t *count)
+{
+	int pos, ret = 0;
+	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+	int end = hist->counter;
+	int start = end - VXH_SIZE + 2;
+	int idx = *index;
+
+	/* special case: get current pos */
+	if (!*count) {
+		*index = end;
+		return 0;
+	}
+
+	/* have we lost some data? */
+	if (idx < start)
+		idx = start;
+
+	for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
+		struct _vx_hist_entry *entry =
+			&hist->entry[idx % VXH_SIZE];
+
+		/* send entry to userspace */
+		ret = copy_to_user(&data[pos], entry, sizeof(*entry));
+		if (ret)
+			break;
+	}
+	/* save new index and count */
+	*index = idx;
+	*count = pos;
+	return ret ? ret : (*index < end);
+}
+
+int vc_read_history(uint32_t id, void __user *data)
+{
+	struct vcmd_read_history_v0 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data,
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_read_history_x32(uint32_t id, void __user *data)
+{
+	struct vcmd_read_history_v0_x32 vc_data;
+	int ret;
+
+	if (id >= NR_CPUS)
+		return -EINVAL;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_read_history((struct __user _vx_hist_entry *)
+		compat_ptr(vc_data.data_ptr),
+		id, &vc_data.index, &vc_data.count);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
diff -ruNp linux-3.13.11/kernel/vserver/inet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/inet.c
--- linux-3.13.11/kernel/vserver/inet.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/inet.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,236 @@
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/export.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+#include <linux/vserver/debug.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+
+
+int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+	int ret = 0;
+
+	if (!nxi1 || !nxi2 || nxi1 == nxi2)
+		ret = 1;
+	else {
+		struct nx_addr_v4 *ptr;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&nxi1->addr_lock, irqflags);
+		for (ptr = &nxi1->v4; ptr; ptr = ptr->next) {
+			if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+				ret = 1;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&nxi1->addr_lock, irqflags);
+	}
+
+	vxdprintk(VXD_CBIT(net, 2),
+		"nx_v4_addr_conflict(%p,%p): %d",
+		nxi1, nxi2, ret);
+
+	return ret;
+}
+
+
+#ifdef	CONFIG_IPV6
+
+int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+	int ret = 0;
+
+	if (!nxi1 || !nxi2 || nxi1 == nxi2)
+		ret = 1;
+	else {
+		struct nx_addr_v6 *ptr;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&nxi1->addr_lock, irqflags);
+		for (ptr = &nxi1->v6; ptr; ptr = ptr->next) {
+			if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+				ret = 1;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&nxi1->addr_lock, irqflags);
+	}
+
+	vxdprintk(VXD_CBIT(net, 2),
+		"nx_v6_addr_conflict(%p,%p): %d",
+		nxi1, nxi2, ret);
+
+	return ret;
+}
+
+#endif
+
+int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	struct in_device *in_dev;
+	struct in_ifaddr **ifap;
+	struct in_ifaddr *ifa;
+	int ret = 0;
+
+	if (!dev)
+		goto out;
+	in_dev = in_dev_get(dev);
+	if (!in_dev)
+		goto out;
+
+	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+		ifap = &ifa->ifa_next) {
+		if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) {
+			ret = 1;
+			break;
+		}
+	}
+	in_dev_put(in_dev);
+out:
+	return ret;
+}
+
+
+#ifdef	CONFIG_IPV6
+
+int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	struct inet6_dev *in_dev;
+	struct inet6_ifaddr *ifa;
+	int ret = 0;
+
+	if (!dev)
+		goto out;
+	in_dev = in6_dev_get(dev);
+	if (!in_dev)
+		goto out;
+
+	// for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL;
+	list_for_each_entry(ifa, &in_dev->addr_list, if_list) {
+		if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) {
+			ret = 1;
+			break;
+		}
+	}
+	in6_dev_put(in_dev);
+out:
+	return ret;
+}
+
+#endif
+
+int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+	int ret = 1;
+
+	if (!nxi)
+		goto out;
+	if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi))
+		goto out;
+#ifdef	CONFIG_IPV6
+	ret = 2;
+	if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi))
+		goto out;
+#endif
+	ret = 0;
+out:
+	vxdprintk(VXD_CBIT(net, 3),
+		"dev_in_nx_info(%p,%p[#%d]) = %d",
+		dev, nxi, nxi ? nxi->nx_id : 0, ret);
+	return ret;
+}
+
+struct rtable *ip_v4_find_src(struct net *net, struct nx_info *nxi,
+	struct flowi4 *fl4)
+{
+	struct rtable *rt;
+
+	if (!nxi)
+		return NULL;
+
+	/* FIXME: handle lback only case */
+	if (!NX_IPV4(nxi))
+		return ERR_PTR(-EPERM);
+
+	vxdprintk(VXD_CBIT(net, 4),
+		"ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT,
+		nxi, nxi ? nxi->nx_id : 0,
+		NIPQUAD(fl4->saddr), NIPQUAD(fl4->daddr));
+
+	/* single IP is unconditional */
+	if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) &&
+		(fl4->saddr == INADDR_ANY))
+		fl4->saddr = nxi->v4.ip[0].s_addr;
+
+	if (fl4->saddr == INADDR_ANY) {
+		struct nx_addr_v4 *ptr;
+		__be32 found = 0;
+
+		rt = __ip_route_output_key(net, fl4);
+		if (!IS_ERR(rt)) {
+			found = fl4->saddr;
+			ip_rt_put(rt);
+			vxdprintk(VXD_CBIT(net, 4),
+				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(found));
+			if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND))
+				goto found;
+		}
+
+		WARN_ON_ONCE(in_irq());
+		spin_lock_bh(&nxi->addr_lock);
+		for (ptr = &nxi->v4; ptr; ptr = ptr->next) {
+			__be32 primary = ptr->ip[0].s_addr;
+			__be32 mask = ptr->mask.s_addr;
+			__be32 neta = primary & mask;
+
+			vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: "
+				NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary),
+				NIPQUAD(mask), NIPQUAD(neta));
+			if ((found & mask) != neta)
+				continue;
+
+			fl4->saddr = primary;
+			rt = __ip_route_output_key(net, fl4);
+			vxdprintk(VXD_CBIT(net, 4),
+				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+				nxi, nxi ? nxi->nx_id : 0, fl4->flowi4_oif, NIPQUAD(primary));
+			if (!IS_ERR(rt)) {
+				found = fl4->saddr;
+				ip_rt_put(rt);
+				if (found == primary)
+					goto found_unlock;
+			}
+		}
+		/* still no source ip? */
+		found = ipv4_is_loopback(fl4->daddr)
+			? IPI_LOOPBACK : nxi->v4.ip[0].s_addr;
+	found_unlock:
+		spin_unlock_bh(&nxi->addr_lock);
+	found:
+		/* assign src ip to flow */
+		fl4->saddr = found;
+
+	} else {
+		if (!v4_addr_in_nx_info(nxi, fl4->saddr, NXA_MASK_BIND))
+			return ERR_PTR(-EPERM);
+	}
+
+	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) {
+		if (ipv4_is_loopback(fl4->daddr))
+			fl4->daddr = nxi->v4_lback.s_addr;
+		if (ipv4_is_loopback(fl4->saddr))
+			fl4->saddr = nxi->v4_lback.s_addr;
+	} else if (ipv4_is_loopback(fl4->daddr) &&
+		!nx_info_flags(nxi, NXF_LBACK_ALLOW, 0))
+		return ERR_PTR(-EPERM);
+
+	return NULL;
+}
+
+EXPORT_SYMBOL_GPL(ip_v4_find_src);
+
diff -ruNp linux-3.13.11/kernel/vserver/init.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/init.c
--- linux-3.13.11/kernel/vserver/init.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/init.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,45 @@
+/*
+ *  linux/kernel/vserver/init.c
+ *
+ *  Virtual Server Init
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *
+ */
+
+#include <linux/init.h>
+
+int	vserver_register_sysctl(void);
+void	vserver_unregister_sysctl(void);
+
+
+static int __init init_vserver(void)
+{
+	int ret = 0;
+
+#ifdef	CONFIG_VSERVER_DEBUG
+	vserver_register_sysctl();
+#endif
+	return ret;
+}
+
+
+static void __exit exit_vserver(void)
+{
+
+#ifdef	CONFIG_VSERVER_DEBUG
+	vserver_unregister_sysctl();
+#endif
+	return;
+}
+
+/* FIXME: GFP_ZONETYPES gone
+long vx_slab[GFP_ZONETYPES]; */
+long vx_area;
+
+
+module_init(init_vserver);
+module_exit(exit_vserver);
+
diff -ruNp linux-3.13.11/kernel/vserver/inode.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/inode.c
--- linux-3.13.11/kernel/vserver/inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/inode.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,440 @@
+/*
+ *  linux/kernel/vserver/inode.c
+ *
+ *  Virtual Server: File System Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  separated from vcontext V0.05
+ *  V0.02  moved to tag (instead of xid)
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/vserver/inode.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+
+#include <asm/uaccess.h>
+#include <../../fs/proc/internal.h>
+
+
+static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+	struct proc_dir_entry *entry;
+
+	if (!in || !in->i_sb)
+		return -ESRCH;
+
+	*flags = IATTR_TAG
+		| (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0)
+		| (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0)
+		| (IS_BARRIER(in) ? IATTR_BARRIER : 0)
+		| (IS_COW(in) ? IATTR_COW : 0);
+	*mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW;
+
+	if (S_ISDIR(in->i_mode))
+		*mask |= IATTR_BARRIER;
+
+	if (IS_TAGGED(in)) {
+		*tag = i_tag_read(in);
+		*mask |= IATTR_TAG;
+	}
+
+	switch (in->i_sb->s_magic) {
+	case PROC_SUPER_MAGIC:
+		entry = PROC_I(in)->pde;
+
+		/* check for specific inodes? */
+		if (entry)
+			*mask |= IATTR_FLAGS;
+		if (entry)
+			*flags |= (entry->vx_flags & IATTR_FLAGS);
+		else
+			*flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS);
+		break;
+
+	case DEVPTS_SUPER_MAGIC:
+		*tag = i_tag_read(in);
+		*mask |= IATTR_TAG;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+int vc_get_iattr(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(vc_data.name, &path);
+	if (!ret) {
+		ret = __vc_get_iattr(path.dentry->d_inode,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_get_iattr_x32(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
+	if (!ret) {
+		ret = __vc_get_iattr(path.dentry->d_inode,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+
+int vc_fget_iattr(uint32_t fd, void __user *data)
+{
+	struct file *filp;
+	struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 };
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	filp = fget(fd);
+	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+		return -EBADF;
+
+	ret = __vc_get_iattr(filp->f_dentry->d_inode,
+		&vc_data.tag, &vc_data.flags, &vc_data.mask);
+
+	fput(filp);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+
+static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+	struct inode *in = de->d_inode;
+	int error = 0, is_proc = 0, has_tag = 0;
+	struct iattr attr = { 0 };
+
+	if (!in || !in->i_sb)
+		return -ESRCH;
+
+	is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC);
+	if ((*mask & IATTR_FLAGS) && !is_proc)
+		return -EINVAL;
+
+	has_tag = IS_TAGGED(in) ||
+		(in->i_sb->s_magic == DEVPTS_SUPER_MAGIC);
+	if ((*mask & IATTR_TAG) && !has_tag)
+		return -EINVAL;
+
+	mutex_lock(&in->i_mutex);
+	if (*mask & IATTR_TAG) {
+		attr.ia_tag = make_ktag(&init_user_ns, *tag);
+		attr.ia_valid |= ATTR_TAG;
+	}
+
+	if (*mask & IATTR_FLAGS) {
+		struct proc_dir_entry *entry = PROC_I(in)->pde;
+		unsigned int iflags = PROC_I(in)->vx_flags;
+
+		iflags = (iflags & ~(*mask & IATTR_FLAGS))
+			| (*flags & IATTR_FLAGS);
+		PROC_I(in)->vx_flags = iflags;
+		if (entry)
+			entry->vx_flags = iflags;
+	}
+
+	if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK |
+		IATTR_BARRIER | IATTR_COW)) {
+		int iflags = in->i_flags;
+		int vflags = in->i_vflags;
+
+		if (*mask & IATTR_IMMUTABLE) {
+			if (*flags & IATTR_IMMUTABLE)
+				iflags |= S_IMMUTABLE;
+			else
+				iflags &= ~S_IMMUTABLE;
+		}
+		if (*mask & IATTR_IXUNLINK) {
+			if (*flags & IATTR_IXUNLINK)
+				iflags |= S_IXUNLINK;
+			else
+				iflags &= ~S_IXUNLINK;
+		}
+		if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) {
+			if (*flags & IATTR_BARRIER)
+				vflags |= V_BARRIER;
+			else
+				vflags &= ~V_BARRIER;
+		}
+		if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) {
+			if (*flags & IATTR_COW)
+				vflags |= V_COW;
+			else
+				vflags &= ~V_COW;
+		}
+		if (in->i_op && in->i_op->sync_flags) {
+			error = in->i_op->sync_flags(in, iflags, vflags);
+			if (error)
+				goto out;
+		}
+	}
+
+	if (attr.ia_valid) {
+		if (in->i_op && in->i_op->setattr)
+			error = in->i_op->setattr(de, &attr);
+		else {
+			error = inode_change_ok(in, &attr);
+			if (!error) {
+				setattr_copy(in, &attr);
+				mark_inode_dirty(in);
+			}
+		}
+	}
+
+out:
+	mutex_unlock(&in->i_mutex);
+	return error;
+}
+
+int vc_set_iattr(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(vc_data.name, &path);
+	if (!ret) {
+		ret = __vc_set_iattr(path.dentry,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#ifdef	CONFIG_COMPAT
+
+int vc_set_iattr_x32(void __user *data)
+{
+	struct path path;
+	struct vcmd_ctx_iattr_v1_x32 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
+	if (!ret) {
+		ret = __vc_set_iattr(path.dentry,
+			&vc_data.tag, &vc_data.flags, &vc_data.mask);
+		path_put(&path);
+	}
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
+#endif	/* CONFIG_COMPAT */
+
+int vc_fset_iattr(uint32_t fd, void __user *data)
+{
+	struct file *filp;
+	struct vcmd_ctx_fiattr_v0 vc_data;
+	int ret;
+
+	if (!capable(CAP_LINUX_IMMUTABLE))
+		return -EPERM;
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	filp = fget(fd);
+	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+		return -EBADF;
+
+	ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag,
+		&vc_data.flags, &vc_data.mask);
+
+	fput(filp);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return ret;
+}
+
+
+enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err };
+
+static match_table_t tokens = {
+	{Opt_notagcheck, "notagcheck"},
+#ifdef	CONFIG_PROPAGATE
+	{Opt_notag, "notag"},
+	{Opt_tag, "tag"},
+	{Opt_tagid, "tagid=%u"},
+#endif
+	{Opt_err, NULL}
+};
+
+
+static void __dx_parse_remove(char *string, char *opt)
+{
+	char *p = strstr(string, opt);
+	char *q = p;
+
+	if (p) {
+		while (*q != '\0' && *q != ',')
+			q++;
+		while (*q)
+			*p++ = *q++;
+		while (*p)
+			*p++ = '\0';
+	}
+}
+
+int dx_parse_tag(char *string, vtag_t *tag, int remove, int *mnt_flags,
+		 unsigned long *flags)
+{
+	int set = 0;
+	substring_t args[MAX_OPT_ARGS];
+	int token;
+	char *s, *p, *opts;
+#if defined(CONFIG_PROPAGATE) || defined(CONFIG_VSERVER_DEBUG)
+	int option = 0;
+#endif
+
+	if (!string)
+		return 0;
+	s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC);
+	if (!s)
+		return 0;
+
+	opts = s;
+	while ((p = strsep(&opts, ",")) != NULL) {
+		token = match_token(p, tokens, args);
+
+		switch (token) {
+#ifdef CONFIG_PROPAGATE
+		case Opt_tag:
+			if (tag)
+				*tag = 0;
+			if (remove)
+				__dx_parse_remove(s, "tag");
+			*mnt_flags |= MNT_TAGID;
+			set |= MNT_TAGID;
+			break;
+		case Opt_notag:
+			if (remove)
+				__dx_parse_remove(s, "notag");
+			*mnt_flags |= MNT_NOTAG;
+			set |= MNT_NOTAG;
+			break;
+		case Opt_tagid:
+			if (tag && !match_int(args, &option))
+				*tag = option;
+			if (remove)
+				__dx_parse_remove(s, "tagid");
+			*mnt_flags |= MNT_TAGID;
+			set |= MNT_TAGID;
+			break;
+#endif	/* CONFIG_PROPAGATE */
+		case Opt_notagcheck:
+			if (remove)
+				__dx_parse_remove(s, "notagcheck");
+			*flags |= MS_NOTAGCHECK;
+			set |= MS_NOTAGCHECK;
+			break;
+		}
+		vxdprintk(VXD_CBIT(tag, 7),
+			"dx_parse_tag(" VS_Q("%s") "): %d:#%d",
+			p, token, option);
+	}
+	if (set)
+		strcpy(string, s);
+	kfree(s);
+	return set;
+}
+
+#ifdef	CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode)
+{
+	vtag_t new_tag = 0;
+	struct vfsmount *mnt;
+	int propagate;
+
+	if (!nd)
+		return;
+	mnt = nd->path.mnt;
+	if (!mnt)
+		return;
+
+	propagate = (mnt->mnt_flags & MNT_TAGID);
+	if (propagate)
+		new_tag = mnt->mnt_tag;
+
+	vxdprintk(VXD_CBIT(tag, 7),
+		"dx_propagate_tag(%p[#%lu.%d]): %d,%d",
+		inode, inode->i_ino, inode->i_tag,
+		new_tag, (propagate) ? 1 : 0);
+
+	if (propagate)
+		i_tag_write(inode, new_tag);
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(__dx_propagate_tag);
+
+#endif	/* CONFIG_PROPAGATE */
+
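
A note for readers skimming the inode.c hunk above: the proc-entry update in __vc_set_iattr() replaces only the flag bits selected by the caller's mask. Below is a minimal user-space sketch of that masked merge; it is illustration only and not part of the patch, and the IATTR_FLAGS value is a placeholder (the real constant lives in the vserver headers).

#include <stdio.h>
#include <stdint.h>

#define IATTR_FLAGS	0x000000ffu	/* placeholder width, illustration only */

/* mirrors: iflags = (iflags & ~(*mask & IATTR_FLAGS)) | (*flags & IATTR_FLAGS) */
static uint32_t merge_vx_flags(uint32_t cur, uint32_t mask, uint32_t flags)
{
	return (cur & ~(mask & IATTR_FLAGS)) | (flags & IATTR_FLAGS);
}

int main(void)
{
	/* clear the low bits selected by mask, then copy them in from flags */
	printf("0x%02x -> 0x%02x\n", 0x05u, merge_vx_flags(0x05, 0x0f, 0x0a));
	return 0;
}
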
diff -ruNp linux-3.13.11/kernel/vserver/limit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit.c
--- linux-3.13.11/kernel/vserver/limit.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,345 @@
+/*
+ *  linux/kernel/vserver/limit.c
+ *
+ *  Virtual Server: Context Limits
+ *
+ *  Copyright (C) 2004-2010  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  changed vcmds to vxi arg
+ *  V0.03  added memory cgroup support
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
+#include <linux/vs_limit.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+const char *vlimit_name[NUM_LIMITS] = {
+	[RLIMIT_CPU]		= "CPU",
+	[RLIMIT_NPROC]		= "NPROC",
+	[RLIMIT_NOFILE]		= "NOFILE",
+	[RLIMIT_LOCKS]		= "LOCKS",
+	[RLIMIT_SIGPENDING]	= "SIGP",
+	[RLIMIT_MSGQUEUE]	= "MSGQ",
+
+	[VLIMIT_NSOCK]		= "NSOCK",
+	[VLIMIT_OPENFD]		= "OPENFD",
+	[VLIMIT_SHMEM]		= "SHMEM",
+	[VLIMIT_DENTRY]		= "DENTRY",
+};
+
+EXPORT_SYMBOL_GPL(vlimit_name);
+
+#define MASK_ENTRY(x)	(1 << (x))
+
+const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = {
+		/* minimum */
+	0
+	,	/* softlimit */
+	0
+	,       /* maximum */
+	MASK_ENTRY( RLIMIT_NPROC	) |
+	MASK_ENTRY( RLIMIT_NOFILE	) |
+	MASK_ENTRY( RLIMIT_LOCKS	) |
+	MASK_ENTRY( RLIMIT_MSGQUEUE	) |
+
+	MASK_ENTRY( VLIMIT_NSOCK	) |
+	MASK_ENTRY( VLIMIT_OPENFD	) |
+	MASK_ENTRY( VLIMIT_SHMEM	) |
+	MASK_ENTRY( VLIMIT_DENTRY	) |
+	0
+};
+		/* accounting only */
+uint32_t account_mask =
+	MASK_ENTRY( VLIMIT_SEMARY	) |
+	MASK_ENTRY( VLIMIT_NSEMS	) |
+	MASK_ENTRY( VLIMIT_MAPPED	) |
+	0;
+
+
+static int is_valid_vlimit(int id)
+{
+	uint32_t mask = vlimit_mask.minimum |
+		vlimit_mask.softlimit | vlimit_mask.maximum;
+	return mask & (1 << id);
+}
+
+static int is_accounted_vlimit(int id)
+{
+	if (is_valid_vlimit(id))
+		return 1;
+	return account_mask & (1 << id);
+}
+
+
+static inline uint64_t vc_get_soft(struct vx_info *vxi, int id)
+{
+	rlim_t limit = __rlim_soft(&vxi->limit, id);
+	return VX_VLIM(limit);
+}
+
+static inline uint64_t vc_get_hard(struct vx_info *vxi, int id)
+{
+	rlim_t limit = __rlim_hard(&vxi->limit, id);
+	return VX_VLIM(limit);
+}
+
+static int do_get_rlimit(struct vx_info *vxi, uint32_t id,
+	uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum)
+{
+	if (!is_valid_vlimit(id))
+		return -EINVAL;
+
+	if (minimum)
+		*minimum = CRLIM_UNSET;
+	if (softlimit)
+		*softlimit = vc_get_soft(vxi, id);
+	if (maximum)
+		*maximum = vc_get_hard(vxi, id);
+	return 0;
+}
+
+int vc_get_rlimit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_rlimit(vxi, vc_data.id,
+		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+static int do_set_rlimit(struct vx_info *vxi, uint32_t id,
+	uint64_t minimum, uint64_t softlimit, uint64_t maximum)
+{
+	if (!is_valid_vlimit(id))
+		return -EINVAL;
+
+	if (maximum != CRLIM_KEEP)
+		__rlim_hard(&vxi->limit, id) = VX_RLIM(maximum);
+	if (softlimit != CRLIM_KEEP)
+		__rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit);
+
+	/* clamp soft limit */
+	if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id))
+		__rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id);
+
+	return 0;
+}
+
+int vc_set_rlimit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_rlimit(vxi, vc_data.id,
+		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+#ifdef	CONFIG_IA32_EMULATION
+
+int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0_x32 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_rlimit(vxi, vc_data.id,
+		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_rlimit_v0_x32 vc_data;
+	int ret;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	ret = do_get_rlimit(vxi, vc_data.id,
+		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+#endif	/* CONFIG_IA32_EMULATION */
+
+
+int vc_get_rlimit_mask(uint32_t id, void __user *data)
+{
+	if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask)))
+		return -EFAULT;
+	return 0;
+}
+
+
+static inline void vx_reset_hits(struct _vx_limit *limit)
+{
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		atomic_set(&__rlim_lhit(limit, lim), 0);
+	}
+}
+
+int vc_reset_hits(struct vx_info *vxi, void __user *data)
+{
+	vx_reset_hits(&vxi->limit);
+	return 0;
+}
+
+static inline void vx_reset_minmax(struct _vx_limit *limit)
+{
+	rlim_t value;
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		value = __rlim_get(limit, lim);
+		__rlim_rmax(limit, lim) = value;
+		__rlim_rmin(limit, lim) = value;
+	}
+}
+
+int vc_reset_minmax(struct vx_info *vxi, void __user *data)
+{
+	vx_reset_minmax(&vxi->limit);
+	return 0;
+}
+
+
+int vc_rlimit_stat(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_rlimit_stat_v0 vc_data;
+	struct _vx_limit *limit = &vxi->limit;
+	int id;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	id = vc_data.id;
+	if (!is_accounted_vlimit(id))
+		return -EINVAL;
+
+	vx_limit_fixup(limit, id);
+	vc_data.hits = atomic_read(&__rlim_lhit(limit, id));
+	vc_data.value = __rlim_get(limit, id);
+	vc_data.minimum = __rlim_rmin(limit, id);
+	vc_data.maximum = __rlim_rmax(limit, id);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+void vx_vsi_meminfo(struct sysinfo *val)
+{
+#ifdef	CONFIG_MEMCG
+	struct mem_cgroup *mcg;
+	u64 res_limit, res_usage;
+
+	rcu_read_lock();
+	mcg = mem_cgroup_from_task(current);
+	rcu_read_unlock();
+	if (!mcg)
+		goto out;
+
+	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
+	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
+
+	if (res_limit != RES_COUNTER_MAX)
+		val->totalram = (res_limit >> PAGE_SHIFT);
+	val->freeram = val->totalram - (res_usage >> PAGE_SHIFT);
+	val->bufferram = 0;
+	val->totalhigh = 0;
+	val->freehigh = 0;
+out:
+#endif	/* CONFIG_MEMCG */
+	return;
+}
+
+void vx_vsi_swapinfo(struct sysinfo *val)
+{
+#ifdef	CONFIG_MEMCG
+#ifdef	CONFIG_MEMCG_SWAP
+	struct mem_cgroup *mcg;
+	u64 res_limit, res_usage, memsw_limit, memsw_usage;
+	s64 swap_limit, swap_usage;
+
+	rcu_read_lock();
+	mcg = mem_cgroup_from_task(current);
+	rcu_read_unlock();
+	if (!mcg)
+		goto out;
+
+	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
+	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
+	memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT);
+	memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE);
+
+	/* memory unlimited */
+	if (res_limit == RES_COUNTER_MAX)
+		goto out;
+
+	swap_limit = memsw_limit - res_limit;
+	/* we have a swap limit? */
+	if (memsw_limit != RES_COUNTER_MAX)
+		val->totalswap = swap_limit >> PAGE_SHIFT;
+
+	/* calculate swap part */
+	swap_usage = (memsw_usage > res_usage) ?
+		memsw_usage - res_usage : 0;
+
+	/* total shown minus usage gives free swap */
+	val->freeswap = (swap_usage < swap_limit) ?
+		val->totalswap - (swap_usage >> PAGE_SHIFT) : 0;
+out:
+#else	/* !CONFIG_MEMCG_SWAP */
+	val->totalswap = 0;
+	val->freeswap = 0;
+#endif	/* !CONFIG_MEMCG_SWAP */
+#endif	/* CONFIG_MEMCG */
+	return;
+}
+
+long vx_vsi_cached(struct sysinfo *val)
+{
+	long cache = 0;
+#ifdef	CONFIG_MEMCG
+	struct mem_cgroup *mcg;
+
+	rcu_read_lock();
+	mcg = mem_cgroup_from_task(current);
+	rcu_read_unlock();
+	if (!mcg)
+		goto out;
+
+	cache = mem_cgroup_stat_read_cache(mcg);
+out:
+#endif
+	return cache;
+}
+
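
As a side note on the limit.c code above: do_set_rlimit() treats a per-field "keep" sentinel as "leave unchanged" and always re-clamps the soft limit to the hard limit, while is_valid_vlimit() simply tests the requested id against the union of the three bitmasks in vlimit_mask. A small user-space sketch of both checks follows; it is not part of the patch, and the limit ids and the CRLIM_KEEP value are placeholders for illustration.

#include <stdio.h>
#include <stdint.h>

#define MASK_ENTRY(x)	(1u << (x))
#define CRLIM_KEEP	(~0ULL)		/* assumed "leave unchanged" sentinel */

int main(void)
{
	uint32_t maximum = MASK_ENTRY(6) | MASK_ENTRY(7);	/* two permitted ids */
	uint64_t hard = 1024, soft = 512, new_soft = 2048;

	/* is_valid_vlimit(): id must appear in minimum|softlimit|maximum */
	printf("id 6 valid: %d, id 9 valid: %d\n",
	       !!(maximum & MASK_ENTRY(6)), !!(maximum & MASK_ENTRY(9)));

	/* do_set_rlimit(): apply unless CRLIM_KEEP, then clamp soft <= hard */
	if (new_soft != CRLIM_KEEP)
		soft = new_soft;
	if (soft > hard)
		soft = hard;
	printf("soft=%llu hard=%llu\n",
	       (unsigned long long)soft, (unsigned long long)hard);
	return 0;
}
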
diff -ruNp linux-3.13.11/kernel/vserver/limit_init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit_init.h
--- linux-3.13.11/kernel/vserver/limit_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit_init.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,31 @@
+
+
+static inline void vx_info_init_limit(struct _vx_limit *limit)
+{
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		__rlim_soft(limit, lim) = RLIM_INFINITY;
+		__rlim_hard(limit, lim) = RLIM_INFINITY;
+		__rlim_set(limit, lim, 0);
+		atomic_set(&__rlim_lhit(limit, lim), 0);
+		__rlim_rmin(limit, lim) = 0;
+		__rlim_rmax(limit, lim) = 0;
+	}
+}
+
+static inline void vx_info_exit_limit(struct _vx_limit *limit)
+{
+	rlim_t value;
+	int lim;
+
+	for (lim = 0; lim < NUM_LIMITS; lim++) {
+		if ((1 << lim) & VLIM_NOCHECK)
+			continue;
+		value = __rlim_get(limit, lim);
+		vxwprintk_xid(value,
+			"!!! limit: %p[%s,%d] = %ld on exit.",
+			limit, vlimit_name[lim], lim, (long)value);
+	}
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/limit_proc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit_proc.h
--- linux-3.13.11/kernel/vserver/limit_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/limit_proc.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,57 @@
+#ifndef _VX_LIMIT_PROC_H
+#define _VX_LIMIT_PROC_H
+
+#include <linux/vserver/limit_int.h>
+
+
+#define VX_LIMIT_FMT	":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
+#define VX_LIMIT_TOP	\
+	"Limit\t current\t     min/max\t\t    soft/hard\t\thits\n"
+
+#define VX_LIMIT_ARG(r)				\
+	(unsigned long)__rlim_get(limit, r),	\
+	(unsigned long)__rlim_rmin(limit, r),	\
+	(unsigned long)__rlim_rmax(limit, r),	\
+	VX_VLIM(__rlim_soft(limit, r)),		\
+	VX_VLIM(__rlim_hard(limit, r)),		\
+	atomic_read(&__rlim_lhit(limit, r))
+
+static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
+{
+	vx_limit_fixup(limit, -1);
+	return sprintf(buffer, VX_LIMIT_TOP
+		"PROC"	VX_LIMIT_FMT
+		"VM"	VX_LIMIT_FMT
+		"VML"	VX_LIMIT_FMT
+		"RSS"	VX_LIMIT_FMT
+		"ANON"	VX_LIMIT_FMT
+		"RMAP"	VX_LIMIT_FMT
+		"FILES" VX_LIMIT_FMT
+		"OFD"	VX_LIMIT_FMT
+		"LOCKS" VX_LIMIT_FMT
+		"SOCK"	VX_LIMIT_FMT
+		"MSGQ"	VX_LIMIT_FMT
+		"SHM"	VX_LIMIT_FMT
+		"SEMA"	VX_LIMIT_FMT
+		"SEMS"	VX_LIMIT_FMT
+		"DENT"	VX_LIMIT_FMT,
+		VX_LIMIT_ARG(RLIMIT_NPROC),
+		VX_LIMIT_ARG(RLIMIT_AS),
+		VX_LIMIT_ARG(RLIMIT_MEMLOCK),
+		VX_LIMIT_ARG(RLIMIT_RSS),
+		VX_LIMIT_ARG(VLIMIT_ANON),
+		VX_LIMIT_ARG(VLIMIT_MAPPED),
+		VX_LIMIT_ARG(RLIMIT_NOFILE),
+		VX_LIMIT_ARG(VLIMIT_OPENFD),
+		VX_LIMIT_ARG(RLIMIT_LOCKS),
+		VX_LIMIT_ARG(VLIMIT_NSOCK),
+		VX_LIMIT_ARG(RLIMIT_MSGQUEUE),
+		VX_LIMIT_ARG(VLIMIT_SHMEM),
+		VX_LIMIT_ARG(VLIMIT_SEMARY),
+		VX_LIMIT_ARG(VLIMIT_NSEMS),
+		VX_LIMIT_ARG(VLIMIT_DENTRY));
+}
+
+#endif	/* _VX_LIMIT_PROC_H */
+
+
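
To make the limit_proc.h format strings above easier to picture, here is a stand-alone sketch that prints one header line and one table row with made-up numbers; only the two format macros are copied from the header, everything else is illustration and not part of the patch.

#include <stdio.h>

#define VX_LIMIT_FMT	":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
#define VX_LIMIT_TOP	\
	"Limit\t current\t     min/max\t\t    soft/hard\t\thits\n"

int main(void)
{
	/* columns: current, observed min/max, soft/hard limit, limit hits */
	printf(VX_LIMIT_TOP "PROC" VX_LIMIT_FMT,
	       42L, 3L, 97L, 100LL, 200LL, 0);
	return 0;
}
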
diff -ruNp linux-3.13.11/kernel/vserver/network.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/network.c
--- linux-3.13.11/kernel/vserver/network.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/network.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,1053 @@
+/*
+ *  linux/kernel/vserver/network.c
+ *
+ *  Virtual Server: Network Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  cleaned up implementation
+ *  V0.03  added equiv nx commands
+ *  V0.04  switch to RCU based hash
+ *  V0.05  and back to locking again
+ *  V0.06  changed vcmds to nxi arg
+ *  V0.07  have __create claim() the nxi
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <net/ipv6.h>
+
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/network_cmd.h>
+
+
+atomic_t nx_global_ctotal	= ATOMIC_INIT(0);
+atomic_t nx_global_cactive	= ATOMIC_INIT(0);
+
+static struct kmem_cache *nx_addr_v4_cachep = NULL;
+static struct kmem_cache *nx_addr_v6_cachep = NULL;
+
+
+static int __init init_network(void)
+{
+	nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache",
+		sizeof(struct nx_addr_v4), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache",
+		sizeof(struct nx_addr_v6), 0,
+		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	return 0;
+}
+
+
+/*	__alloc_nx_addr_v4()					*/
+
+static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void)
+{
+	struct nx_addr_v4 *nxa = kmem_cache_alloc(
+		nx_addr_v4_cachep, GFP_KERNEL);
+
+	if (!IS_ERR(nxa))
+		memset(nxa, 0, sizeof(*nxa));
+	return nxa;
+}
+
+/*	__dealloc_nx_addr_v4()					*/
+
+static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa)
+{
+	kmem_cache_free(nx_addr_v4_cachep, nxa);
+}
+
+/*	__dealloc_nx_addr_v4_all()				*/
+
+static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa)
+{
+	while (nxa) {
+		struct nx_addr_v4 *next = nxa->next;
+
+		__dealloc_nx_addr_v4(nxa);
+		nxa = next;
+	}
+}
+
+
+#ifdef CONFIG_IPV6
+
+/*	__alloc_nx_addr_v6()					*/
+
+static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void)
+{
+	struct nx_addr_v6 *nxa = kmem_cache_alloc(
+		nx_addr_v6_cachep, GFP_KERNEL);
+
+	if (!IS_ERR(nxa))
+		memset(nxa, 0, sizeof(*nxa));
+	return nxa;
+}
+
+/*	__dealloc_nx_addr_v6()					*/
+
+static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa)
+{
+	kmem_cache_free(nx_addr_v6_cachep, nxa);
+}
+
+/*	__dealloc_nx_addr_v6_all()				*/
+
+static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa)
+{
+	while (nxa) {
+		struct nx_addr_v6 *next = nxa->next;
+
+		__dealloc_nx_addr_v6(nxa);
+		nxa = next;
+	}
+}
+
+#endif	/* CONFIG_IPV6 */
+
+/*	__alloc_nx_info()
+
+	* allocate an initialized nx_info struct
+	* doesn't make it visible (hash)			*/
+
+static struct nx_info *__alloc_nx_info(vnid_t nid)
+{
+	struct nx_info *new = NULL;
+
+	vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
+
+	/* would this benefit from a slab cache? */
+	new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memset(new, 0, sizeof(struct nx_info));
+	new->nx_id = nid;
+	INIT_HLIST_NODE(&new->nx_hlist);
+	atomic_set(&new->nx_usecnt, 0);
+	atomic_set(&new->nx_tasks, 0);
+	spin_lock_init(&new->addr_lock);
+	new->nx_state = 0;
+
+	new->nx_flags = NXF_INIT_SET;
+
+	/* rest of init goes here */
+
+	new->v4_lback.s_addr = htonl(INADDR_LOOPBACK);
+	new->v4_bcast.s_addr = htonl(INADDR_BROADCAST);
+
+	vxdprintk(VXD_CBIT(nid, 0),
+		"alloc_nx_info(%d) = %p", nid, new);
+	atomic_inc(&nx_global_ctotal);
+	return new;
+}
+
+/*	__dealloc_nx_info()
+
+	* final disposal of nx_info				*/
+
+static void __dealloc_nx_info(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 0),
+		"dealloc_nx_info(%p)", nxi);
+
+	nxi->nx_hlist.next = LIST_POISON1;
+	nxi->nx_id = -1;
+
+	BUG_ON(atomic_read(&nxi->nx_usecnt));
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	__dealloc_nx_addr_v4_all(nxi->v4.next);
+#ifdef CONFIG_IPV6
+	__dealloc_nx_addr_v6_all(nxi->v6.next);
+#endif
+
+	nxi->nx_state |= NXS_RELEASED;
+	kfree(nxi);
+	atomic_dec(&nx_global_ctotal);
+}
+
+static void __shutdown_nx_info(struct nx_info *nxi)
+{
+	nxi->nx_state |= NXS_SHUTDOWN;
+	vs_net_change(nxi, VSC_NETDOWN);
+}
+
+/*	exported stuff						*/
+
+void free_nx_info(struct nx_info *nxi)
+{
+	/* context shutdown is mandatory */
+	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
+
+	/* context must not be hashed */
+	BUG_ON(nxi->nx_state & NXS_HASHED);
+
+	BUG_ON(atomic_read(&nxi->nx_usecnt));
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	__dealloc_nx_info(nxi);
+}
+
+
+void __nx_set_lback(struct nx_info *nxi)
+{
+	int nid = nxi->nx_id;
+	__be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
+
+	nxi->v4_lback.s_addr = lback;
+}
+
+extern int __nx_inet_add_lback(__be32 addr);
+extern int __nx_inet_del_lback(__be32 addr);
+
+
+/*	hash table for nx_info hash */
+
+#define NX_HASH_SIZE	13
+
+struct hlist_head nx_info_hash[NX_HASH_SIZE];
+
+static DEFINE_SPINLOCK(nx_info_hash_lock);
+
+
+static inline unsigned int __hashval(vnid_t nid)
+{
+	return (nid % NX_HASH_SIZE);
+}
+
+
+
+/*	__hash_nx_info()
+
+	* add the nxi to the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __hash_nx_info(struct nx_info *nxi)
+{
+	struct hlist_head *head;
+
+	vxd_assert_lock(&nx_info_hash_lock);
+	vxdprintk(VXD_CBIT(nid, 4),
+		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+
+	/* context must not be hashed */
+	BUG_ON(nx_info_state(nxi, NXS_HASHED));
+
+	nxi->nx_state |= NXS_HASHED;
+	head = &nx_info_hash[__hashval(nxi->nx_id)];
+	hlist_add_head(&nxi->nx_hlist, head);
+	atomic_inc(&nx_global_cactive);
+}
+
+/*	__unhash_nx_info()
+
+	* remove the nxi from the global hash table
+	* requires the hash_lock to be held			*/
+
+static inline void __unhash_nx_info(struct nx_info *nxi)
+{
+	vxd_assert_lock(&nx_info_hash_lock);
+	vxdprintk(VXD_CBIT(nid, 4),
+		"__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
+		atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
+
+	/* context must be hashed */
+	BUG_ON(!nx_info_state(nxi, NXS_HASHED));
+	/* but without tasks */
+	BUG_ON(atomic_read(&nxi->nx_tasks));
+
+	nxi->nx_state &= ~NXS_HASHED;
+	hlist_del(&nxi->nx_hlist);
+	atomic_dec(&nx_global_cactive);
+}
+
+
+/*	__lookup_nx_info()
+
+	* requires the hash_lock to be held
+	* doesn't increment the nx_refcnt			*/
+
+static inline struct nx_info *__lookup_nx_info(vnid_t nid)
+{
+	struct hlist_head *head = &nx_info_hash[__hashval(nid)];
+	struct hlist_node *pos;
+	struct nx_info *nxi;
+
+	vxd_assert_lock(&nx_info_hash_lock);
+	hlist_for_each(pos, head) {
+		nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+
+		if (nxi->nx_id == nid)
+			goto found;
+	}
+	nxi = NULL;
+found:
+	vxdprintk(VXD_CBIT(nid, 0),
+		"__lookup_nx_info(#%u): %p[#%u]",
+		nid, nxi, nxi ? nxi->nx_id : 0);
+	return nxi;
+}
+
+
+/*	__create_nx_info()
+
+	* create the requested context
+	* get(), claim() and hash it				*/
+
+static struct nx_info *__create_nx_info(int id)
+{
+	struct nx_info *new, *nxi = NULL;
+
+	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
+
+	if (!(new = __alloc_nx_info(id)))
+		return ERR_PTR(-ENOMEM);
+
+	/* required to make dynamic nids unique */
+	spin_lock(&nx_info_hash_lock);
+
+	/* static context requested */
+	if ((nxi = __lookup_nx_info(id))) {
+		vxdprintk(VXD_CBIT(nid, 0),
+			"create_nx_info(%d) = %p (already there)", id, nxi);
+		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+			nxi = ERR_PTR(-EBUSY);
+		else
+			nxi = ERR_PTR(-EEXIST);
+		goto out_unlock;
+	}
+	/* new context */
+	vxdprintk(VXD_CBIT(nid, 0),
+		"create_nx_info(%d) = %p (new)", id, new);
+	claim_nx_info(new, NULL);
+	__nx_set_lback(new);
+	__hash_nx_info(get_nx_info(new));
+	nxi = new, new = NULL;
+
+out_unlock:
+	spin_unlock(&nx_info_hash_lock);
+	if (new)
+		__dealloc_nx_info(new);
+	return nxi;
+}
+
+
+
+/*	exported stuff						*/
+
+
+void unhash_nx_info(struct nx_info *nxi)
+{
+	__shutdown_nx_info(nxi);
+	spin_lock(&nx_info_hash_lock);
+	__unhash_nx_info(nxi);
+	spin_unlock(&nx_info_hash_lock);
+}
+
+/*	lookup_nx_info()
+
+	* search for a nx_info and get() it
+	* negative id means current				*/
+
+struct nx_info *lookup_nx_info(int id)
+{
+	struct nx_info *nxi = NULL;
+
+	if (id < 0) {
+		nxi = get_nx_info(current_nx_info());
+	} else if (id > 1) {
+		spin_lock(&nx_info_hash_lock);
+		nxi = get_nx_info(__lookup_nx_info(id));
+		spin_unlock(&nx_info_hash_lock);
+	}
+	return nxi;
+}
+
+/*	nid_is_hashed()
+
+	* verify that nid is still hashed			*/
+
+int nid_is_hashed(vnid_t nid)
+{
+	int hashed;
+
+	spin_lock(&nx_info_hash_lock);
+	hashed = (__lookup_nx_info(nid) != NULL);
+	spin_unlock(&nx_info_hash_lock);
+	return hashed;
+}
+
+
+#ifdef	CONFIG_PROC_FS
+
+/*	get_nid_list()
+
+	* get a subset of hashed nids for proc
+	* assumes size is at least one				*/
+
+int get_nid_list(int index, unsigned int *nids, int size)
+{
+	int hindex, nr_nids = 0;
+
+	/* only show current and children */
+	if (!nx_check(0, VS_ADMIN | VS_WATCH)) {
+		if (index > 0)
+			return 0;
+		nids[nr_nids] = nx_current_nid();
+		return 1;
+	}
+
+	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
+		struct hlist_head *head = &nx_info_hash[hindex];
+		struct hlist_node *pos;
+
+		spin_lock(&nx_info_hash_lock);
+		hlist_for_each(pos, head) {
+			struct nx_info *nxi;
+
+			if (--index > 0)
+				continue;
+
+			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+			nids[nr_nids] = nxi->nx_id;
+			if (++nr_nids >= size) {
+				spin_unlock(&nx_info_hash_lock);
+				goto out;
+			}
+		}
+		/* keep the lock time short */
+		spin_unlock(&nx_info_hash_lock);
+	}
+out:
+	return nr_nids;
+}
+#endif
+
+
+/*
+ *	migrate task to new network
+ *	gets nxi, puts old_nxi on change
+ */
+
+int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
+{
+	struct nx_info *old_nxi;
+	int ret = 0;
+
+	if (!p || !nxi)
+		BUG();
+
+	vxdprintk(VXD_CBIT(nid, 5),
+		"nx_migrate_task(%p,%p[#%d.%d.%d])",
+		p, nxi, nxi->nx_id,
+		atomic_read(&nxi->nx_usecnt),
+		atomic_read(&nxi->nx_tasks));
+
+	if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
+		!nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+		return -EACCES;
+
+	if (nx_info_state(nxi, NXS_SHUTDOWN))
+		return -EFAULT;
+
+	/* maybe disallow this completely? */
+	old_nxi = task_get_nx_info(p);
+	if (old_nxi == nxi)
+		goto out;
+
+	task_lock(p);
+	if (old_nxi)
+		clr_nx_info(&p->nx_info);
+	claim_nx_info(nxi, p);
+	set_nx_info(&p->nx_info, nxi);
+	p->nid = nxi->nx_id;
+	task_unlock(p);
+
+	vxdprintk(VXD_CBIT(nid, 5),
+		"moved task %p into nxi:%p[#%d]",
+		p, nxi, nxi->nx_id);
+
+	if (old_nxi)
+		release_nx_info(old_nxi, p);
+	ret = 0;
+out:
+	put_nx_info(old_nxi);
+	return ret;
+}
+
+
+void nx_set_persistent(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 6),
+		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+	get_nx_info(nxi);
+	claim_nx_info(nxi, NULL);
+}
+
+void nx_clear_persistent(struct nx_info *nxi)
+{
+	vxdprintk(VXD_CBIT(nid, 6),
+		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+	release_nx_info(nxi, NULL);
+	put_nx_info(nxi);
+}
+
+void nx_update_persistent(struct nx_info *nxi)
+{
+	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
+		nx_set_persistent(nxi);
+	else
+		nx_clear_persistent(nxi);
+}
+
+/* vserver syscall commands below here */
+
+/* task nid and nx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_nid(uint32_t id)
+{
+	vnid_t nid;
+
+	if (id) {
+		struct task_struct *tsk;
+
+		rcu_read_lock();
+		tsk = find_task_by_real_pid(id);
+		nid = (tsk) ? tsk->nid : -ESRCH;
+		rcu_read_unlock();
+	} else
+		nid = nx_current_nid();
+	return nid;
+}
+
+
+int vc_nx_info(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_nx_info_v0 vc_data;
+
+	vc_data.nid = nxi->nx_id;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+
+/* network functions */
+
+int vc_net_create(uint32_t nid, void __user *data)
+{
+	struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
+	struct nx_info *new_nxi;
+	int ret;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if ((nid > MAX_S_CONTEXT) || (nid < 2))
+		return -EINVAL;
+
+	new_nxi = __create_nx_info(nid);
+	if (IS_ERR(new_nxi))
+		return PTR_ERR(new_nxi);
+
+	/* initial flags */
+	new_nxi->nx_flags = vc_data.flagword;
+
+	ret = -ENOEXEC;
+	if (vs_net_change(new_nxi, VSC_NETUP))
+		goto out;
+
+	ret = nx_migrate_task(current, new_nxi);
+	if (ret)
+		goto out;
+
+	/* return context id on success */
+	ret = new_nxi->nx_id;
+
+	/* get a reference for persistent contexts */
+	if ((vc_data.flagword & NXF_PERSISTENT))
+		nx_set_persistent(new_nxi);
+out:
+	release_nx_info(new_nxi, NULL);
+	put_nx_info(new_nxi);
+	return ret;
+}
+
+
+int vc_net_migrate(struct nx_info *nxi, void __user *data)
+{
+	return nx_migrate_task(current, nxi);
+}
+
+
+static inline
+struct nx_addr_v4 *__find_v4_addr(struct nx_info *nxi,
+	__be32 ip, __be32 ip2, __be32 mask, uint16_t type, uint16_t flags,
+	struct nx_addr_v4 **prev)
+{
+	struct nx_addr_v4 *nxa = &nxi->v4;
+
+	for (; nxa; nxa = nxa->next) {
+		if ((nxa->ip[0].s_addr == ip) &&
+		    (nxa->ip[1].s_addr == ip2) &&
+		    (nxa->mask.s_addr == mask) &&
+		    (nxa->type == type) &&
+		    (nxa->flags == flags))
+		    return nxa;
+
+		/* save previous entry */
+		if (prev)
+			*prev = nxa;
+	}
+	return NULL;
+}
+
+int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
+	uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v4 *nxa = NULL;
+	struct nx_addr_v4 *new = __alloc_nx_addr_v4();
+	unsigned long irqflags;
+	int ret = -EEXIST;
+
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	if (__find_v4_addr(nxi, ip, ip2, mask, type, flags, &nxa))
+		goto out_unlock;
+
+	if (NX_IPV4(nxi)) {
+		nxa->next = new;
+		nxa = new;
+		new = NULL;
+
+		/* remove single ip for ip list */
+		nxi->nx_flags &= ~NXF_SINGLE_IP;
+	}
+
+	nxa->ip[0].s_addr = ip;
+	nxa->ip[1].s_addr = ip2;
+	nxa->mask.s_addr = mask;
+	nxa->type = type;
+	nxa->flags = flags;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	if (new)
+		__dealloc_nx_addr_v4(new);
+	return ret;
+}
+
+int do_remove_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
+	uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v4 *nxa = NULL;
+	struct nx_addr_v4 *old = NULL;
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	switch (type) {
+	case NXA_TYPE_ADDR:
+		old = __find_v4_addr(nxi, ip, ip2, mask, type, flags, &nxa);
+		if (old) {
+			if (nxa) {
+				nxa->next = old->next;
+				old->next = NULL;
+			} else {
+				if (old->next) {
+					nxa = old;
+					old = old->next;
+					*nxa = *old;
+					old->next = NULL;
+				} else {
+					memset(old, 0, sizeof(*old));
+					old = NULL;
+				}
+			}
+		} else
+			ret = -ESRCH;
+		break;
+
+	case NXA_TYPE_ANY:
+		nxa = &nxi->v4;
+		old = nxa->next;
+		memset(nxa, 0, sizeof(*nxa));
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	__dealloc_nx_addr_v4_all(old);
+	return ret;
+}
+
+
+int vc_net_add(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_v0 vc_data;
+	int index, ret = 0;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_IPV4:
+		if ((vc_data.count < 1) || (vc_data.count > 4))
+			return -EINVAL;
+
+		index = 0;
+		while (index < vc_data.count) {
+			ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0,
+				vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0);
+			if (ret)
+				return ret;
+			index++;
+		}
+		ret = index;
+		break;
+
+	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
+		nxi->v4_bcast = vc_data.ip[0];
+		ret = 1;
+		break;
+
+	case NXA_TYPE_IPV4|NXA_MOD_LBACK:
+		nxi->v4_lback = vc_data.ip[0];
+		ret = 1;
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int vc_net_remove(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_v0 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ANY:
+		return do_remove_v4_addr(nxi, 0, 0, 0, vc_data.type, 0);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+int vc_net_add_ipv4_v1(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+	case NXA_TYPE_MASK:
+		return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0,
+			vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+
+	case NXA_TYPE_ADDR | NXA_MOD_BCAST:
+		nxi->v4_bcast = vc_data.ip;
+		break;
+
+	case NXA_TYPE_ADDR | NXA_MOD_LBACK:
+		nxi->v4_lback = vc_data.ip;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int vc_net_add_ipv4(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v2 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+	case NXA_TYPE_MASK:
+	case NXA_TYPE_RANGE:
+		return do_add_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr,
+			vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+
+	case NXA_TYPE_ADDR | NXA_MOD_BCAST:
+		nxi->v4_bcast = vc_data.ip;
+		break;
+
+	case NXA_TYPE_ADDR | NXA_MOD_LBACK:
+		nxi->v4_lback = vc_data.ip;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int vc_net_rem_ipv4_v1(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_remove_v4_addr(nxi, vc_data.ip.s_addr, 0,
+		vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+}
+
+int vc_net_rem_ipv4(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv4_v2 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_remove_v4_addr(nxi, vc_data.ip.s_addr, vc_data.ip2.s_addr,
+		vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+}
+
+#ifdef CONFIG_IPV6
+
+static inline
+struct nx_addr_v6 *__find_v6_addr(struct nx_info *nxi,
+	struct in6_addr *ip, struct in6_addr *mask,
+	uint32_t prefix, uint16_t type, uint16_t flags,
+	struct nx_addr_v6 **prev)
+{
+	struct nx_addr_v6 *nxa = &nxi->v6;
+
+	for (; nxa; nxa = nxa->next) {
+		if (ipv6_addr_equal(&nxa->ip, ip) &&
+		    ipv6_addr_equal(&nxa->mask, mask) &&
+		    (nxa->prefix == prefix) &&
+		    (nxa->type == type) &&
+		    (nxa->flags == flags))
+		    return nxa;
+
+		/* save previous entry */
+		if (prev)
+			*prev = nxa;
+	}
+	return NULL;
+}
+
+
+int do_add_v6_addr(struct nx_info *nxi,
+	struct in6_addr *ip, struct in6_addr *mask,
+	uint32_t prefix, uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v6 *nxa = NULL;
+	struct nx_addr_v6 *new = __alloc_nx_addr_v6();
+	unsigned long irqflags;
+	int ret = -EEXIST;
+
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	if (__find_v6_addr(nxi, ip, mask, prefix, type, flags, &nxa))
+		goto out_unlock;
+
+	if (NX_IPV6(nxi)) {
+		nxa->next = new;
+		nxa = new;
+		new = NULL;
+	}
+
+	nxa->ip = *ip;
+	nxa->mask = *mask;
+	nxa->prefix = prefix;
+	nxa->type = type;
+	nxa->flags = flags;
+	ret = 0;
+out_unlock:
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	if (new)
+		__dealloc_nx_addr_v6(new);
+	return ret;
+}
+
+int do_remove_v6_addr(struct nx_info *nxi,
+	struct in6_addr *ip, struct in6_addr *mask,
+	uint32_t prefix, uint16_t type, uint16_t flags)
+{
+	struct nx_addr_v6 *nxa = NULL;
+	struct nx_addr_v6 *old = NULL;
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&nxi->addr_lock, irqflags);
+	switch (type) {
+	case NXA_TYPE_ADDR:
+		old = __find_v6_addr(nxi, ip, mask, prefix, type, flags, &nxa);
+		if (old) {
+			if (nxa) {
+				nxa->next = old->next;
+				old->next = NULL;
+			} else {
+				if (old->next) {
+					nxa = old;
+					old = old->next;
+					*nxa = *old;
+					old->next = NULL;
+				} else {
+					memset(old, 0, sizeof(*old));
+					old = NULL;
+				}
+			}
+		} else
+			ret = -ESRCH;
+		break;
+
+	case NXA_TYPE_ANY:
+		nxa = &nxi->v6;
+		old = nxa->next;
+		memset(nxa, 0, sizeof(*nxa));
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&nxi->addr_lock, irqflags);
+	__dealloc_nx_addr_v6_all(old);
+	return ret;
+}
+
+int vc_net_add_ipv6(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv6_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+		memset(&vc_data.mask, ~0, sizeof(vc_data.mask));
+		/* fallthrough */
+	case NXA_TYPE_MASK:
+		return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
+			vc_data.prefix, vc_data.type, vc_data.flags);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_addr_ipv6_v1 vc_data;
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	switch (vc_data.type) {
+	case NXA_TYPE_ADDR:
+		memset(&vc_data.mask, ~0, sizeof(vc_data.mask));
+		/* fallthrough */
+	case NXA_TYPE_MASK:
+		return do_remove_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
+			vc_data.prefix, vc_data.type, vc_data.flags);
+	case NXA_TYPE_ANY:
+		return do_remove_v6_addr(nxi, NULL, NULL, 0, vc_data.type, 0);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#endif	/* CONFIG_IPV6 */
+
+
+int vc_get_nflags(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_flags_v0 vc_data;
+
+	vc_data.flagword = nxi->nx_flags;
+
+	/* special STATE flag handling */
+	vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME);
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_nflags(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_flags_v0 vc_data;
+	uint64_t mask, trigger;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special STATE flag handling */
+	mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
+	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
+
+	nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
+		vc_data.flagword, mask);
+	if (trigger & NXF_PERSISTENT)
+		nx_update_persistent(nxi);
+
+	return 0;
+}
+
+int vc_get_ncaps(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_caps_v0 vc_data;
+
+	vc_data.ncaps = nxi->nx_ncaps;
+	vc_data.cmask = ~0ULL;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
+int vc_set_ncaps(struct nx_info *nxi, void __user *data)
+{
+	struct vcmd_net_caps_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
+		vc_data.ncaps, vc_data.cmask);
+	return 0;
+}
+
+
+#include <linux/module.h>
+
+module_init(init_network);
+
+EXPORT_SYMBOL_GPL(free_nx_info);
+EXPORT_SYMBOL_GPL(unhash_nx_info);
+
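
One detail of network.c above that is easy to miss: __nx_set_lback() gives every network context its own loopback address by folding the low 16 bits of the nid into the middle octets of 127.0.0.1. A runnable user-space sketch of that mapping, not part of the patch:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* mirrors: lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8)) */
static struct in_addr nx_lback(uint32_t nid)
{
	struct in_addr a;

	a.s_addr = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
	return a;
}

int main(void)
{
	uint32_t nids[] = { 1, 2, 40, 256 };
	unsigned int i;

	/* prints e.g. nid 1 -> 127.0.1.1, nid 256 -> 127.1.0.1 */
	for (i = 0; i < sizeof(nids) / sizeof(nids[0]); i++)
		printf("nid %4u -> %s\n", nids[i], inet_ntoa(nx_lback(nids[i])));
	return 0;
}
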
diff -ruNp linux-3.13.11/kernel/vserver/proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/proc.c
--- linux-3.13.11/kernel/vserver/proc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,1097 @@
+/*
+ *  linux/kernel/vserver/proc.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2003-2011  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *  V0.02  adaptation vs1.3.0
+ *  V0.03  proc permissions
+ *  V0.04  locking/generic
+ *  V0.05  next generation procfs
+ *  V0.06  inode validation
+ *  V0.07  generic rewrite vid
+ *  V0.08  remove inode type
+ *  V0.09  added u/wmask info
+ *
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/fs_struct.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <asm/unistd.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_cvirt.h>
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+
+#include <linux/vserver/global.h>
+
+#include "cvirt_proc.h"
+#include "cacct_proc.h"
+#include "limit_proc.h"
+#include "sched_proc.h"
+#include "vci_config.h"
+
+#include <../../fs/proc/internal.h>
+
+
+static inline char *print_cap_t(char *buffer, kernel_cap_t *c)
+{
+	unsigned __capi;
+
+	CAP_FOR_EACH_U32(__capi) {
+		buffer += sprintf(buffer, "%08x",
+			c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+	}
+	return buffer;
+}
+
+
+static struct proc_dir_entry *proc_virtual;
+
+static struct proc_dir_entry *proc_virtnet;
+
+
+/* first the actual feeds */
+
+
+static int proc_vci(char *buffer)
+{
+	return sprintf(buffer,
+		"VCIVersion:\t%04x:%04x\n"
+		"VCISyscall:\t%d\n"
+		"VCIKernel:\t%08x\n",
+		VCI_VERSION >> 16,
+		VCI_VERSION & 0xFFFF,
+		__NR_vserver,
+		vci_kernel_config());
+}
+
+static int proc_virtual_info(char *buffer)
+{
+	return proc_vci(buffer);
+}
+
+static int proc_virtual_status(char *buffer)
+{
+	return sprintf(buffer,
+		"#CTotal:\t%d\n"
+		"#CActive:\t%d\n"
+		"#NSProxy:\t%d\t%d %d %d %d %d %d\n"
+		"#InitTask:\t%d\t%d %d\n",
+		atomic_read(&vx_global_ctotal),
+		atomic_read(&vx_global_cactive),
+		atomic_read(&vs_global_nsproxy),
+		atomic_read(&vs_global_fs),
+		atomic_read(&vs_global_mnt_ns),
+		atomic_read(&vs_global_uts_ns),
+		atomic_read(&nr_ipc_ns),
+		atomic_read(&vs_global_user_ns),
+		atomic_read(&vs_global_pid_ns),
+		atomic_read(&init_task.usage),
+		atomic_read(&init_task.nsproxy->count),
+		init_task.fs->users);
+}
+
+
+int proc_vxi_info(struct vx_info *vxi, char *buffer)
+{
+	int length;
+
+	length = sprintf(buffer,
+		"ID:\t%d\n"
+		"Info:\t%p\n"
+		"Init:\t%d\n"
+		"OOM:\t%lld\n",
+		vxi->vx_id,
+		vxi,
+		vxi->vx_initpid,
+		vxi->vx_badness_bias);
+	return length;
+}
+
+int proc_vxi_status(struct vx_info *vxi, char *buffer)
+{
+	char *orig = buffer;
+
+	buffer += sprintf(buffer,
+		"UseCnt:\t%d\n"
+		"Tasks:\t%d\n"
+		"Flags:\t%016llx\n",
+		atomic_read(&vxi->vx_usecnt),
+		atomic_read(&vxi->vx_tasks),
+		(unsigned long long)vxi->vx_flags);
+
+	buffer += sprintf(buffer, "BCaps:\t");
+	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+	buffer += sprintf(buffer, "\n");
+
+	buffer += sprintf(buffer,
+		"CCaps:\t%016llx\n"
+		"Umask:\t%16llx\n"
+		"Wmask:\t%16llx\n"
+		"Spaces:\t%08lx %08lx\n",
+		(unsigned long long)vxi->vx_ccaps,
+		(unsigned long long)vxi->vx_umask,
+		(unsigned long long)vxi->vx_wmask,
+		vxi->space[0].vx_nsmask, vxi->space[1].vx_nsmask);
+	return buffer - orig;
+}
+
+int proc_vxi_limit(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_limit(&vxi->limit, buffer);
+}
+
+int proc_vxi_sched(struct vx_info *vxi, char *buffer)
+{
+	int cpu, length;
+
+	length = vx_info_proc_sched(&vxi->sched, buffer);
+	for_each_online_cpu(cpu) {
+		length += vx_info_proc_sched_pc(
+			&vx_per_cpu(vxi, sched_pc, cpu),
+			buffer + length, cpu);
+	}
+	return length;
+}
+
+int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_nsproxy(vxi->space[0].vx_nsproxy, buffer);
+}
+
+int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_nsproxy(vxi->space[1].vx_nsproxy, buffer);
+}
+
+int proc_vxi_cvirt(struct vx_info *vxi, char *buffer)
+{
+	int cpu, length;
+
+	vx_update_load(vxi);
+	length = vx_info_proc_cvirt(&vxi->cvirt, buffer);
+	for_each_online_cpu(cpu) {
+		length += vx_info_proc_cvirt_pc(
+			&vx_per_cpu(vxi, cvirt_pc, cpu),
+			buffer + length, cpu);
+	}
+	return length;
+}
+
+int proc_vxi_cacct(struct vx_info *vxi, char *buffer)
+{
+	return vx_info_proc_cacct(&vxi->cacct, buffer);
+}
+
+
+static int proc_virtnet_info(char *buffer)
+{
+	return proc_vci(buffer);
+}
+
+static int proc_virtnet_status(char *buffer)
+{
+	return sprintf(buffer,
+		"#CTotal:\t%d\n"
+		"#CActive:\t%d\n",
+		atomic_read(&nx_global_ctotal),
+		atomic_read(&nx_global_cactive));
+}
+
+int proc_nxi_info(struct nx_info *nxi, char *buffer)
+{
+	struct nx_addr_v4 *v4a;
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 *v6a;
+#endif
+	int length, i;
+
+	length = sprintf(buffer,
+		"ID:\t%d\n"
+		"Info:\t%p\n"
+		"Bcast:\t" NIPQUAD_FMT "\n"
+		"Lback:\t" NIPQUAD_FMT "\n",
+		nxi->nx_id,
+		nxi,
+		NIPQUAD(nxi->v4_bcast.s_addr),
+		NIPQUAD(nxi->v4_lback.s_addr));
+
+	if (!NX_IPV4(nxi))
+		goto skip_v4;
+	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+		length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n",
+			i, NXAV4(v4a));
+skip_v4:
+#ifdef	CONFIG_IPV6
+	if (!NX_IPV6(nxi))
+		goto skip_v6;
+	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+		length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n",
+			i, NXAV6(v6a));
+skip_v6:
+#endif
+	return length;
+}
+
+int proc_nxi_status(struct nx_info *nxi, char *buffer)
+{
+	int length;
+
+	length = sprintf(buffer,
+		"UseCnt:\t%d\n"
+		"Tasks:\t%d\n"
+		"Flags:\t%016llx\n"
+		"NCaps:\t%016llx\n",
+		atomic_read(&nxi->nx_usecnt),
+		atomic_read(&nxi->nx_tasks),
+		(unsigned long long)nxi->nx_flags,
+		(unsigned long long)nxi->nx_ncaps);
+	return length;
+}
+
+
+
+/* here the inode helpers */
+
+struct vs_entry {
+	int len;
+	char *name;
+	mode_t mode;
+	struct inode_operations *iop;
+	struct file_operations *fop;
+	union proc_op op;
+};
+
+static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p)
+{
+	struct inode *inode = new_inode(sb);
+
+	if (!inode)
+		goto out;
+
+	inode->i_mode = p->mode;
+	if (p->iop)
+		inode->i_op = p->iop;
+	if (p->fop)
+		inode->i_fop = p->fop;
+
+	set_nlink(inode, (p->mode & S_IFDIR) ? 2 : 1);
+	inode->i_flags |= S_IMMUTABLE;
+
+	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+	i_uid_write(inode, 0);
+	i_gid_write(inode, 0);
+	i_tag_write(inode, 0);
+out:
+	return inode;
+}
+
+static struct dentry *vs_proc_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	struct vs_entry *p = ptr;
+	struct inode *inode = vs_proc_make_inode(dir->i_sb, p);
+	struct dentry *error = ERR_PTR(-EINVAL);
+
+	if (!inode)
+		goto out;
+
+	PROC_I(inode)->op = p->op;
+	PROC_I(inode)->fd = id;
+	d_add(dentry, inode);
+	error = NULL;
+out:
+	return error;
+}
+
+/* Lookups */
+
+typedef struct dentry *vx_instantiate_t(struct inode *, struct dentry *, int, void *);
+
+
+/*
+ * Fill a directory entry.
+ *
+ * If possible create the dcache entry and derive our inode number and
+ * file type from dcache entry.
+ *
+ * Since all of the proc inode numbers are dynamically generated, the inode
+ * numbers do not exist until the inode is cached.  This means creating
+ * the dcache entry in iterate is necessary to keep the inode numbers
+ * reported by iterate in sync with the inode numbers reported
+ * by stat.
+ */
+static int vx_proc_fill_cache(struct file *filp, struct dir_context *ctx,
+	char *name, int len, vx_instantiate_t instantiate, int id, void *ptr)
+{
+	struct dentry *child, *dir = filp->f_dentry;
+	struct inode *inode;
+	struct qstr qname;
+	ino_t ino = 0;
+	unsigned type = DT_UNKNOWN;
+
+	qname.name = name;
+	qname.len  = len;
+	qname.hash = full_name_hash(name, len);
+
+	child = d_lookup(dir, &qname);
+	if (!child) {
+		struct dentry *new;
+		new = d_alloc(dir, &qname);
+		if (new) {
+			child = instantiate(dir->d_inode, new, id, ptr);
+			if (child)
+				dput(new);
+			else
+				child = new;
+		}
+	}
+	if (!child || IS_ERR(child) || !child->d_inode)
+		goto end_instantiate;
+	inode = child->d_inode;
+	if (inode) {
+		ino = inode->i_ino;
+		type = inode->i_mode >> 12;
+	}
+	dput(child);
+end_instantiate:
+	if (!ino)
+		ino = 1;
+	return !dir_emit(ctx, name, len, ino, type);
+}
+
+
+
+/* get and revalidate vx_info/xid */
+
+static inline
+struct vx_info *get_proc_vx_info(struct inode *inode)
+{
+	return lookup_vx_info(PROC_I(inode)->fd);
+}
+
+static int proc_xid_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	vxid_t xid = PROC_I(inode)->fd;
+
+	if (flags & LOOKUP_RCU)	/* FIXME: can be dropped? */
+		return -ECHILD;
+
+	if (!xid || xid_is_hashed(xid))
+		return 1;
+	d_drop(dentry);
+	return 0;
+}
+
+
+/* get and revalidate nx_info/nid */
+
+static int proc_nid_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	struct inode *inode = dentry->d_inode;
+	vnid_t nid = PROC_I(inode)->fd;
+
+	if (flags & LOOKUP_RCU)	/* FIXME: can be dropped? */
+		return -ECHILD;
+
+	if (!nid || nid_is_hashed(nid))
+		return 1;
+	d_drop(dentry);
+	return 0;
+}
+
+
+
+#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
+
+static ssize_t proc_vs_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(PROC_I(inode)->fd);
+
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	BUG_ON(!PROC_I(inode)->op.proc_vs_read);
+	length = PROC_I(inode)->op.proc_vs_read((char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+	return length;
+}
+
+static ssize_t proc_vx_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct vx_info *vxi = NULL;
+	vxid_t xid = PROC_I(inode)->fd;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(!xid);
+	vxi = lookup_vx_info(xid);
+	if (!vxi)
+		goto out;
+
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out_put;
+
+	BUG_ON(!PROC_I(inode)->op.proc_vxi_read);
+	length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+out_put:
+	put_vx_info(vxi);
+out:
+	return length;
+}
+
+static ssize_t proc_nx_info_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct nx_info *nxi = NULL;
+	vnid_t nid = PROC_I(inode)->fd;
+	unsigned long page;
+	ssize_t length = 0;
+
+	if (count > PROC_BLOCK_SIZE)
+		count = PROC_BLOCK_SIZE;
+
+	/* fade that out as soon as stable */
+	WARN_ON(!nid);
+	nxi = lookup_nx_info(nid);
+	if (!nxi)
+		goto out;
+
+	length = -ENOMEM;
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		goto out_put;
+
+	BUG_ON(!PROC_I(inode)->op.proc_nxi_read);
+	length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page);
+
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos,
+			(char *)page, length);
+
+	free_page(page);
+out_put:
+	put_nx_info(nxi);
+out:
+	return length;
+}
+
+
+
+/* here comes the lower level */
+
+
+#define NOD(NAME, MODE, IOP, FOP, OP) {	\
+	.len  = sizeof(NAME) - 1,	\
+	.name = (NAME),			\
+	.mode = MODE,			\
+	.iop  = IOP,			\
+	.fop  = FOP,			\
+	.op   = OP,			\
+}
+
+
+#define DIR(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFDIR | (MODE)),			\
+		&proc_ ## OTYPE ## _inode_operations,	\
+		&proc_ ## OTYPE ## _file_operations, { } )
+
+#define INF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_vs_info_file_operations,		\
+		{ .proc_vs_read = &proc_##OTYPE } )
+
+#define VINF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_vx_info_file_operations,		\
+		{ .proc_vxi_read = &proc_##OTYPE } )
+
+#define NINF(NAME, MODE, OTYPE)				\
+	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
+		&proc_nx_info_file_operations,		\
+		{ .proc_nxi_read = &proc_##OTYPE } )
+
+
+static struct file_operations proc_vs_info_file_operations = {
+	.read =		proc_vs_info_read,
+};
+
+static struct file_operations proc_vx_info_file_operations = {
+	.read =		proc_vx_info_read,
+};
+
+static struct dentry_operations proc_xid_dentry_operations = {
+	.d_revalidate =	proc_xid_revalidate,
+};
+
+static struct vs_entry vx_base_stuff[] = {
+	VINF("info",	S_IRUGO, vxi_info),
+	VINF("status",	S_IRUGO, vxi_status),
+	VINF("limit",	S_IRUGO, vxi_limit),
+	VINF("sched",	S_IRUGO, vxi_sched),
+	VINF("nsproxy",	S_IRUGO, vxi_nsproxy0),
+	VINF("nsproxy1",S_IRUGO, vxi_nsproxy1),
+	VINF("cvirt",	S_IRUGO, vxi_cvirt),
+	VINF("cacct",	S_IRUGO, vxi_cacct),
+	{}
+};
+
+
+
+
+static struct dentry *proc_xid_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	dentry->d_op = &proc_xid_dentry_operations;
+	return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_xid_lookup(struct inode *dir,
+	struct dentry *dentry, unsigned int flags)
+{
+	struct vs_entry *p = vx_base_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+	return error;
+}
+
+static int proc_xid_iterate(struct file *filp, struct dir_context *ctx)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = vx_base_stuff;
+	int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry);
+	int index;
+	u64 ino;
+
+	switch (ctx->pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (!dir_emit(ctx, ".", 1, ino, DT_DIR) < 0)
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (!dir_emit(ctx, "..", 2, ino, DT_DIR) < 0)
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	default:
+		index = ctx->pos - 2;
+		if (index >= size)
+			goto out;
+		for (p += index; p->name; p++) {
+			if (vx_proc_fill_cache(filp, ctx, p->name, p->len,
+				vs_proc_instantiate, PROC_I(inode)->fd, p))
+				goto out;
+			ctx->pos++;
+		}
+	}
+out:
+	return 1;
+}
+
+
+
+static struct file_operations proc_nx_info_file_operations = {
+	.read =		proc_nx_info_read,
+};
+
+static struct dentry_operations proc_nid_dentry_operations = {
+	.d_revalidate =	proc_nid_revalidate,
+};
+
+static struct vs_entry nx_base_stuff[] = {
+	NINF("info",	S_IRUGO, nxi_info),
+	NINF("status",	S_IRUGO, nxi_status),
+	{}
+};
+
+
+static struct dentry *proc_nid_instantiate(struct inode *dir,
+	struct dentry *dentry, int id, void *ptr)
+{
+	dentry->d_op = &proc_nid_dentry_operations;
+	return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_nid_lookup(struct inode *dir,
+	struct dentry *dentry, unsigned int flags)
+{
+	struct vs_entry *p = nx_base_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (!p->name)
+		goto out;
+
+	error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+	return error;
+}
+
+static int proc_nid_iterate(struct file *filp, struct dir_context *ctx)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = nx_base_stuff;
+	int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry);
+	int index;
+	u64 ino;
+
+	switch (ctx->pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (!dir_emit(ctx, ".", 1, ino, DT_DIR) < 0)
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (!dir_emit(ctx, "..", 2, ino, DT_DIR) < 0)
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	default:
+		index = ctx->pos - 2;
+		if (index >= size)
+			goto out;
+		for (p += index; p->name; p++) {
+			if (vx_proc_fill_cache(filp, ctx, p->name, p->len,
+				vs_proc_instantiate, PROC_I(inode)->fd, p))
+				goto out;
+			ctx->pos++;
+		}
+	}
+out:
+	return 1;
+}
+
+
+#define MAX_MULBY10	((~0U - 9) / 10)
+
+static inline int atovid(const char *str, int len)
+{
+	int vid, c;
+
+	vid = 0;
+	while (len-- > 0) {
+		c = *str - '0';
+		str++;
+		if (c > 9)
+			return -1;
+		if (vid >= MAX_MULBY10)
+			return -1;
+		vid *= 10;
+		vid += c;
+		if (!vid)
+			return -1;
+	}
+	return vid;
+}
+
+/* now the upper level (virtual) */
+
+
+static struct file_operations proc_xid_file_operations = {
+	.read =		generic_read_dir,
+	.iterate =	proc_xid_iterate,
+};
+
+static struct inode_operations proc_xid_inode_operations = {
+	.lookup =	proc_xid_lookup,
+};
+
+static struct vs_entry vx_virtual_stuff[] = {
+	INF("info",	S_IRUGO, virtual_info),
+	INF("status",	S_IRUGO, virtual_status),
+	DIR(NULL,	S_IRUGO | S_IXUGO, xid),
+};
+
+
+static struct dentry *proc_virtual_lookup(struct inode *dir,
+	struct dentry *dentry, unsigned int flags)
+{
+	struct vs_entry *p = vx_virtual_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+	int id = 0;
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (p->name)
+		goto instantiate;
+
+	id = atovid(dentry->d_name.name, dentry->d_name.len);
+	if ((id < 0) || !xid_is_hashed(id))
+		goto out;
+
+instantiate:
+	error = proc_xid_instantiate(dir, dentry, id, p);
+out:
+	return error;
+}
+
+static struct file_operations proc_nid_file_operations = {
+	.read =		generic_read_dir,
+	.iterate =	proc_nid_iterate,
+};
+
+static struct inode_operations proc_nid_inode_operations = {
+	.lookup =	proc_nid_lookup,
+};
+
+static struct vs_entry nx_virtnet_stuff[] = {
+	INF("info",	S_IRUGO, virtnet_info),
+	INF("status",	S_IRUGO, virtnet_status),
+	DIR(NULL,	S_IRUGO | S_IXUGO, nid),
+};
+
+
+static struct dentry *proc_virtnet_lookup(struct inode *dir,
+	struct dentry *dentry, unsigned int flags)
+{
+	struct vs_entry *p = nx_virtnet_stuff;
+	struct dentry *error = ERR_PTR(-ENOENT);
+	int id = 0;
+
+	for (; p->name; p++) {
+		if (p->len != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, p->name, p->len))
+			break;
+	}
+	if (p->name)
+		goto instantiate;
+
+	id = atovid(dentry->d_name.name, dentry->d_name.len);
+	if ((id < 0) || !nid_is_hashed(id))
+		goto out;
+
+instantiate:
+	error = proc_nid_instantiate(dir, dentry, id, p);
+out:
+	return error;
+}
+
+
+#define PROC_MAXVIDS 32
+
+int proc_virtual_iterate(struct file *filp, struct dir_context *ctx)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = vx_virtual_stuff;
+	int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry);
+	int index;
+	unsigned int xid_array[PROC_MAXVIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr_xids, i;
+	u64 ino;
+
+	switch (ctx->pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (!dir_emit(ctx, ".", 1, ino, DT_DIR))
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	default:
+		index = ctx->pos - 2;
+		if (index >= size)
+			goto entries;
+		for (p += index; p->name; p++) {
+			if (vx_proc_fill_cache(filp, ctx, p->name, p->len,
+				vs_proc_instantiate, 0, p))
+				goto out;
+			ctx->pos++;
+		}
+	entries:
+		index = ctx->pos - size;
+		p = &vx_virtual_stuff[size - 1];
+		nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS);
+		for (i = 0; i < nr_xids; i++) {
+			int n, xid = xid_array[i];
+			unsigned int j = PROC_NUMBUF;
+
+			n = xid;
+			do
+				buf[--j] = '0' + (n % 10);
+			while (n /= 10);
+
+			if (vx_proc_fill_cache(filp, ctx,
+				buf + j, PROC_NUMBUF - j,
+				vs_proc_instantiate, xid, p))
+				goto out;
+			ctx->pos++;
+		}
+	}
+out:
+	return 0;
+}
+
+static int proc_virtual_getattr(struct vfsmount *mnt,
+	struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+
+	generic_fillattr(inode, stat);
+	stat->nlink = 2 + atomic_read(&vx_global_cactive);
+	return 0;
+}
+
+static struct file_operations proc_virtual_dir_operations = {
+	.read =		generic_read_dir,
+	.iterate =	proc_virtual_iterate,
+};
+
+static struct inode_operations proc_virtual_dir_inode_operations = {
+	.getattr =	proc_virtual_getattr,
+	.lookup =	proc_virtual_lookup,
+};
+
+
+
+int proc_virtnet_iterate(struct file *filp, struct dir_context *ctx)
+{
+	struct dentry *dentry = filp->f_dentry;
+	struct inode *inode = dentry->d_inode;
+	struct vs_entry *p = nx_virtnet_stuff;
+	int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry);
+	int index;
+	unsigned int nid_array[PROC_MAXVIDS];
+	char buf[PROC_NUMBUF];
+	unsigned int nr_nids, i;
+	u64 ino;
+
+	switch (ctx->pos) {
+	case 0:
+		ino = inode->i_ino;
+		if (!dir_emit(ctx, ".", 1, ino, DT_DIR))
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	case 1:
+		ino = parent_ino(dentry);
+		if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
+			goto out;
+		ctx->pos++;
+		/* fall through */
+	default:
+		index = ctx->pos - 2;
+		if (index >= size)
+			goto entries;
+		for (p += index; p->name; p++) {
+			if (vx_proc_fill_cache(filp, ctx, p->name, p->len,
+				vs_proc_instantiate, 0, p))
+				goto out;
+			ctx->pos++;
+		}
+	entries:
+		index = ctx->pos - size;
+		p = &nx_virtnet_stuff[size - 1];
+		nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS);
+		for (i = 0; i < nr_nids; i++) {
+			int n, nid = nid_array[i];
+			unsigned int j = PROC_NUMBUF;
+
+			n = nid;
+			do
+				buf[--j] = '0' + (n % 10);
+			while (n /= 10);
+
+			if (vx_proc_fill_cache(filp, ctx,
+				buf + j, PROC_NUMBUF - j,
+				vs_proc_instantiate, nid, p))
+				goto out;
+			ctx->pos++;
+		}
+	}
+out:
+	return 0;
+}
+
+static int proc_virtnet_getattr(struct vfsmount *mnt,
+	struct dentry *dentry, struct kstat *stat)
+{
+	struct inode *inode = dentry->d_inode;
+
+	generic_fillattr(inode, stat);
+	stat->nlink = 2 + atomic_read(&nx_global_cactive);
+	return 0;
+}
+
+static struct file_operations proc_virtnet_dir_operations = {
+	.read =		generic_read_dir,
+	.iterate =	proc_virtnet_iterate,
+};
+
+static struct inode_operations proc_virtnet_dir_inode_operations = {
+	.getattr =	proc_virtnet_getattr,
+	.lookup =	proc_virtnet_lookup,
+};
+
+
+
+void proc_vx_init(void)
+{
+	struct proc_dir_entry *ent;
+
+	ent = proc_mkdir("virtual", 0);
+	if (ent) {
+		ent->proc_fops = &proc_virtual_dir_operations;
+		ent->proc_iops = &proc_virtual_dir_inode_operations;
+	}
+	proc_virtual = ent;
+
+	ent = proc_mkdir("virtnet", 0);
+	if (ent) {
+		ent->proc_fops = &proc_virtnet_dir_operations;
+		ent->proc_iops = &proc_virtnet_dir_inode_operations;
+	}
+	proc_virtnet = ent;
+}
+
+
+
+
+/* per pid info */
+
+
+int proc_pid_vx_info(struct task_struct *p, char *buffer)
+{
+	struct vx_info *vxi;
+	char *orig = buffer;
+
+	buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p));
+
+	vxi = task_get_vx_info(p);
+	if (!vxi)
+		goto out;
+
+	buffer += sprintf(buffer, "BCaps:\t");
+	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+	buffer += sprintf(buffer, "\n");
+	buffer += sprintf(buffer, "CCaps:\t%016llx\n",
+		(unsigned long long)vxi->vx_ccaps);
+	buffer += sprintf(buffer, "CFlags:\t%016llx\n",
+		(unsigned long long)vxi->vx_flags);
+	buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid);
+
+	put_vx_info(vxi);
+out:
+	return buffer - orig;
+}
+
+
+int proc_pid_nx_info(struct task_struct *p, char *buffer)
+{
+	struct nx_info *nxi;
+	struct nx_addr_v4 *v4a;
+#ifdef	CONFIG_IPV6
+	struct nx_addr_v6 *v6a;
+#endif
+	char *orig = buffer;
+	int i;
+
+	buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p));
+
+	nxi = task_get_nx_info(p);
+	if (!nxi)
+		goto out;
+
+	buffer += sprintf(buffer, "NCaps:\t%016llx\n",
+		(unsigned long long)nxi->nx_ncaps);
+	buffer += sprintf(buffer, "NFlags:\t%016llx\n",
+		(unsigned long long)nxi->nx_flags);
+
+	buffer += sprintf(buffer,
+		"V4Root[bcast]:\t" NIPQUAD_FMT "\n",
+		NIPQUAD(nxi->v4_bcast.s_addr));
+	buffer += sprintf (buffer,
+		"V4Root[lback]:\t" NIPQUAD_FMT "\n",
+		NIPQUAD(nxi->v4_lback.s_addr));
+	if (!NX_IPV4(nxi))
+		goto skip_v4;
+	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+		buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n",
+			i, NXAV4(v4a));
+skip_v4:
+#ifdef	CONFIG_IPV6
+	if (!NX_IPV6(nxi))
+		goto skip_v6;
+	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+		buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n",
+			i, NXAV6(v6a));
+skip_v6:
+#endif
+	put_nx_info(nxi);
+out:
+	return buffer - orig;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/sched.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched.c
--- linux-3.13.11/kernel/vserver/sched.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,83 @@
+/*
+ *  linux/kernel/vserver/sched.c
+ *
+ *  Virtual Server: Scheduler Support
+ *
+ *  Copyright (C) 2004-2010  Herbert Pötzl
+ *
+ *  V0.01  adapted Sam Vilain's version to 2.6.3
+ *  V0.02  removed legacy interface
+ *  V0.03  changed vcmds to vxi arg
+ *  V0.04  removed older and legacy interfaces
+ *  V0.05  removed scheduler code/commands
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_sched.h>
+#include <linux/cpumask.h>
+#include <linux/vserver/sched_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+void vx_update_sched_param(struct _vx_sched *sched,
+	struct _vx_sched_pc *sched_pc)
+{
+	sched_pc->prio_bias = sched->prio_bias;
+}
+
+static int do_set_prio_bias(struct vx_info *vxi, struct vcmd_prio_bias *data)
+{
+	int cpu;
+
+	if (data->prio_bias > MAX_PRIO_BIAS)
+		data->prio_bias = MAX_PRIO_BIAS;
+	if (data->prio_bias < MIN_PRIO_BIAS)
+		data->prio_bias = MIN_PRIO_BIAS;
+
+	if (data->cpu_id != ~0) {
+		vxi->sched.update = cpumask_of_cpu(data->cpu_id);
+		cpumask_and(&vxi->sched.update, &vxi->sched.update,
+			cpu_online_mask);
+	} else
+		cpumask_copy(&vxi->sched.update, cpu_online_mask);
+
+	for_each_cpu_mask(cpu, vxi->sched.update)
+		vx_update_sched_param(&vxi->sched,
+			&vx_per_cpu(vxi, sched_pc, cpu));
+	return 0;
+}
+
+int vc_set_prio_bias(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_prio_bias vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return do_set_prio_bias(vxi, &vc_data);
+}
+
+int vc_get_prio_bias(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_prio_bias vc_data;
+	struct _vx_sched_pc *pcd;
+	int cpu;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	cpu = vc_data.cpu_id;
+
+	if (!cpu_possible(cpu))
+		return -EINVAL;
+
+	pcd = &vx_per_cpu(vxi, sched_pc, cpu);
+	vc_data.prio_bias = pcd->prio_bias;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		return -EFAULT;
+	return 0;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/sched_init.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched_init.h
--- linux-3.13.11/kernel/vserver/sched_init.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched_init.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,27 @@
+
+static inline void vx_info_init_sched(struct _vx_sched *sched)
+{
+	/* scheduling; hard code starting values as constants */
+	sched->prio_bias = 0;
+}
+
+static inline
+void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+	sched_pc->prio_bias = 0;
+
+	sched_pc->user_ticks = 0;
+	sched_pc->sys_ticks = 0;
+	sched_pc->hold_ticks = 0;
+}
+
+static inline void vx_info_exit_sched(struct _vx_sched *sched)
+{
+	return;
+}
+
+static inline
+void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+	return;
+}
diff -ruNp linux-3.13.11/kernel/vserver/sched_proc.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched_proc.h
--- linux-3.13.11/kernel/vserver/sched_proc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sched_proc.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,32 @@
+#ifndef _VX_SCHED_PROC_H
+#define _VX_SCHED_PROC_H
+
+
+static inline
+int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
+{
+	int length = 0;
+
+	length += sprintf(buffer,
+		"PrioBias:\t%8d\n",
+		sched->prio_bias);
+	return length;
+}
+
+static inline
+int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc,
+	char *buffer, int cpu)
+{
+	int length = 0;
+
+	length += sprintf(buffer + length,
+		"cpu %d: %lld %lld %lld", cpu,
+		(unsigned long long)sched_pc->user_ticks,
+		(unsigned long long)sched_pc->sys_ticks,
+		(unsigned long long)sched_pc->hold_ticks);
+	length += sprintf(buffer + length,
+		" %d\n", sched_pc->prio_bias);
+	return length;
+}
+
+#endif	/* _VX_SCHED_PROC_H */
diff -ruNp linux-3.13.11/kernel/vserver/signal.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/signal.c
--- linux-3.13.11/kernel/vserver/signal.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/signal.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,134 @@
+/*
+ *  linux/kernel/vserver/signal.c
+ *
+ *  Virtual Server: Signal Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ *  V0.01  broken out from vcontext V0.05
+ *  V0.02  changed vcmds to vxi arg
+ *  V0.03  adjusted siginfo for kill
+ *
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/signal_cmd.h>
+
+
+int vx_info_kill(struct vx_info *vxi, int pid, int sig)
+{
+	int retval, count = 0;
+	struct task_struct *p;
+	struct siginfo *sip = SEND_SIG_PRIV;
+
+	retval = -ESRCH;
+	vxdprintk(VXD_CBIT(misc, 4),
+		"vx_info_kill(%p[#%d],%d,%d)*",
+		vxi, vxi->vx_id, pid, sig);
+	read_lock(&tasklist_lock);
+	switch (pid) {
+	case  0:
+	case -1:
+		for_each_process(p) {
+			int err = 0;
+
+			if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 ||
+				(pid && vxi->vx_initpid == p->pid))
+				continue;
+
+			err = group_send_sig_info(sig, sip, p);
+			++count;
+			if (err != -EPERM)
+				retval = err;
+		}
+		break;
+
+	case 1:
+		if (vxi->vx_initpid) {
+			pid = vxi->vx_initpid;
+			/* for now, only SIGINT to private init ... */
+			if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+				/* ... as long as there are tasks left */
+				(atomic_read(&vxi->vx_tasks) > 1))
+				sig = SIGINT;
+		}
+		/* fallthrough */
+	default:
+		rcu_read_lock();
+		p = find_task_by_real_pid(pid);
+		rcu_read_unlock();
+		if (p) {
+			if (vx_task_xid(p) == vxi->vx_id)
+				retval = group_send_sig_info(sig, sip, p);
+		}
+		break;
+	}
+	read_unlock(&tasklist_lock);
+	vxdprintk(VXD_CBIT(misc, 4),
+		"vx_info_kill(%p[#%d],%d,%d,%ld) = %d",
+		vxi, vxi->vx_id, pid, sig, (long)sip, retval);
+	return retval;
+}
+
+int vc_ctx_kill(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_ctx_kill_v0 vc_data;
+
+	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	/* special check to allow guest shutdown */
+	if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+		/* forbid killall pid=0 when init is present */
+		(((vc_data.pid < 1) && vxi->vx_initpid) ||
+		(vc_data.pid > 1)))
+		return -EACCES;
+
+	return vx_info_kill(vxi, vc_data.pid, vc_data.sig);
+}
+
+
+static int __wait_exit(struct vx_info *vxi)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	int ret = 0;
+
+	add_wait_queue(&vxi->vx_wait, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+wait:
+	if (vx_info_state(vxi,
+		VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN)
+		goto out;
+	if (signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+	schedule();
+	goto wait;
+
+out:
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&vxi->vx_wait, &wait);
+	return ret;
+}
+
+
+
+int vc_wait_exit(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_wait_exit_v0 vc_data;
+	int ret;
+
+	ret = __wait_exit(vxi);
+	vc_data.reboot_cmd = vxi->reboot_cmd;
+	vc_data.exit_code = vxi->exit_code;
+
+	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+		ret = -EFAULT;
+	return ret;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/space.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/space.c
--- linux-3.13.11/kernel/vserver/space.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/space.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,436 @@
+/*
+ *  linux/kernel/vserver/space.c
+ *
+ *  Virtual Server: Context Space Support
+ *
+ *  Copyright (C) 2003-2010  Herbert Pötzl
+ *
+ *  V0.01  broken out from context.c 0.07
+ *  V0.02  added task locking for namespace
+ *  V0.03  broken out vx_enter_namespace
+ *  V0.04  added *space support and commands
+ *  V0.05  added credential support
+ *
+ */
+
+#include <linux/utsname.h>
+#include <linux/nsproxy.h>
+#include <linux/err.h>
+#include <linux/fs_struct.h>
+#include <linux/cred.h>
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/space_cmd.h>
+
+atomic_t vs_global_nsproxy	= ATOMIC_INIT(0);
+atomic_t vs_global_fs		= ATOMIC_INIT(0);
+atomic_t vs_global_mnt_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_uts_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_user_ns	= ATOMIC_INIT(0);
+atomic_t vs_global_pid_ns	= ATOMIC_INIT(0);
+
+
+/* namespace functions */
+
+#include <linux/mnt_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/pid_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <net/net_namespace.h>
+#include "../fs/mount.h"
+
+
+static const struct vcmd_space_mask_v1 space_mask_v0 = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+#ifdef	CONFIG_UTS_NS
+		CLONE_NEWUTS |
+#endif
+#ifdef	CONFIG_IPC_NS
+		CLONE_NEWIPC |
+#endif
+#ifdef	CONFIG_USER_NS
+		CLONE_NEWUSER |
+#endif
+		0
+};
+
+static const struct vcmd_space_mask_v1 space_mask = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+#ifdef	CONFIG_UTS_NS
+		CLONE_NEWUTS |
+#endif
+#ifdef	CONFIG_IPC_NS
+		CLONE_NEWIPC |
+#endif
+#ifdef	CONFIG_USER_NS
+		CLONE_NEWUSER |
+#endif
+#ifdef	CONFIG_PID_NS
+		CLONE_NEWPID |
+#endif
+#ifdef	CONFIG_NET_NS
+		CLONE_NEWNET |
+#endif
+		0
+};
+
+static const struct vcmd_space_mask_v1 default_space_mask = {
+	.mask = CLONE_FS |
+		CLONE_NEWNS |
+#ifdef	CONFIG_UTS_NS
+		CLONE_NEWUTS |
+#endif
+#ifdef	CONFIG_IPC_NS
+		CLONE_NEWIPC |
+#endif
+#ifdef	CONFIG_USER_NS
+		CLONE_NEWUSER |
+#endif
+#ifdef	CONFIG_PID_NS
+//		CLONE_NEWPID |
+#endif
+		0
+};
+
+/*
+ *	build a new nsproxy mix
+ *      assumes that both proxies are 'const'
+ *	does not touch nsproxy refcounts
+ *	will hold a reference on the result.
+ */
+
+struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy,
+	struct nsproxy *new_nsproxy, unsigned long mask)
+{
+	struct mnt_namespace *old_ns;
+	struct uts_namespace *old_uts;
+	struct ipc_namespace *old_ipc;
+#ifdef	CONFIG_PID_NS
+	struct pid_namespace *old_pid;
+#endif
+#ifdef	CONFIG_NET_NS
+	struct net *old_net;
+#endif
+	struct nsproxy *nsproxy;
+
+	nsproxy = copy_nsproxy(old_nsproxy);
+	if (!nsproxy)
+		goto out;
+
+	if (mask & CLONE_NEWNS) {
+		old_ns = nsproxy->mnt_ns;
+		nsproxy->mnt_ns = new_nsproxy->mnt_ns;
+		if (nsproxy->mnt_ns)
+			get_mnt_ns(nsproxy->mnt_ns);
+	} else
+		old_ns = NULL;
+
+	if (mask & CLONE_NEWUTS) {
+		old_uts = nsproxy->uts_ns;
+		nsproxy->uts_ns = new_nsproxy->uts_ns;
+		if (nsproxy->uts_ns)
+			get_uts_ns(nsproxy->uts_ns);
+	} else
+		old_uts = NULL;
+
+	if (mask & CLONE_NEWIPC) {
+		old_ipc = nsproxy->ipc_ns;
+		nsproxy->ipc_ns = new_nsproxy->ipc_ns;
+		if (nsproxy->ipc_ns)
+			get_ipc_ns(nsproxy->ipc_ns);
+	} else
+		old_ipc = NULL;
+
+#ifdef	CONFIG_PID_NS
+	if (mask & CLONE_NEWPID) {
+		old_pid = nsproxy->pid_ns_for_children;
+		nsproxy->pid_ns_for_children = new_nsproxy->pid_ns_for_children;
+		if (nsproxy->pid_ns_for_children)
+			get_pid_ns(nsproxy->pid_ns_for_children);
+	} else
+		old_pid = NULL;
+#endif
+#ifdef	CONFIG_NET_NS
+	if (mask & CLONE_NEWNET) {
+		old_net = nsproxy->net_ns;
+		nsproxy->net_ns = new_nsproxy->net_ns;
+		if (nsproxy->net_ns)
+			get_net(nsproxy->net_ns);
+	} else
+		old_net = NULL;
+#endif
+	if (old_ns)
+		put_mnt_ns(old_ns);
+	if (old_uts)
+		put_uts_ns(old_uts);
+	if (old_ipc)
+		put_ipc_ns(old_ipc);
+#ifdef	CONFIG_PID_NS
+	if (old_pid)
+		put_pid_ns(old_pid);
+#endif
+#ifdef	CONFIG_NET_NS
+	if (old_net)
+		put_net(old_net);
+#endif
+out:
+	return nsproxy;
+}
+
+
+/*
+ *	merge two nsproxy structs into a new one.
+ *	will hold a reference on the result.
+ */
+
+static inline
+struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old,
+	struct nsproxy *proxy, unsigned long mask)
+{
+	struct nsproxy null_proxy = { .mnt_ns = NULL };
+
+	if (!proxy)
+		return NULL;
+
+	if (mask) {
+		/* vs_mix_nsproxy returns with reference */
+		return vs_mix_nsproxy(old ? old : &null_proxy,
+			proxy, mask);
+	}
+	get_nsproxy(proxy);
+	return proxy;
+}
+
+
+int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index)
+{
+	struct nsproxy *proxy, *proxy_cur, *proxy_new;
+	struct fs_struct *fs_cur, *fs = NULL;
+	struct _vx_space *space;
+	int ret, kill = 0;
+
+	vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)",
+		vxi, vxi->vx_id, mask, index);
+
+	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+		return -EACCES;
+
+	if (index >= VX_SPACES)
+		return -EINVAL;
+
+	space = &vxi->space[index];
+
+	if (!mask)
+		mask = space->vx_nsmask;
+
+	if ((mask & space->vx_nsmask) != mask)
+		return -EINVAL;
+
+	if (mask & CLONE_FS) {
+		fs = copy_fs_struct(space->vx_fs);
+		if (!fs)
+			return -ENOMEM;
+	}
+	proxy = space->vx_nsproxy;
+
+	vxdprintk(VXD_CBIT(space, 9),
+		"vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)",
+		vxi, vxi->vx_id, mask, index, proxy, fs);
+
+	task_lock(current);
+	fs_cur = current->fs;
+
+	if (mask & CLONE_FS) {
+		spin_lock(&fs_cur->lock);
+		current->fs = fs;
+		kill = !atomic_dec_return(&fs_cur->users);
+		spin_unlock(&fs_cur->lock);
+	}
+
+	proxy_cur = current->nsproxy;
+	get_nsproxy(proxy_cur);
+	task_unlock(current);
+
+	if (kill)
+		free_fs_struct(fs_cur);
+
+	proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask);
+	if (IS_ERR(proxy_new)) {
+		ret = PTR_ERR(proxy_new);
+		goto out_put;
+	}
+
+	proxy_new = xchg(&current->nsproxy, proxy_new);
+
+	if (mask & CLONE_NEWUSER) {
+		struct cred *cred;
+
+		vxdprintk(VXD_CBIT(space, 10),
+			"vx_enter_space(%p[#%u],%p) cred (%p,%p)",
+			vxi, vxi->vx_id, space->vx_cred,
+			current->real_cred, current->cred);
+
+		if (space->vx_cred) {
+			cred = __prepare_creds(space->vx_cred);
+			if (cred)
+				commit_creds(cred);
+		}
+	}
+
+	ret = 0;
+
+	if (proxy_new)
+		put_nsproxy(proxy_new);
+out_put:
+	if (proxy_cur)
+		put_nsproxy(proxy_cur);
+	return ret;
+}
+
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index)
+{
+	struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new;
+	struct fs_struct *fs_vxi, *fs = NULL;
+	struct _vx_space *space;
+	int ret, kill = 0;
+
+	vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)",
+		vxi, vxi->vx_id, mask, index);
+
+	if ((mask & space_mask.mask) != mask)
+		return -EINVAL;
+
+	if (index >= VX_SPACES)
+		return -EINVAL;
+
+	space = &vxi->space[index];
+
+	proxy_vxi = space->vx_nsproxy;
+	fs_vxi = space->vx_fs;
+
+	if (mask & CLONE_FS) {
+		fs = copy_fs_struct(current->fs);
+		if (!fs)
+			return -ENOMEM;
+	}
+
+	task_lock(current);
+
+	if (mask & CLONE_FS) {
+		spin_lock(&fs_vxi->lock);
+		space->vx_fs = fs;
+		kill = !atomic_dec_return(&fs_vxi->users);
+		spin_unlock(&fs_vxi->lock);
+	}
+
+	proxy_cur = current->nsproxy;
+	get_nsproxy(proxy_cur);
+	task_unlock(current);
+
+	if (kill)
+		free_fs_struct(fs_vxi);
+
+	proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask);
+	if (IS_ERR(proxy_new)) {
+		ret = PTR_ERR(proxy_new);
+		goto out_put;
+	}
+
+	proxy_new = xchg(&space->vx_nsproxy, proxy_new);
+	space->vx_nsmask |= mask;
+
+	if (mask & CLONE_NEWUSER) {
+		struct cred *cred;
+
+		vxdprintk(VXD_CBIT(space, 10),
+			"vx_set_space(%p[#%u],%p) cred (%p,%p)",
+			vxi, vxi->vx_id, space->vx_cred,
+			current->real_cred, current->cred);
+
+		cred = prepare_creds();
+		cred = (struct cred *)xchg(&space->vx_cred, cred);
+		if (cred)
+			abort_creds(cred);
+	}
+
+	ret = 0;
+
+	if (proxy_new)
+		put_nsproxy(proxy_new);
+out_put:
+	if (proxy_cur)
+		put_nsproxy(proxy_cur);
+	return ret;
+}
+
+
+int vc_enter_space_v1(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return vx_enter_space(vxi, vc_data.mask, 0);
+}
+
+int vc_enter_space(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if (vc_data.index >= VX_SPACES)
+		return -EINVAL;
+
+	return vx_enter_space(vxi, vc_data.mask, vc_data.index);
+}
+
+int vc_set_space_v1(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	return vx_set_space(vxi, vc_data.mask, 0);
+}
+
+int vc_set_space(struct vx_info *vxi, void __user *data)
+{
+	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
+
+	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+		return -EFAULT;
+
+	if (vc_data.index >= VX_SPACES)
+		return -EINVAL;
+
+	return vx_set_space(vxi, vc_data.mask, vc_data.index);
+}
+
+int vc_get_space_mask(void __user *data, int type)
+{
+	const struct vcmd_space_mask_v1 *mask;
+
+	if (type == 0)
+		mask = &space_mask_v0;
+	else if (type == 1)
+		mask = &space_mask;
+	else
+		mask = &default_space_mask;
+
+	vxdprintk(VXD_CBIT(space, 10),
+		"vc_get_space_mask(%d) = %08llx", type, mask->mask);
+
+	if (copy_to_user(data, mask, sizeof(*mask)))
+		return -EFAULT;
+	return 0;
+}
+
diff -ruNp linux-3.13.11/kernel/vserver/switch.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/switch.c
--- linux-3.13.11/kernel/vserver/switch.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/switch.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,556 @@
+/*
+ *  linux/kernel/vserver/switch.c
+ *
+ *  Virtual Server: Syscall Switch
+ *
+ *  Copyright (C) 2003-2011  Herbert Pötzl
+ *
+ *  V0.01  syscall switch
+ *  V0.02  added signal to context
+ *  V0.03  added rlimit functions
+ *  V0.04  added iattr, task/xid functions
+ *  V0.05  added debug/history stuff
+ *  V0.06  added compat32 layer
+ *  V0.07  vcmd args and perms
+ *  V0.08  added status commands
+ *  V0.09  added tag commands
+ *  V0.10  added oom bias
+ *  V0.11  added device commands
+ *  V0.12  added warn mask
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/switch.h>
+
+#include "vci_config.h"
+
+
+static inline
+int vc_get_version(uint32_t id)
+{
+	return VCI_VERSION;
+}
+
+static inline
+int vc_get_vci(uint32_t id)
+{
+	return vci_kernel_config();
+}
+
+#include <linux/vserver/context_cmd.h>
+#include <linux/vserver/cvirt_cmd.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/limit_cmd.h>
+#include <linux/vserver/network_cmd.h>
+#include <linux/vserver/sched_cmd.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vserver/dlimit_cmd.h>
+#include <linux/vserver/signal_cmd.h>
+#include <linux/vserver/space_cmd.h>
+#include <linux/vserver/tag_cmd.h>
+#include <linux/vserver/device_cmd.h>
+
+#include <linux/vserver/inode.h>
+#include <linux/vserver/dlimit.h>
+
+
+#ifdef	CONFIG_COMPAT
+#define __COMPAT(name, id, data, compat)	\
+	(compat) ? name ## _x32(id, data) : name(id, data)
+#define __COMPAT_NO_ID(name, data, compat)	\
+	(compat) ? name ## _x32(data) : name(data)
+#else
+#define __COMPAT(name, id, data, compat)	\
+	name(id, data)
+#define __COMPAT_NO_ID(name, data, compat)	\
+	name(data)
+#endif
+
+
+static inline
+long do_vcmd(uint32_t cmd, uint32_t id,
+	struct vx_info *vxi, struct nx_info *nxi,
+	void __user *data, int compat)
+{
+	switch (cmd) {
+
+	case VCMD_get_version:
+		return vc_get_version(id);
+	case VCMD_get_vci:
+		return vc_get_vci(id);
+
+	case VCMD_task_xid:
+		return vc_task_xid(id);
+	case VCMD_vx_info:
+		return vc_vx_info(vxi, data);
+
+	case VCMD_task_nid:
+		return vc_task_nid(id);
+	case VCMD_nx_info:
+		return vc_nx_info(nxi, data);
+
+	case VCMD_task_tag:
+		return vc_task_tag(id);
+
+	case VCMD_set_space_v1:
+		return vc_set_space_v1(vxi, data);
+	/* this is version 2 */
+	case VCMD_set_space:
+		return vc_set_space(vxi, data);
+
+	case VCMD_get_space_mask_v0:
+		return vc_get_space_mask(data, 0);
+	/* this is version 1 */
+	case VCMD_get_space_mask:
+		return vc_get_space_mask(data, 1);
+
+	case VCMD_get_space_default:
+		return vc_get_space_mask(data, -1);
+
+	case VCMD_set_umask:
+		return vc_set_umask(vxi, data);
+
+	case VCMD_get_umask:
+		return vc_get_umask(vxi, data);
+
+	case VCMD_set_wmask:
+		return vc_set_wmask(vxi, data);
+
+	case VCMD_get_wmask:
+		return vc_get_wmask(vxi, data);
+#ifdef	CONFIG_IA32_EMULATION
+	case VCMD_get_rlimit:
+		return __COMPAT(vc_get_rlimit, vxi, data, compat);
+	case VCMD_set_rlimit:
+		return __COMPAT(vc_set_rlimit, vxi, data, compat);
+#else
+	case VCMD_get_rlimit:
+		return vc_get_rlimit(vxi, data);
+	case VCMD_set_rlimit:
+		return vc_set_rlimit(vxi, data);
+#endif
+	case VCMD_get_rlimit_mask:
+		return vc_get_rlimit_mask(id, data);
+	case VCMD_reset_hits:
+		return vc_reset_hits(vxi, data);
+	case VCMD_reset_minmax:
+		return vc_reset_minmax(vxi, data);
+
+	case VCMD_get_vhi_name:
+		return vc_get_vhi_name(vxi, data);
+	case VCMD_set_vhi_name:
+		return vc_set_vhi_name(vxi, data);
+
+	case VCMD_ctx_stat:
+		return vc_ctx_stat(vxi, data);
+	case VCMD_virt_stat:
+		return vc_virt_stat(vxi, data);
+	case VCMD_sock_stat:
+		return vc_sock_stat(vxi, data);
+	case VCMD_rlimit_stat:
+		return vc_rlimit_stat(vxi, data);
+
+	case VCMD_set_cflags:
+		return vc_set_cflags(vxi, data);
+	case VCMD_get_cflags:
+		return vc_get_cflags(vxi, data);
+
+	/* this is version 1 */
+	case VCMD_set_ccaps:
+		return vc_set_ccaps(vxi, data);
+	/* this is version 1 */
+	case VCMD_get_ccaps:
+		return vc_get_ccaps(vxi, data);
+	case VCMD_set_bcaps:
+		return vc_set_bcaps(vxi, data);
+	case VCMD_get_bcaps:
+		return vc_get_bcaps(vxi, data);
+
+	case VCMD_set_badness:
+		return vc_set_badness(vxi, data);
+	case VCMD_get_badness:
+		return vc_get_badness(vxi, data);
+
+	case VCMD_set_nflags:
+		return vc_set_nflags(nxi, data);
+	case VCMD_get_nflags:
+		return vc_get_nflags(nxi, data);
+
+	case VCMD_set_ncaps:
+		return vc_set_ncaps(nxi, data);
+	case VCMD_get_ncaps:
+		return vc_get_ncaps(nxi, data);
+
+	case VCMD_set_prio_bias:
+		return vc_set_prio_bias(vxi, data);
+	case VCMD_get_prio_bias:
+		return vc_get_prio_bias(vxi, data);
+	case VCMD_add_dlimit:
+		return __COMPAT(vc_add_dlimit, id, data, compat);
+	case VCMD_rem_dlimit:
+		return __COMPAT(vc_rem_dlimit, id, data, compat);
+	case VCMD_set_dlimit:
+		return __COMPAT(vc_set_dlimit, id, data, compat);
+	case VCMD_get_dlimit:
+		return __COMPAT(vc_get_dlimit, id, data, compat);
+
+	case VCMD_ctx_kill:
+		return vc_ctx_kill(vxi, data);
+
+	case VCMD_wait_exit:
+		return vc_wait_exit(vxi, data);
+
+	case VCMD_get_iattr:
+		return __COMPAT_NO_ID(vc_get_iattr, data, compat);
+	case VCMD_set_iattr:
+		return __COMPAT_NO_ID(vc_set_iattr, data, compat);
+
+	case VCMD_fget_iattr:
+		return vc_fget_iattr(id, data);
+	case VCMD_fset_iattr:
+		return vc_fset_iattr(id, data);
+
+	case VCMD_enter_space_v0:
+		return vc_enter_space_v1(vxi, NULL);
+	case VCMD_enter_space_v1:
+		return vc_enter_space_v1(vxi, data);
+	/* this is version 2 */
+	case VCMD_enter_space:
+		return vc_enter_space(vxi, data);
+
+	case VCMD_ctx_create_v0:
+		return vc_ctx_create(id, NULL);
+	case VCMD_ctx_create:
+		return vc_ctx_create(id, data);
+	case VCMD_ctx_migrate_v0:
+		return vc_ctx_migrate(vxi, NULL);
+	case VCMD_ctx_migrate:
+		return vc_ctx_migrate(vxi, data);
+
+	case VCMD_net_create_v0:
+		return vc_net_create(id, NULL);
+	case VCMD_net_create:
+		return vc_net_create(id, data);
+	case VCMD_net_migrate:
+		return vc_net_migrate(nxi, data);
+
+	case VCMD_tag_migrate:
+		return vc_tag_migrate(id);
+
+	case VCMD_net_add:
+		return vc_net_add(nxi, data);
+	case VCMD_net_remove:
+		return vc_net_remove(nxi, data);
+
+	case VCMD_net_add_ipv4_v1:
+		return vc_net_add_ipv4_v1(nxi, data);
+	/* this is version 2 */
+	case VCMD_net_add_ipv4:
+		return vc_net_add_ipv4(nxi, data);
+
+	case VCMD_net_rem_ipv4_v1:
+		return vc_net_rem_ipv4_v1(nxi, data);
+	/* this is version 2 */
+	case VCMD_net_rem_ipv4:
+		return vc_net_rem_ipv4(nxi, data);
+#ifdef	CONFIG_IPV6
+	case VCMD_net_add_ipv6:
+		return vc_net_add_ipv6(nxi, data);
+	case VCMD_net_remove_ipv6:
+		return vc_net_remove_ipv6(nxi, data);
+#endif
+/*	case VCMD_add_match_ipv4:
+		return vc_add_match_ipv4(nxi, data);
+	case VCMD_get_match_ipv4:
+		return vc_get_match_ipv4(nxi, data);
+#ifdef	CONFIG_IPV6
+	case VCMD_add_match_ipv6:
+		return vc_add_match_ipv6(nxi, data);
+	case VCMD_get_match_ipv6:
+		return vc_get_match_ipv6(nxi, data);
+#endif	*/
+
+#ifdef	CONFIG_VSERVER_DEVICE
+	case VCMD_set_mapping:
+		return __COMPAT(vc_set_mapping, vxi, data, compat);
+	case VCMD_unset_mapping:
+		return __COMPAT(vc_unset_mapping, vxi, data, compat);
+#endif
+#ifdef	CONFIG_VSERVER_HISTORY
+	case VCMD_dump_history:
+		return vc_dump_history(id);
+	case VCMD_read_history:
+		return __COMPAT(vc_read_history, id, data, compat);
+#endif
+	default:
+		vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]",
+			VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd));
+	}
+	return -ENOSYS;
+}
+
+
+#define	__VCMD(vcmd, _perm, _args, _flags)		\
+	case VCMD_ ## vcmd: perm = _perm;		\
+		args = _args; flags = _flags; break
+
+
+#define VCA_NONE	0x00
+#define VCA_VXI		0x01
+#define VCA_NXI		0x02
+
+#define VCF_NONE	0x00
+#define VCF_INFO	0x01
+#define VCF_ADMIN	0x02
+#define VCF_ARES	0x06	/* includes admin */
+#define VCF_SETUP	0x08
+
+#define VCF_ZIDOK	0x10	/* zero id okay */
+
+
+static inline
+long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat)
+{
+	long ret;
+	int permit = -1, state = 0;
+	int perm = -1, args = 0, flags = 0;
+	struct vx_info *vxi = NULL;
+	struct nx_info *nxi = NULL;
+
+	switch (cmd) {
+	/* unprivileged commands */
+	__VCMD(get_version,	 0, VCA_NONE,	0);
+	__VCMD(get_vci,		 0, VCA_NONE,	0);
+	__VCMD(get_rlimit_mask,	 0, VCA_NONE,	0);
+	__VCMD(get_space_mask_v0,0, VCA_NONE,   0);
+	__VCMD(get_space_mask,	 0, VCA_NONE,   0);
+	__VCMD(get_space_default,0, VCA_NONE,   0);
+
+	/* info commands */
+	__VCMD(task_xid,	 2, VCA_NONE,	0);
+	__VCMD(reset_hits,	 2, VCA_VXI,	0);
+	__VCMD(reset_minmax,	 2, VCA_VXI,	0);
+	__VCMD(vx_info,		 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_bcaps,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_ccaps,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_cflags,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_umask,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_wmask,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_badness,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_vhi_name,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(get_rlimit,	 3, VCA_VXI,	VCF_INFO);
+
+	__VCMD(ctx_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(virt_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(sock_stat,	 3, VCA_VXI,	VCF_INFO);
+	__VCMD(rlimit_stat,	 3, VCA_VXI,	VCF_INFO);
+
+	__VCMD(task_nid,	 2, VCA_NONE,	0);
+	__VCMD(nx_info,		 3, VCA_NXI,	VCF_INFO);
+	__VCMD(get_ncaps,	 3, VCA_NXI,	VCF_INFO);
+	__VCMD(get_nflags,	 3, VCA_NXI,	VCF_INFO);
+
+	__VCMD(task_tag,	 2, VCA_NONE,	0);
+
+	__VCMD(get_iattr,	 2, VCA_NONE,	0);
+	__VCMD(fget_iattr,	 2, VCA_NONE,	0);
+	__VCMD(get_dlimit,	 3, VCA_NONE,	VCF_INFO);
+	__VCMD(get_prio_bias,	 3, VCA_VXI,	VCF_INFO);
+
+	/* lower admin commands */
+	__VCMD(wait_exit,	 4, VCA_VXI,	VCF_INFO);
+	__VCMD(ctx_create_v0,	 5, VCA_NONE,	0);
+	__VCMD(ctx_create,	 5, VCA_NONE,	0);
+	__VCMD(ctx_migrate_v0,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(ctx_migrate,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space_v0,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space_v1,	 5, VCA_VXI,	VCF_ADMIN);
+	__VCMD(enter_space,	 5, VCA_VXI,	VCF_ADMIN);
+
+	__VCMD(net_create_v0,	 5, VCA_NONE,	0);
+	__VCMD(net_create,	 5, VCA_NONE,	0);
+	__VCMD(net_migrate,	 5, VCA_NXI,	VCF_ADMIN);
+
+	__VCMD(tag_migrate,	 5, VCA_NONE,	VCF_ADMIN);
+
+	/* higher admin commands */
+	__VCMD(ctx_kill,	 6, VCA_VXI,	VCF_ARES);
+	__VCMD(set_space_v1,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_space,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_ccaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_bcaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_cflags,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_umask,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_wmask,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_badness,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_vhi_name,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_rlimit,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_prio_bias,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
+
+	__VCMD(set_ncaps,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(set_nflags,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_add,		 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_remove,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_add_ipv4_v1,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_rem_ipv4_v1,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_add_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_rem_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+#ifdef	CONFIG_IPV6
+	__VCMD(net_add_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+	__VCMD(net_remove_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
+#endif
+	__VCMD(set_iattr,	 7, VCA_NONE,	0);
+	__VCMD(fset_iattr,	 7, VCA_NONE,	0);
+	__VCMD(set_dlimit,	 7, VCA_NONE,	VCF_ARES);
+	__VCMD(add_dlimit,	 8, VCA_NONE,	VCF_ARES);
+	__VCMD(rem_dlimit,	 8, VCA_NONE,	VCF_ARES);
+
+#ifdef	CONFIG_VSERVER_DEVICE
+	__VCMD(set_mapping,	 8, VCA_VXI,    VCF_ARES|VCF_ZIDOK);
+	__VCMD(unset_mapping,	 8, VCA_VXI,	VCF_ARES|VCF_ZIDOK);
+#endif
+	/* debug level admin commands */
+#ifdef	CONFIG_VSERVER_HISTORY
+	__VCMD(dump_history,	 9, VCA_NONE,	0);
+	__VCMD(read_history,	 9, VCA_NONE,	0);
+#endif
+
+	default:
+		perm = -1;
+	}
+
+	vxdprintk(VXD_CBIT(switch, 0),
+		"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]",
+		VC_CATEGORY(cmd), VC_COMMAND(cmd),
+		VC_VERSION(cmd), id, data, compat,
+		perm, args, flags);
+
+	ret = -ENOSYS;
+	if (perm < 0)
+		goto out;
+
+	state = 1;
+	if (!capable(CAP_CONTEXT))
+		goto out;
+
+	state = 2;
+	/* moved here from the individual commands */
+	ret = -EPERM;
+	if ((perm > 1) && !capable(CAP_SYS_ADMIN))
+		goto out;
+
+	state = 3;
+	/* vcmd involves resource management  */
+	ret = -EPERM;
+	if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE))
+		goto out;
+
+	state = 4;
+	/* various legacy exceptions */
+	switch (cmd) {
+	/* will go away when spectator is a cap */
+	case VCMD_ctx_migrate_v0:
+	case VCMD_ctx_migrate:
+		if (id == 1) {
+			current->xid = 1;
+			ret = 1;
+			goto out;
+		}
+		break;
+
+	/* will go away when spectator is a cap */
+	case VCMD_net_migrate:
+		if (id == 1) {
+			current->nid = 1;
+			ret = 1;
+			goto out;
+		}
+		break;
+	}
+
+	/* vcmds are fine by default */
+	permit = 1;
+
+	/* admin type vcmds require admin ... */
+	if (flags & VCF_ADMIN)
+		permit = vx_check(0, VS_ADMIN) ? 1 : 0;
+
+	/* ... but setup type vcmds override that */
+	if (!permit && (flags & VCF_SETUP))
+		permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0;
+
+	state = 5;
+	ret = -EPERM;
+	if (!permit)
+		goto out;
+
+	state = 6;
+	if (!id && (flags & VCF_ZIDOK))
+		goto skip_id;
+
+	ret = -ESRCH;
+	if (args & VCA_VXI) {
+		vxi = lookup_vx_info(id);
+		if (!vxi)
+			goto out;
+
+		if ((flags & VCF_ADMIN) &&
+			/* special case kill for shutdown */
+			(cmd != VCMD_ctx_kill) &&
+			/* can context be administrated? */
+			!vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) {
+			ret = -EACCES;
+			goto out_vxi;
+		}
+	}
+	state = 7;
+	if (args & VCA_NXI) {
+		nxi = lookup_nx_info(id);
+		if (!nxi)
+			goto out_vxi;
+
+		if ((flags & VCF_ADMIN) &&
+			/* can context be administrated? */
+			!nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) {
+			ret = -EACCES;
+			goto out_nxi;
+		}
+	}
+skip_id:
+	state = 8;
+	ret = do_vcmd(cmd, id, vxi, nxi, data, compat);
+
+out_nxi:
+	if ((args & VCA_NXI) && nxi)
+		put_nx_info(nxi);
+out_vxi:
+	if ((args & VCA_VXI) && vxi)
+		put_vx_info(vxi);
+out:
+	vxdprintk(VXD_CBIT(switch, 1),
+		"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]",
+		VC_CATEGORY(cmd), VC_COMMAND(cmd),
+		VC_VERSION(cmd), ret, ret, state, permit);
+	return ret;
+}
+
+asmlinkage long
+sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+	return do_vserver(cmd, id, data, 0);
+}
+
+#ifdef	CONFIG_COMPAT
+
+asmlinkage long
+sys32_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+	return do_vserver(cmd, id, data, 1);
+}
+
+#endif	/* CONFIG_COMPAT */
diff -ruNp linux-3.13.11/kernel/vserver/sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sysctl.c
--- linux-3.13.11/kernel/vserver/sysctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/sysctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,247 @@
+/*
+ *  kernel/vserver/sysctl.c
+ *
+ *  Virtual Context Support
+ *
+ *  Copyright (C) 2004-2007  Herbert Pötzl
+ *
+ *  V0.01  basic structure
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/parser.h>
+#include <asm/uaccess.h>
+
+enum {
+	CTL_DEBUG_ERROR		= 0,
+	CTL_DEBUG_SWITCH	= 1,
+	CTL_DEBUG_XID,
+	CTL_DEBUG_NID,
+	CTL_DEBUG_TAG,
+	CTL_DEBUG_NET,
+	CTL_DEBUG_LIMIT,
+	CTL_DEBUG_CRES,
+	CTL_DEBUG_DLIM,
+	CTL_DEBUG_QUOTA,
+	CTL_DEBUG_CVIRT,
+	CTL_DEBUG_SPACE,
+	CTL_DEBUG_PERM,
+	CTL_DEBUG_MISC,
+};
+
+
+unsigned int vs_debug_switch	= 0;
+unsigned int vs_debug_xid	= 0;
+unsigned int vs_debug_nid	= 0;
+unsigned int vs_debug_tag	= 0;
+unsigned int vs_debug_net	= 0;
+unsigned int vs_debug_limit	= 0;
+unsigned int vs_debug_cres	= 0;
+unsigned int vs_debug_dlim	= 0;
+unsigned int vs_debug_quota	= 0;
+unsigned int vs_debug_cvirt	= 0;
+unsigned int vs_debug_space	= 0;
+unsigned int vs_debug_perm	= 0;
+unsigned int vs_debug_misc	= 0;
+
+
+static struct ctl_table_header *vserver_table_header;
+static ctl_table vserver_root_table[];
+
+
+void vserver_register_sysctl(void)
+{
+	if (!vserver_table_header) {
+		vserver_table_header = register_sysctl_table(vserver_root_table);
+	}
+
+}
+
+void vserver_unregister_sysctl(void)
+{
+	if (vserver_table_header) {
+		unregister_sysctl_table(vserver_table_header);
+		vserver_table_header = NULL;
+	}
+}
+
+
+static int proc_dodebug(ctl_table *table, int write,
+	void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	char		tmpbuf[20], *p, c;
+	unsigned int	value;
+	size_t		left, len;
+
+	if ((*ppos && !write) || !*lenp) {
+		*lenp = 0;
+		return 0;
+	}
+
+	left = *lenp;
+
+	if (write) {
+		if (!access_ok(VERIFY_READ, buffer, left))
+			return -EFAULT;
+		p = (char *)buffer;
+		while (left && __get_user(c, p) >= 0 && isspace(c))
+			left--, p++;
+		if (!left)
+			goto done;
+
+		if (left > sizeof(tmpbuf) - 1)
+			return -EINVAL;
+		if (copy_from_user(tmpbuf, p, left))
+			return -EFAULT;
+		tmpbuf[left] = '\0';
+
+		for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
+			value = 10 * value + (*p - '0');
+		if (*p && !isspace(*p))
+			return -EINVAL;
+		while (left && isspace(*p))
+			left--, p++;
+		*(unsigned int *)table->data = value;
+	} else {
+		if (!access_ok(VERIFY_WRITE, buffer, left))
+			return -EFAULT;
+		len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
+		if (len > left)
+			len = left;
+		if (__copy_to_user(buffer, tmpbuf, len))
+			return -EFAULT;
+		if ((left -= len) > 0) {
+			if (put_user('\n', (char *)buffer + len))
+				return -EFAULT;
+			left--;
+		}
+	}
+
+done:
+	*lenp -= left;
+	*ppos += *lenp;
+	return 0;
+}
+
+static int zero;
+
+#define	CTL_ENTRY(ctl, name)				\
+	{						\
+		.procname	= #name,		\
+		.data		= &vs_ ## name,		\
+		.maxlen		= sizeof(int),		\
+		.mode		= 0644,			\
+		.proc_handler	= &proc_dodebug,	\
+		.extra1		= &zero,		\
+		.extra2		= &zero,		\
+	}
+
+static ctl_table vserver_debug_table[] = {
+	CTL_ENTRY(CTL_DEBUG_SWITCH,	debug_switch),
+	CTL_ENTRY(CTL_DEBUG_XID,	debug_xid),
+	CTL_ENTRY(CTL_DEBUG_NID,	debug_nid),
+	CTL_ENTRY(CTL_DEBUG_TAG,	debug_tag),
+	CTL_ENTRY(CTL_DEBUG_NET,	debug_net),
+	CTL_ENTRY(CTL_DEBUG_LIMIT,	debug_limit),
+	CTL_ENTRY(CTL_DEBUG_CRES,	debug_cres),
+	CTL_ENTRY(CTL_DEBUG_DLIM,	debug_dlim),
+	CTL_ENTRY(CTL_DEBUG_QUOTA,	debug_quota),
+	CTL_ENTRY(CTL_DEBUG_CVIRT,	debug_cvirt),
+	CTL_ENTRY(CTL_DEBUG_SPACE,	debug_space),
+	CTL_ENTRY(CTL_DEBUG_PERM,	debug_perm),
+	CTL_ENTRY(CTL_DEBUG_MISC,	debug_misc),
+	{ 0 }
+};
+
+static ctl_table vserver_root_table[] = {
+	{
+		.procname	= "vserver",
+		.mode		= 0555,
+		.child		= vserver_debug_table
+	},
+	{ 0 }
+};
+
+
+static match_table_t tokens = {
+	{ CTL_DEBUG_SWITCH,	"switch=%x"	},
+	{ CTL_DEBUG_XID,	"xid=%x"	},
+	{ CTL_DEBUG_NID,	"nid=%x"	},
+	{ CTL_DEBUG_TAG,	"tag=%x"	},
+	{ CTL_DEBUG_NET,	"net=%x"	},
+	{ CTL_DEBUG_LIMIT,	"limit=%x"	},
+	{ CTL_DEBUG_CRES,	"cres=%x"	},
+	{ CTL_DEBUG_DLIM,	"dlim=%x"	},
+	{ CTL_DEBUG_QUOTA,	"quota=%x"	},
+	{ CTL_DEBUG_CVIRT,	"cvirt=%x"	},
+	{ CTL_DEBUG_SPACE,	"space=%x"	},
+	{ CTL_DEBUG_PERM,	"perm=%x"	},
+	{ CTL_DEBUG_MISC,	"misc=%x"	},
+	{ CTL_DEBUG_ERROR,	NULL		}
+};
+
+#define	HANDLE_CASE(id, name, val)				\
+	case CTL_DEBUG_ ## id:					\
+		vs_debug_ ## name = val;			\
+		printk("vs_debug_" #name "=0x%x\n", val);	\
+		break
+
+
+static int __init vs_debug_setup(char *str)
+{
+	char *p;
+	int token;
+
+	printk("vs_debug_setup(%s)\n", str);
+	while ((p = strsep(&str, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		unsigned int value;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
+
+		switch (token) {
+		HANDLE_CASE(SWITCH, switch, value);
+		HANDLE_CASE(XID,    xid,    value);
+		HANDLE_CASE(NID,    nid,    value);
+		HANDLE_CASE(TAG,    tag,    value);
+		HANDLE_CASE(NET,    net,    value);
+		HANDLE_CASE(LIMIT,  limit,  value);
+		HANDLE_CASE(CRES,   cres,   value);
+		HANDLE_CASE(DLIM,   dlim,   value);
+		HANDLE_CASE(QUOTA,  quota,  value);
+		HANDLE_CASE(CVIRT,  cvirt,  value);
+		HANDLE_CASE(SPACE,  space,  value);
+		HANDLE_CASE(PERM,   perm,   value);
+		HANDLE_CASE(MISC,   misc,   value);
+		default:
+			return -EINVAL;
+			break;
+		}
+	}
+	return 1;
+}
+
+__setup("vsdebug=", vs_debug_setup);
+
+
+
+EXPORT_SYMBOL_GPL(vs_debug_switch);
+EXPORT_SYMBOL_GPL(vs_debug_xid);
+EXPORT_SYMBOL_GPL(vs_debug_nid);
+EXPORT_SYMBOL_GPL(vs_debug_net);
+EXPORT_SYMBOL_GPL(vs_debug_limit);
+EXPORT_SYMBOL_GPL(vs_debug_cres);
+EXPORT_SYMBOL_GPL(vs_debug_dlim);
+EXPORT_SYMBOL_GPL(vs_debug_quota);
+EXPORT_SYMBOL_GPL(vs_debug_cvirt);
+EXPORT_SYMBOL_GPL(vs_debug_space);
+EXPORT_SYMBOL_GPL(vs_debug_perm);
+EXPORT_SYMBOL_GPL(vs_debug_misc);
+
diff -ruNp linux-3.13.11/kernel/vserver/tag.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/tag.c
--- linux-3.13.11/kernel/vserver/tag.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/tag.c	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,63 @@
+/*
+ *  linux/kernel/vserver/tag.c
+ *
+ *  Virtual Server: Shallow Tag Space
+ *
+ *  Copyright (C) 2007  Herbert Pötzl
+ *
+ *  V0.01  basic implementation
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/vserver/debug.h>
+#include <linux/vs_pid.h>
+#include <linux/vs_tag.h>
+
+#include <linux/vserver/tag_cmd.h>
+
+
+int dx_migrate_task(struct task_struct *p, vtag_t tag)
+{
+	if (!p)
+		BUG();
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
+
+	task_lock(p);
+	p->tag = tag;
+	task_unlock(p);
+
+	vxdprintk(VXD_CBIT(tag, 5),
+		"moved task %p into [#%d]", p, tag);
+	return 0;
+}
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+
+int vc_task_tag(uint32_t id)
+{
+	vtag_t tag;
+
+	if (id) {
+		struct task_struct *tsk;
+		rcu_read_lock();
+		tsk = find_task_by_real_pid(id);
+		tag = (tsk) ? tsk->tag : -ESRCH;
+		rcu_read_unlock();
+	} else
+		tag = dx_current_tag();
+	return tag;
+}
+
+
+int vc_tag_migrate(uint32_t tag)
+{
+	return dx_migrate_task(current, tag & 0xFFFF);
+}
+
+
diff -ruNp linux-3.13.11/kernel/vserver/vci_config.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/vci_config.h
--- linux-3.13.11/kernel/vserver/vci_config.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/vserver/vci_config.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,80 @@
+
+/*  interface version */
+
+#define VCI_VERSION		0x00020308
+
+
+enum {
+	VCI_KCBIT_NO_DYNAMIC = 0,
+
+	VCI_KCBIT_PROC_SECURE = 4,
+	/* VCI_KCBIT_HARDCPU = 5, */
+	/* VCI_KCBIT_IDLELIMIT = 6, */
+	/* VCI_KCBIT_IDLETIME = 7, */
+
+	VCI_KCBIT_COWBL = 8,
+	VCI_KCBIT_FULLCOWBL = 9,
+	VCI_KCBIT_SPACES = 10,
+	VCI_KCBIT_NETV2 = 11,
+	VCI_KCBIT_MEMCG = 12,
+	VCI_KCBIT_MEMCG_SWAP = 13,
+
+	VCI_KCBIT_DEBUG = 16,
+	VCI_KCBIT_HISTORY = 20,
+	VCI_KCBIT_TAGGED = 24,
+	VCI_KCBIT_PPTAG = 28,
+
+	VCI_KCBIT_MORE = 31,
+};
+
+
+static inline uint32_t vci_kernel_config(void)
+{
+	return
+	(1 << VCI_KCBIT_NO_DYNAMIC) |
+
+	/* configured features */
+#ifdef	CONFIG_VSERVER_PROC_SECURE
+	(1 << VCI_KCBIT_PROC_SECURE) |
+#endif
+#ifdef	CONFIG_VSERVER_COWBL
+	(1 << VCI_KCBIT_COWBL) |
+	(1 << VCI_KCBIT_FULLCOWBL) |
+#endif
+	(1 << VCI_KCBIT_SPACES) |
+	(1 << VCI_KCBIT_NETV2) |
+#ifdef	CONFIG_MEMCG
+	(1 << VCI_KCBIT_MEMCG) |
+#endif
+#ifdef	CONFIG_MEMCG_SWAP
+	(1 << VCI_KCBIT_MEMCG_SWAP) |
+#endif
+
+	/* debug options */
+#ifdef	CONFIG_VSERVER_DEBUG
+	(1 << VCI_KCBIT_DEBUG) |
+#endif
+#ifdef	CONFIG_VSERVER_HISTORY
+	(1 << VCI_KCBIT_HISTORY) |
+#endif
+
+	/* inode context tagging */
+#if	defined(CONFIG_TAGGING_NONE)
+	(0 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_UID16)
+	(1 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_GID16)
+	(2 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_ID24)
+	(3 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_INTERN)
+	(4 << VCI_KCBIT_TAGGED) |
+#elif	defined(CONFIG_TAGGING_RUNTIME)
+	(5 << VCI_KCBIT_TAGGED) |
+#else
+	(7 << VCI_KCBIT_TAGGED) |
+#endif
+	(1 << VCI_KCBIT_PPTAG) |
+	0;
+}
+
diff -ruNp linux-3.13.11/kernel/watchdog.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/watchdog.c
--- linux-3.13.11/kernel/watchdog.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/watchdog.c	2014-07-09 12:00:15.000000000 +0200
@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-static struct smp_hotplug_thread watchdog_threads = {
+static struct smp_hotplug_thread watchdog_threads __read_only = {
 	.store			= &softlockup_watchdog,
 	.thread_should_run	= watchdog_should_run,
 	.thread_fn		= watchdog,
diff -ruNp linux-3.13.11/kernel/workqueue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/workqueue.c
--- linux-3.13.11/kernel/workqueue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/kernel/workqueue.c	2014-07-09 12:00:15.000000000 +0200
@@ -4678,7 +4678,7 @@ static void rebind_workers(struct worker
 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
 		worker_flags |= WORKER_REBOUND;
 		worker_flags &= ~WORKER_UNBOUND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
+		ACCESS_ONCE_RW(worker->flags) = worker_flags;
 	}
 
 	spin_unlock_irq(&pool->lock);
diff -ruNp linux-3.13.11/lib/Kconfig.debug linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/Kconfig.debug
--- linux-3.13.11/lib/Kconfig.debug	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/Kconfig.debug	2014-07-09 12:00:15.000000000 +0200
@@ -845,7 +845,7 @@ config DEBUG_MUTEXES
 
 config DEBUG_WW_MUTEX_SLOWPATH
 	bool "Wait/wound mutex debugging: Slowpath testing"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
 	select DEBUG_LOCK_ALLOC
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
@@ -858,7 +858,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
 
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select LOCKDEP
@@ -872,7 +872,7 @@ config DEBUG_LOCK_ALLOC
 
 config PROVE_LOCKING
 	bool "Lock debugging: prove locking correctness"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
@@ -923,7 +923,7 @@ config LOCKDEP
 
 config LOCK_STAT
 	bool "Lock usage statistics"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
@@ -1385,6 +1385,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
+	depends on !GRKERNSEC_HIDESYM
 	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
 	select KALLSYMS
 	select KALLSYMS_ALL
@@ -1401,7 +1402,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_C
 config DEBUG_STRICT_USER_COPY_CHECKS
 	bool "Strict user copy size checks"
 	depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
 	help
 	  Enabling this option turns a certain set of sanity checks for user
 	  copy operations into compile time failures.
@@ -1520,7 +1521,7 @@ endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
 	bool "Remote debugging over FireWire early on boot"
-	depends on PCI && X86
+	depends on PCI && X86 && !GRKERNSEC
 	help
 	  If you want to debug problems which hang or crash the kernel early
 	  on boot and the crashing machine has a FireWire port, you can use
@@ -1549,7 +1550,7 @@ config PROVIDE_OHCI1394_DMA_INIT
 
 config FIREWIRE_OHCI_REMOTE_DMA
 	bool "Remote debugging over FireWire with firewire-ohci"
-	depends on FIREWIRE_OHCI
+	depends on FIREWIRE_OHCI && !GRKERNSEC
 	help
 	  This option lets you use the FireWire bus for remote debugging
 	  with help of the firewire-ohci driver. It enables unfiltered
diff -ruNp linux-3.13.11/lib/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/Makefile
--- linux-3.13.11/lib/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/Makefile	2014-07-09 12:00:15.000000000 +0200
@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-y += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
diff -ruNp linux-3.13.11/lib/bitmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/bitmap.c
--- linux-3.13.11/lib/bitmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/bitmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsi
 {
 	int c, old_c, totaldigits, ndigits, nchunks, nbits;
 	u32 chunk;
-	const char __user __force *ubuf = (const char __user __force *)buf;
+	const char __user *ubuf = (const char __force_user *)buf;
 
 	bitmap_zero(maskp, nmaskbits);
 
@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user
 {
 	if (!access_ok(VERIFY_READ, ubuf, ulen))
 		return -EFAULT;
-	return __bitmap_parse((const char __force *)ubuf,
+	return __bitmap_parse((const char __force_kernel *)ubuf,
 				ulen, 1, maskp, nmaskbits);
 
 }
@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char
 {
 	unsigned a, b;
 	int c, old_c, totaldigits;
-	const char __user __force *ubuf = (const char __user __force *)buf;
+	const char __user *ubuf = (const char __force_user *)buf;
 	int exp_digit, in_range;
 
 	totaldigits = c = 0;
@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __u
 {
 	if (!access_ok(VERIFY_READ, ubuf, ulen))
 		return -EFAULT;
-	return __bitmap_parselist((const char __force *)ubuf,
+	return __bitmap_parselist((const char __force_kernel *)ubuf,
 					ulen, 1, maskp, nmaskbits);
 }
 EXPORT_SYMBOL(bitmap_parselist_user);
diff -ruNp linux-3.13.11/lib/bug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/bug.c
--- linux-3.13.11/lib/bug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/bug.c	2014-07-09 12:00:15.000000000 +0200
@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned l
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
diff -ruNp linux-3.13.11/lib/debugobjects.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/debugobjects.c
--- linux-3.13.11/lib/debugobjects.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/debugobjects.c	2014-07-09 12:00:15.000000000 +0200
@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(voi
 	if (limit > 4)
 		return;
 
-	is_on_stack = object_is_on_stack(addr);
+	is_on_stack = object_starts_on_stack(addr);
 	if (is_on_stack == onstack)
 		return;
 
diff -ruNp linux-3.13.11/lib/devres.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/devres.c
--- linux-3.13.11/lib/devres.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/devres.c	2014-07-09 12:00:15.000000000 +0200
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
 void devm_iounmap(struct device *dev, void __iomem *addr)
 {
 	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-			       (void *)addr));
+			       (void __force *)addr));
 	iounmap(addr);
 }
 EXPORT_SYMBOL(devm_iounmap);
@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *de
 {
 	ioport_unmap(addr);
 	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-			       devm_ioport_map_match, (void *)addr));
+			       devm_ioport_map_match, (void __force *)addr));
 }
 EXPORT_SYMBOL(devm_ioport_unmap);
 #endif /* CONFIG_HAS_IOPORT */
diff -ruNp linux-3.13.11/lib/div64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/div64.c
--- linux-3.13.11/lib/div64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/div64.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_3
 EXPORT_SYMBOL(__div64_32);
 
 #ifndef div_s64_rem
-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 {
 	u64 quotient;
 
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
 #ifndef div64_u64
-u64 div64_u64(u64 dividend, u64 divisor)
+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
 {
 	u32 high = divisor >> 32;
 	u64 quot;
diff -ruNp linux-3.13.11/lib/dma-debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/dma-debug.c
--- linux-3.13.11/lib/dma-debug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/dma-debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -768,7 +768,7 @@ static int dma_debug_device_change(struc
 
 void dma_debug_add_bus(struct bus_type *bus)
 {
-	struct notifier_block *nb;
+	notifier_block_no_const *nb;
 
 	if (global_disable)
 		return;
@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug
 
 static void check_for_stack(struct device *dev, void *addr)
 {
-	if (object_is_on_stack(addr))
+	if (object_starts_on_stack(addr))
 		err_printk(dev, NULL, "DMA-API: device driver maps memory from"
 				"stack [addr=%p]\n", addr);
 }
diff -ruNp linux-3.13.11/lib/inflate.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/inflate.c
--- linux-3.13.11/lib/inflate.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/inflate.c	2014-07-09 12:00:15.000000000 +0200
@@ -269,7 +269,7 @@ static void free(void *where)
 		malloc_ptr = free_mem_ptr;
 }
 #else
-#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define malloc(a) kmalloc((a), GFP_KERNEL)
 #define free(a) kfree(a)
 #endif
 
diff -ruNp linux-3.13.11/lib/ioremap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/ioremap.c
--- linux-3.13.11/lib/ioremap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/ioremap.c	2014-07-09 12:00:15.000000000 +0200
@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_
 	unsigned long next;
 
 	phys_addr -= addr;
-	pmd = pmd_alloc(&init_mm, pud, addr);
+	pmd = pmd_alloc_kernel(&init_mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
 	do {
@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_
 	unsigned long next;
 
 	phys_addr -= addr;
-	pud = pud_alloc(&init_mm, pgd, addr);
+	pud = pud_alloc_kernel(&init_mm, pgd, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
diff -ruNp linux-3.13.11/lib/is_single_threaded.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/is_single_threaded.c
--- linux-3.13.11/lib/is_single_threaded.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/is_single_threaded.c	2014-07-09 12:00:15.000000000 +0200
@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
 	struct task_struct *p, *t;
 	bool ret;
 
+	if (!mm)
+		return true;
+
 	if (atomic_read(&task->signal->live) != 1)
 		return false;
 
diff -ruNp linux-3.13.11/lib/kobject.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/kobject.c
--- linux-3.13.11/lib/kobject.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/kobject.c	2014-07-09 12:00:15.000000000 +0200
@@ -957,9 +957,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
 
 
 static DEFINE_SPINLOCK(kobj_ns_type_lock);
-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
 
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
 {
 	enum kobj_ns_type type = ops->type;
 	int error;
diff -ruNp linux-3.13.11/lib/list_debug.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/list_debug.c
--- linux-3.13.11/lib/list_debug.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/list_debug.c	2014-07-09 12:00:15.000000000 +0200
@@ -11,7 +11,9 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/rculist.h>
+#include <linux/mm.h>
 
+#ifdef CONFIG_DEBUG_LIST
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -19,21 +21,40 @@
  * the prev/next entries already!
  */
 
+static bool __list_add_debug(struct list_head *new,
+			     struct list_head *prev,
+			     struct list_head *next)
+{
+	if (unlikely(next->prev != prev)) {
+		printk(KERN_ERR "list_add corruption. next->prev should be "
+			"prev (%p), but was %p. (next=%p).\n",
+			prev, next->prev, next);
+		BUG();
+		return false;
+	}
+	if (unlikely(prev->next != next)) {
+		printk(KERN_ERR "list_add corruption. prev->next should be "
+			"next (%p), but was %p. (prev=%p).\n",
+			next, prev->next, prev);
+		BUG();
+		return false;
+	}
+	if (unlikely(new == prev || new == next)) {
+		printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
+			new, prev, next);
+		BUG();
+		return false;
+	}
+	return true;
+}
+
 void __list_add(struct list_head *new,
-			      struct list_head *prev,
-			      struct list_head *next)
+		struct list_head *prev,
+		struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add corruption. next->prev should be "
-		"prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add corruption. prev->next should be "
-		"next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
-	WARN(new == prev || new == next,
-	     "list_add double add: new=%p, prev=%p, next=%p.\n",
-	     new, prev, next);
+	if (!__list_add_debug(new, prev, next))
+		return;
+
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
 }
 EXPORT_SYMBOL(__list_add);
 
-void __list_del_entry(struct list_head *entry)
+static bool __list_del_entry_debug(struct list_head *entry)
 {
 	struct list_head *prev, *next;
 
 	prev = entry->prev;
 	next = entry->next;
 
-	if (WARN(next == LIST_POISON1,
-		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
-		entry, LIST_POISON1) ||
-	    WARN(prev == LIST_POISON2,
-		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
-		entry, LIST_POISON2) ||
-	    WARN(prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, prev->next) ||
-	    WARN(next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, next->prev))
+	if (unlikely(next == LIST_POISON1)) {
+		printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+			entry, LIST_POISON1);
+		BUG();
+		return false;
+	}
+	if (unlikely(prev == LIST_POISON2)) {
+		printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+			entry, LIST_POISON2);
+		BUG();
+		return false;
+	}
+	if (unlikely(entry->prev->next != entry)) {
+		printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+			"but was %p\n", entry, prev->next);
+		BUG();
+		return false;
+	}
+	if (unlikely(entry->next->prev != entry)) {
+		printk(KERN_ERR "list_del corruption. next->prev should be %p, "
+			"but was %p\n", entry, next->prev);
+		BUG();
+		return false;
+	}
+	return true;
+}
+
+void __list_del_entry(struct list_head *entry)
+{
+	if (!__list_del_entry_debug(entry))
 		return;
 
-	__list_del(prev, next);
+	__list_del(entry->prev, entry->next);
 }
 EXPORT_SYMBOL(__list_del_entry);
 
@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
 void __list_add_rcu(struct list_head *new,
 		    struct list_head *prev, struct list_head *next)
 {
-	WARN(next->prev != prev,
-		"list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
-		prev, next->prev, next);
-	WARN(prev->next != next,
-		"list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
-		next, prev->next, prev);
+	if (!__list_add_debug(new, prev, next))
+		return;
+
 	new->next = next;
 	new->prev = prev;
 	rcu_assign_pointer(list_next_rcu(prev), new);
 	next->prev = new;
 }
 EXPORT_SYMBOL(__list_add_rcu);
+#endif
+
+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
+{
+#ifdef CONFIG_DEBUG_LIST
+	if (!__list_add_debug(new, prev, next))
+		return;
+#endif
+
+	pax_open_kernel();
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+	pax_close_kernel();
+}
+EXPORT_SYMBOL(__pax_list_add);
+
+void pax_list_del(struct list_head *entry)
+{
+#ifdef CONFIG_DEBUG_LIST
+	if (!__list_del_entry_debug(entry))
+		return;
+#endif
+
+	pax_open_kernel();
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+	pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del);
+
+void pax_list_del_init(struct list_head *entry)
+{
+	pax_open_kernel();
+	__list_del(entry->prev, entry->next);
+	INIT_LIST_HEAD(entry);
+	pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del_init);
+
+void __pax_list_add_rcu(struct list_head *new,
+			struct list_head *prev, struct list_head *next)
+{
+#ifdef CONFIG_DEBUG_LIST
+	if (!__list_add_debug(new, prev, next))
+		return;
+#endif
+
+	pax_open_kernel();
+	new->next = next;
+	new->prev = prev;
+	rcu_assign_pointer(list_next_rcu(prev), new);
+	next->prev = new;
+	pax_close_kernel();
+}
+EXPORT_SYMBOL(__pax_list_add_rcu);
+
+void pax_list_del_rcu(struct list_head *entry)
+{
+#ifdef CONFIG_DEBUG_LIST
+	if (!__list_del_entry_debug(entry))
+		return;
+#endif
+
+	pax_open_kernel();
+	__list_del(entry->prev, entry->next);
+	entry->next = LIST_POISON1;
+	entry->prev = LIST_POISON2;
+	pax_close_kernel();
+}
+EXPORT_SYMBOL(pax_list_del_rcu);
diff -ruNp linux-3.13.11/lib/percpu-refcount.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/percpu-refcount.c
--- linux-3.13.11/lib/percpu-refcount.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/percpu-refcount.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,7 +29,7 @@
  * can't hit 0 before we've added up all the percpu refs.
  */
 
-#define PCPU_COUNT_BIAS		(1U << 31)
+#define PCPU_COUNT_BIAS		(1U << 30)
 
 /**
  * percpu_ref_init - initialize a percpu refcount
diff -ruNp linux-3.13.11/lib/radix-tree.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/radix-tree.c
--- linux-3.13.11/lib/radix-tree.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/radix-tree.c	2014-07-09 12:00:15.000000000 +0200
@@ -93,7 +93,7 @@ struct radix_tree_preload {
 	int nr;
 	struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
 
 static inline void *ptr_to_indirect(void *ptr)
 {
diff -ruNp linux-3.13.11/lib/random32.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/random32.c
--- linux-3.13.11/lib/random32.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/random32.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,7 +44,7 @@
 static void __init prandom_state_selftest(void);
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  *	prandom_u32_state - seeded pseudo-random number generator.
diff -ruNp linux-3.13.11/lib/rbtree.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/rbtree.c
--- linux-3.13.11/lib/rbtree.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/rbtree.c	2014-07-09 12:00:15.000000000 +0200
@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_
 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
 
 static const struct rb_augment_callbacks dummy_callbacks = {
-	dummy_propagate, dummy_copy, dummy_rotate
+	.propagate = dummy_propagate,
+	.copy = dummy_copy,
+	.rotate = dummy_rotate
 };
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
diff -ruNp linux-3.13.11/lib/strncpy_from_user.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/strncpy_from_user.c
--- linux-3.13.11/lib/strncpy_from_user.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/strncpy_from_user.c	2014-07-09 12:00:15.000000000 +0200
@@ -21,7 +21,7 @@
  */
 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
 {
-	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+	static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 	long res = 0;
 
 	/*
diff -ruNp linux-3.13.11/lib/strnlen_user.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/strnlen_user.c
--- linux-3.13.11/lib/strnlen_user.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/strnlen_user.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,7 +26,7 @@
  */
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
-	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+	static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
 	long align, res = 0;
 	unsigned long c;
 
diff -ruNp linux-3.13.11/lib/swiotlb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/swiotlb.c
--- linux-3.13.11/lib/swiotlb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/swiotlb.c	2014-07-09 12:00:15.000000000 +0200
@@ -668,7 +668,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-		      dma_addr_t dev_addr)
+		      dma_addr_t dev_addr, struct dma_attrs *attrs)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
diff -ruNp linux-3.13.11/lib/usercopy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/usercopy.c
--- linux-3.13.11/lib/usercopy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/usercopy.c	2014-07-09 12:00:15.000000000 +0200
@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
 	WARN(1, "Buffer overflow detected!\n");
 }
 EXPORT_SYMBOL(copy_from_user_overflow);
+
+void copy_to_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_to_user_overflow);
diff -ruNp linux-3.13.11/lib/vsprintf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/vsprintf.c
--- linux-3.13.11/lib/vsprintf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/lib/vsprintf.c	2014-07-09 12:00:15.000000000 +0200
@@ -16,6 +16,9 @@
  * - scnprintf and vscnprintf
  */
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+#define __INCLUDED_BY_HIDESYM 1
+#endif
 #include <stdarg.h>
 #include <linux/module.h>	/* for KSYM_SYMBOL_LEN */
 #include <linux/types.h>
@@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, c
 	return number(buf, end, *(const netdev_features_t *)addr, spec);
 }
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+int kptr_restrict __read_mostly = 2;
+#else
 int kptr_restrict __read_mostly;
+#endif
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
@@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
  * - 'f' For simple symbolic function names without offset
  * - 'S' For symbolic direct pointers with offset
  * - 's' For symbolic direct pointers without offset
+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
  * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
  * - 'B' For backtraced symbolic direct pointers with offset
  * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
@@ -1234,12 +1242,12 @@ char *pointer(const char *fmt, char *buf
 
 	if (!ptr && *fmt != 'K') {
 		/*
-		 * Print (null) with the same width as a pointer so it makes
+		 * Print (nil) with the same width as a pointer so it makes
 		 * tabular output look nice.
 		 */
 		if (spec.field_width == -1)
 			spec.field_width = default_width;
-		return string(buf, end, "(null)", spec);
+		return string(buf, end, "(nil)", spec);
 	}
 
 	switch (*fmt) {
@@ -1249,6 +1257,12 @@ char *pointer(const char *fmt, char *buf
 		/* Fallthrough */
 	case 'S':
 	case 's':
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+		break;
+#else
+		return symbol_string(buf, end, ptr, spec, fmt);
+#endif
+	case 'A':
 	case 'B':
 		return symbol_string(buf, end, ptr, spec, fmt);
 	case 'R':
@@ -1304,6 +1318,8 @@ char *pointer(const char *fmt, char *buf
 			va_end(va);
 			return buf;
 		}
+	case 'P':
+		break;
 	case 'K':
 		/*
 		 * %pK cannot be used in IRQ context because its test
@@ -1365,6 +1381,21 @@ char *pointer(const char *fmt, char *buf
 				   ((const struct file *)ptr)->f_path.dentry,
 				   spec, fmt);
 	}
+
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	/* 'P' = approved pointers to copy to userland,
+	   as in the /proc/kallsyms case, as we make it display nothing
+	   for non-root users, and the real contents for root users
+	   Also ignore 'K' pointers, since we force their NULLing for non-root users
+	   above
+	*/
+	if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
+		printk(KERN_ALERT "grsec: kernel infoleak detected!  Please report this log to spender@grsecurity.net.\n");
+		dump_stack();
+		ptr = NULL;
+	}
+#endif
+
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
 		spec.field_width = default_width;
@@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size,
 	typeof(type) value;						\
 	if (sizeof(type) == 8) {					\
 		args = PTR_ALIGN(args, sizeof(u32));			\
-		*(u32 *)&value = *(u32 *)args;				\
-		*((u32 *)&value + 1) = *(u32 *)(args + 4);		\
+		*(u32 *)&value = *(const u32 *)args;			\
+		*((u32 *)&value + 1) = *(const u32 *)(args + 4);	\
 	} else {							\
 		args = PTR_ALIGN(args, sizeof(type));			\
-		value = *(typeof(type) *)args;				\
+		value = *(const typeof(type) *)args;			\
 	}								\
 	args += sizeof(type);						\
 	value;								\
@@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size,
 		case FORMAT_TYPE_STR: {
 			const char *str_arg = args;
 			args += strlen(str_arg) + 1;
-			str = string(str, end, (char *)str_arg, spec);
+			str = string(str, end, str_arg, spec);
 			break;
 		}
 
diff -ruNp linux-3.13.11/localversion-grsec linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/localversion-grsec
--- linux-3.13.11/localversion-grsec	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/localversion-grsec	2014-07-09 12:01:53.000000000 +0200
@@ -0,0 +1 @@
+-grsec3.0
diff -ruNp linux-3.13.11/localversion-vserver linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/localversion-vserver
--- linux-3.13.11/localversion-vserver	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/localversion-vserver	2014-07-09 12:01:07.000000000 +0200
@@ -0,0 +1 @@
+-vs2.3.6.11
diff -ruNp linux-3.13.11/mm/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/Kconfig
--- linux-3.13.11/mm/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/Kconfig	2014-07-09 12:00:15.000000000 +0200
@@ -326,10 +326,11 @@ config KSM
 	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
-        int "Low address space to protect from user allocation"
+	int "Low address space to protect from user allocation"
 	depends on MMU
-        default 4096
-        help
+	default 32768 if ALPHA || ARM || PARISC || SPARC32
+	default 65536
+	help
 	  This is the portion of low virtual memory which should be protected
 	  from userspace allocation.  Keeping a user from writing to low pages
 	  can help reduce the impact of kernel NULL pointer bugs.
@@ -360,7 +361,7 @@ config MEMORY_FAILURE
 
 config HWPOISON_INJECT
 	tristate "HWPoison pages injector"
-	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
+	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
 	select PROC_PAGE_MONITOR
 
 config NOMMU_INITIAL_TRIM_EXCESS
diff -ruNp linux-3.13.11/mm/backing-dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/backing-dev.c
--- linux-3.13.11/mm/backing-dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/backing-dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -12,7 +12,7 @@
 #include <linux/device.h>
 #include <trace/events/writeback.h>
 
-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
 
 struct backing_dev_info default_backing_dev_info = {
 	.name		= "default",
@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backin
 		return err;
 
 	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
-			   atomic_long_inc_return(&bdi_seq));
+			   atomic_long_inc_return_unchecked(&bdi_seq));
 	if (err) {
 		bdi_destroy(bdi);
 		return err;
diff -ruNp linux-3.13.11/mm/filemap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/filemap.c
--- linux-3.13.11/mm/filemap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/filemap.c	2014-07-09 12:00:15.000000000 +0200
@@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file
 	struct address_space *mapping = file->f_mapping;
 
 	if (!mapping->a_ops->readpage)
-		return -ENOEXEC;
+		return -ENODEV;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
 	return 0;
@@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_ina
 
 	while (bytes) {
 		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
+		size_t copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
 		left = __copy_from_user_inatomic(vaddr, buf, copy);
@@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(st
 	BUG_ON(!in_atomic());
 	kaddr = kmap_atomic(page);
 	if (likely(i->nr_segs == 1)) {
-		int left;
+		size_t left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
 		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 		copied = bytes - left;
@@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct pa
 
 	kaddr = kmap(page);
 	if (likely(i->nr_segs == 1)) {
-		int left;
+		size_t left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
 		left = __copy_from_user(kaddr + offset, buf, bytes);
 		copied = bytes - left;
@@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i
 		 * zero-length segments (without overruning the iovec).
 		 */
 		while (bytes || unlikely(i->count && !iov->iov_len)) {
-			int copy;
+			size_t copy;
 
 			copy = min(bytes, iov->iov_len - base);
 			BUG_ON(!i->count || i->count < copy);
@@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct f
                         *pos = i_size_read(inode);
 
 		if (limit != RLIM_INFINITY) {
+			gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
 			if (*pos >= limit) {
 				send_sig(SIGXFSZ, current, 0);
 				return -EFBIG;
diff -ruNp linux-3.13.11/mm/fremap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/fremap.c
--- linux-3.13.11/mm/fremap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/fremap.c	2014-07-09 12:00:15.000000000 +0200
@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
  retry:
 	vma = find_vma(mm, start);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
+		goto out;
+#endif
+
 	/*
 	 * Make sure the vma is shared, that it supports prefaulting,
 	 * and that the remapped range is valid and fully within
diff -ruNp linux-3.13.11/mm/highmem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/highmem.c
--- linux-3.13.11/mm/highmem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/highmem.c	2014-07-09 12:00:15.000000000 +0200
@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
 		 * So no dangers, even with speculative execution.
 		 */
 		page = pte_page(pkmap_page_table[i]);
+		pax_open_kernel();
 		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
-
+		pax_close_kernel();
 		set_page_address(page, NULL);
 		need_flush = 1;
 	}
@@ -198,9 +199,11 @@ start:
 		}
 	}
 	vaddr = PKMAP_ADDR(last_pkmap_nr);
+
+	pax_open_kernel();
 	set_pte_at(&init_mm, vaddr,
 		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
-
+	pax_close_kernel();
 	pkmap_count[last_pkmap_nr] = 1;
 	set_page_address(page, (void *)vaddr);
 
diff -ruNp linux-3.13.11/mm/hugetlb.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/hugetlb.c
--- linux-3.13.11/mm/hugetlb.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/hugetlb.c	2014-07-09 12:00:15.000000000 +0200
@@ -2077,15 +2077,17 @@ static int hugetlb_sysctl_handler_common
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
 	int ret;
+	ctl_table_no_const hugetlb_table;
 
 	tmp = h->max_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
 
-	table->data = &tmp;
-	table->maxlen = sizeof(unsigned long);
-	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	hugetlb_table = *table;
+	hugetlb_table.data = &tmp;
+	hugetlb_table.maxlen = sizeof(unsigned long);
+	ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
 	if (ret)
 		goto out;
 
@@ -2130,15 +2132,17 @@ int hugetlb_overcommit_handler(struct ct
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
 	int ret;
+	ctl_table_no_const hugetlb_table;
 
 	tmp = h->nr_overcommit_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
 
-	table->data = &tmp;
-	table->maxlen = sizeof(unsigned long);
-	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	hugetlb_table = *table;
+	hugetlb_table.data = &tmp;
+	hugetlb_table.maxlen = sizeof(unsigned long);
+	ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
 	if (ret)
 		goto out;
 
@@ -2596,6 +2600,27 @@ static int unmap_ref_private(struct mm_s
 	return 1;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *vma_m;
+	unsigned long address_m;
+	pte_t *ptep_m;
+
+	vma_m = pax_find_mirror_vma(vma);
+	if (!vma_m)
+		return;
+
+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+	address_m = address + SEGMEXEC_TASK_SIZE;
+	ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
+	get_page(page_m);
+	hugepage_add_anon_rmap(page_m, vma_m, address_m);
+	set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
+}
+#endif
+
 /*
  * Hugetlb_cow() should be called with page lock of the original hugepage held.
  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
@@ -2712,6 +2737,11 @@ retry_avoidcopy:
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page);
 		hugepage_add_new_anon_rmap(new_page, vma, address);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		pax_mirror_huge_pte(vma, address, new_page);
+#endif
+
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
@@ -2876,6 +2906,10 @@ retry:
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	pax_mirror_huge_pte(vma, address, page);
+#endif
+
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
@@ -2906,6 +2940,10 @@ int hugetlb_fault(struct mm_struct *mm,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m;
+#endif
+
 	address &= huge_page_mask(h);
 
 	ptep = huge_pte_offset(mm, address);
@@ -2919,6 +2957,26 @@ int hugetlb_fault(struct mm_struct *mm,
 				VM_FAULT_SET_HINDEX(hstate_index(h));
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma_m = pax_find_mirror_vma(vma);
+	if (vma_m) {
+		unsigned long address_m;
+
+		if (vma->vm_start > vma_m->vm_start) {
+			address_m = address;
+			address -= SEGMEXEC_TASK_SIZE;
+			vma = vma_m;
+			h = hstate_vma(vma);
+		} else
+			address_m = address + SEGMEXEC_TASK_SIZE;
+
+		if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
+			return VM_FAULT_OOM;
+		address_m &= HPAGE_MASK;
+		unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
+	}
+#endif
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
diff -ruNp linux-3.13.11/mm/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/internal.h
--- linux-3.13.11/mm/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/internal.h	2014-07-09 12:00:15.000000000 +0200
@@ -96,6 +96,7 @@ extern pmd_t *mm_find_pmd(struct mm_stru
  * in mm/page_alloc.c
  */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void free_compound_page(struct page *page);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
 
 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
-        unsigned long, unsigned long);
+        unsigned long, unsigned long) __intentional_overflow(-1);
 
 extern void set_pageblock_order(void);
 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
diff -ruNp linux-3.13.11/mm/kmemleak.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/kmemleak.c
--- linux-3.13.11/mm/kmemleak.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/kmemleak.c	2014-07-09 12:00:15.000000000 +0200
@@ -363,7 +363,7 @@ static void print_unreferenced(struct se
 
 	for (i = 0; i < object->trace_len; i++) {
 		void *ptr = (void *)object->trace[i];
-		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
+		seq_printf(seq, "    [<%pP>] %pA\n", ptr, ptr);
 	}
 }
 
@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(voi
 		return -ENOMEM;
 	}
 
-	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
+	dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
 				     &kmemleak_fops);
 	if (!dentry)
 		pr_warning("Failed to create the debugfs kmemleak file\n");
diff -ruNp linux-3.13.11/mm/maccess.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/maccess.c
--- linux-3.13.11/mm/maccess.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/maccess.c	2014-07-09 12:00:15.000000000 +0200
@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, cons
 	set_fs(KERNEL_DS);
 	pagefault_disable();
 	ret = __copy_from_user_inatomic(dst,
-			(__force const void __user *)src, size);
+			(const void __force_user *)src, size);
 	pagefault_enable();
 	set_fs(old_fs);
 
@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, con
 
 	set_fs(KERNEL_DS);
 	pagefault_disable();
-	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+	ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
 	pagefault_enable();
 	set_fs(old_fs);
 
diff -ruNp linux-3.13.11/mm/madvise.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/madvise.c
--- linux-3.13.11/mm/madvise.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/madvise.c	2014-07-09 12:00:15.000000000 +0200
@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_a
 	pgoff_t pgoff;
 	unsigned long new_flags = vma->vm_flags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m;
+#endif
+
 	switch (behavior) {
 	case MADV_NORMAL:
 		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
@@ -126,6 +130,13 @@ success:
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma_m = pax_find_mirror_vma(vma);
+	if (vma_m)
+		vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
+#endif
+
 	vma->vm_flags = new_flags;
 
 out:
@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_a
 			     struct vm_area_struct **prev,
 			     unsigned long start, unsigned long end)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m;
+#endif
+
 	*prev = vma;
 	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
 		return -EINVAL;
@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_a
 		zap_page_range(vma, start, end - start, &details);
 	} else
 		zap_page_range(vma, start, end - start, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma_m = pax_find_mirror_vma(vma);
+	if (vma_m) {
+		if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+			struct zap_details details = {
+				.nonlinear_vma = vma_m,
+				.last_index = ULONG_MAX,
+			};
+			zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
+		} else
+			zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
+	}
+#endif
+
 	return 0;
 }
 
@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
 	if (end < start)
 		return error;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return error;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return error;
+
 	error = 0;
 	if (end == start)
 		return error;
diff -ruNp linux-3.13.11/mm/memcontrol.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memcontrol.c
--- linux-3.13.11/mm/memcontrol.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memcontrol.c	2014-07-09 12:00:15.000000000 +0200
@@ -1056,6 +1056,31 @@ struct mem_cgroup *mem_cgroup_from_task(
 	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
 }
 
+u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member)
+{
+	return res_counter_read_u64(&mem->res, member);
+}
+
+u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member)
+{
+	return res_counter_read_u64(&mem->memsw, member);
+}
+
+s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
+}
+
+s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
+}
+
+s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem)
+{
+	return mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
+}
+
 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
 	struct mem_cgroup *memcg = NULL;
diff -ruNp linux-3.13.11/mm/memory-failure.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memory-failure.c
--- linux-3.13.11/mm/memory-failure.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memory-failure.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __r
 
 int sysctl_memory_failure_recovery __read_mostly = 1;
 
-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
 
@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct
 		pfn, t->comm, t->pid);
 	si.si_signo = SIGBUS;
 	si.si_errno = 0;
-	si.si_addr = (void *)addr;
+	si.si_addr = (void __user *)addr;
 #ifdef __ARCH_SI_TRAPNO
 	si.si_trapno = trapno;
 #endif
@@ -762,7 +762,7 @@ static struct page_state {
 	unsigned long res;
 	char *msg;
 	int (*action)(struct page *p, unsigned long pfn);
-} error_states[] = {
+} __do_const error_states[] = {
 	{ reserved,	reserved,	"reserved kernel",	me_kernel },
 	/*
 	 * free pages are specially detected outside this table:
@@ -1062,7 +1062,7 @@ int memory_failure(unsigned long pfn, in
 		nr_pages = 1 << compound_order(hpage);
 	else /* normal page or thp */
 		nr_pages = 1;
-	atomic_long_add(nr_pages, &num_poisoned_pages);
+	atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
 
 	/*
 	 * We need/can do nothing about count=0 pages.
@@ -1092,7 +1092,7 @@ int memory_failure(unsigned long pfn, in
 			if (!PageHWPoison(hpage)
 			    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
 			    || (p != hpage && TestSetPageHWPoison(hpage))) {
-				atomic_long_sub(nr_pages, &num_poisoned_pages);
+				atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
 				return 0;
 			}
 			set_page_hwpoison_huge_page(hpage);
@@ -1161,7 +1161,7 @@ int memory_failure(unsigned long pfn, in
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
-			atomic_long_sub(nr_pages, &num_poisoned_pages);
+			atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
 		unlock_page(hpage);
 		put_page(hpage);
 		return 0;
@@ -1383,7 +1383,7 @@ int unpoison_memory(unsigned long pfn)
 			return 0;
 		}
 		if (TestClearPageHWPoison(p))
-			atomic_long_dec(&num_poisoned_pages);
+			atomic_long_dec_unchecked(&num_poisoned_pages);
 		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
 		return 0;
 	}
@@ -1397,7 +1397,7 @@ int unpoison_memory(unsigned long pfn)
 	 */
 	if (TestClearPageHWPoison(page)) {
 		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
-		atomic_long_sub(nr_pages, &num_poisoned_pages);
+		atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
 		freeit = 1;
 		if (PageHuge(page))
 			clear_page_hwpoison_huge_page(page);
@@ -1522,11 +1522,11 @@ static int soft_offline_huge_page(struct
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
 			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_order(hpage),
+			atomic_long_add_unchecked(1 << compound_order(hpage),
 					&num_poisoned_pages);
 		} else {
 			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
+			atomic_long_inc_unchecked(&num_poisoned_pages);
 		}
 	}
 	return ret;
@@ -1565,7 +1565,7 @@ static int __soft_offline_page(struct pa
 		put_page(page);
 		pr_info("soft_offline: %#lx: invalidated\n", pfn);
 		SetPageHWPoison(page);
-		atomic_long_inc(&num_poisoned_pages);
+		atomic_long_inc_unchecked(&num_poisoned_pages);
 		return 0;
 	}
 
@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct pa
 			if (!is_free_buddy_page(page))
 				pr_info("soft offline: %#lx: page leaked\n",
 					pfn);
-			atomic_long_inc(&num_poisoned_pages);
+			atomic_long_inc_unchecked(&num_poisoned_pages);
 		}
 	} else {
 		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
@@ -1684,11 +1684,11 @@ int soft_offline_page(struct page *page,
 		if (PageHuge(page)) {
 			set_page_hwpoison_huge_page(hpage);
 			dequeue_hwpoisoned_huge_page(hpage);
-			atomic_long_add(1 << compound_order(hpage),
+			atomic_long_add_unchecked(1 << compound_order(hpage),
 					&num_poisoned_pages);
 		} else {
 			SetPageHWPoison(page);
-			atomic_long_inc(&num_poisoned_pages);
+			atomic_long_inc_unchecked(&num_poisoned_pages);
 		}
 	}
 	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff -ruNp linux-3.13.11/mm/memory.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memory.c
--- linux-3.13.11/mm/memory.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/memory.c	2014-07-09 12:00:15.000000000 +0200
@@ -402,6 +402,7 @@ static inline void free_pmd_range(struct
 		free_pte_range(tlb, pmd, addr);
 	} while (pmd++, addr = next, addr != end);
 
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
 	start &= PUD_MASK;
 	if (start < floor)
 		return;
@@ -416,6 +417,8 @@ static inline void free_pmd_range(struct
 	pmd = pmd_offset(pud, start);
 	pud_clear(pud);
 	pmd_free_tlb(tlb, pmd, start);
+#endif
+
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -435,6 +438,7 @@ static inline void free_pud_range(struct
 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 	} while (pud++, addr = next, addr != end);
 
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
 	start &= PGDIR_MASK;
 	if (start < floor)
 		return;
@@ -449,6 +453,8 @@ static inline void free_pud_range(struct
 	pud = pud_offset(pgd, start);
 	pgd_clear(pgd);
 	pud_free_tlb(tlb, pud, start);
+#endif
+
 }
 
 /*
@@ -1635,12 +1641,6 @@ no_page_table:
 	return page;
 }
 
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
-	return stack_guard_page_start(vma, addr) ||
-	       stack_guard_page_end(vma, addr+PAGE_SIZE);
-}
-
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:	task_struct of target task
@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct
 
 	i = 0;
 
-	do {
+	while (nr_pages) {
 		struct vm_area_struct *vma;
 
-		vma = find_extend_vma(mm, start);
+		vma = find_vma(mm, start);
 		if (!vma && in_gate_area(mm, start)) {
 			unsigned long pg = start & PAGE_MASK;
 			pgd_t *pgd;
@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct
 			goto next_page;
 		}
 
-		if (!vma ||
+		if (!vma || start < vma->vm_start ||
 		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 		    !(vm_flags & vma->vm_flags))
 			return i ? : -EFAULT;
@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct
 				int ret;
 				unsigned int fault_flags = 0;
 
-				/* For mlock, just skip the stack guard page. */
-				if (foll_flags & FOLL_MLOCK) {
-					if (stack_guard_page(vma, start))
-						goto next_page;
-				}
 				if (foll_flags & FOLL_WRITE)
 					fault_flags |= FAULT_FLAG_WRITE;
 				if (nonblocking)
@@ -1892,7 +1887,7 @@ next_page:
 			start += page_increm * PAGE_SIZE;
 			nr_pages -= page_increm;
 		} while (nr_pages && start < vma->vm_end);
-	} while (nr_pages);
+	}
 	return i;
 }
 EXPORT_SYMBOL(__get_user_pages);
@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_st
 	page_add_file_rmap(page);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	pax_mirror_file_pte(vma, addr, page, ptl);
+#endif
+
 	retval = 0;
 	pte_unmap_unlock(pte, ptl);
 	return retval;
@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct
 	if (!page_count(page))
 		return -EINVAL;
 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		struct vm_area_struct *vma_m;
+#endif
+
 		BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
 		BUG_ON(vma->vm_flags & VM_PFNMAP);
 		vma->vm_flags |= VM_MIXEDMAP;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		vma_m = pax_find_mirror_vma(vma);
+		if (vma_m)
+			vma_m->vm_flags |= VM_MIXEDMAP;
+#endif
+
 	}
 	return insert_page(vma, addr, page, vma->vm_page_prot);
 }
@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struc
 			unsigned long pfn)
 {
 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+	BUG_ON(vma->vm_mirror);
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_
 
 	BUG_ON(pud_huge(*pud));
 
-	pmd = pmd_alloc(mm, pud, addr);
+	pmd = (mm == &init_mm) ?
+		pmd_alloc_kernel(mm, pud, addr) :
+		pmd_alloc(mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
 	do {
@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_
 	unsigned long next;
 	int err;
 
-	pud = pud_alloc(mm, pgd, addr);
+	pud = (mm == &init_mm) ?
+		pud_alloc_kernel(mm, pgd, addr) :
+		pud_alloc(mm, pgd, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct
 		copy_user_highpage(dst, src, va, vma);
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	spinlock_t *ptl;
+	pte_t *pte, entry;
+
+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	entry = *pte;
+	if (!pte_present(entry)) {
+		if (!pte_none(entry)) {
+			BUG_ON(pte_file(entry));
+			free_swap_and_cache(pte_to_swp_entry(entry));
+			pte_clear_not_present_full(mm, address, pte, 0);
+		}
+	} else {
+		struct page *page;
+
+		flush_cache_page(vma, address, pte_pfn(entry));
+		entry = ptep_clear_flush(vma, address, pte);
+		BUG_ON(pte_dirty(entry));
+		page = vm_normal_page(vma, address, entry);
+		if (page) {
+			update_hiwater_rss(mm);
+			if (PageAnon(page))
+				dec_mm_counter_fast(mm, MM_ANONPAGES);
+			else
+				dec_mm_counter_fast(mm, MM_FILEPAGES);
+			page_remove_rmap(page);
+			page_cache_release(page);
+		}
+	}
+	pte_unmap_unlock(pte, ptl);
+}
+
+/* PaX: if vma is mirrored, synchronize the mirror's PTE
+ *
+ * the ptl of the lower mapped page is held on entry and is not released on exit
+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
+ */
+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address_m;
+	spinlock_t *ptl_m;
+	struct vm_area_struct *vma_m;
+	pmd_t *pmd_m;
+	pte_t *pte_m, entry_m;
+
+	BUG_ON(!page_m || !PageAnon(page_m));
+
+	vma_m = pax_find_mirror_vma(vma);
+	if (!vma_m)
+		return;
+
+	BUG_ON(!PageLocked(page_m));
+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+	address_m = address + SEGMEXEC_TASK_SIZE;
+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+	pte_m = pte_offset_map(pmd_m, address_m);
+	ptl_m = pte_lockptr(mm, pmd_m);
+	if (ptl != ptl_m) {
+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+		if (!pte_none(*pte_m))
+			goto out;
+	}
+
+	entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
+	page_cache_get(page_m);
+	page_add_anon_rmap(page_m, vma_m, address_m);
+	inc_mm_counter_fast(mm, MM_ANONPAGES);
+	set_pte_at(mm, address_m, pte_m, entry_m);
+	update_mmu_cache(vma_m, address_m, pte_m);
+out:
+	if (ptl != ptl_m)
+		spin_unlock(ptl_m);
+	pte_unmap(pte_m);
+	unlock_page(page_m);
+}
+
+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address_m;
+	spinlock_t *ptl_m;
+	struct vm_area_struct *vma_m;
+	pmd_t *pmd_m;
+	pte_t *pte_m, entry_m;
+
+	BUG_ON(!page_m || PageAnon(page_m));
+
+	vma_m = pax_find_mirror_vma(vma);
+	if (!vma_m)
+		return;
+
+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+	address_m = address + SEGMEXEC_TASK_SIZE;
+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+	pte_m = pte_offset_map(pmd_m, address_m);
+	ptl_m = pte_lockptr(mm, pmd_m);
+	if (ptl != ptl_m) {
+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+		if (!pte_none(*pte_m))
+			goto out;
+	}
+
+	entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
+	page_cache_get(page_m);
+	page_add_file_rmap(page_m);
+	inc_mm_counter_fast(mm, MM_FILEPAGES);
+	set_pte_at(mm, address_m, pte_m, entry_m);
+	update_mmu_cache(vma_m, address_m, pte_m);
+out:
+	if (ptl != ptl_m)
+		spin_unlock(ptl_m);
+	pte_unmap(pte_m);
+}
+
+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address_m;
+	spinlock_t *ptl_m;
+	struct vm_area_struct *vma_m;
+	pmd_t *pmd_m;
+	pte_t *pte_m, entry_m;
+
+	vma_m = pax_find_mirror_vma(vma);
+	if (!vma_m)
+		return;
+
+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
+	address_m = address + SEGMEXEC_TASK_SIZE;
+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
+	pte_m = pte_offset_map(pmd_m, address_m);
+	ptl_m = pte_lockptr(mm, pmd_m);
+	if (ptl != ptl_m) {
+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
+		if (!pte_none(*pte_m))
+			goto out;
+	}
+
+	entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
+	set_pte_at(mm, address_m, pte_m, entry_m);
+out:
+	if (ptl != ptl_m)
+		spin_unlock(ptl_m);
+	pte_unmap(pte_m);
+}
+
+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
+{
+	struct page *page_m;
+	pte_t entry;
+
+	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
+		goto out;
+
+	entry = *pte;
+	page_m  = vm_normal_page(vma, address, entry);
+	if (!page_m)
+		pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
+	else if (PageAnon(page_m)) {
+		if (pax_find_mirror_vma(vma)) {
+			pte_unmap_unlock(pte, ptl);
+			lock_page(page_m);
+			pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
+			if (pte_same(entry, *pte))
+				pax_mirror_anon_pte(vma, address, page_m, ptl);
+			else
+				unlock_page(page_m);
+		}
+	} else
+		pax_mirror_file_pte(vma, address, page_m, ptl);
+
+out:
+	pte_unmap_unlock(pte, ptl);
+}
+#endif
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -2807,6 +3003,12 @@ gotten:
 	 */
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (pax_find_mirror_vma(vma))
+			BUG_ON(!trylock_page(new_page));
+#endif
+
 		if (old_page) {
 			if (!PageAnon(old_page)) {
 				dec_mm_counter_fast(mm, MM_FILEPAGES);
@@ -2858,6 +3060,10 @@ gotten:
 			page_remove_rmap(old_page);
 		}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		pax_mirror_anon_pte(vma, address, new_page, ptl);
+#endif
+
 		/* Free the old page.. */
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
@@ -3135,6 +3341,11 @@ static int do_swap_page(struct mm_struct
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
+#endif
+
 	unlock_page(page);
 	if (page != swapcache) {
 		/*
@@ -3158,6 +3369,11 @@ static int do_swap_page(struct mm_struct
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, page_table);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	pax_mirror_anon_pte(vma, address, page, ptl);
+#endif
+
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
@@ -3177,40 +3393,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -3219,27 +3401,23 @@ static int do_anonymous_page(struct mm_s
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags)
 {
-	struct page *page;
+	struct page *page = NULL;
 	spinlock_t *ptl;
 	pte_t entry;
 
-	pte_unmap(page_table);
-
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, address) < 0)
-		return VM_FAULT_SIGBUS;
-
-	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+		ptl = pte_lockptr(mm, pmd);
+		spin_lock(ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
+	pte_unmap(page_table);
+
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -3263,6 +3441,11 @@ static int do_anonymous_page(struct mm_s
 	if (!pte_none(*page_table))
 		goto release;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (pax_find_mirror_vma(vma))
+		BUG_ON(!trylock_page(page));
+#endif
+
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 setpte:
@@ -3270,6 +3453,12 @@ setpte:
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, page_table);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (page)
+		pax_mirror_anon_pte(vma, address, page, ptl);
+#endif
+
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
@@ -3413,6 +3602,12 @@ static int __do_fault(struct mm_struct *
 	 */
 	/* Only go through if we didn't race with anybody else... */
 	if (likely(pte_same(*page_table, orig_pte))) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (anon && pax_find_mirror_vma(vma))
+			BUG_ON(!trylock_page(page));
+#endif
+
 		flush_icache_page(vma, page);
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
@@ -3434,6 +3629,14 @@ static int __do_fault(struct mm_struct *
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, page_table);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (anon)
+			pax_mirror_anon_pte(vma, address, page, ptl);
+		else
+			pax_mirror_file_pte(vma, address, page, ptl);
+#endif
+
 	} else {
 		if (cow_page)
 			mem_cgroup_uncharge_page(cow_page);
@@ -3681,6 +3884,12 @@ static int handle_pte_fault(struct mm_st
 		if (flags & FAULT_FLAG_WRITE)
 			flush_tlb_fix_spurious_fault(vma, address);
 	}
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	pax_mirror_pte(vma, address, pte, pmd, ptl);
+	return 0;
+#endif
+
 unlock:
 	pte_unmap_unlock(pte, ptl);
 	return 0;
@@ -3697,9 +3906,41 @@ static int __handle_mm_fault(struct mm_s
 	pmd_t *pmd;
 	pte_t *pte;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m;
+#endif
+
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma_m = pax_find_mirror_vma(vma);
+	if (vma_m) {
+		unsigned long address_m;
+		pgd_t *pgd_m;
+		pud_t *pud_m;
+		pmd_t *pmd_m;
+
+		if (vma->vm_start > vma_m->vm_start) {
+			address_m = address;
+			address -= SEGMEXEC_TASK_SIZE;
+			vma = vma_m;
+		} else
+			address_m = address + SEGMEXEC_TASK_SIZE;
+
+		pgd_m = pgd_offset(mm, address_m);
+		pud_m = pud_alloc(mm, pgd_m, address_m);
+		if (!pud_m)
+			return VM_FAULT_OOM;
+		pmd_m = pmd_alloc(mm, pud_m, address_m);
+		if (!pmd_m)
+			return VM_FAULT_OOM;
+		if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
+			return VM_FAULT_OOM;
+		pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
+	}
+#endif
+
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
@@ -3830,6 +4071,23 @@ int __pud_alloc(struct mm_struct *mm, pg
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
+
+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+	pud_t *new = pud_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+	if (pgd_present(*pgd))		/* Another has populated it */
+		pud_free(mm, new);
+	else
+		pgd_populate_kernel(mm, pgd, new);
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
 #endif /* __PAGETABLE_PUD_FOLDED */
 
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -3860,6 +4118,30 @@ int __pmd_alloc(struct mm_struct *mm, pu
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
+
+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+	pmd_t *new = pmd_alloc_one(mm, address);
+	if (!new)
+		return -ENOMEM;
+
+	smp_wmb(); /* See comment in __pte_alloc */
+
+	spin_lock(&mm->page_table_lock);
+#ifndef __ARCH_HAS_4LEVEL_HACK
+	if (pud_present(*pud))		/* Another has populated it */
+		pmd_free(mm, new);
+	else
+		pud_populate_kernel(mm, pud, new);
+#else
+	if (pgd_present(*pud))		/* Another has populated it */
+		pmd_free(mm, new);
+	else
+		pgd_populate_kernel(mm, pud, new);
+#endif /* __ARCH_HAS_4LEVEL_HACK */
+	spin_unlock(&mm->page_table_lock);
+	return 0;
+}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
@@ -3873,7 +4155,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-	gate_vma.vm_page_prot = __P101;
+	gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
 
 	return 0;
 }
@@ -4007,8 +4289,8 @@ out:
 	return ret;
 }
 
-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
-			void *buf, int len, int write)
+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+			void *buf, size_t len, int write)
 {
 	resource_size_t phys_addr;
 	unsigned long prot = 0;
@@ -4034,8 +4316,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
  * Access another process' address space as given in mm.  If non-NULL, use the
  * given task for page fault accounting.
  */
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, size_t len, int write)
 {
 	struct vm_area_struct *vma;
 	void *old_buf = buf;
@@ -4043,7 +4325,7 @@ static int __access_remote_vm(struct tas
 	down_read(&mm->mmap_sem);
 	/* ignore errors, just check how much was successfully transferred */
 	while (len) {
-		int bytes, ret, offset;
+		ssize_t bytes, ret, offset;
 		void *maddr;
 		struct page *page = NULL;
 
@@ -4102,8 +4384,8 @@ static int __access_remote_vm(struct tas
  *
  * The caller must hold a reference on @mm.
  */
-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, size_t len, int write)
 {
 	return __access_remote_vm(NULL, mm, addr, buf, len, write);
 }
@@ -4113,11 +4395,11 @@ int access_remote_vm(struct mm_struct *m
  * Source/target buffer must be kernel space,
  * Do not walk the page table directly, use get_user_pages
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr,
-		void *buf, int len, int write)
+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, size_t len, int write)
 {
 	struct mm_struct *mm;
-	int ret;
+	ssize_t ret;
 
 	mm = get_task_mm(tsk);
 	if (!mm)
diff -ruNp linux-3.13.11/mm/mempolicy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mempolicy.c
--- linux-3.13.11/mm/mempolicy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mempolicy.c	2014-07-09 12:00:15.000000000 +0200
@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct
 	unsigned long vmstart;
 	unsigned long vmend;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m;
+#endif
+
 	vma = find_vma(mm, start);
 	if (!vma || vma->vm_start > start)
 		return -EFAULT;
@@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		vma_m = pax_find_mirror_vma(vma);
+		if (vma_m) {
+			err = vma_replace_policy(vma_m, new_pol);
+			if (err)
+				goto out;
+		}
+#endif
+
 	}
 
  out:
@@ -1255,6 +1269,17 @@ static long do_mbind(unsigned long start
 
 	if (end < start)
 		return -EINVAL;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return -EINVAL;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	if (end == start)
 		return 0;
 
@@ -1483,8 +1508,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
 	 */
 	tcred = __task_cred(task);
 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-	    !capable(CAP_SYS_NICE)) {
+	    !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
 		rcu_read_unlock();
 		err = -EPERM;
 		goto out_put;
@@ -1515,6 +1539,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
 		goto out;
 	}
 
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
+	if (mm != current->mm &&
+	    (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
+		mmput(mm);
+		err = -EPERM;
+		goto out;
+	}
+#endif
+
 	err = do_migrate_pages(mm, old, new,
 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 
diff -ruNp linux-3.13.11/mm/migrate.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/migrate.c
--- linux-3.13.11/mm/migrate.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/migrate.c	2014-07-09 12:00:15.000000000 +0200
@@ -1464,8 +1464,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
 	 */
 	tcred = __task_cred(task);
 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-	    !capable(CAP_SYS_NICE)) {
+	    !uid_eq(cred->uid,  tcred->suid) && !capable(CAP_SYS_NICE)) {
 		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
diff -ruNp linux-3.13.11/mm/mlock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mlock.c
--- linux-3.13.11/mm/mlock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mlock.c	2014-07-09 12:00:15.000000000 +0200
@@ -14,6 +14,7 @@
 #include <linux/pagevec.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
+#include <linux/security.h>
 #include <linux/sched.h>
 #include <linux/export.h>
 #include <linux/rmap.h>
@@ -588,7 +589,7 @@ static int do_mlock(unsigned long start,
 {
 	unsigned long nstart, end, tmp;
 	struct vm_area_struct * vma, * prev;
-	int error;
+	int error = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -597,6 +598,9 @@ static int do_mlock(unsigned long start,
 		return -EINVAL;
 	if (end == start)
 		return 0;
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	vma = find_vma(current->mm, start);
 	if (!vma || vma->vm_start > start)
 		return -ENOMEM;
@@ -608,6 +612,11 @@ static int do_mlock(unsigned long start,
 	for (nstart = start ; ; ) {
 		vm_flags_t newflags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
+			break;
+#endif
+
 		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
@@ -720,6 +729,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
 	lock_limit >>= PAGE_SHIFT;
 
 	/* check against resource limits */
+	gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
 		error = do_mlock(start, len, 1);
 	up_write(&current->mm->mmap_sem);
@@ -754,6 +764,11 @@ static int do_mlockall(int flags)
 	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
 		vm_flags_t newflags;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
+			break;
+#endif
+
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (flags & MCL_CURRENT)
 			newflags |= VM_LOCKED;
@@ -787,6 +802,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 	lock_limit >>= PAGE_SHIFT;
 
 	ret = -ENOMEM;
+	gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
 	    capable(CAP_IPC_LOCK))
 		ret = do_mlockall(flags);
diff -ruNp linux-3.13.11/mm/mmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mmap.c
--- linux-3.13.11/mm/mmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -36,6 +36,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/notifier.h>
 #include <linux/memory.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -52,6 +53,16 @@
 #define arch_rebalance_pgtables(addr, len)		(addr)
 #endif
 
+static inline void verify_mm_writelocked(struct mm_struct *mm)
+{
+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
+	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
+		up_read(&mm->mmap_sem);
+		BUG();
+	}
+#endif
+}
+
 static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struc
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
  *
  */
-pgprot_t protection_map[16] = {
+pgprot_t protection_map[16] __read_only = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 {
-	return __pgprot(pgprot_val(protection_map[vm_flags &
+	pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
 				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
 			pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+	if (!(__supported_pte_mask & _PAGE_NX) &&
+	    (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
+	    (vm_flags & (VM_READ | VM_WRITE)))
+		prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
+#endif
+
+	return prot;
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostl
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
 /*
  * Make sure vm_committed_as in one cacheline and not cacheline shared with
  * other variables. It can be updated by several CPUs frequently.
@@ -245,6 +266,7 @@ static struct vm_area_struct *remove_vma
 	struct vm_area_struct *next = vma->vm_next;
 
 	might_sleep();
+	BUG_ON(vma->vm_mirror);
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file)
@@ -289,6 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	 * not page aligned -Ram Gupta
 	 */
 	rlim = rlimit(RLIMIT_DATA);
+	gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
 	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
 			(mm->end_data - mm->start_data) > rlim)
 		goto out;
@@ -939,6 +962,12 @@ static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
+		return 0;
+#endif
+
 	if (is_mergeable_vma(vma, file, vm_flags) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
@@ -958,6 +987,12 @@ static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
+		return 0;
+#endif
+
 	if (is_mergeable_vma(vma, file, vm_flags) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
@@ -1000,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struc
 struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
-		     	struct anon_vma *anon_vma, struct file *file,
+			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
 	int err;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
+	struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
+
+	BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
+#endif
+
 	/*
 	 * We later require that vma->vm_flags == vm_flags,
 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
@@ -1022,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct
 	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
 		next = next->vm_next;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (prev)
+		prev_m = pax_find_mirror_vma(prev);
+	if (area)
+		area_m = pax_find_mirror_vma(area);
+	if (next)
+		next_m = pax_find_mirror_vma(next);
+#endif
+
 	/*
 	 * Can it merge with the predecessor?
 	 */
@@ -1041,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct
 							/* cases 1, 6 */
 			err = vma_adjust(prev, prev->vm_start,
 				next->vm_end, prev->vm_pgoff, NULL);
-		} else					/* cases 2, 5, 7 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+			if (!err && prev_m)
+				err = vma_adjust(prev_m, prev_m->vm_start,
+					next_m->vm_end, prev_m->vm_pgoff, NULL);
+#endif
+
+		} else {				/* cases 2, 5, 7 */
 			err = vma_adjust(prev, prev->vm_start,
 				end, prev->vm_pgoff, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+			if (!err && prev_m)
+				err = vma_adjust(prev_m, prev_m->vm_start,
+						end_m, prev_m->vm_pgoff, NULL);
+#endif
+
+		}
 		if (err)
 			return NULL;
 		khugepaged_enter_vma_merge(prev);
@@ -1057,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct
  			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					anon_vma, file, pgoff+pglen)) {
-		if (prev && addr < prev->vm_end)	/* case 4 */
+		if (prev && addr < prev->vm_end) {	/* case 4 */
 			err = vma_adjust(prev, prev->vm_start,
 				addr, prev->vm_pgoff, NULL);
-		else					/* cases 3, 8 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+			if (!err && prev_m)
+				err = vma_adjust(prev_m, prev_m->vm_start,
+						addr_m, prev_m->vm_pgoff, NULL);
+#endif
+
+		} else {				/* cases 3, 8 */
 			err = vma_adjust(area, addr, next->vm_end,
 				next->vm_pgoff - pglen, NULL);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+			if (!err && area_m)
+				err = vma_adjust(area_m, addr_m, next_m->vm_end,
+						next_m->vm_pgoff - pglen, NULL);
+#endif
+
+		}
 		if (err)
 			return NULL;
 		khugepaged_enter_vma_merge(area);
@@ -1171,8 +1252,10 @@ none:
 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 						struct file *file, long pages)
 {
-	const unsigned long stack_flags
-		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
 
 	mm->total_vm += pages;
 
@@ -1180,7 +1263,7 @@ void vm_stat_account(struct mm_struct *m
 		mm->shared_vm += pages;
 		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
 			mm->exec_vm += pages;
-	} else if (flags & stack_flags)
+	} else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
 		mm->stack_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
@@ -1218,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file
 	 * (the exception is when the underlying filesystem is noexec
 	 *  mounted, in which case we dont add PROT_EXEC.)
 	 */
-	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+	if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
 		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
 			prot |= PROT_EXEC;
 
@@ -1244,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
-	addr = get_unmapped_area(file, addr, len, pgoff, flags);
+	addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
 	if (addr & ~PAGE_MASK)
 		return addr;
 
@@ -1255,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
+#ifdef CONFIG_PAX_MPROTECT
+	if (mm->pax_flags & MF_PAX_MPROTECT) {
+
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
+		if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
+		    mm->binfmt->handle_mmap)
+			mm->binfmt->handle_mmap(file);
+#endif
+
+#ifndef CONFIG_PAX_MPROTECT_COMPAT
+		if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+			gr_log_rwxmmap(file);
+
+#ifdef CONFIG_PAX_EMUPLT
+			vm_flags &= ~VM_EXEC;
+#else
+			return -EPERM;
+#endif
+
+		}
+
+		if (!(vm_flags & VM_EXEC))
+			vm_flags &= ~VM_MAYEXEC;
+#else
+		if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
+			vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+		else
+			vm_flags &= ~VM_MAYWRITE;
+	}
+#endif
+
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
+		vm_flags &= ~VM_PAGEEXEC;
+#endif
+
 	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
@@ -1266,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file
 		locked += mm->locked_vm;
 		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			return -EAGAIN;
 	}
@@ -1350,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file
 			vm_flags |= VM_NORESERVE;
 	}
 
+	if (!gr_acl_handle_mmap(file, prot))
+		return -EACCES;
+	
 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
 	if (!IS_ERR_VALUE(addr) &&
 	    ((vm_flags & VM_LOCKED) ||
@@ -1443,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area
 	vm_flags_t vm_flags = vma->vm_flags;
 
 	/* If it was private or non-writable, the write bit is already clear */
-	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
 		return 0;
 
 	/* The backer wishes to know when pages are first written to? */
@@ -1489,7 +1613,22 @@ unsigned long mmap_region(struct file *f
 	struct rb_node **rb_link, *rb_parent;
 	unsigned long charged = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m = NULL;
+#endif
+
+	/*
+	 * mm->mmap_sem is required to protect against another thread
+	 * changing the mappings in case we sleep.
+	 */
+	verify_mm_writelocked(mm);
+
 	/* Check against address space limit. */
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
+#endif
+
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
 		unsigned long nr_pages;
 
@@ -1508,11 +1647,10 @@ unsigned long mmap_region(struct file *f
 
 	/* Clear old maps */
 	error = -ENOMEM;
-munmap_back:
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
 		if (do_munmap(mm, addr, len))
 			return -ENOMEM;
-		goto munmap_back;
+		BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
 	}
 
 	/*
@@ -1543,6 +1681,16 @@ munmap_back:
 		goto unacct_error;
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+		if (!vma_m) {
+			error = -ENOMEM;
+			goto free_vma;
+		}
+	}
+#endif
+
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
@@ -1562,6 +1710,13 @@ munmap_back:
 		if (error)
 			goto unmap_and_free_vma;
 
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
+			vma->vm_flags |= VM_PAGEEXEC;
+			vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		}
+#endif
+
 		/* Can addr have changed??
 		 *
 		 * Answer: Yes, several device drivers can do it in their
@@ -1595,6 +1750,12 @@ munmap_back:
 	}
 
 	vma_link(mm, vma, prev, rb_link, rb_parent);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m)
+		BUG_ON(pax_mirror_vma(vma_m, vma));
+#endif
+
 	/* Once vma denies write, undo our temporary denial count */
 	if (vm_flags & VM_DENYWRITE)
 		allow_write_access(file);
@@ -1603,6 +1764,7 @@ out:
 	perf_event_mmap(vma);
 
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+	track_exec_limit(mm, addr, addr + len, vm_flags);
 	if (vm_flags & VM_LOCKED) {
 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
 					vma == get_gate_vma(current->mm)))
@@ -1635,6 +1797,12 @@ unmap_and_free_vma:
 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
 	charged = 0;
 free_vma:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m)
+		kmem_cache_free(vm_area_cachep, vma_m);
+#endif
+
 	kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
 	if (charged)
@@ -1642,7 +1810,63 @@ unacct_error:
 	return error;
 }
 
-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
+{
+	if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
+		return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
+
+	return 0;
+}
+#endif
+
+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
+{
+	if (!vma) {
+#ifdef CONFIG_STACK_GROWSUP
+		if (addr > sysctl_heap_stack_gap)
+			vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
+		else
+			vma = find_vma(current->mm, 0);
+		if (vma && (vma->vm_flags & VM_GROWSUP))
+			return false;
+#endif
+		return true;
+	}
+
+	if (addr + len > vma->vm_start)
+		return false;
+
+	if (vma->vm_flags & VM_GROWSDOWN)
+		return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
+#ifdef CONFIG_STACK_GROWSUP
+	else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
+		return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
+#endif
+	else if (offset)
+		return offset <= vma->vm_start - addr - len;
+
+	return true;
+}
+
+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
+{
+	if (vma->vm_start < len)
+		return -ENOMEM;
+
+	if (!(vma->vm_flags & VM_GROWSDOWN)) {
+		if (offset <= vma->vm_start - len)
+			return vma->vm_start - len - offset;
+		else
+			return -ENOMEM;
+	}
+
+	if (sysctl_heap_stack_gap <= vma->vm_start - len)
+		return vma->vm_start - len - sysctl_heap_stack_gap;
+	return -ENOMEM;
+}
+
+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
 {
 	/*
 	 * We implement the search by looking for an rbtree node that
@@ -1690,11 +1914,29 @@ unsigned long unmapped_area(struct vm_un
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
 			return -ENOMEM;
+
+		if (gap_end - gap_start > info->threadstack_offset)
+			gap_start += info->threadstack_offset;
+		else
+			gap_start = gap_end;
+
+		if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
+			if (gap_end - gap_start > sysctl_heap_stack_gap)
+				gap_start += sysctl_heap_stack_gap;
+			else
+				gap_start = gap_end;
+		}
+		if (vma->vm_flags & VM_GROWSDOWN) {
+			if (gap_end - gap_start > sysctl_heap_stack_gap)
+				gap_end -= sysctl_heap_stack_gap;
+			else
+				gap_end = gap_start;
+		}
 		if (gap_end >= low_limit && gap_end - gap_start >= length)
 			goto found;
 
@@ -1744,7 +1986,7 @@ found:
 	return gap_start;
 }
 
-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
@@ -1798,6 +2040,24 @@ check_current:
 		gap_end = vma->vm_start;
 		if (gap_end < low_limit)
 			return -ENOMEM;
+
+		if (gap_end - gap_start > info->threadstack_offset)
+			gap_end -= info->threadstack_offset;
+		else
+			gap_end = gap_start;
+
+		if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
+			if (gap_end - gap_start > sysctl_heap_stack_gap)
+				gap_start += sysctl_heap_stack_gap;
+			else
+				gap_start = gap_end;
+		}
+		if (vma->vm_flags & VM_GROWSDOWN) {
+			if (gap_end - gap_start > sysctl_heap_stack_gap)
+				gap_end -= sysctl_heap_stack_gap;
+			else
+				gap_end = gap_start;
+		}
 		if (gap_start <= high_limit && gap_end - gap_start >= length)
 			goto found;
 
@@ -1861,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -1868,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp
 	if (flags & MAP_FIXED)
 		return addr;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -1881,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
 	info.align_mask = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 #endif	
@@ -1899,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct fi
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1907,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct fi
 	if (flags & MAP_FIXED)
 		return addr;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-				(!vma || addr + len <= vma->vm_start))
+				check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -1921,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct fi
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
 	info.align_mask = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -1933,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct fi
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			info.low_limit += mm->delta_mmap;
+#endif
+
 		info.high_limit = TASK_SIZE;
 		addr = vm_unmapped_area(&info);
 	}
@@ -2034,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsi
 	return vma;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_m;
+
+	BUG_ON(!vma || vma->vm_start >= vma->vm_end);
+	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
+		BUG_ON(vma->vm_mirror);
+		return NULL;
+	}
+	BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
+	vma_m = vma->vm_mirror;
+	BUG_ON(!vma_m || vma_m->vm_mirror != vma);
+	BUG_ON(vma->vm_file != vma_m->vm_file);
+	BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
+	BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
+	BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
+	BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
+	return vma_m;
+}
+#endif
+
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
@@ -2050,6 +2350,7 @@ static int acct_stack_growth(struct vm_a
 		return -ENOMEM;
 
 	/* Stack limit test */
+	gr_learn_resource(current, RLIMIT_STACK, size, 1);
 	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
@@ -2060,6 +2361,7 @@ static int acct_stack_growth(struct vm_a
 		locked = mm->locked_vm + grow;
 		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
 	}
@@ -2089,37 +2391,48 @@ static int acct_stack_growth(struct vm_a
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
+#ifndef CONFIG_IA64
+static
+#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
+	bool locknext;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
+	/* Also guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+1))
+		address = PAGE_ALIGN(address+1);
+	else
+		return -ENOMEM;
+
 	/*
 	 * We must make sure the anon_vma is allocated
 	 * so that the anon_vma locking is not a noop.
 	 */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
+	locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
+	if (locknext && anon_vma_prepare(vma->vm_next))
+		return -ENOMEM;
 	vma_lock_anon_vma(vma);
+	if (locknext)
+		vma_lock_anon_vma(vma->vm_next);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
-	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
+	 * anon_vma locks to serialize against concurrent expand_stacks
+	 * and expand_upwards.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
 	error = 0;
 
 	/* Somebody else might have raced and expanded it already */
-	if (address > vma->vm_end) {
+	if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
+		error = -ENOMEM;
+	else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
 		unsigned long size, grow;
 
 		size = address - vma->vm_start;
@@ -2154,6 +2467,8 @@ int expand_upwards(struct vm_area_struct
 			}
 		}
 	}
+	if (locknext)
+		vma_unlock_anon_vma(vma->vm_next);
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
 	validate_mm(vma->vm_mm);
@@ -2168,6 +2483,8 @@ int expand_downwards(struct vm_area_stru
 				   unsigned long address)
 {
 	int error;
+	bool lockprev = false;
+	struct vm_area_struct *prev;
 
 	/*
 	 * We must make sure the anon_vma is allocated
@@ -2181,6 +2498,15 @@ int expand_downwards(struct vm_area_stru
 	if (error)
 		return error;
 
+	prev = vma->vm_prev;
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
+	lockprev = prev && (prev->vm_flags & VM_GROWSUP);
+#endif
+	if (lockprev && anon_vma_prepare(prev))
+		return -ENOMEM;
+	if (lockprev)
+		vma_lock_anon_vma(prev);
+
 	vma_lock_anon_vma(vma);
 
 	/*
@@ -2190,9 +2516,17 @@ int expand_downwards(struct vm_area_stru
 	 */
 
 	/* Somebody else might have raced and expanded it already */
-	if (address < vma->vm_start) {
+	if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
+		error = -ENOMEM;
+	else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
 		unsigned long size, grow;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		struct vm_area_struct *vma_m;
+
+		vma_m = pax_find_mirror_vma(vma);
+#endif
+
 		size = vma->vm_end - address;
 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
@@ -2217,13 +2551,27 @@ int expand_downwards(struct vm_area_stru
 				vma->vm_pgoff -= grow;
 				anon_vma_interval_tree_post_update_vma(vma);
 				vma_gap_update(vma);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+				if (vma_m) {
+					anon_vma_interval_tree_pre_update_vma(vma_m);
+					vma_m->vm_start -= grow << PAGE_SHIFT;
+					vma_m->vm_pgoff -= grow;
+					anon_vma_interval_tree_post_update_vma(vma_m);
+					vma_gap_update(vma_m);
+				}
+#endif
+
 				spin_unlock(&vma->vm_mm->page_table_lock);
 
+				track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
 				perf_event_mmap(vma);
 			}
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	if (lockprev)
+		vma_unlock_anon_vma(prev);
 	khugepaged_enter_vma_merge(vma);
 	validate_mm(vma->vm_mm);
 	return error;
@@ -2321,6 +2669,13 @@ static void remove_vma_list(struct mm_st
 	do {
 		long nrpages = vma_pages(vma);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
+			vma = remove_vma(vma);
+			continue;
+		}
+#endif
+
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
@@ -2365,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_str
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
 	do {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (vma->vm_mirror) {
+			BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
+			vma->vm_mirror->vm_mirror = NULL;
+			vma->vm_mirror->vm_flags &= ~VM_EXEC;
+			vma->vm_mirror = NULL;
+		}
+#endif
+
 		vma_rb_erase(vma, &mm->mm_rb);
 		mm->map_count--;
 		tail_vma = vma;
@@ -2390,14 +2755,33 @@ static int __split_vma(struct mm_struct
 	struct vm_area_struct *new;
 	int err = -ENOMEM;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m, *new_m = NULL;
+	unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
+#endif
+
 	if (is_vm_hugetlb_page(vma) && (addr &
 					~(huge_page_mask(hstate_vma(vma)))))
 		return -EINVAL;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	vma_m = pax_find_mirror_vma(vma);
+#endif
+
 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!new)
 		goto out_err;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m) {
+		new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		if (!new_m) {
+			kmem_cache_free(vm_area_cachep, new);
+			goto out_err;
+		}
+	}
+#endif
+
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
 
@@ -2410,6 +2794,22 @@ static int __split_vma(struct mm_struct
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m) {
+		*new_m = *vma_m;
+		INIT_LIST_HEAD(&new_m->anon_vma_chain);
+		new_m->vm_mirror = new;
+		new->vm_mirror = new_m;
+
+		if (new_below)
+			new_m->vm_end = addr_m;
+		else {
+			new_m->vm_start = addr_m;
+			new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
+		}
+	}
+#endif
+
 	err = vma_dup_policy(vma, new);
 	if (err)
 		goto out_free_vma;
@@ -2429,6 +2829,38 @@ static int __split_vma(struct mm_struct
 	else
 		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (!err && vma_m) {
+		struct mempolicy *pol = vma_policy(new);
+
+		if (anon_vma_clone(new_m, vma_m))
+			goto out_free_mpol;
+
+		mpol_get(pol);
+		set_vma_policy(new_m, pol);
+
+		if (new_m->vm_file)
+			get_file(new_m->vm_file);
+
+		if (new_m->vm_ops && new_m->vm_ops->open)
+			new_m->vm_ops->open(new_m);
+
+		if (new_below)
+			err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
+				((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
+		else
+			err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
+
+		if (err) {
+			if (new_m->vm_ops && new_m->vm_ops->close)
+				new_m->vm_ops->close(new_m);
+			if (new_m->vm_file)
+				fput(new_m->vm_file);
+			mpol_put(pol);
+		}
+	}
+#endif
+
 	/* Success. */
 	if (!err)
 		return 0;
@@ -2438,10 +2870,18 @@ static int __split_vma(struct mm_struct
 		new->vm_ops->close(new);
 	if (new->vm_file)
 		fput(new->vm_file);
-	unlink_anon_vmas(new);
  out_free_mpol:
 	mpol_put(vma_policy(new));
  out_free_vma:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (new_m) {
+		unlink_anon_vmas(new_m);
+		kmem_cache_free(vm_area_cachep, new_m);
+	}
+#endif
+
+	unlink_anon_vmas(new);
 	kmem_cache_free(vm_area_cachep, new);
  out_err:
 	return err;
@@ -2454,6 +2894,15 @@ static int __split_vma(struct mm_struct
 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	      unsigned long addr, int new_below)
 {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC) {
+		BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
+		if (mm->map_count >= sysctl_max_map_count-1)
+			return -ENOMEM;
+	} else
+#endif
+
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
@@ -2465,11 +2914,30 @@ int split_vma(struct mm_struct *mm, stru
  * work.  This now handles partial unmappings.
  * Jeremy Fitzhardinge <jeremy@goop.org>
  */
+#ifdef CONFIG_PAX_SEGMEXEC
 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 {
+	int ret = __do_munmap(mm, start, len);
+	if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
+		return ret;
+
+	return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
+}
+
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+#else
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+#endif
+{
 	unsigned long end;
 	struct vm_area_struct *vma, *prev, *last;
 
+	/*
+	 * mm->mmap_sem is required to protect against another thread
+	 * changing the mappings in case we sleep.
+	 */
+	verify_mm_writelocked(mm);
+
 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
 		return -EINVAL;
 
@@ -2544,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsi
 	/* Fix up all other VM information */
 	remove_vma_list(mm, vma);
 
+	track_exec_limit(mm, start, end, 0UL);
+
 	return 0;
 }
 
@@ -2552,6 +3022,13 @@ int vm_munmap(unsigned long start, size_
 	int ret;
 	struct mm_struct *mm = current->mm;
 
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
+	    (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
+		return -EINVAL;
+#endif
+
 	down_write(&mm->mmap_sem);
 	ret = do_munmap(mm, start, len);
 	up_write(&mm->mmap_sem);
@@ -2565,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
 	return vm_munmap(addr, len);
 }
 
-static inline void verify_mm_writelocked(struct mm_struct *mm)
-{
-#ifdef CONFIG_DEBUG_VM
-	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
-		WARN_ON(1);
-		up_read(&mm->mmap_sem);
-	}
-#endif
-}
-
 /*
  *  this is really a simplified "do_mmap".  it only handles
  *  anonymous maps.  eventually we may be able to do some
@@ -2588,6 +3055,7 @@ static unsigned long do_brk(unsigned lon
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
 	int error;
+	unsigned long charged;
 
 	len = PAGE_ALIGN(len);
 	if (!len)
@@ -2595,16 +3063,30 @@ static unsigned long do_brk(unsigned lon
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
+	if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+		flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+		if (mm->pax_flags & MF_PAX_MPROTECT)
+			flags &= ~VM_MAYEXEC;
+#endif
+
+	}
+#endif
+
 	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
 	if (error & ~PAGE_MASK)
 		return error;
 
+	charged = len >> PAGE_SHIFT;
+
 	/*
 	 * mlock MCL_FUTURE?
 	 */
 	if (mm->def_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
-		locked = len >> PAGE_SHIFT;
+		locked = charged;
 		locked += mm->locked_vm;
 		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
@@ -2621,21 +3103,20 @@ static unsigned long do_brk(unsigned lon
 	/*
 	 * Clear old maps.  this also does some error checking for us
 	 */
- munmap_back:
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
 		if (do_munmap(mm, addr, len))
 			return -ENOMEM;
-		goto munmap_back;
+		BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
 	}
 
 	/* Check against address space limits *after* clearing old maps... */
-	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
+	if (!may_expand_vm(mm, charged))
 		return -ENOMEM;
 
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
+	if (security_vm_enough_memory_mm(mm, charged))
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
@@ -2649,7 +3130,7 @@ static unsigned long do_brk(unsigned lon
 	 */
 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
-		vm_unacct_memory(len >> PAGE_SHIFT);
+		vm_unacct_memory(charged);
 		return -ENOMEM;
 	}
 
@@ -2663,10 +3144,11 @@ static unsigned long do_brk(unsigned lon
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	perf_event_mmap(vma);
-	mm->total_vm += len >> PAGE_SHIFT;
+	mm->total_vm += charged;
 	if (flags & VM_LOCKED)
-		mm->locked_vm += (len >> PAGE_SHIFT);
+		mm->locked_vm += charged;
 	vma->vm_flags |= VM_SOFTDIRTY;
+	track_exec_limit(mm, addr, addr + len, flags);
 	return addr;
 }
 
@@ -2728,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
 	while (vma) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
+		vma->vm_mirror = NULL;
 		vma = remove_vma(vma);
 	}
 	vm_unacct_memory(nr_accounted);
@@ -2745,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *m
 	struct vm_area_struct *prev;
 	struct rb_node **rb_link, *rb_parent;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m = NULL;
+#endif
+
+	if (security_mmap_addr(vma->vm_start))
+		return -EPERM;
+
 	/*
 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
 	 * until its first write fault, when page's anon_vma and index
@@ -2768,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *m
 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
 		return -ENOMEM;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+		if (!vma_m)
+			return -ENOMEM;
+	}
+#endif
+
 	vma_link(mm, vma, prev, rb_link, rb_parent);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (vma_m)
+		BUG_ON(pax_mirror_vma(vma_m, vma));
+#endif
+
 	return 0;
 }
 
@@ -2787,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct v
 	struct rb_node **rb_link, *rb_parent;
 	bool faulted_in_anon_vma = true;
 
+	BUG_ON(vma->vm_mirror);
+
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
@@ -2851,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct v
 	return NULL;
 }
 
+#ifdef CONFIG_PAX_SEGMEXEC
+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
+{
+	struct vm_area_struct *prev_m;
+	struct rb_node **rb_link_m, *rb_parent_m;
+	struct mempolicy *pol_m;
+
+	BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
+	BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
+	BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
+	*vma_m = *vma;
+	INIT_LIST_HEAD(&vma_m->anon_vma_chain);
+	if (anon_vma_clone(vma_m, vma))
+		return -ENOMEM;
+	pol_m = vma_policy(vma_m);
+	mpol_get(pol_m);
+	set_vma_policy(vma_m, pol_m);
+	vma_m->vm_start += SEGMEXEC_TASK_SIZE;
+	vma_m->vm_end += SEGMEXEC_TASK_SIZE;
+	vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
+	vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
+	if (vma_m->vm_file)
+		get_file(vma_m->vm_file);
+	if (vma_m->vm_ops && vma_m->vm_ops->open)
+		vma_m->vm_ops->open(vma_m);
+	BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
+	vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
+	vma_m->vm_mirror = vma;
+	vma->vm_mirror = vma_m;
+	return 0;
+}
+#endif
+
 /*
  * Return true if the calling process may expand its vm space by the passed
  * number of pages
@@ -2862,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm,
 
 	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
 
+	gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
 	if (cur + npages > lim)
 		return 0;
 	return 1;
@@ -2932,6 +3472,22 @@ int install_special_mapping(struct mm_st
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 
+#ifdef CONFIG_PAX_MPROTECT
+	if (mm->pax_flags & MF_PAX_MPROTECT) {
+#ifndef CONFIG_PAX_MPROTECT_COMPAT
+		if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
+			return -EPERM;
+		if (!(vm_flags & VM_EXEC))
+			vm_flags &= ~VM_MAYEXEC;
+#else
+		if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
+			vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
+#endif
+		else
+			vm_flags &= ~VM_MAYWRITE;
+	}
+#endif
+
 	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
diff -ruNp linux-3.13.11/mm/mprotect.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mprotect.c
--- linux-3.13.11/mm/mprotect.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mprotect.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,10 +23,18 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/perf_event.h>
+#include <linux/sched/sysctl.h>
+
+#ifdef CONFIG_PAX_MPROTECT
+#include <linux/elf.h>
+#include <linux/binfmts.h>
+#endif
+
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 #ifndef pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
@@ -222,6 +230,48 @@ unsigned long change_protection(struct v
 	return pages;
 }
 
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+/* called while holding the mmap semaphore for writing except stack expansion */
+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
+{
+	unsigned long oldlimit, newlimit = 0UL;
+
+	if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
+		return;
+
+	spin_lock(&mm->page_table_lock);
+	oldlimit = mm->context.user_cs_limit;
+	if ((prot & VM_EXEC) && oldlimit < end)
+		/* USER_CS limit moved up */
+		newlimit = end;
+	else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
+		/* USER_CS limit moved down */
+		newlimit = start;
+
+	if (newlimit) {
+		mm->context.user_cs_limit = newlimit;
+
+#ifdef CONFIG_SMP
+		wmb();
+		cpus_clear(mm->context.cpu_user_cs_mask);
+		cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
+#endif
+
+		set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (newlimit == end) {
+		struct vm_area_struct *vma = find_vma(mm, oldlimit);
+
+		for (; vma && vma->vm_start < end; vma = vma->vm_next)
+			if (is_vm_hugetlb_page(vma))
+				hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
+			else
+				change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
+	}
+}
+#endif
+
 int
 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
@@ -234,11 +284,29 @@ mprotect_fixup(struct vm_area_struct *vm
 	int error;
 	int dirty_accountable = 0;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct vm_area_struct *vma_m = NULL;
+	unsigned long start_m, end_m;
+
+	start_m = start + SEGMEXEC_TASK_SIZE;
+	end_m = end + SEGMEXEC_TASK_SIZE;
+#endif
+
 	if (newflags == oldflags) {
 		*pprev = vma;
 		return 0;
 	}
 
+	if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
+		struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
+
+		if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
+			return -ENOMEM;
+
+		if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
+			return -ENOMEM;
+	}
+
 	/*
 	 * If we make a private mapping writable we increase our commit;
 	 * but (without finer accounting) cannot reduce our commit if we
@@ -255,6 +323,42 @@ mprotect_fixup(struct vm_area_struct *vm
 		}
 	}
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
+		if (start != vma->vm_start) {
+			error = split_vma(mm, vma, start, 1);
+			if (error)
+				goto fail;
+			BUG_ON(!*pprev || (*pprev)->vm_next == vma);
+			*pprev = (*pprev)->vm_next;
+		}
+
+		if (end != vma->vm_end) {
+			error = split_vma(mm, vma, end, 0);
+			if (error)
+				goto fail;
+		}
+
+		if (pax_find_mirror_vma(vma)) {
+			error = __do_munmap(mm, start_m, end_m - start_m);
+			if (error)
+				goto fail;
+		} else {
+			vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+			if (!vma_m) {
+				error = -ENOMEM;
+				goto fail;
+			}
+			vma->vm_flags = newflags;
+			error = pax_mirror_vma(vma_m, vma);
+			if (error) {
+				vma->vm_flags = oldflags;
+				goto fail;
+			}
+		}
+	}
+#endif
+
 	/*
 	 * First try to merge with previous and/or next vma.
 	 */
@@ -285,9 +389,21 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
+		pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
+#endif
+
 	vma->vm_flags = newflags;
+
+#ifdef CONFIG_PAX_MPROTECT
+	if (mm->binfmt && mm->binfmt->handle_mprotect)
+		mm->binfmt->handle_mprotect(vma, newflags);
+#endif
+
 	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
-					  vm_get_page_prot(newflags));
+					  vm_get_page_prot(vma->vm_flags));
 
 	if (vma_wants_writenotify(vma)) {
 		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
@@ -326,6 +442,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	end = start + len;
 	if (end <= start)
 		return -ENOMEM;
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
+		if (end > SEGMEXEC_TASK_SIZE)
+			return -EINVAL;
+	} else
+#endif
+
+	if (end > TASK_SIZE)
+		return -EINVAL;
+
 	if (!arch_validate_prot(prot))
 		return -EINVAL;
 
@@ -333,7 +460,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC:
 	 */
-	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+	if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
 		prot |= PROT_EXEC;
 
 	vm_flags = calc_vm_prot_bits(prot);
@@ -365,6 +492,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	if (start > vma->vm_start)
 		prev = vma;
 
+#ifdef CONFIG_PAX_MPROTECT
+	if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
+		current->mm->binfmt->handle_mprotect(vma, vm_flags);
+#endif
+
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
@@ -375,6 +507,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
 		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+			if (prot & (PROT_WRITE | PROT_EXEC))
+				gr_log_rwxmprotect(vma);
+
+			error = -EACCES;
+			goto out;
+		}
+
+		if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
 			error = -EACCES;
 			goto out;
 		}
@@ -389,6 +529,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
 		if (error)
 			goto out;
+
+		track_exec_limit(current->mm, nstart, tmp, vm_flags);
+
 		nstart = tmp;
 
 		if (nstart < prev->vm_end)
diff -ruNp linux-3.13.11/mm/mremap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mremap.c
--- linux-3.13.11/mm/mremap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/mremap.c	2014-07-09 12:00:15.000000000 +0200
@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_str
 			continue;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
+		if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
+			pte = pte_exprotect(pte);
+#endif
+
 		pte = move_soft_dirty_pte(pte);
 		set_pte_at(mm, new_addr, new_pte, pte);
 	}
@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_res
 	if (is_vm_hugetlb_page(vma))
 		goto Einval;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (pax_find_mirror_vma(vma))
+		goto Einval;
+#endif
+
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto Efault;
@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
 	unsigned long map_flags;
+	unsigned long pax_task_size = TASK_SIZE;
 
 	if (new_addr & ~PAGE_MASK)
 		goto out;
 
-	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	pax_task_size -= PAGE_SIZE;
+
+	if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
 		goto out;
 
 	/* Check if the location we're moving into overlaps the
 	 * old location at all, and fail if it does.
 	 */
-	if ((new_addr <= addr) && (new_addr+new_len) > addr)
-		goto out;
-
-	if ((addr <= new_addr) && (addr+old_len) > new_addr)
+	if (addr + old_len > new_addr && new_addr + new_len > addr)
 		goto out;
 
 	ret = do_munmap(mm, new_addr, new_len);
@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
 	unsigned long ret = -EINVAL;
 	unsigned long charged = 0;
 	bool locked = false;
+	unsigned long pax_task_size = TASK_SIZE;
 
 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
 		return ret;
@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
 	if (!new_len)
 		return ret;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
+		pax_task_size = SEGMEXEC_TASK_SIZE;
+#endif
+
+	pax_task_size -= PAGE_SIZE;
+
+	if (new_len > pax_task_size || addr > pax_task_size-new_len ||
+	    old_len > pax_task_size || addr > pax_task_size-old_len)
+		return ret;
+
 	down_write(&current->mm->mmap_sem);
 
 	if (flags & MREMAP_FIXED) {
@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
 				new_addr = addr;
 			}
 			ret = addr;
+			track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
 			goto out;
 		}
 	}
@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, a
 			goto out;
 		}
 
+		map_flags = vma->vm_flags;
 		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
+		if (!(ret & ~PAGE_MASK)) {
+			track_exec_limit(current->mm, addr, addr + old_len, 0UL);
+			track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
+		}
 	}
 out:
 	if (ret & ~PAGE_MASK)
diff -ruNp linux-3.13.11/mm/nommu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/nommu.c
--- linux-3.13.11/mm/nommu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/nommu.c	2014-07-09 12:00:15.000000000 +0200
@@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_M
 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
-int heap_stack_gap = 0;
 
 atomic_long_t mmap_pages_allocated;
 
@@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct m
 EXPORT_SYMBOL(find_vma);
 
 /*
- * find a VMA
- * - we don't extend stack VMAs under NOMMU conditions
- */
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
-{
-	return find_vma(mm, addr);
-}
-
-/*
  * expand a stack to a given address
  * - not supported under NOMMU conditions
  */
@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
 
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
+	INIT_LIST_HEAD(&new->anon_vma_chain);
 	*region = *vma->vm_region;
 	new->vm_region = region;
 
@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_a
 }
 EXPORT_SYMBOL(generic_file_remap_pages);
 
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, size_t len, int write)
 {
 	struct vm_area_struct *vma;
 
@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct tas
  *
  * The caller must hold a reference on @mm.
  */
-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, size_t len, int write)
 {
 	return __access_remote_vm(NULL, mm, addr, buf, len, write);
 }
@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *m
  * Access another process' address space.
  * - source/target buffer must be kernel space
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
 {
 	struct mm_struct *mm;
 
diff -ruNp linux-3.13.11/mm/oom_kill.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/oom_kill.c
--- linux-3.13.11/mm/oom_kill.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/oom_kill.c	2014-07-09 12:00:15.000000000 +0200
@@ -35,6 +35,8 @@
 #include <linux/freezer.h>
 #include <linux/ftrace.h>
 #include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <linux/vs_context.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/oom.h>
@@ -113,11 +115,18 @@ struct task_struct *find_lock_task_mm(st
 static bool oom_unkillable_task(struct task_struct *p,
 		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
-	if (is_global_init(p))
+	unsigned xid = vx_current_xid();
+
+	/* skip the init task, global and per guest */
+	if (task_is_init(p))
 		return true;
 	if (p->flags & PF_KTHREAD)
 		return true;
 
+	/* skip other guest and host processes if oom in guest */
+	if (xid && vx_task_xid(p) != xid)
+		return true;
+
 	/* When mem_cgroup_out_of_memory() and p is not member of the group */
 	if (memcg && !task_in_mem_cgroup(p, memcg))
 		return true;
@@ -426,8 +435,8 @@ void oom_kill_process(struct task_struct
 		dump_header(p, gfp_mask, order, memcg, nodemask);
 
 	task_lock(p);
-	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
-		message, task_pid_nr(p), p->comm, points);
+	pr_err("%s: Kill process %d:#%u (%s) score %d or sacrifice child\n",
+		message, task_pid_nr(p), p->xid, p->comm, points);
 	task_unlock(p);
 
 	/*
@@ -472,8 +481,8 @@ void oom_kill_process(struct task_struct
 
 	/* mm cannot safely be dereferenced after task_unlock(victim) */
 	mm = victim->mm;
-	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
-		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
+	pr_err("Killed process %d:#%u (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+		task_pid_nr(victim), victim->xid, victim->comm, K(victim->mm->total_vm),
 		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
 	task_unlock(victim);
@@ -543,6 +552,8 @@ int unregister_oom_notifier(struct notif
 }
 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 
+long vs_oom_action(unsigned int);
+
 /*
  * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
  * if a parallel OOM killing is already taking place that includes a zone in
@@ -655,7 +666,12 @@ void out_of_memory(struct zonelist *zone
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
-		panic("Out of memory and no killable processes...\n");
+
+		/* avoid panic for guest OOM */
+		if (vx_current_xid())
+			vs_oom_action(LINUX_REBOOT_CMD_OOM);
+		else
+			panic("Out of memory and no killable processes...\n");
 	}
 	if (p != (void *)-1UL) {
 		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
diff -ruNp linux-3.13.11/mm/page-writeback.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page-writeback.c
--- linux-3.13.11/mm/page-writeback.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page-writeback.c	2014-07-09 12:00:15.000000000 +0200
@@ -685,7 +685,7 @@ static inline long long pos_ratio_polyno
  *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
  * - the bdi dirty thresh drops quickly due to change of JBOD workload
  */
-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
 					unsigned long thresh,
 					unsigned long bg_thresh,
 					unsigned long dirty,
diff -ruNp linux-3.13.11/mm/page_alloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page_alloc.c
--- linux-3.13.11/mm/page_alloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page_alloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,6 +61,9 @@
 #include <linux/page-debug-flags.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/random.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -354,7 +357,7 @@ out:
  * This usage means that zero-order pages may not be compound.
  */
 
-static void free_compound_page(struct page *page)
+void free_compound_page(struct page *page)
 {
 	__free_pages_ok(page, compound_order(page));
 }
@@ -714,6 +717,10 @@ static bool free_pages_prepare(struct pa
 	int i;
 	int bad = 0;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	unsigned long index = 1UL << order;
+#endif
+
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
 
@@ -730,6 +737,12 @@ static bool free_pages_prepare(struct pa
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	for (; index; --index)
+		sanitize_highpage(page + index - 1);
+#endif
+
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
@@ -752,6 +765,20 @@ static void __free_pages_ok(struct page
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PAX_LATENT_ENTROPY
+bool __meminitdata extra_latent_entropy;
+
+static int __init setup_pax_extra_latent_entropy(char *str)
+{
+	extra_latent_entropy = true;
+	return 0;
+}
+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
+
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
@@ -767,6 +794,19 @@ void __init __free_pages_bootmem(struct
 	__ClearPageReserved(p);
 	set_page_count(p, 0);
 
+#ifdef CONFIG_PAX_LATENT_ENTROPY
+	if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
+		u64 hash = 0;
+		size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
+		const u64 *data = lowmem_page_address(page);
+
+		for (index = 0; index < end; index++)
+			hash ^= hash + data[index];
+		latent_entropy ^= hash;
+		add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
+	}
+#endif
+
 	page_zone(page)->managed_pages += nr_pages;
 	set_page_refcounted(page);
 	__free_pages(page, order);
@@ -872,8 +912,10 @@ static int prep_new_page(struct page *pa
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 
+#ifndef CONFIG_PAX_MEMORY_SANITIZE
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);
+#endif
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
@@ -2983,6 +3025,9 @@ void si_meminfo(struct sysinfo *val)
 	val->totalhigh = totalhigh_pages;
 	val->freehigh = nr_free_highpages();
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 
 EXPORT_SYMBOL(si_meminfo);
@@ -3007,6 +3052,9 @@ void si_meminfo_node(struct sysinfo *val
 	val->freehigh = 0;
 #endif
 	val->mem_unit = PAGE_SIZE;
+
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_meminfo(val);
 }
 #endif
 
diff -ruNp linux-3.13.11/mm/page_io.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page_io.c
--- linux-3.13.11/mm/page_io.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/page_io.c	2014-07-09 12:00:15.000000000 +0200
@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page,
 		struct file *swap_file = sis->swap_file;
 		struct address_space *mapping = swap_file->f_mapping;
 		struct iovec iov = {
-			.iov_base = kmap(page),
+			.iov_base = (void __force_user *)kmap(page),
 			.iov_len  = PAGE_SIZE,
 		};
 
diff -ruNp linux-3.13.11/mm/percpu.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/percpu.c
--- linux-3.13.11/mm/percpu.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/percpu.c	2014-07-09 12:00:15.000000000 +0200
@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __
 static unsigned int pcpu_high_unit_cpu __read_mostly;
 
 /* the address of the first chunk which starts with the kernel static area */
-void *pcpu_base_addr __read_mostly;
+void *pcpu_base_addr __read_only;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
 static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
diff -ruNp linux-3.13.11/mm/pgtable-generic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/pgtable-generic.c
--- linux-3.13.11/mm/pgtable-generic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/pgtable-generic.c	2014-07-09 12:00:15.000000000 +0200
@@ -6,6 +6,8 @@
  *  Copyright (C) 2010  Linus Torvalds
  */
 
+#include <linux/mm.h>
+
 #include <linux/pagemap.h>
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>
diff -ruNp linux-3.13.11/mm/process_vm_access.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/process_vm_access.c
--- linux-3.13.11/mm/process_vm_access.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/process_vm_access.c	2014-07-09 12:00:15.000000000 +0200
@@ -13,6 +13,7 @@
 #include <linux/uio.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
+#include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t
 	size_t iov_l_curr_offset = 0;
 	ssize_t iov_len;
 
+	return -ENOSYS; // PaX: until properly audited
+
 	/*
 	 * Work out how many pages of struct pages we're going to need
 	 * when eventually calling get_user_pages
 	 */
 	for (i = 0; i < riovcnt; i++) {
 		iov_len = rvec[i].iov_len;
-		if (iov_len > 0) {
-			nr_pages_iov = ((unsigned long)rvec[i].iov_base
-					+ iov_len)
-				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
-				/ PAGE_SIZE + 1;
-			nr_pages = max(nr_pages, nr_pages_iov);
-		}
+		if (iov_len <= 0)
+			continue;
+		nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
+				(unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
+		nr_pages = max(nr_pages, nr_pages_iov);
 	}
 
 	if (nr_pages == 0)
@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t
 		goto free_proc_pages;
 	}
 
+	if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
+		rc = -EPERM;
+		goto put_task_struct;
+	}
+
 	mm = mm_access(task, PTRACE_MODE_ATTACH);
 	if (!mm || IS_ERR(mm)) {
 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff -ruNp linux-3.13.11/mm/rmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/rmap.c
--- linux-3.13.11/mm/rmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/rmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_stru
 	struct anon_vma *anon_vma = vma->anon_vma;
 	struct anon_vma_chain *avc;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+	struct anon_vma_chain *avc_m = NULL;
+#endif
+
 	might_sleep();
 	if (unlikely(!anon_vma)) {
 		struct mm_struct *mm = vma->vm_mm;
@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_stru
 		if (!avc)
 			goto out_enomem;
 
+#ifdef CONFIG_PAX_SEGMEXEC
+		avc_m = anon_vma_chain_alloc(GFP_KERNEL);
+		if (!avc_m)
+			goto out_enomem_free_avc;
+#endif
+
 		anon_vma = find_mergeable_anon_vma(vma);
 		allocated = NULL;
 		if (!anon_vma) {
@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_stru
 		/* page_table_lock to protect against threads */
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
+
+#ifdef CONFIG_PAX_SEGMEXEC
+			struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
+
+			if (vma_m) {
+				BUG_ON(vma_m->anon_vma);
+				vma_m->anon_vma = anon_vma;
+				anon_vma_chain_link(vma_m, avc_m, anon_vma);
+				avc_m = NULL;
+			}
+#endif
+
 			vma->anon_vma = anon_vma;
 			anon_vma_chain_link(vma, avc, anon_vma);
 			allocated = NULL;
@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_stru
 
 		if (unlikely(allocated))
 			put_anon_vma(allocated);
+
+#ifdef CONFIG_PAX_SEGMEXEC
+		if (unlikely(avc_m))
+			anon_vma_chain_free(avc_m);
+#endif
+
 		if (unlikely(avc))
 			anon_vma_chain_free(avc);
 	}
 	return 0;
 
  out_enomem_free_avc:
+
+#ifdef CONFIG_PAX_SEGMEXEC
+	if (avc_m)
+		anon_vma_chain_free(avc_m);
+#endif
+
 	anon_vma_chain_free(avc);
  out_enomem:
 	return -ENOMEM;
@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
  */
-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
 {
 	struct anon_vma_chain *avc, *pavc;
 	struct anon_vma *root = NULL;
@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct
  * the corresponding VMA in the parent process is attached to.
  * Returns 0 on success, non-zero on failure.
  */
-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
 {
 	struct anon_vma_chain *avc;
 	struct anon_vma *anon_vma;
@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
 void __init anon_vma_init(void)
 {
 	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
-			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
-	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
+			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
+			anon_vma_ctor);
+	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
+			SLAB_PANIC|SLAB_NO_SANITIZE);
 }
 
 /*
diff -ruNp linux-3.13.11/mm/shmem.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/shmem.c
--- linux-3.13.11/mm/shmem.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/shmem.c	2014-07-09 12:00:15.000000000 +0200
@@ -33,7 +33,7 @@
 #include <linux/swap.h>
 #include <linux/aio.h>
 
-static struct vfsmount *shm_mnt;
+struct vfsmount *shm_mnt;
 
 #ifdef CONFIG_SHMEM
 /*
@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
 #define BOGO_DIRENT_SIZE 20
 
 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
-#define SHORT_SYMLINK_LEN 128
+#define SHORT_SYMLINK_LEN 64
 
 /*
  * shmem_fallocate and shmem_writepage communicate via inode->i_private
@@ -1909,7 +1909,7 @@ static int shmem_statfs(struct dentry *d
 {
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
-	buf->f_type = TMPFS_MAGIC;
+	buf->f_type = TMPFS_SUPER_MAGIC;
 	buf->f_bsize = PAGE_CACHE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	if (sbinfo->max_blocks) {
@@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem
 static int shmem_xattr_validate(const char *name)
 {
 	struct { const char *prefix; size_t len; } arr[] = {
+
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+		{ XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+#endif
+
 		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
 		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
 	};
@@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry
 	if (err)
 		return err;
 
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
+		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
+			return -EOPNOTSUPP;
+		if (size > 8)
+			return -EINVAL;
+	}
+#endif
+
 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
 }
 
@@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block
 	int err = -ENOMEM;
 
 	/* Round up to L1_CACHE_BYTES to resist false sharing */
-	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
-				L1_CACHE_BYTES), GFP_KERNEL);
+	sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
 	if (!sbinfo)
 		return -ENOMEM;
 
@@ -2639,7 +2652,7 @@ int shmem_fill_super(struct super_block
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = TMPFS_MAGIC;
+	sb->s_magic = TMPFS_SUPER_MAGIC;
 	sb->s_op = &shmem_ops;
 	sb->s_time_gran = 1;
 #ifdef CONFIG_TMPFS_XATTR
diff -ruNp linux-3.13.11/mm/slab.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab.c
--- linux-3.13.11/mm/slab.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab.c	2014-07-09 12:00:15.000000000 +0200
@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct
 		if ((x)->max_freeable < i)				\
 			(x)->max_freeable = i;				\
 	} while (0)
-#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
-#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
-#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
-#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
+#define STATS_INC_ALLOCHIT(x)	atomic_inc_unchecked(&(x)->allochit)
+#define STATS_INC_ALLOCMISS(x)	atomic_inc_unchecked(&(x)->allocmiss)
+#define STATS_INC_FREEHIT(x)	atomic_inc_unchecked(&(x)->freehit)
+#define STATS_INC_FREEMISS(x)	atomic_inc_unchecked(&(x)->freemiss)
+#define STATS_INC_SANITIZED(x)	atomic_inc_unchecked(&(x)->sanitized)
+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
 #else
 #define	STATS_INC_ACTIVE(x)	do { } while (0)
 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
@@ -320,8 +322,12 @@ static void kmem_cache_node_init(struct
 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
 #define STATS_INC_FREEHIT(x)	do { } while (0)
 #define STATS_INC_FREEMISS(x)	do { } while (0)
+#define STATS_INC_SANITIZED(x)	do { } while (0)
+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
 #endif
 
+#include "slab_vs.h"
+
 #if DEBUG
 
 /*
@@ -403,7 +409,7 @@ static inline void *index_to_obj(struct
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct page *page, void *obj)
+					const struct page *page, const void *obj)
 {
 	u32 offset = (obj - page->s_mem);
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
@@ -1489,12 +1495,12 @@ void __init kmem_cache_init(void)
 	 */
 
 	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
-					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
+					kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
 
 	if (INDEX_AC != INDEX_NODE)
 		kmalloc_caches[INDEX_NODE] =
 			create_kmalloc_cache("kmalloc-node",
-				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+				kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
 
 	slab_early_init = 0;
 
@@ -3240,6 +3246,7 @@ slab_alloc_node(struct kmem_cache *cache
 	/* ___cache_alloc_node can fall back to other nodes */
 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
   out:
+	vx_slab_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
@@ -3428,8 +3435,24 @@ static inline void __cache_free(struct k
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (pax_sanitize_slab) {
+		if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
+			memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
+
+			if (cachep->ctor)
+				cachep->ctor(objp);
+
+			STATS_INC_SANITIZED(cachep);
+		} else
+			STATS_INC_NOT_SANITIZED(cachep);
+	}
+#endif
+
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
+	vx_slab_free(cachep);
 
 	kmemcheck_slab_free(cachep, objp, cachep->object_size);
 
@@ -3656,6 +3679,7 @@ void kfree(const void *objp)
 
 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
+	VM_BUG_ON(!virt_addr_valid(objp));
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
@@ -4097,14 +4121,22 @@ void slabinfo_show_stats(struct seq_file
 	}
 	/* cpu stats */
 	{
-		unsigned long allochit = atomic_read(&cachep->allochit);
-		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
-		unsigned long freehit = atomic_read(&cachep->freehit);
-		unsigned long freemiss = atomic_read(&cachep->freemiss);
+		unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
+		unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
+		unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
+		unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
 
 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
 			   allochit, allocmiss, freehit, freemiss);
 	}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	{
+		unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
+		unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
+
+		seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
+	}
+#endif
 #endif
 }
 
@@ -4334,13 +4366,69 @@ static const struct file_operations proc
 static int __init slab_proc_init(void)
 {
 #ifdef CONFIG_DEBUG_SLAB_LEAK
-	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
+	proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
 #endif
 	return 0;
 }
 module_init(slab_proc_init);
 #endif
 
+bool is_usercopy_object(const void *ptr)
+{
+	struct page *page;
+	struct kmem_cache *cachep;
+
+	if (ZERO_OR_NULL_PTR(ptr))
+		return false;
+
+	if (!slab_is_available())
+		return false;
+
+	if (!virt_addr_valid(ptr))
+		return false;
+
+	page = virt_to_head_page(ptr);
+
+	if (!PageSlab(page))
+		return false;
+
+	cachep = page->slab_cache;
+	return cachep->flags & SLAB_USERCOPY;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+	struct page *page;
+	struct kmem_cache *cachep;
+	unsigned int objnr;
+	unsigned long offset;
+
+	if (ZERO_OR_NULL_PTR(ptr))
+		return "<null>";
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	if (!PageSlab(page))
+		return NULL;
+
+	cachep = page->slab_cache;
+	if (!(cachep->flags & SLAB_USERCOPY))
+		return cachep->name;
+
+	objnr = obj_to_index(cachep, page, ptr);
+	BUG_ON(objnr >= cachep->num);
+	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+		return NULL;
+
+	return cachep->name;
+}
+#endif
+
 /**
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
diff -ruNp linux-3.13.11/mm/slab.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab.h
--- linux-3.13.11/mm/slab.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab.h	2014-07-09 12:00:15.000000000 +0200
@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
 /* The slab cache that manages slab cache information */
 extern struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#ifdef CONFIG_X86_64
+#define PAX_MEMORY_SANITIZE_VALUE	'\xfe'
+#else
+#define PAX_MEMORY_SANITIZE_VALUE	'\xff'
+#endif
+extern bool pax_sanitize_slab;
+#endif
+
 unsigned long calculate_alignment(unsigned long flags,
 		unsigned long align, unsigned long size);
 
@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *me
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
+			 SLAB_USERCOPY | SLAB_NO_SANITIZE)
 
 #if defined(CONFIG_DEBUG_SLAB)
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -233,6 +243,9 @@ static inline struct kmem_cache *cache_f
 		return s;
 
 	page = virt_to_head_page(x);
+
+	BUG_ON(!PageSlab(page));
+
 	cachep = page->slab_cache;
 	if (slab_equal_or_root(cachep, s))
 		return cachep;
diff -ruNp linux-3.13.11/mm/slab_common.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab_common.c
--- linux-3.13.11/mm/slab_common.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab_common.c	2014-07-09 12:00:15.000000000 +0200
@@ -23,11 +23,22 @@
 
 #include "slab.h"
 
-enum slab_state slab_state;
+enum slab_state slab_state __read_only;
 LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+bool pax_sanitize_slab __read_only = true;
+static int __init pax_sanitize_slab_setup(char *str)
+{
+	pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
+	printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
+	return 1;
+}
+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
+#endif
+
 #ifdef CONFIG_DEBUG_VM
 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
 				   size_t size)
@@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgrou
 
 		err = __kmem_cache_create(s, flags);
 		if (!err) {
-			s->refcount = 1;
+			atomic_set(&s->refcount, 1);
 			list_add(&s->list, &slab_caches);
 			memcg_cache_list_add(memcg, s);
 		} else {
@@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cach
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
+	if (atomic_dec_and_test(&s->refcount)) {
 		list_del(&s->list);
 
 		if (!__kmem_cache_shutdown(s)) {
@@ -305,7 +315,7 @@ void __init create_boot_cache(struct kme
 		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
 					name, size, err);
 
-	s->refcount = -1;	/* Exempt from merging for now */
+	atomic_set(&s->refcount, -1);	/* Exempt from merging for now */
 }
 
 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
@@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc
 
 	create_boot_cache(s, name, size, flags);
 	list_add(&s->list, &slab_caches);
-	s->refcount = 1;
+	atomic_set(&s->refcount, 1);
 	return s;
 }
 
@@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KM
 EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
+EXPORT_SYMBOL(kmalloc_usercopy_caches);
+#endif
+
 /*
  * Conversion table for small slabs sizes / 8 to the index in the
  * kmalloc array. This is necessary for slabs < 192 since we have non power
@@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t s
 		return kmalloc_dma_caches[index];
 
 #endif
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	if (unlikely((flags & GFP_USERCOPY)))
+		return kmalloc_usercopy_caches[index];
+
+#endif
+
 	return kmalloc_caches[index];
 }
 
@@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsign
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
 		if (!kmalloc_caches[i]) {
 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
-							1 << i, flags);
+							1 << i, SLAB_USERCOPY | flags);
 		}
 
 		/*
@@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsign
 		 * earlier power of two caches
 		 */
 		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
-			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
 
 		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
-			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
+			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
 	}
 
 	/* Kmalloc array is now usable */
@@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsign
 		}
 	}
 #endif
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
+		struct kmem_cache *s = kmalloc_caches[i];
+
+		if (s) {
+			int size = kmalloc_size(i);
+			char *n = kasprintf(GFP_NOWAIT,
+				 "usercopy-kmalloc-%d", size);
+
+			BUG_ON(!n);
+			kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
+				size, SLAB_USERCOPY | flags);
+		}
+	}
+#endif
+
 }
 #endif /* !CONFIG_SLOB */
 
@@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_fi
 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	seq_puts(m, " : pax <sanitized> <not_sanitized>");
+#endif
 #endif
 	seq_putc(m, '\n');
 }
diff -ruNp linux-3.13.11/mm/slab_vs.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab_vs.h
--- linux-3.13.11/mm/slab_vs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slab_vs.h	2014-07-09 12:00:15.000000000 +0200
@@ -0,0 +1,29 @@
+
+#include <linux/vserver/context.h>
+
+#include <linux/vs_context.h>
+
+static inline
+void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	int what = gfp_zone(cachep->allocflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_add(cachep->size, &vxi->cacct.slab[what]);
+}
+
+static inline
+void vx_slab_free(struct kmem_cache *cachep)
+{
+	int what = gfp_zone(cachep->allocflags);
+	struct vx_info *vxi = current_vx_info();
+
+	if (!vxi)
+		return;
+
+	atomic_sub(cachep->size, &vxi->cacct.slab[what]);
+}
+
diff -ruNp linux-3.13.11/mm/slob.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slob.c
--- linux-3.13.11/mm/slob.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slob.c	2014-07-09 12:00:15.000000000 +0200
@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_
 /*
  * Return the size of a slob block.
  */
-static slobidx_t slob_units(slob_t *s)
+static slobidx_t slob_units(const slob_t *s)
 {
 	if (s->units > 0)
 		return s->units;
@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
 /*
  * Return the next free slob block pointer after this one.
  */
-static slob_t *slob_next(slob_t *s)
+static slob_t *slob_next(const slob_t *s)
 {
 	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
 	slobidx_t next;
@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
 /*
  * Returns true if s is the last free block in its page.
  */
-static int slob_last(slob_t *s)
+static int slob_last(const slob_t *s)
 {
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_pages(gfp_t gfp, int order, int node)
+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
 {
-	void *page;
+	struct page *page;
 
 #ifdef CONFIG_NUMA
 	if (node != NUMA_NO_NODE)
@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, i
 	if (!page)
 		return NULL;
 
-	return page_address(page);
+	__SetPageSlab(page);
+	return page;
 }
 
-static void slob_free_pages(void *b, int order)
+static void slob_free_pages(struct page *sp, int order)
 {
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	free_pages((unsigned long)b, order);
+	__ClearPageSlab(sp);
+	page_mapcount_reset(sp);
+	sp->private = 0;
+	__free_pages(sp, order);
 }
 
 /*
@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
-		if (!b)
+		sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
+		if (!sp)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		b = page_address(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
+		sp->private = 0;
 		INIT_LIST_HEAD(&sp->list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
@@ -359,12 +363,15 @@ static void slob_free(void *block, int s
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
-		slob_free_pages(b, 0);
+		slob_free_pages(sp, 0);
 		return;
 	}
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (pax_sanitize_slab)
+		memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
@@ -424,11 +431,10 @@ out:
  */
 
 static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
 {
-	unsigned int *m;
-	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-	void *ret;
+	slob_t *m;
+	void *ret = NULL;
 
 	gfp &= gfp_allowed_mask;
 
@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp
 
 		if (!m)
 			return NULL;
-		*m = size;
+		BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
+		BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
+		m[0].units = size;
+		m[1].units = align;
 		ret = (void *)m + align;
 
 		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
+		struct page *page;
 
 		if (likely(order))
 			gfp |= __GFP_COMP;
-		ret = slob_new_pages(gfp, order, node);
+		page = slob_new_pages(gfp, order, node);
+		if (page) {
+			ret = page_address(page);
+			page->private = size;
+		}
 
 		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
-	kmemleak_alloc(ret, size, 1, gfp);
+	return ret;
+}
+
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
+{
+	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
+
+	if (!ZERO_OR_NULL_PTR(ret))
+		kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
 
@@ -493,34 +517,112 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
+	VM_BUG_ON(!virt_addr_valid(block));
 	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
+	VM_BUG_ON(!PageSlab(sp));
+	if (!sp->private) {
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		slob_free(m, *m + align);
-	} else
+		slob_t *m = (slob_t *)(block - align);
+		slob_free(m, m[0].units + align);
+	} else {
+		__ClearPageSlab(sp);
+		page_mapcount_reset(sp);
+		sp->private = 0;
 		__free_pages(sp, compound_order(sp));
+	}
 }
 EXPORT_SYMBOL(kfree);
 
+bool is_usercopy_object(const void *ptr)
+{
+	if (!slab_is_available())
+		return false;
+
+	// PAX: TODO
+
+	return false;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+	struct page *page;
+	const slob_t *free;
+	const void *base;
+	unsigned long flags;
+
+	if (ZERO_OR_NULL_PTR(ptr))
+		return "<null>";
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+	if (!PageSlab(page))
+		return NULL;
+
+	if (page->private) {
+		base = page;
+		if (base <= ptr && n <= page->private - (ptr - base))
+			return NULL;
+		return "<slob>";
+	}
+
+	/* some tricky double walking to find the chunk */
+	spin_lock_irqsave(&slob_lock, flags);
+	base = (void *)((unsigned long)ptr & PAGE_MASK);
+	free = page->freelist;
+
+	while (!slob_last(free) && (void *)free <= ptr) {
+		base = free + slob_units(free);
+		free = slob_next(free);
+	}
+
+	while (base < (void *)free) {
+		slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
+		int size = SLOB_UNIT * SLOB_UNITS(m + align);
+		int offset;
+
+		if (ptr < base + align)
+			break;
+
+		offset = ptr - base - align;
+		if (offset >= m) {
+			base += size;
+			continue;
+		}
+
+		if (n > m - offset)
+			break;
+
+		spin_unlock_irqrestore(&slob_lock, flags);
+		return NULL;
+	}
+
+	spin_unlock_irqrestore(&slob_lock, flags);
+	return "<slob>";
+}
+#endif
+
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
 	struct page *sp;
 	int align;
-	unsigned int *m;
+	slob_t *m;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = virt_to_page(block);
-	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+	VM_BUG_ON(!PageSlab(sp));
+	if (sp->private)
+		return sp->private;
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-	m = (unsigned int *)(block - align);
-	return SLOB_UNITS(*m) * SLOB_UNIT;
+	m = (slob_t *)(block - align);
+	return SLOB_UNITS(m[0].units) * SLOB_UNIT;
 }
 EXPORT_SYMBOL(ksize);
 
@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cach
 
 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
-	void *b;
+	void *b = NULL;
 
 	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
+#else
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
 		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);
 	} else {
-		b = slob_new_pages(flags, get_order(c->size), node);
+		struct page *sp;
+
+		sp = slob_new_pages(flags, get_order(c->size), node);
+		if (sp) {
+			b = page_address(sp);
+			sp->private = c->size;
+		}
 		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    PAGE_SIZE << get_order(c->size),
 					    flags, node);
 	}
+#endif
 
 	if (b && c->ctor)
 		c->ctor(b);
@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 static void __kmem_cache_free(void *b, int size)
 {
-	if (size < PAGE_SIZE)
+	struct page *sp;
+
+	sp = virt_to_page(b);
+	BUG_ON(!PageSlab(sp));
+	if (!sp->private)
 		slob_free(b, size);
 	else
-		slob_free_pages(b, get_order(size));
+		slob_free_pages(sp, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_hea
 
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+	int size = c->size;
+
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	if (size + c->align < PAGE_SIZE) {
+		size += c->align;
+		b -= c->align;
+	}
+#endif
+
 	kmemleak_free_recursive(b, c->flags);
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
-		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
-		slob_rcu->size = c->size;
+		slob_rcu = b + (size - sizeof(struct slob_rcu));
+		slob_rcu->size = size;
 		call_rcu(&slob_rcu->head, kmem_rcu_free);
 	} else {
-		__kmem_cache_free(b, c->size);
+		__kmem_cache_free(b, size);
 	}
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	trace_kfree(_RET_IP_, b);
+#else
 	trace_kmem_cache_free(_RET_IP_, b);
+#endif
+
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff -ruNp linux-3.13.11/mm/slub.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slub.c
--- linux-3.13.11/mm/slub.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/slub.c	2014-07-09 12:00:15.000000000 +0200
@@ -207,7 +207,7 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
@@ -530,7 +530,7 @@ static void print_track(const char *s, s
 	if (!t->addr)
 		return;
 
-	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
+	printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
 		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 #ifdef CONFIG_STACKTRACE
 	{
@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(st
 
 	slab_free_hook(s, x);
 
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+	if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
+		memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
+		if (s->ctor)
+			s->ctor(x);
+	}
+#endif
+
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2710,7 +2718,7 @@ static int slub_min_objects;
  * Merge control. If this is set then no merging of slab caches will occur.
  * (Could be removed. This was introduced to pacify the merge skeptics.)
  */
-static int slub_nomerge;
+static int slub_nomerge = 1;
 
 /*
  * Calculate the order of allocation given an slab object size.
@@ -2987,6 +2995,9 @@ static int calculate_sizes(struct kmem_c
 	s->inuse = size;
 
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+		(pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
+#endif
 		s->ctor)) {
 		/*
 		 * Relocate free pointer after the object if it is not
@@ -3332,6 +3343,59 @@ void *__kmalloc_node(size_t size, gfp_t
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
+bool is_usercopy_object(const void *ptr)
+{
+	struct page *page;
+	struct kmem_cache *s;
+
+	if (ZERO_OR_NULL_PTR(ptr))
+		return false;
+
+	if (!slab_is_available())
+		return false;
+
+	if (!virt_addr_valid(ptr))
+		return false;
+
+	page = virt_to_head_page(ptr);
+
+	if (!PageSlab(page))
+		return false;
+
+	s = page->slab_cache;
+	return s->flags & SLAB_USERCOPY;
+}
+
+#ifdef CONFIG_PAX_USERCOPY
+const char *check_heap_object(const void *ptr, unsigned long n)
+{
+	struct page *page;
+	struct kmem_cache *s;
+	unsigned long offset;
+
+	if (ZERO_OR_NULL_PTR(ptr))
+		return "<null>";
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	if (!PageSlab(page))
+		return NULL;
+
+	s = page->slab_cache;
+	if (!(s->flags & SLAB_USERCOPY))
+		return s->name;
+
+	offset = (ptr - page_address(page)) % s->size;
+	if (offset <= s->object_size && n <= s->object_size - offset)
+		return NULL;
+
+	return s->name;
+}
+#endif
+
 size_t ksize(const void *object)
 {
 	struct page *page;
@@ -3360,6 +3424,7 @@ void kfree(const void *x)
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
+	VM_BUG_ON(!virt_addr_valid(x));
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
@@ -3665,7 +3730,7 @@ static int slab_unmergeable(struct kmem_
 	/*
 	 * We may have set a slab to be unmergeable during bootstrap.
 	 */
-	if (s->refcount < 0)
+	if (atomic_read(&s->refcount) < 0)
 		return 1;
 
 	return 0;
@@ -3723,7 +3788,7 @@ __kmem_cache_alias(struct mem_cgroup *me
 
 	s = find_mergeable(memcg, size, align, flags, name, ctor);
 	if (s) {
-		s->refcount++;
+		atomic_inc(&s->refcount);
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
@@ -3732,7 +3797,7 @@ __kmem_cache_alias(struct mem_cgroup *me
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
 		if (sysfs_slab_alias(s, name)) {
-			s->refcount--;
+			atomic_dec(&s->refcount);
 			s = NULL;
 		}
 	}
@@ -3852,7 +3917,7 @@ void *__kmalloc_node_track_caller(size_t
 }
 #endif
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -4241,12 +4306,12 @@ static void resiliency_test(void)
 	validate_slab_cache(kmalloc_caches[9]);
 }
 #else
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static void resiliency_test(void) {};
 #endif
 #endif
 
-#ifdef CONFIG_SYSFS
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 enum slab_stat_type {
 	SL_ALL,			/* All slabs */
 	SL_PARTIAL,		/* Only partially allocated slabs */
@@ -4492,7 +4557,7 @@ SLAB_ATTR_RO(ctor);
 
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->refcount - 1);
+	return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
 }
 SLAB_ATTR_RO(aliases);
 
@@ -4580,6 +4645,14 @@ static ssize_t cache_dma_show(struct kme
 SLAB_ATTR_RO(cache_dma);
 #endif
 
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
+}
+SLAB_ATTR_RO(usercopy);
+#endif
+
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
@@ -4914,6 +4987,9 @@ static struct attribute *slab_attrs[] =
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
 #endif
+#ifdef CONFIG_PAX_USERCOPY_SLABS
+	&usercopy_attr.attr,
+#endif
 #ifdef CONFIG_NUMA
 	&remote_node_defrag_ratio_attr.attr,
 #endif
@@ -5146,6 +5222,7 @@ static char *create_unique_id(struct kme
 	return name;
 }
 
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5169,7 +5246,7 @@ static int sysfs_slab_add(struct kmem_ca
 	}
 
 	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err) {
 		kobject_put(&s->kobj);
 		return err;
@@ -5203,6 +5280,7 @@ static void sysfs_slab_remove(struct kme
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
 }
+#endif
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
@@ -5216,6 +5294,7 @@ struct saved_alias {
 
 static struct saved_alias *alias_list;
 
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 {
 	struct saved_alias *al;
@@ -5238,6 +5317,7 @@ static int sysfs_slab_alias(struct kmem_
 	alias_list = al;
 	return 0;
 }
+#endif
 
 static int __init slab_sysfs_init(void)
 {
diff -ruNp linux-3.13.11/mm/sparse-vmemmap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/sparse-vmemmap.c
--- linux-3.13.11/mm/sparse-vmemmap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/sparse-vmemmap.c	2014-07-09 12:00:15.000000000 +0200
@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(p
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
-		pud_populate(&init_mm, pud, p);
+		pud_populate_kernel(&init_mm, pud, p);
 	}
 	return pud;
 }
@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(u
 		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
-		pgd_populate(&init_mm, pgd, p);
+		pgd_populate_kernel(&init_mm, pgd, p);
 	}
 	return pgd;
 }
diff -ruNp linux-3.13.11/mm/sparse.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/sparse.c
--- linux-3.13.11/mm/sparse.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/sparse.c	2014-07-09 12:00:15.000000000 +0200
@@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struc
 
 	for (i = 0; i < PAGES_PER_SECTION; i++) {
 		if (PageHWPoison(&memmap[i])) {
-			atomic_long_sub(1, &num_poisoned_pages);
+			atomic_long_sub_unchecked(1, &num_poisoned_pages);
 			ClearPageHWPoison(&memmap[i]);
 		}
 	}
diff -ruNp linux-3.13.11/mm/swap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/swap.c
--- linux-3.13.11/mm/swap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/swap.c	2014-07-09 12:00:15.000000000 +0200
@@ -77,6 +77,8 @@ static void __put_compound_page(struct p
 
 	__page_cache_release(page);
 	dtor = get_compound_page_dtor(page);
+	if (!PageHuge(page))
+		BUG_ON(dtor != free_compound_page);
 	(*dtor)(page);
 }
 
diff -ruNp linux-3.13.11/mm/swapfile.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/swapfile.c
--- linux-3.13.11/mm/swapfile.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/swapfile.c	2014-07-09 12:00:15.000000000 +0200
@@ -39,6 +39,7 @@
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
 #include <linux/page_cgroup.h>
+#include <linux/vs_base.h>
 
 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
 				 unsigned char);
@@ -66,7 +67,7 @@ static DEFINE_MUTEX(swapon_mutex);
 
 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
 /* Activity counter to indicate that a swapon or swapoff has occurred */
-static atomic_t proc_poll_event = ATOMIC_INIT(0);
+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
 
 static inline unsigned char swap_count(unsigned char ent)
 {
@@ -1958,7 +1959,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
 	spin_unlock(&swap_lock);
 
 	err = 0;
-	atomic_inc(&proc_poll_event);
+	atomic_inc_unchecked(&proc_poll_event);
 	wake_up_interruptible(&proc_poll_wait);
 
 out_dput:
@@ -1975,8 +1976,8 @@ static unsigned swaps_poll(struct file *
 
 	poll_wait(file, &proc_poll_wait, wait);
 
-	if (seq->poll_event != atomic_read(&proc_poll_event)) {
-		seq->poll_event = atomic_read(&proc_poll_event);
+	if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
+		seq->poll_event = atomic_read_unchecked(&proc_poll_event);
 		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
 	}
 
@@ -2042,6 +2043,16 @@ static int swap_show(struct seq_file *sw
 
 	if (si == SEQ_START_TOKEN) {
 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+		if (vx_flags(VXF_VIRT_MEM, 0)) {
+			struct sysinfo si;
+
+			vx_vsi_swapinfo(&si);
+			if (si.totalswap < (1 << 10))
+				return 0;
+			seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
+				"hdv0", "partition", si.totalswap >> 10,
+				(si.totalswap - si.freeswap) >> 10, -1);
+		}
 		return 0;
 	}
 
@@ -2074,7 +2085,7 @@ static int swaps_open(struct inode *inod
 		return ret;
 
 	seq = file->private_data;
-	seq->poll_event = atomic_read(&proc_poll_event);
+	seq->poll_event = atomic_read_unchecked(&proc_poll_event);
 	return 0;
 }
 
@@ -2533,7 +2544,7 @@ SYSCALL_DEFINE2(swapon, const char __use
 		(frontswap_map) ? "FS" : "");
 
 	mutex_unlock(&swapon_mutex);
-	atomic_inc(&proc_poll_event);
+	atomic_inc_unchecked(&proc_poll_event);
 	wake_up_interruptible(&proc_poll_wait);
 
 	if (S_ISREG(inode->i_mode))
@@ -2589,6 +2600,8 @@ void si_swapinfo(struct sysinfo *val)
 	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
 	val->totalswap = total_swap_pages + nr_to_be_unused;
 	spin_unlock(&swap_lock);
+	if (vx_flags(VXF_VIRT_MEM, 0))
+		vx_vsi_swapinfo(val);
 }
 
 /*
diff -ruNp linux-3.13.11/mm/util.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/util.c
--- linux-3.13.11/mm/util.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/util.c	2014-07-09 12:00:15.000000000 +0200
@@ -297,6 +297,12 @@ done:
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP)
+		mm->mmap_base += mm->delta_mmap;
+#endif
+
 	mm->get_unmapped_area = arch_get_unmapped_area;
 }
 #endif
diff -ruNp linux-3.13.11/mm/vmalloc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/vmalloc.c
--- linux-3.13.11/mm/vmalloc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/vmalloc.c	2014-07-09 12:00:15.000000000 +0200
@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
-		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
+			BUG_ON(!pte_exec(*pte));
+			set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+			continue;
+		}
+#endif
+
+		{
+			pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
+			WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, un
 	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
+
+	pax_open_kernel();
 	do {
 		struct page *page = pages[*nr];
 
-		if (WARN_ON(!pte_none(*pte)))
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+		if (pgprot_val(prot) & _PAGE_NX)
+#endif
+
+		if (!pte_none(*pte)) {
+			pax_close_kernel();
+			WARN_ON(1);
 			return -EBUSY;
-		if (WARN_ON(!page))
+		}
+		if (!page) {
+			pax_close_kernel();
+			WARN_ON(1);
 			return -ENOMEM;
+		}
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pax_close_kernel();
 	return 0;
 }
 
@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, un
 	pmd_t *pmd;
 	unsigned long next;
 
-	pmd = pmd_alloc(&init_mm, pud, addr);
+	pmd = pmd_alloc_kernel(&init_mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
 	do {
@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, un
 	pud_t *pud;
 	unsigned long next;
 
-	pud = pud_alloc(&init_mm, pgd, addr);
+	pud = pud_alloc_kernel(&init_mm, pgd, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void
 	if (addr >= MODULES_VADDR && addr < MODULES_END)
 		return 1;
 #endif
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+	if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
+		return 1;
+#endif
+
 	return is_vmalloc_addr(x);
 }
 
@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void
 
 	if (!pgd_none(*pgd)) {
 		pud_t *pud = pud_offset(pgd, addr);
+#ifdef CONFIG_X86
+		if (!pud_large(*pud))
+#endif
 		if (!pud_none(*pud)) {
 			pmd_t *pmd = pmd_offset(pud, addr);
+#ifdef CONFIG_X86
+			if (!pmd_large(*pmd))
+#endif
 			if (!pmd_none(*pmd)) {
 				pte_t *ptep, pte;
 
@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_n
 	struct vm_struct *area;
 
 	BUG_ON(in_interrupt());
+
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+	if (flags & VM_KERNEXEC) {
+		if (start != VMALLOC_START || end != VMALLOC_END)
+			return NULL;
+		start = (unsigned long)MODULES_EXEC_VADDR;
+		end = (unsigned long)MODULES_EXEC_END;
+	}
+#endif
+
 	if (flags & VM_IOREMAP)
 		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
 
@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned
 	if (count > totalram_pages)
 		return NULL;
 
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+	if (!(pgprot_val(prot) & _PAGE_NX))
+		flags |= VM_KERNEXEC;
+#endif
+
 	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
 					__builtin_return_address(0));
 	if (!area)
@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		goto fail;
 
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
+	if (!(pgprot_val(prot) & _PAGE_NX))
+		area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
+					  VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
+	else
+#endif
+
 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
 				  start, end, node, gfp_mask, caller);
 	if (!area)
@@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-
 void *vmalloc_exec(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
 			      NUMA_NO_NODE, __builtin_return_address(0));
 }
 
@@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct v
 {
 	struct vm_struct *area;
 
+	BUG_ON(vma->vm_mirror);
+
 	size = PAGE_ALIGN(size);
 
 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
@@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, vo
 		v->addr, v->addr + v->size, v->size);
 
 	if (v->caller)
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+		seq_printf(m, " %pK", v->caller);
+#else
 		seq_printf(m, " %pS", v->caller);
+#endif
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
diff -ruNp linux-3.13.11/mm/vmstat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/vmstat.c
--- linux-3.13.11/mm/vmstat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/mm/vmstat.c	2014-07-09 12:00:15.000000000 +0200
@@ -20,6 +20,7 @@
 #include <linux/writeback.h>
 #include <linux/compaction.h>
 #include <linux/mm_inline.h>
+#include <linux/grsecurity.h>
 
 #include "internal.h"
 
@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
  *
  * vm_stat contains the global counters
  */
-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
 EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		if (diff[i])
-			atomic_long_add(diff[i], &vm_stat[i]);
+			atomic_long_add_unchecked(diff[i], &vm_stat[i]);
 }
 
 /*
@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
 			if (v) {
 
-				atomic_long_add(v, &zone->vm_stat[i]);
+				atomic_long_add_unchecked(v, &zone->vm_stat[i]);
 				global_diff[i] += v;
 #ifdef CONFIG_NUMA
 				/* 3 seconds idle till flush */
@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
 
 				v = p->vm_stat_diff[i];
 				p->vm_stat_diff[i] = 0;
-				atomic_long_add(v, &zone->vm_stat[i]);
+				atomic_long_add_unchecked(v, &zone->vm_stat[i]);
 				global_diff[i] += v;
 			}
 	}
@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, s
 		if (pset->vm_stat_diff[i]) {
 			int v = pset->vm_stat_diff[i];
 			pset->vm_stat_diff[i] = 0;
-			atomic_long_add(v, &zone->vm_stat[i]);
-			atomic_long_add(v, &vm_stat[i]);
+			atomic_long_add_unchecked(v, &zone->vm_stat[i]);
+			atomic_long_add_unchecked(v, &vm_stat[i]);
 		}
 }
 #endif
@@ -1148,10 +1149,22 @@ static void *vmstat_start(struct seq_fil
 	stat_items_size += sizeof(struct vm_event_state);
 #endif
 
-	v = kmalloc(stat_items_size, GFP_KERNEL);
+	v = kzalloc(stat_items_size, GFP_KERNEL);
 	m->private = v;
 	if (!v)
 		return ERR_PTR(-ENOMEM);
+
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
+        if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
+                && !in_group_p(grsec_proc_gid)
+#endif
+        )
+		return (unsigned long *)m->private + *pos;
+#endif
+#endif
+
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		v[i] = global_page_state(i);
 	v += NR_VM_ZONE_STAT_ITEMS;
@@ -1300,10 +1313,16 @@ static int __init setup_vmstat(void)
 	put_online_cpus();
 #endif
 #ifdef CONFIG_PROC_FS
-	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
-	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
-	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
-	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
+	{
+		mode_t gr_mode = S_IRUGO;
+#ifdef CONFIG_GRKERNSEC_PROC_ADD
+		gr_mode = S_IRUSR;
+#endif
+		proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
+		proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
+		proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+		proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
+	}
 #endif
 	return 0;
 }
diff -ruNp linux-3.13.11/net/8021q/vlan.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/8021q/vlan.c
--- linux-3.13.11/net/8021q/vlan.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/8021q/vlan.c	2014-07-09 12:00:15.000000000 +0200
@@ -474,7 +474,7 @@ out:
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block vlan_notifier_block __read_mostly = {
+static struct notifier_block vlan_notifier_block = {
 	.notifier_call = vlan_device_event,
 };
 
@@ -549,8 +549,7 @@ static int vlan_ioctl_handler(struct net
 		err = -EPERM;
 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 			break;
-		if ((args.u.name_type >= 0) &&
-		    (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
+		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
 			struct vlan_net *vn;
 
 			vn = net_generic(net, vlan_net_id);
diff -ruNp linux-3.13.11/net/9p/client.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/client.c
--- linux-3.13.11/net/9p/client.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/client.c	2014-07-09 12:00:15.000000000 +0200
@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_
 				       len - inline_len);
 			} else {
 				err = copy_from_user(ename + inline_len,
-						     uidata, len - inline_len);
+						     (char __force_user *)uidata, len - inline_len);
 				if (err) {
 					err = -EFAULT;
 					goto out_err;
@@ -1563,7 +1563,7 @@ p9_client_read(struct p9_fid *fid, char
 			kernel_buf = 1;
 			indata = data;
 		} else
-			indata = (__force char *)udata;
+			indata = (__force_kernel char *)udata;
 		/*
 		 * response header len is 11
 		 * PDU Header(7) + IO Size (4)
@@ -1638,7 +1638,7 @@ p9_client_write(struct p9_fid *fid, char
 			kernel_buf = 1;
 			odata = data;
 		} else
-			odata = (char *)udata;
+			odata = (char __force_kernel *)udata;
 		req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
 				       P9_ZC_HDR_SZ, kernel_buf, "dqd",
 				       fid->fid, offset, rsize);
diff -ruNp linux-3.13.11/net/9p/mod.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/mod.c
--- linux-3.13.11/net/9p/mod.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/mod.c	2014-07-09 12:00:15.000000000 +0200
@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
 void v9fs_register_trans(struct p9_trans_module *m)
 {
 	spin_lock(&v9fs_trans_lock);
-	list_add_tail(&m->list, &v9fs_trans_list);
+	pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
 	spin_unlock(&v9fs_trans_lock);
 }
 EXPORT_SYMBOL(v9fs_register_trans);
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
 void v9fs_unregister_trans(struct p9_trans_module *m)
 {
 	spin_lock(&v9fs_trans_lock);
-	list_del_init(&m->list);
+	pax_list_del_init((struct list_head *)&m->list);
 	spin_unlock(&v9fs_trans_lock);
 }
 EXPORT_SYMBOL(v9fs_unregister_trans);
diff -ruNp linux-3.13.11/net/9p/trans_fd.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/trans_fd.c
--- linux-3.13.11/net/9p/trans_fd.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/9p/trans_fd.c	2014-07-09 12:00:15.000000000 +0200
@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client
 	oldfs = get_fs();
 	set_fs(get_ds());
 	/* The cast to a user pointer is valid due to the set_fs() */
-	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
+	ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
 	set_fs(oldfs);
 
 	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
diff -ruNp linux-3.13.11/net/atm/atm_misc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/atm_misc.c
--- linux-3.13.11/net/atm/atm_misc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/atm_misc.c	2014-07-09 12:00:15.000000000 +0200
@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
 	if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
 		return 1;
 	atm_return(vcc, truesize);
-	atomic_inc(&vcc->stats->rx_drop);
+	atomic_inc_unchecked(&vcc->stats->rx_drop);
 	return 0;
 }
 EXPORT_SYMBOL(atm_charge);
@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
 		}
 	}
 	atm_return(vcc, guess);
-	atomic_inc(&vcc->stats->rx_drop);
+	atomic_inc_unchecked(&vcc->stats->rx_drop);
 	return NULL;
 }
 EXPORT_SYMBOL(atm_alloc_charge);
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
 
 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
 {
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
 	__SONET_ITEMS
 #undef __HANDLE_ITEM
 }
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
 
 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
 {
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
 	__SONET_ITEMS
 #undef __HANDLE_ITEM
 }
diff -ruNp linux-3.13.11/net/atm/lec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/lec.c
--- linux-3.13.11/net/atm/lec.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/lec.c	2014-07-09 12:00:15.000000000 +0200
@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct le
 }
 
 static struct lane2_ops lane2_ops = {
-	lane2_resolve,		/* resolve,             spec 3.1.3 */
-	lane2_associate_req,	/* associate_req,       spec 3.1.4 */
-	NULL			/* associate indicator, spec 3.1.5 */
+	.resolve = lane2_resolve,
+	.associate_req = lane2_associate_req,
+	.associate_indicator = NULL
 };
 
 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
diff -ruNp linux-3.13.11/net/atm/lec.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/lec.h
--- linux-3.13.11/net/atm/lec.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/lec.h	2014-07-09 12:00:15.000000000 +0200
@@ -48,7 +48,7 @@ struct lane2_ops {
 			      const u8 *tlvs, u32 sizeoftlvs);
 	void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
 				     const u8 *tlvs, u32 sizeoftlvs);
-};
+} __no_const;
 
 /*
  * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
diff -ruNp linux-3.13.11/net/atm/mpoa_caches.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/mpoa_caches.c
--- linux-3.13.11/net/atm/mpoa_caches.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/mpoa_caches.c	2014-07-09 12:00:15.000000000 +0200
@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa
 
 
 static struct in_cache_ops ingress_ops = {
-	in_cache_add_entry,               /* add_entry       */
-	in_cache_get,                     /* get             */
-	in_cache_get_with_mask,           /* get_with_mask   */
-	in_cache_get_by_vcc,              /* get_by_vcc      */
-	in_cache_put,                     /* put             */
-	in_cache_remove_entry,            /* remove_entry    */
-	cache_hit,                        /* cache_hit       */
-	clear_count_and_expired,          /* clear_count     */
-	check_resolving_entries,          /* check_resolving */
-	refresh_entries,                  /* refresh         */
-	in_destroy_cache                  /* destroy_cache   */
+	.add_entry = in_cache_add_entry,
+	.get = in_cache_get,
+	.get_with_mask = in_cache_get_with_mask,
+	.get_by_vcc = in_cache_get_by_vcc,
+	.put = in_cache_put,
+	.remove_entry = in_cache_remove_entry,
+	.cache_hit = cache_hit,
+	.clear_count = clear_count_and_expired,
+	.check_resolving = check_resolving_entries,
+	.refresh = refresh_entries,
+	.destroy_cache = in_destroy_cache
 };
 
 static struct eg_cache_ops egress_ops = {
-	eg_cache_add_entry,               /* add_entry        */
-	eg_cache_get_by_cache_id,         /* get_by_cache_id  */
-	eg_cache_get_by_tag,              /* get_by_tag       */
-	eg_cache_get_by_vcc,              /* get_by_vcc       */
-	eg_cache_get_by_src_ip,           /* get_by_src_ip    */
-	eg_cache_put,                     /* put              */
-	eg_cache_remove_entry,            /* remove_entry     */
-	update_eg_cache_entry,            /* update           */
-	clear_expired,                    /* clear_expired    */
-	eg_destroy_cache                  /* destroy_cache    */
+	.add_entry = eg_cache_add_entry,
+	.get_by_cache_id = eg_cache_get_by_cache_id,
+	.get_by_tag = eg_cache_get_by_tag,
+	.get_by_vcc = eg_cache_get_by_vcc,
+	.get_by_src_ip = eg_cache_get_by_src_ip,
+	.put = eg_cache_put,
+	.remove_entry = eg_cache_remove_entry,
+	.update = update_eg_cache_entry,
+	.clear_expired = clear_expired,
+	.destroy_cache = eg_destroy_cache
 };
 
 
diff -ruNp linux-3.13.11/net/atm/proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/proc.c
--- linux-3.13.11/net/atm/proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/proc.c	2014-07-09 12:00:15.000000000 +0200
@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
   const struct k_atm_aal_stats *stats)
 {
 	seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
-		   atomic_read(&stats->tx), atomic_read(&stats->tx_err),
-		   atomic_read(&stats->rx), atomic_read(&stats->rx_err),
-		   atomic_read(&stats->rx_drop));
+		   atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
+		   atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
+		   atomic_read_unchecked(&stats->rx_drop));
 }
 
 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
diff -ruNp linux-3.13.11/net/atm/resources.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/resources.c
--- linux-3.13.11/net/atm/resources.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/atm/resources.c	2014-07-09 12:00:15.000000000 +0200
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
 static void copy_aal_stats(struct k_atm_aal_stats *from,
     struct atm_aal_stats *to)
 {
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
 	__AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 }
@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
 static void subtract_aal_stats(struct k_atm_aal_stats *from,
     struct atm_aal_stats *to)
 {
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
 	__AAL_STAT_ITEMS
 #undef __HANDLE_ITEM
 }
diff -ruNp linux-3.13.11/net/ax25/sysctl_net_ax25.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ax25/sysctl_net_ax25.c
--- linux-3.13.11/net/ax25/sysctl_net_ax25.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ax25/sysctl_net_ax25.c	2014-07-09 12:00:15.000000000 +0200
@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *a
 {
 	char path[sizeof("net/ax25/") + IFNAMSIZ];
 	int k;
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
 	if (!table)
diff -ruNp linux-3.13.11/net/batman-adv/bat_iv_ogm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/bat_iv_ogm.c
--- linux-3.13.11/net/batman-adv/bat_iv_ogm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/bat_iv_ogm.c	2014-07-09 12:00:15.000000000 +0200
@@ -307,7 +307,7 @@ static int batadv_iv_ogm_iface_enable(st
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
-	atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
+	atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
 	hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
 	ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
@@ -894,9 +894,9 @@ static void batadv_iv_ogm_schedule(struc
 	batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
 	/* change sequence number to network order */
-	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+	seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
-	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
+	atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
 
 	batadv_iv_ogm_slide_own_bcast_window(hard_iface);
 	batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
@@ -1261,7 +1261,7 @@ static void batadv_iv_ogm_process(const
 		return;
 
 	/* could be changed by schedule_own_packet() */
-	if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
+	if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
 
 	if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
 		has_directlink_flag = 1;
diff -ruNp linux-3.13.11/net/batman-adv/fragmentation.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/fragmentation.c
--- linux-3.13.11/net/batman-adv/fragmentation.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/fragmentation.c	2014-07-09 12:00:15.000000000 +0200
@@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_b
 	frag_header.packet_type = BATADV_UNICAST_FRAG;
 	frag_header.version = BATADV_COMPAT_VERSION;
 	frag_header.ttl = BATADV_TTL;
-	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+	frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
 	frag_header.reserved = 0;
 	frag_header.no = 0;
 	frag_header.total_size = htons(skb->len);
diff -ruNp linux-3.13.11/net/batman-adv/soft-interface.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/soft-interface.c
--- linux-3.13.11/net/batman-adv/soft-interface.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/soft-interface.c	2014-07-09 12:00:15.000000000 +0200
@@ -278,7 +278,7 @@ static int batadv_interface_tx(struct sk
 		       primary_if->net_dev->dev_addr, ETH_ALEN);
 
 		/* set broadcast sequence number */
-		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+		seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
 		bcast_packet->seqno = htonl(seqno);
 
 		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
@@ -688,7 +688,7 @@ static int batadv_softif_init_late(struc
 	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
-	atomic_set(&bat_priv->bcast_seqno, 1);
+	atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
 	atomic_set(&bat_priv->tt.vn, 0);
 	atomic_set(&bat_priv->tt.local_changes, 0);
 	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
@@ -700,7 +700,7 @@ static int batadv_softif_init_late(struc
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
-	atomic_set(&bat_priv->frag_seqno, random_seqno);
+	atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
 
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
diff -ruNp linux-3.13.11/net/batman-adv/types.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/types.h
--- linux-3.13.11/net/batman-adv/types.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/batman-adv/types.h	2014-07-09 12:00:15.000000000 +0200
@@ -56,7 +56,7 @@
 struct batadv_hard_iface_bat_iv {
 	unsigned char *ogm_buff;
 	int ogm_buff_len;
-	atomic_t ogm_seqno;
+	atomic_unchecked_t ogm_seqno;
 };
 
 /**
@@ -673,7 +673,7 @@ struct batadv_priv {
 	atomic_t bonding;
 	atomic_t fragmentation;
 	atomic_t packet_size_max;
-	atomic_t frag_seqno;
+	atomic_unchecked_t frag_seqno;
 #ifdef CONFIG_BATMAN_ADV_BLA
 	atomic_t bridge_loop_avoidance;
 #endif
@@ -687,7 +687,7 @@ struct batadv_priv {
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 	atomic_t log_level;
 #endif
-	atomic_t bcast_seqno;
+	atomic_unchecked_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
 	char num_ifaces;
diff -ruNp linux-3.13.11/net/bluetooth/hci_sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/hci_sock.c
--- linux-3.13.11/net/bluetooth/hci_sock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/hci_sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct so
 			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
 		}
 
-		len = min_t(unsigned int, len, sizeof(uf));
+		len = min((size_t)len, sizeof(uf));
 		if (copy_from_user(&uf, optval, len)) {
 			err = -EFAULT;
 			break;
diff -ruNp linux-3.13.11/net/bluetooth/l2cap_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/l2cap_core.c
--- linux-3.13.11/net/bluetooth/l2cap_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/l2cap_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -3500,8 +3500,10 @@ static int l2cap_parse_conf_rsp(struct l
 			break;
 
 		case L2CAP_CONF_RFC:
-			if (olen == sizeof(rfc))
-				memcpy(&rfc, (void *)val, olen);
+			if (olen != sizeof(rfc))
+				break;
+
+			memcpy(&rfc, (void *)val, olen);
 
 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
 			    rfc.mode != chan->mode)
diff -ruNp linux-3.13.11/net/bluetooth/l2cap_sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/l2cap_sock.c
--- linux-3.13.11/net/bluetooth/l2cap_sock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/l2cap_sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -545,7 +545,8 @@ static int l2cap_sock_setsockopt_old(str
 	struct sock *sk = sock->sk;
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct l2cap_options opts;
-	int len, err = 0;
+	int err = 0;
+	size_t len = optlen;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -567,7 +568,7 @@ static int l2cap_sock_setsockopt_old(str
 		opts.max_tx   = chan->max_tx;
 		opts.txwin_size = chan->tx_win;
 
-		len = min_t(unsigned int, sizeof(opts), optlen);
+		len = min(sizeof(opts), len);
 		if (copy_from_user((char *) &opts, optval, len)) {
 			err = -EFAULT;
 			break;
@@ -647,7 +648,8 @@ static int l2cap_sock_setsockopt(struct
 	struct bt_security sec;
 	struct bt_power pwr;
 	struct l2cap_conn *conn;
-	int len, err = 0;
+	int err = 0;
+	size_t len = optlen;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -670,7 +672,7 @@ static int l2cap_sock_setsockopt(struct
 
 		sec.level = BT_SECURITY_LOW;
 
-		len = min_t(unsigned int, sizeof(sec), optlen);
+		len = min(sizeof(sec), len);
 		if (copy_from_user((char *) &sec, optval, len)) {
 			err = -EFAULT;
 			break;
@@ -770,7 +772,7 @@ static int l2cap_sock_setsockopt(struct
 
 		pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
 
-		len = min_t(unsigned int, sizeof(pwr), optlen);
+		len = min(sizeof(pwr), len);
 		if (copy_from_user((char *) &pwr, optval, len)) {
 			err = -EFAULT;
 			break;
diff -ruNp linux-3.13.11/net/bluetooth/rfcomm/sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/rfcomm/sock.c
--- linux-3.13.11/net/bluetooth/rfcomm/sock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/rfcomm/sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct
 	struct sock *sk = sock->sk;
 	struct bt_security sec;
 	int err = 0;
-	size_t len;
+	size_t len = optlen;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct
 
 		sec.level = BT_SECURITY_LOW;
 
-		len = min_t(unsigned int, sizeof(sec), optlen);
+		len = min(sizeof(sec), len);
 		if (copy_from_user((char *) &sec, optval, len)) {
 			err = -EFAULT;
 			break;
diff -ruNp linux-3.13.11/net/bluetooth/rfcomm/tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/rfcomm/tty.c
--- linux-3.13.11/net/bluetooth/rfcomm/tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bluetooth/rfcomm/tty.c	2014-07-09 12:00:15.000000000 +0200
@@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_st
 	BT_DBG("tty %p id %d", tty, tty->index);
 
 	BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
-	       dev->channel, dev->port.count);
+	       dev->channel, atomic_read(&dev->port.count));
 
 	err = tty_port_open(&dev->port, tty, filp);
 	if (err)
@@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_
 	struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
 
 	BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
-						dev->port.count);
+						atomic_read(&dev->port.count));
 
 	tty_port_close(&dev->port, tty, filp);
 }
diff -ruNp linux-3.13.11/net/bridge/br_multicast.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bridge/br_multicast.c
--- linux-3.13.11/net/bridge/br_multicast.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bridge/br_multicast.c	2014-07-09 12:00:15.000000000 +0200
@@ -447,7 +447,7 @@ static struct sk_buff *br_ip6_multicast_
 	ip6h->hop_limit = 1;
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
 	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-			       &ip6h->saddr)) {
+			       &ip6h->saddr, NULL)) {
 		kfree_skb(skb);
 		return NULL;
 	}
diff -ruNp linux-3.13.11/net/bridge/netfilter/ebtables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bridge/netfilter/ebtables.c
--- linux-3.13.11/net/bridge/netfilter/ebtables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/bridge/netfilter/ebtables.c	2014-07-09 12:00:15.000000000 +0200
@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *s
 			tmp.valid_hooks = t->table->valid_hooks;
 		}
 		mutex_unlock(&ebt_mutex);
-		if (copy_to_user(user, &tmp, *len) != 0){
+		if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
 			BUGPRINT("c2u Didn't work\n");
 			ret = -EFAULT;
 			break;
@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct
 			goto out;
 		tmp.valid_hooks = t->valid_hooks;
 
-		if (copy_to_user(user, &tmp, *len) != 0) {
+		if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
 			ret = -EFAULT;
 			break;
 		}
@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct
 		tmp.entries_size = t->table->entries_size;
 		tmp.valid_hooks = t->table->valid_hooks;
 
-		if (copy_to_user(user, &tmp, *len) != 0) {
+		if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
 			ret = -EFAULT;
 			break;
 		}
diff -ruNp linux-3.13.11/net/caif/cfctrl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/caif/cfctrl.c
--- linux-3.13.11/net/caif/cfctrl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/caif/cfctrl.c	2014-07-09 12:00:15.000000000 +0200
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/pkt_sched.h>
+#include <linux/sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfctrl.h>
@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
 	memset(&dev_info, 0, sizeof(dev_info));
 	dev_info.id = 0xff;
 	cfsrvl_init(&this->serv, 0, &dev_info, false);
-	atomic_set(&this->req_seq_no, 1);
-	atomic_set(&this->rsp_seq_no, 1);
+	atomic_set_unchecked(&this->req_seq_no, 1);
+	atomic_set_unchecked(&this->rsp_seq_no, 1);
 	this->serv.layer.receive = cfctrl_recv;
 	sprintf(this->serv.layer.name, "ctrl");
 	this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfc
 			      struct cfctrl_request_info *req)
 {
 	spin_lock_bh(&ctrl->info_list_lock);
-	atomic_inc(&ctrl->req_seq_no);
-	req->sequence_no = atomic_read(&ctrl->req_seq_no);
+	atomic_inc_unchecked(&ctrl->req_seq_no);
+	req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
 	list_add_tail(&req->list, &ctrl->list);
 	spin_unlock_bh(&ctrl->info_list_lock);
 }
@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctr
 			if (p != first)
 				pr_warn("Requests are not received in order\n");
 
-			atomic_set(&ctrl->rsp_seq_no,
+			atomic_set_unchecked(&ctrl->rsp_seq_no,
 					 p->sequence_no);
 			list_del(&p->list);
 			goto out;
diff -ruNp linux-3.13.11/net/can/af_can.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/can/af_can.c
--- linux-3.13.11/net/can/af_can.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/can/af_can.c	2014-07-09 12:00:15.000000000 +0200
@@ -863,7 +863,7 @@ static const struct net_proto_family can
 };
 
 /* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
+static struct notifier_block can_netdev_notifier = {
 	.notifier_call = can_notifier,
 };
 
diff -ruNp linux-3.13.11/net/can/gw.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/can/gw.c
--- linux-3.13.11/net/can/gw.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/can/gw.c	2014-07-09 12:00:15.000000000 +0200
@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
 		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
 
 static HLIST_HEAD(cgw_list);
-static struct notifier_block notifier;
 
 static struct kmem_cache *cgw_cache __read_mostly;
 
@@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff
 	return err;
 }
 
+static struct notifier_block notifier = {
+	.notifier_call = cgw_notifier
+};
+
 static __init int cgw_module_init(void)
 {
 	/* sanitize given module parameter */
@@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
 		return -ENOMEM;
 
 	/* set notifier */
-	notifier.notifier_call = cgw_notifier;
 	register_netdevice_notifier(&notifier);
 
 	if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
diff -ruNp linux-3.13.11/net/ceph/messenger.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ceph/messenger.c
--- linux-3.13.11/net/ceph/messenger.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ceph/messenger.c	2014-07-09 12:00:15.000000000 +0200
@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connec
 #define MAX_ADDR_STR_LEN	64	/* 54 is enough */
 
 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
-static atomic_t addr_str_seq = ATOMIC_INIT(0);
+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
 
 static struct page *zero_page;		/* used in certain error cases */
 
@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct so
 	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
 	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
 
-	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
+	i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
 	s = addr_str[i];
 
 	switch (ss->ss_family) {
diff -ruNp linux-3.13.11/net/compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/compat.c
--- linux-3.13.11/net/compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/compat.c	2014-07-09 12:00:15.000000000 +0200
@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kms
 		return -EFAULT;
 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
 		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
-	kmsg->msg_name = compat_ptr(tmp1);
-	kmsg->msg_iov = compat_ptr(tmp2);
-	kmsg->msg_control = compat_ptr(tmp3);
+	kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
+	kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
+	kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
 	return 0;
 }
 
@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *k
 
 	if (kern_msg->msg_namelen) {
 		if (mode == VERIFY_READ) {
-			int err = move_addr_to_kernel(kern_msg->msg_name,
+			int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
 						      kern_msg->msg_namelen,
 						      kern_address);
 			if (err < 0)
@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *k
 		kern_msg->msg_name = NULL;
 
 	tot_len = iov_from_user_compat_to_kern(kern_iov,
-					  (struct compat_iovec __user *)kern_msg->msg_iov,
+					  (struct compat_iovec __force_user *)kern_msg->msg_iov,
 					  kern_msg->msg_iovlen);
 	if (tot_len >= 0)
 		kern_msg->msg_iov = kern_iov;
@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *k
 
 #define CMSG_COMPAT_FIRSTHDR(msg)			\
 	(((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ?	\
-	 (struct compat_cmsghdr __user *)((msg)->msg_control) :		\
+	 (struct compat_cmsghdr __force_user *)((msg)->msg_control) :		\
 	 (struct compat_cmsghdr __user *)NULL)
 
 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
 	((ucmlen) >= sizeof(struct compat_cmsghdr) && \
 	 (ucmlen) <= (unsigned long) \
 	 ((mhdr)->msg_controllen - \
-	  ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
+	  ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
 
 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
 		struct compat_cmsghdr __user *cmsg, int cmsg_len)
 {
 	char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
-	if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
+	if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
 			msg->msg_controllen)
 		return NULL;
 	return (struct compat_cmsghdr __user *)ptr;
@@ -222,7 +222,7 @@ Efault:
 
 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
 {
-	struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
+	struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
 	struct compat_cmsghdr cmhdr;
 	struct compat_timeval ctv;
 	struct compat_timespec cts[3];
@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg,
 
 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
 {
-	struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
+	struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
 	int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
 	int fdnum = scm->fp->count;
 	struct file **fp = scm->fp->fp;
@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct so
 		return -EFAULT;
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
+	err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
 	set_fs(old_fs);
 
 	return err;
@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct so
 	len = sizeof(ktime);
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
+	err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
 	set_fs(old_fs);
 
 	if (!err) {
@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *so
 	case MCAST_JOIN_GROUP:
 	case MCAST_LEAVE_GROUP:
 	{
-		struct compat_group_req __user *gr32 = (void *)optval;
+		struct compat_group_req __user *gr32 = (void __user *)optval;
 		struct group_req __user *kgr =
 			compat_alloc_user_space(sizeof(struct group_req));
 		u32 interface;
@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *so
 	case MCAST_BLOCK_SOURCE:
 	case MCAST_UNBLOCK_SOURCE:
 	{
-		struct compat_group_source_req __user *gsr32 = (void *)optval;
+		struct compat_group_source_req __user *gsr32 = (void __user *)optval;
 		struct group_source_req __user *kgsr = compat_alloc_user_space(
 			sizeof(struct group_source_req));
 		u32 interface;
@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *so
 	}
 	case MCAST_MSFILTER:
 	{
-		struct compat_group_filter __user *gf32 = (void *)optval;
+		struct compat_group_filter __user *gf32 = (void __user *)optval;
 		struct group_filter __user *kgf;
 		u32 interface, fmode, numsrc;
 
@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *so
 	char __user *optval, int __user *optlen,
 	int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
 {
-	struct compat_group_filter __user *gf32 = (void *)optval;
+	struct compat_group_filter __user *gf32 = (void __user *)optval;
 	struct group_filter __user *kgf;
 	int __user	*koptlen;
 	u32 interface, fmode, numsrc;
@@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(in
 
 	if (call < SYS_SOCKET || call > SYS_SENDMMSG)
 		return -EINVAL;
-	if (copy_from_user(a, args, nas[call]))
+	if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
 		return -EFAULT;
 	a0 = a[0];
 	a1 = a[1];
diff -ruNp linux-3.13.11/net/core/datagram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/datagram.c
--- linux-3.13.11/net/core/datagram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/datagram.c	2014-07-09 12:00:15.000000000 +0200
@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, s
 	}
 
 	kfree_skb(skb);
-	atomic_inc(&sk->sk_drops);
+	atomic_inc_unchecked(&sk->sk_drops);
 	sk_mem_reclaim_partial(sk);
 
 	return err;
diff -ruNp linux-3.13.11/net/core/dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/dev.c
--- linux-3.13.11/net/core/dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -122,6 +122,7 @@
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
+#include <linux/vs_inet.h>
 #include <trace/events/napi.h>
 #include <trace/events/net.h>
 #include <trace/events/skb.h>
@@ -669,7 +670,8 @@ struct net_device *__dev_get_by_name(str
 	struct hlist_head *head = dev_name_hash(net, name);
 
 	hlist_for_each_entry(dev, head, name_hlist)
-		if (!strncmp(dev->name, name, IFNAMSIZ))
+		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -694,7 +696,8 @@ struct net_device *dev_get_by_name_rcu(s
 	struct hlist_head *head = dev_name_hash(net, name);
 
 	hlist_for_each_entry_rcu(dev, head, name_hlist)
-		if (!strncmp(dev->name, name, IFNAMSIZ))
+		if (!strncmp(dev->name, name, IFNAMSIZ) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -744,7 +747,8 @@ struct net_device *__dev_get_by_index(st
 	struct hlist_head *head = dev_index_hash(net, ifindex);
 
 	hlist_for_each_entry(dev, head, index_hlist)
-		if (dev->ifindex == ifindex)
+		if ((dev->ifindex == ifindex) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -762,7 +766,7 @@ EXPORT_SYMBOL(__dev_get_by_index);
  *	about locking. The caller must hold RCU lock.
  */
 
-struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+struct net_device *dev_get_by_index_real_rcu(struct net *net, int ifindex)
 {
 	struct net_device *dev;
 	struct hlist_head *head = dev_index_hash(net, ifindex);
@@ -773,6 +777,16 @@ struct net_device *dev_get_by_index_rcu(
 
 	return NULL;
 }
+EXPORT_SYMBOL(dev_get_by_index_real_rcu);
+
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
+{
+	struct net_device *dev = dev_get_by_index_real_rcu(net, ifindex);
+
+	if (nx_dev_visible(current_nx_info(), dev))
+		return dev;
+	return NULL;
+}
 EXPORT_SYMBOL(dev_get_by_index_rcu);
 
 
@@ -855,7 +869,8 @@ struct net_device *dev_getbyhwaddr_rcu(s
 
 	for_each_netdev_rcu(net, dev)
 		if (dev->type == type &&
-		    !memcmp(dev->dev_addr, ha, dev->addr_len))
+		    !memcmp(dev->dev_addr, ha, dev->addr_len) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
 
 	return NULL;
@@ -867,9 +882,11 @@ struct net_device *__dev_getfirstbyhwtyp
 	struct net_device *dev;
 
 	ASSERT_RTNL();
-	for_each_netdev(net, dev)
-		if (dev->type == type)
+	for_each_netdev(net, dev) {
+		if ((dev->type == type) &&
+		    nx_dev_visible(current_nx_info(), dev))
 			return dev;
+	}
 
 	return NULL;
 }
@@ -881,7 +898,8 @@ struct net_device *dev_getfirstbyhwtype(
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev)
-		if (dev->type == type) {
+		if ((dev->type == type) &&
+		    nx_dev_visible(current_nx_info(), dev)) {
 			dev_hold(dev);
 			ret = dev;
 			break;
@@ -909,7 +927,8 @@ struct net_device *dev_get_by_flags_rcu(
 
 	ret = NULL;
 	for_each_netdev_rcu(net, dev) {
-		if (((dev->flags ^ if_flags) & mask) == 0) {
+		if ((((dev->flags ^ if_flags) & mask) == 0) &&
+			nx_dev_visible(current_nx_info(), dev)) {
 			ret = dev;
 			break;
 		}
@@ -987,6 +1006,8 @@ static int __dev_alloc_name(struct net *
 				continue;
 			if (i < 0 || i >= max_netdevices)
 				continue;
+			if (!nx_dev_visible(current_nx_info(), d))
+				continue;
 
 			/*  avoid cases where sscanf is not exact inverse of printf */
 			snprintf(buf, IFNAMSIZ, name, i);
@@ -1684,14 +1705,14 @@ int dev_forward_skb(struct net_device *d
 {
 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-			atomic_long_inc(&dev->rx_dropped);
+			atomic_long_inc_unchecked(&dev->rx_dropped);
 			kfree_skb(skb);
 			return NET_RX_DROP;
 		}
 	}
 
 	if (unlikely(!is_skb_forwardable(dev, skb))) {
-		atomic_long_inc(&dev->rx_dropped);
+		atomic_long_inc_unchecked(&dev->rx_dropped);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -2434,7 +2455,7 @@ static int illegal_highdma(const struct
 
 struct dev_gso_cb {
 	void (*destructor)(struct sk_buff *skb);
-};
+} __no_const;
 
 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
 
@@ -3224,7 +3245,7 @@ enqueue:
 
 	local_irq_restore(flags);
 
-	atomic_long_inc(&skb->dev->rx_dropped);
+	atomic_long_inc_unchecked(&skb->dev->rx_dropped);
 	kfree_skb(skb);
 	return NET_RX_DROP;
 }
@@ -3296,7 +3317,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(void)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 
@@ -3630,7 +3651,7 @@ ncls:
 			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 	} else {
 drop:
-		atomic_long_inc(&skb->dev->rx_dropped);
+		atomic_long_inc_unchecked(&skb->dev->rx_dropped);
 		kfree_skb(skb);
 		/* Jamal, now you will not able to escape explaining
 		 * me how you were going to use this. :-)
@@ -4290,7 +4311,7 @@ void netif_napi_del(struct napi_struct *
 }
 EXPORT_SYMBOL(netif_napi_del);
 
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(void)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
 	unsigned long time_limit = jiffies + 2;
@@ -6179,7 +6200,7 @@ struct rtnl_link_stats64 *dev_get_stats(
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
+	storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
diff -ruNp linux-3.13.11/net/core/dev_ioctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/dev_ioctl.c
--- linux-3.13.11/net/core/dev_ioctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/dev_ioctl.c	2014-07-09 12:00:15.000000000 +0200
@@ -365,9 +365,13 @@ void dev_load(struct net *net, const cha
 	if (no_module && capable(CAP_NET_ADMIN))
 		no_module = request_module("netdev-%s", name);
 	if (no_module && capable(CAP_SYS_MODULE)) {
+#ifdef CONFIG_GRKERNSEC_MODHARDEN
+		___request_module(true, "grsec_modharden_netdev", "%s", name);
+#else
 		if (!request_module("%s", name))
 			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
 				name);
+#endif
 	}
 }
 EXPORT_SYMBOL(dev_load);
diff -ruNp linux-3.13.11/net/core/filter.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/filter.c
--- linux-3.13.11/net/core/filter.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/filter.c	2014-07-09 12:00:15.000000000 +0200
@@ -126,7 +126,7 @@ unsigned int sk_run_filter(const struct
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
-	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
+	u32 mem[BPF_MEMWORDS] = {};	/* Scratch Memory Store */
 	u32 tmp;
 	int k;
 
@@ -292,10 +292,10 @@ load_b:
 			X = K;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[K];
+			A = mem[K&15];
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[K];
+			X = mem[K&15];
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -308,10 +308,10 @@ load_b:
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[K] = A;
+			mem[K&15] = A;
 			continue;
 		case BPF_S_STX:
-			mem[K] = X;
+			mem[K&15] = X;
 			continue;
 		case BPF_S_ANC_PROTOCOL:
 			A = ntohs(skb->protocol);
@@ -355,6 +355,10 @@ load_b:
 
 			if (skb_is_nonlinear(skb))
 				return 0;
+
+			if (skb->len < sizeof(struct nlattr))
+				return 0;
+
 			if (A > skb->len - sizeof(struct nlattr))
 				return 0;
 
@@ -371,11 +375,15 @@ load_b:
 
 			if (skb_is_nonlinear(skb))
 				return 0;
+
+			if (skb->len < sizeof(struct nlattr))
+				return 0;
+
 			if (A > skb->len - sizeof(struct nlattr))
 				return 0;
 
 			nla = (struct nlattr *)&skb->data[A];
-			if (nla->nla_len > A - skb->len)
+			if (nla->nla_len > skb->len - A)
 				return 0;
 
 			nla = nla_find_nested(nla, X);
@@ -391,9 +399,10 @@ load_b:
 			continue;
 #endif
 		default:
-			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
+			WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
 				       fentry->code, fentry->jt,
 				       fentry->jf, fentry->k);
+			BUG();
 			return 0;
 		}
 	}
@@ -416,7 +425,7 @@ static int check_load_and_stores(struct
 	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
 	int pc, ret = 0;
 
-	BUILD_BUG_ON(BPF_MEMWORDS > 16);
+	BUILD_BUG_ON(BPF_MEMWORDS != 16);
 	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
 	if (!masks)
 		return -ENOMEM;
@@ -679,7 +688,7 @@ int sk_unattached_filter_create(struct s
 	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
-	memcpy(fp->insns, fprog->filter, fsize);
+	memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
 
 	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
diff -ruNp linux-3.13.11/net/core/flow.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/flow.c
--- linux-3.13.11/net/core/flow.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/flow.c	2014-07-09 12:00:15.000000000 +0200
@@ -61,7 +61,7 @@ struct flow_cache {
 	struct timer_list		rnd_timer;
 };
 
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
 EXPORT_SYMBOL(flow_cache_genid);
 static struct flow_cache flow_cache_global;
 static struct kmem_cache *flow_cachep __read_mostly;
@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsig
 
 static int flow_entry_valid(struct flow_cache_entry *fle)
 {
-	if (atomic_read(&flow_cache_genid) != fle->genid)
+	if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
 		return 0;
 	if (fle->object && !fle->object->ops->check(fle->object))
 		return 0;
@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const
 			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
 			fcp->hash_count++;
 		}
-	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+	} else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
 		flo = fle->object;
 		if (!flo)
 			goto ret_object;
@@ -279,7 +279,7 @@ nocache:
 	}
 	flo = resolver(net, key, family, dir, flo, ctx);
 	if (fle) {
-		fle->genid = atomic_read(&flow_cache_genid);
+		fle->genid = atomic_read_unchecked(&flow_cache_genid);
 		if (!IS_ERR(flo))
 			fle->object = flo;
 		else
diff -ruNp linux-3.13.11/net/core/iovec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/iovec.c
--- linux-3.13.11/net/core/iovec.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/iovec.c	2014-07-09 12:00:15.000000000 +0200
@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struc
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {
 			void __user *namep;
-			namep = (void __user __force *) m->msg_name;
+			namep = (void __force_user *) m->msg_name;
 			err = move_addr_to_kernel(namep, m->msg_namelen,
 						  address);
 			if (err < 0)
@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struc
 	}
 
 	size = m->msg_iovlen * sizeof(struct iovec);
-	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
+	if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
 		return -EFAULT;
 
 	m->msg_iov = iov;
diff -ruNp linux-3.13.11/net/core/neighbour.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/neighbour.c
--- linux-3.13.11/net/core/neighbour.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/neighbour.c	2014-07-09 12:00:15.000000000 +0200
@@ -2775,7 +2775,7 @@ static int proc_unres_qlen(struct ctl_ta
 			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int size, ret;
-	struct ctl_table tmp = *ctl;
+	ctl_table_no_const tmp = *ctl;
 
 	tmp.extra1 = &zero;
 	tmp.extra2 = &unres_qlen_max;
@@ -2983,11 +2983,12 @@ int neigh_sysctl_register(struct net_dev
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
+		struct neigh_table *ntable = container_of(p, struct neigh_table, parms);
 		dev_name_source = "default";
-		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &ntable->gc_interval;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &ntable->gc_thresh1;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &ntable->gc_thresh2;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &ntable->gc_thresh3;
 	}
 
 
diff -ruNp linux-3.13.11/net/core/net-procfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net-procfs.c
--- linux-3.13.11/net/core/net-procfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net-procfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -1,6 +1,7 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/vs_inet.h>
 #include <net/wext.h>
 
 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
@@ -77,8 +78,13 @@ static void dev_seq_stop(struct seq_file
 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
 	struct rtnl_link_stats64 temp;
-	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
+	const struct rtnl_link_stats64 *stats;
 
+	/* device visible inside network context? */
+	if (!nx_dev_visible(current_nx_info(), dev))
+		return;
+
+	stats = dev_get_stats(dev, &temp);
 	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
 		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
 		   dev->name, stats->rx_bytes, stats->rx_packets,
@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_fil
 		else
 			seq_printf(seq, "%04x", ntohs(pt->type));
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+		seq_printf(seq, " %-8s %pf\n",
+			   pt->dev ? pt->dev->name : "", NULL);
+#else
 		seq_printf(seq, " %-8s %pf\n",
 			   pt->dev ? pt->dev->name : "", pt->func);
+#endif
 	}
 
 	return 0;
diff -ruNp linux-3.13.11/net/core/net-sysfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net-sysfs.c
--- linux-3.13.11/net/core/net-sysfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net-sysfs.c	2014-07-09 12:00:15.000000000 +0200
@@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct
 }
 EXPORT_SYMBOL(netdev_class_remove_file_ns);
 
-int netdev_kobject_init(void)
+int __init netdev_kobject_init(void)
 {
 	kobj_ns_type_register(&net_ns_type_operations);
 	return class_register(&net_class);
diff -ruNp linux-3.13.11/net/core/net_namespace.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net_namespace.c
--- linux-3.13.11/net/core/net_namespace.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/net_namespace.c	2014-07-09 12:00:15.000000000 +0200
@@ -443,7 +443,7 @@ static int __register_pernet_operations(
 	int error;
 	LIST_HEAD(net_exit_list);
 
-	list_add_tail(&ops->list, list);
+	pax_list_add_tail((struct list_head *)&ops->list, list);
 	if (ops->init || (ops->id && ops->size)) {
 		for_each_net(net) {
 			error = ops_init(ops, net);
@@ -456,7 +456,7 @@ static int __register_pernet_operations(
 
 out_undo:
 	/* If I have an error cleanup all namespaces I initialized */
-	list_del(&ops->list);
+	pax_list_del((struct list_head *)&ops->list);
 	ops_exit_list(ops, &net_exit_list);
 	ops_free_list(ops, &net_exit_list);
 	return error;
@@ -467,7 +467,7 @@ static void __unregister_pernet_operatio
 	struct net *net;
 	LIST_HEAD(net_exit_list);
 
-	list_del(&ops->list);
+	pax_list_del((struct list_head *)&ops->list);
 	for_each_net(net)
 		list_add_tail(&net->exit_list, &net_exit_list);
 	ops_exit_list(ops, &net_exit_list);
@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet
 	mutex_lock(&net_mutex);
 	error = register_pernet_operations(&pernet_list, ops);
 	if (!error && (first_device == &pernet_list))
-		first_device = &ops->list;
+		first_device = (struct list_head *)&ops->list;
 	mutex_unlock(&net_mutex);
 	return error;
 }
diff -ruNp linux-3.13.11/net/core/netpoll.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/netpoll.c
--- linux-3.13.11/net/core/netpoll.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/netpoll.c	2014-07-09 12:00:15.000000000 +0200
@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np
 	struct udphdr *udph;
 	struct iphdr *iph;
 	struct ethhdr *eth;
-	static atomic_t ip_ident;
+	static atomic_unchecked_t ip_ident;
 	struct ipv6hdr *ip6h;
 
 	udp_len = len + sizeof(*udph);
@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np
 		put_unaligned(0x45, (unsigned char *)iph);
 		iph->tos      = 0;
 		put_unaligned(htons(ip_len), &(iph->tot_len));
-		iph->id       = htons(atomic_inc_return(&ip_ident));
+		iph->id       = htons(atomic_inc_return_unchecked(&ip_ident));
 		iph->frag_off = 0;
 		iph->ttl      = 64;
 		iph->protocol = IPPROTO_UDP;
diff -ruNp linux-3.13.11/net/core/rtnetlink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/rtnetlink.c
--- linux-3.13.11/net/core/rtnetlink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/rtnetlink.c	2014-07-09 12:00:15.000000000 +0200
@@ -58,7 +58,7 @@ struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
 	rtnl_calcit_func 	calcit;
-};
+} __no_const;
 
 static DEFINE_MUTEX(rtnl_mutex);
 
@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_lin
 	if (rtnl_link_ops_get(ops->kind))
 		return -EEXIST;
 
-	if (!ops->dellink)
-		ops->dellink = unregister_netdevice_queue;
+	if (!ops->dellink) {
+		pax_open_kernel();
+		*(void **)&ops->dellink = unregister_netdevice_queue;
+		pax_close_kernel();
+	}
 
-	list_add_tail(&ops->list, &link_ops);
+	pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_register);
@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_
 	for_each_net(net) {
 		__rtnl_kill_links(net, ops);
 	}
-	list_del(&ops->list);
+	pax_list_del((struct list_head *)&ops->list);
 }
 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
 
@@ -1090,6 +1093,8 @@ static int rtnl_dump_ifinfo(struct sk_bu
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
+			if (!nx_dev_visible(skb->sk->sk_nx_info, dev))
+				continue;
 			if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
 					     NETLINK_CB(cb->skb).portid,
 					     cb->nlh->nlmsg_seq, 0,
@@ -1992,6 +1997,9 @@ void rtmsg_ifinfo(int type, struct net_d
 	int err = -ENOBUFS;
 	size_t if_info_size;
 
+	if (!nx_dev_visible(current_nx_info(), dev))
+		return;
+
 	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
 	if (skb == NULL)
 		goto errout;
diff -ruNp linux-3.13.11/net/core/scm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/scm.c
--- linux-3.13.11/net/core/scm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/scm.c	2014-07-09 12:00:15.000000000 +0200
@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
 {
 	struct cmsghdr __user *cm
-		= (__force struct cmsghdr __user *)msg->msg_control;
+		= (struct cmsghdr __force_user *)msg->msg_control;
 	struct cmsghdr cmhdr;
 	int cmlen = CMSG_LEN(len);
 	int err;
@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int le
 	err = -EFAULT;
 	if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
 		goto out;
-	if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
+	if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
 		goto out;
 	cmlen = CMSG_SPACE(len);
 	if (msg->msg_controllen < cmlen)
@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
 {
 	struct cmsghdr __user *cm
-		= (__force struct cmsghdr __user*)msg->msg_control;
+		= (struct cmsghdr __force_user *)msg->msg_control;
 
 	int fdmax = 0;
 	int fdnum = scm->fp->count;
@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg,
 	if (fdnum < fdmax)
 		fdmax = fdnum;
 
-	for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
+	for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
 	     i++, cmfptr++)
 	{
 		struct socket *sock;
diff -ruNp linux-3.13.11/net/core/skbuff.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/skbuff.c
--- linux-3.13.11/net/core/skbuff.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/skbuff.c	2014-07-09 12:00:15.000000000 +0200
@@ -2006,7 +2006,7 @@ EXPORT_SYMBOL(__skb_checksum);
 __wsum skb_checksum(const struct sk_buff *skb, int offset,
 		    int len, __wsum csum)
 {
-	const struct skb_checksum_ops ops = {
+	static const struct skb_checksum_ops ops = {
 		.update  = csum_partial_ext,
 		.combine = csum_block_add_ext,
 	};
@@ -3124,13 +3124,15 @@ void __init skb_init(void)
 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
 					      sizeof(struct sk_buff),
 					      0,
-					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+					      SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+					      SLAB_NO_SANITIZE,
 					      NULL);
 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
 						(2*sizeof(struct sk_buff)) +
 						sizeof(atomic_t),
 						0,
-						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+						SLAB_NO_SANITIZE,
 						NULL);
 }
 
diff -ruNp linux-3.13.11/net/core/sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sock.c
--- linux-3.13.11/net/core/sock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -133,6 +133,10 @@
 #include <net/netprio_cgroup.h>
 
 #include <linux/filter.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
 
 #include <trace/events/sock.h>
 
@@ -393,7 +397,7 @@ int sock_queue_rcv_skb(struct sock *sk,
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
 	}
@@ -403,7 +407,7 @@ int sock_queue_rcv_skb(struct sock *sk,
 		return err;
 
 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		return -ENOBUFS;
 	}
 
@@ -423,7 +427,7 @@ int sock_queue_rcv_skb(struct sock *sk,
 	skb_dst_force(skb);
 
 	spin_lock_irqsave(&list->lock, flags);
-	skb->dropcount = atomic_read(&sk->sk_drops);
+	skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
 	__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
 
@@ -443,7 +447,7 @@ int sk_receive_skb(struct sock *sk, stru
 	skb->dev = NULL;
 
 	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		goto discard_and_relse;
 	}
 	if (nested)
@@ -461,7 +465,7 @@ int sk_receive_skb(struct sock *sk, stru
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		goto discard_and_relse;
 	}
 
@@ -950,12 +954,12 @@ int sock_getsockopt(struct socket *sock,
 		struct timeval tm;
 	} v;
 
-	int lv = sizeof(int);
-	int len;
+	unsigned int lv = sizeof(int);
+	unsigned int len;
 
 	if (get_user(len, optlen))
 		return -EFAULT;
-	if (len < 0)
+	if (len > INT_MAX)
 		return -EINVAL;
 
 	memset(&v, 0, sizeof(v));
@@ -1107,11 +1111,11 @@ int sock_getsockopt(struct socket *sock,
 
 	case SO_PEERNAME:
 	{
-		char address[128];
+		char address[_K_SS_MAXSIZE];
 
 		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
 			return -ENOTCONN;
-		if (lv < len)
+		if (lv < len || sizeof address < len)
 			return -EINVAL;
 		if (copy_to_user(optval, address, len))
 			return -EFAULT;
@@ -1188,7 +1192,7 @@ int sock_getsockopt(struct socket *sock,
 
 	if (len > lv)
 		len = lv;
-	if (copy_to_user(optval, &v, len))
+	if (len > sizeof(v) || copy_to_user(optval, &v, len))
 		return -EFAULT;
 lenout:
 	if (put_user(len, optlen))
@@ -1279,6 +1283,8 @@ static struct sock *sk_prot_alloc(struct
 			goto out_free_sec;
 		sk_tx_queue_clear(sk);
 	}
+		sock_vx_init(sk);
+		sock_nx_init(sk);
 
 	return sk;
 
@@ -1387,6 +1393,11 @@ static void __sk_free(struct sock *sk)
 		put_cred(sk->sk_peer_cred);
 	put_pid(sk->sk_peer_pid);
 	put_net(sock_net(sk));
+	vx_sock_dec(sk);
+	clr_vx_info(&sk->sk_vx_info);
+	sk->sk_xid = -1;
+	clr_nx_info(&sk->sk_nx_info);
+	sk->sk_nid = -1;
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
@@ -1447,6 +1458,8 @@ struct sock *sk_clone_lock(const struct
 
 		/* SANITY */
 		get_net(sock_net(newsk));
+		sock_vx_init(newsk);
+		sock_nx_init(newsk);
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
 		bh_lock_sock(newsk);
@@ -1503,6 +1516,12 @@ struct sock *sk_clone_lock(const struct
 		smp_wmb();
 		atomic_set(&newsk->sk_refcnt, 2);
 
+		set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
+		newsk->sk_xid = sk->sk_xid;
+		vx_sock_inc(newsk);
+		set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
+		newsk->sk_nid = sk->sk_nid;
+
 		/*
 		 * Increment the counter in the same struct proto as the master
 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
@@ -2340,6 +2359,12 @@ void sock_init_data(struct socket *sock,
 
 	sk->sk_stamp = ktime_set(-1L, 0);
 
+	set_vx_info(&sk->sk_vx_info, current_vx_info());
+	sk->sk_xid = vx_current_xid();
+	vx_sock_inc(sk);
+	set_nx_info(&sk->sk_nx_info, current_nx_info());
+	sk->sk_nid = nx_current_nid();
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	sk->sk_napi_id		=	0;
 	sk->sk_ll_usec		=	sysctl_net_busy_read;
@@ -2353,7 +2378,7 @@ void sock_init_data(struct socket *sock,
 	 */
 	smp_wmb();
 	atomic_set(&sk->sk_refcnt, 1);
-	atomic_set(&sk->sk_drops, 0);
+	atomic_set_unchecked(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
 
@@ -2481,6 +2506,7 @@ void sock_enable_timestamp(struct sock *
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
 		       int level, int type)
 {
+	struct sock_extended_err ee;
 	struct sock_exterr_skb *serr;
 	struct sk_buff *skb, *skb2;
 	int copied, err;
@@ -2502,7 +2528,8 @@ int sock_recv_errqueue(struct sock *sk,
 	sock_recv_timestamp(msg, sk, skb);
 
 	serr = SKB_EXT_ERR(skb);
-	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+	ee = serr->ee;
+	put_cmsg(msg, level, type, sizeof ee, &ee);
 
 	msg->msg_flags |= MSG_ERRQUEUE;
 	err = copied;
diff -ruNp linux-3.13.11/net/core/sock_diag.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sock_diag.c
--- linux-3.13.11/net/core/sock_diag.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sock_diag.c	2014-07-09 12:00:15.000000000 +0200
@@ -9,26 +9,33 @@
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 
 int sock_diag_check_cookie(void *sk, __u32 *cookie)
 {
+#ifndef CONFIG_GRKERNSEC_HIDESYM
 	if ((cookie[0] != INET_DIAG_NOCOOKIE ||
 	     cookie[1] != INET_DIAG_NOCOOKIE) &&
 	    ((u32)(unsigned long)sk != cookie[0] ||
 	     (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
 		return -ESTALE;
 	else
+#endif
 		return 0;
 }
 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
 
 void sock_diag_save_cookie(void *sk, __u32 *cookie)
 {
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	cookie[0] = 0;
+	cookie[1] = 0;
+#else
 	cookie[0] = (u32)(unsigned long)sk;
 	cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
+#endif
 }
 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
 
@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock
 	mutex_lock(&sock_diag_table_mutex);
 	if (sock_diag_handlers[hndl->family])
 		err = -EBUSY;
-	else
+	else {
+		pax_open_kernel();
 		sock_diag_handlers[hndl->family] = hndl;
+		pax_close_kernel();
+	}
 	mutex_unlock(&sock_diag_table_mutex);
 
 	return err;
@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct s
 
 	mutex_lock(&sock_diag_table_mutex);
 	BUG_ON(sock_diag_handlers[family] != hnld);
+	pax_open_kernel();
 	sock_diag_handlers[family] = NULL;
+	pax_close_kernel();
 	mutex_unlock(&sock_diag_table_mutex);
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
diff -ruNp linux-3.13.11/net/core/sysctl_net_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sysctl_net_core.c
--- linux-3.13.11/net/core/sysctl_net_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/core/sysctl_net_core.c	2014-07-09 12:00:15.000000000 +0200
@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct c
 {
 	unsigned int orig_size, size;
 	int ret, i;
-	struct ctl_table tmp = {
+	ctl_table_no_const tmp = {
 		.data = &size,
 		.maxlen = sizeof(size),
 		.mode = table->mode
@@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_
 			     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	char id[IFNAMSIZ];
-	struct ctl_table tbl = {
+	ctl_table_no_const tbl = {
 		.data = id,
 		.maxlen = IFNAMSIZ,
 	};
@@ -378,13 +378,12 @@ static struct ctl_table netns_core_table
 
 static __net_init int sysctl_core_net_init(struct net *net)
 {
-	struct ctl_table *tbl;
+	ctl_table_no_const *tbl = NULL;
 
 	net->core.sysctl_somaxconn = SOMAXCONN;
 
-	tbl = netns_core_table;
 	if (!net_eq(net, &init_net)) {
-		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
+		tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
 		if (tbl == NULL)
 			goto err_dup;
 
@@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_in
 		if (net->user_ns != &init_user_ns) {
 			tbl[0].procname = NULL;
 		}
-	}
-
-	net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
+		net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
+	} else
+		net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
 	if (net->core.sysctl_hdr == NULL)
 		goto err_reg;
 
 	return 0;
 
 err_reg:
-	if (tbl != netns_core_table)
-		kfree(tbl);
+	kfree(tbl);
 err_dup:
 	return -ENOMEM;
 }
@@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_e
 	kfree(tbl);
 }
 
-static __net_initdata struct pernet_operations sysctl_core_ops = {
+static __net_initconst struct pernet_operations sysctl_core_ops = {
 	.init = sysctl_core_net_init,
 	.exit = sysctl_core_net_exit,
 };
diff -ruNp linux-3.13.11/net/decnet/af_decnet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/af_decnet.c
--- linux-3.13.11/net/decnet/af_decnet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/af_decnet.c	2014-07-09 12:00:15.000000000 +0200
@@ -465,6 +465,7 @@ static struct proto dn_proto = {
 	.sysctl_rmem		= sysctl_decnet_rmem,
 	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
 	.obj_size		= sizeof(struct dn_sock),
+	.slab_flags		= SLAB_USERCOPY,
 };
 
 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
diff -ruNp linux-3.13.11/net/decnet/dn_dev.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/dn_dev.c
--- linux-3.13.11/net/decnet/dn_dev.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/dn_dev.c	2014-07-09 12:00:15.000000000 +0200
@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
 		.extra1 = &min_t3,
 		.extra2 = &max_t3
 	},
-	{0}
+	{ }
 	},
 };
 
diff -ruNp linux-3.13.11/net/decnet/sysctl_net_decnet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/sysctl_net_decnet.c
--- linux-3.13.11/net/decnet/sysctl_net_decnet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/decnet/sysctl_net_decnet.c	2014-07-09 12:00:15.000000000 +0200
@@ -174,7 +174,7 @@ static int dn_node_address_handler(struc
 
 	if (len > *lenp) len = *lenp;
 
-	if (copy_to_user(buffer, addr, len))
+	if (len > sizeof addr || copy_to_user(buffer, addr, len))
 		return -EFAULT;
 
 	*lenp = len;
@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl
 
 	if (len > *lenp) len = *lenp;
 
-	if (copy_to_user(buffer, devname, len))
+	if (len > sizeof devname || copy_to_user(buffer, devname, len))
 		return -EFAULT;
 
 	*lenp = len;
diff -ruNp linux-3.13.11/net/ieee802154/dgram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ieee802154/dgram.c
--- linux-3.13.11/net/ieee802154/dgram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ieee802154/dgram.c	2014-07-09 12:00:15.000000000 +0200
@@ -315,8 +315,9 @@ static int dgram_recvmsg(struct kiocb *i
 	if (saddr) {
 		saddr->family = AF_IEEE802154;
 		saddr->addr = mac_cb(skb)->sa;
-		*addr_len = sizeof(*saddr);
 	}
+	if (addr_len)
+		*addr_len = sizeof(*saddr);
 
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
diff -ruNp linux-3.13.11/net/ipv4/af_inet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/af_inet.c
--- linux-3.13.11/net/ipv4/af_inet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/af_inet.c	2014-07-09 12:00:15.000000000 +0200
@@ -118,6 +118,7 @@
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
+#include <linux/vs_limit.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -309,10 +310,13 @@ lookup_protocol:
 	}
 
 	err = -EPERM;
+	if ((protocol == IPPROTO_ICMP) &&
+		nx_capable(CAP_NET_RAW, NXC_RAW_ICMP))
+		goto override;
 	if (sock->type == SOCK_RAW && !kern &&
 	    !ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out_rcu_unlock;
-
+override:
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
 	answer_no_check = answer->no_check;
@@ -433,6 +437,7 @@ int inet_bind(struct socket *sock, struc
 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_sock *inet = inet_sk(sk);
+	struct nx_v4_sock_addr nsa;
 	struct net *net = sock_net(sk);
 	unsigned short snum;
 	int chk_addr_ret;
@@ -457,7 +462,11 @@ int inet_bind(struct socket *sock, struc
 			goto out;
 	}
 
-	chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+	err = v4_map_sock_addr(inet, addr, &nsa);
+	if (err)
+		goto out;
+
+	chk_addr_ret = inet_addr_type(net, nsa.saddr);
 
 	/* Not specified by any standard per-se, however it breaks too
 	 * many applications when removed.  It is unfortunate since
@@ -469,7 +478,7 @@ int inet_bind(struct socket *sock, struc
 	err = -EADDRNOTAVAIL;
 	if (!sysctl_ip_nonlocal_bind &&
 	    !(inet->freebind || inet->transparent) &&
-	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
+	    nsa.saddr != htonl(INADDR_ANY) &&
 	    chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST &&
 	    chk_addr_ret != RTN_BROADCAST)
@@ -495,7 +504,7 @@ int inet_bind(struct socket *sock, struc
 	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
 		goto out_release_sock;
 
-	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
+	v4_set_sock_addr(inet, &nsa);
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
 
@@ -714,11 +723,13 @@ int inet_getname(struct socket *sock, st
 		     peer == 1))
 			return -ENOTCONN;
 		sin->sin_port = inet->inet_dport;
-		sin->sin_addr.s_addr = inet->inet_daddr;
+		sin->sin_addr.s_addr =
+			nx_map_sock_lback(sk->sk_nx_info, inet->inet_daddr);
 	} else {
 		__be32 addr = inet->inet_rcv_saddr;
 		if (!addr)
 			addr = inet->inet_saddr;
+		addr = nx_map_sock_lback(sk->sk_nx_info, addr);
 		sin->sin_port = inet->inet_sport;
 		sin->sin_addr.s_addr = addr;
 	}
@@ -1686,13 +1697,9 @@ static int __init inet_init(void)
 
 	BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
 
-	sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
-	if (!sysctl_local_reserved_ports)
-		goto out;
-
 	rc = proto_register(&tcp_prot, 1);
 	if (rc)
-		goto out_free_reserved_ports;
+		goto out;
 
 	rc = proto_register(&udp_prot, 1);
 	if (rc)
@@ -1799,8 +1806,6 @@ out_unregister_udp_proto:
 	proto_unregister(&udp_prot);
 out_unregister_tcp_proto:
 	proto_unregister(&tcp_prot);
-out_free_reserved_ports:
-	kfree(sysctl_local_reserved_ports);
 	goto out;
 }
 
diff -ruNp linux-3.13.11/net/ipv4/arp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/arp.c
--- linux-3.13.11/net/ipv4/arp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/arp.c	2014-07-09 12:00:15.000000000 +0200
@@ -1336,6 +1336,7 @@ static void arp_format_neigh_entry(struc
 	struct net_device *dev = n->dev;
 	int hatype = dev->type;
 
+	/* FIXME: check for network context */
 	read_lock(&n->lock);
 	/* Convert hardware address to XX:XX:XX:XX ... form. */
 #if IS_ENABLED(CONFIG_AX25)
@@ -1367,6 +1368,7 @@ static void arp_format_pneigh_entry(stru
 	int hatype = dev ? dev->type : 0;
 	char tbuf[16];
 
+	/* FIXME: check for network context */
 	sprintf(tbuf, "%pI4", n->key);
 	seq_printf(seq, "%-16s 0x%-10x0x%-10x%s     *        %s\n",
 		   tbuf, hatype, ATF_PUBL | ATF_PERM, "00:00:00:00:00:00",
diff -ruNp linux-3.13.11/net/ipv4/devinet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/devinet.c
--- linux-3.13.11/net/ipv4/devinet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/devinet.c	2014-07-09 12:00:15.000000000 +0200
@@ -527,6 +527,7 @@ struct in_device *inetdev_by_index(struc
 }
 EXPORT_SYMBOL(inetdev_by_index);
 
+
 /* Called only from RTNL semaphored context. No locks. */
 
 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
@@ -947,6 +948,8 @@ int devinet_ioctl(struct net *net, unsig
 
 	in_dev = __in_dev_get_rtnl(dev);
 	if (in_dev) {
+		struct nx_info *nxi = current_nx_info();
+
 		if (tryaddrmatch) {
 			/* Matthias Andree */
 			/* compare label and address (4.4BSD style) */
@@ -955,6 +958,8 @@ int devinet_ioctl(struct net *net, unsig
 			   This is checked above. */
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
 			     ifap = &ifa->ifa_next) {
+				if (!nx_v4_ifa_visible(nxi, ifa))
+					continue;
 				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
 				    sin_orig.sin_addr.s_addr ==
 							ifa->ifa_local) {
@@ -967,9 +972,12 @@ int devinet_ioctl(struct net *net, unsig
 		   comparing just the label */
 		if (!ifa) {
 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
-			     ifap = &ifa->ifa_next)
+			     ifap = &ifa->ifa_next) {
+				if (!nx_v4_ifa_visible(nxi, ifa))
+					continue;
 				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
 					break;
+			}
 		}
 	}
 
@@ -1123,6 +1131,8 @@ static int inet_gifconf(struct net_devic
 		goto out;
 
 	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		if (!nx_v4_ifa_visible(current_nx_info(), ifa))
+			continue;
 		if (!buf) {
 			done += sizeof(ifr);
 			continue;
@@ -1524,6 +1534,7 @@ static int inet_dump_ifaddr(struct sk_bu
 	struct net_device *dev;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	struct sock *sk = skb->sk;
 	struct hlist_head *head;
 
 	s_h = cb->args[0];
@@ -1534,7 +1545,7 @@ static int inet_dump_ifaddr(struct sk_bu
 		idx = 0;
 		head = &net->dev_index_head[h];
 		rcu_read_lock();
-		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+		cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
 			  net->dev_base_seq;
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
@@ -1547,6 +1558,8 @@ static int inet_dump_ifaddr(struct sk_bu
 
 			for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
 			     ifa = ifa->ifa_next, ip_idx++) {
+			if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa))
+				continue;
 				if (ip_idx < s_ip_idx)
 					continue;
 				if (inet_fill_ifaddr(skb, ifa,
@@ -1845,7 +1858,7 @@ static int inet_netconf_dump_devconf(str
 		idx = 0;
 		head = &net->dev_index_head[h];
 		rcu_read_lock();
-		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+		cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
 			  net->dev_base_seq;
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
@@ -2070,7 +2083,7 @@ static int ipv4_doint_and_flush(struct c
 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
 	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
 
-static struct devinet_sysctl_table {
+static const struct devinet_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
 } devinet_sysctl = {
@@ -2192,7 +2205,7 @@ static __net_init int devinet_init_net(s
 	int err;
 	struct ipv4_devconf *all, *dflt;
 #ifdef CONFIG_SYSCTL
-	struct ctl_table *tbl = ctl_forward_entry;
+	ctl_table_no_const *tbl = NULL;
 	struct ctl_table_header *forw_hdr;
 #endif
 
@@ -2210,7 +2223,7 @@ static __net_init int devinet_init_net(s
 			goto err_alloc_dflt;
 
 #ifdef CONFIG_SYSCTL
-		tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
+		tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
 		if (tbl == NULL)
 			goto err_alloc_ctl;
 
@@ -2230,7 +2243,10 @@ static __net_init int devinet_init_net(s
 		goto err_reg_dflt;
 
 	err = -ENOMEM;
-	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+	if (!net_eq(net, &init_net))
+		forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
+	else
+		forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
 	if (forw_hdr == NULL)
 		goto err_reg_ctl;
 	net->ipv4.forw_hdr = forw_hdr;
@@ -2246,8 +2262,7 @@ err_reg_ctl:
 err_reg_dflt:
 	__devinet_sysctl_unregister(all);
 err_reg_all:
-	if (tbl != ctl_forward_entry)
-		kfree(tbl);
+	kfree(tbl);
 err_alloc_ctl:
 #endif
 	if (dflt != &ipv4_devconf_dflt)
diff -ruNp linux-3.13.11/net/ipv4/fib_frontend.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_frontend.c
--- linux-3.13.11/net/ipv4/fib_frontend.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_frontend.c	2014-07-09 12:00:15.000000000 +0200
@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct not
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		fib_sync_up(dev);
 #endif
-		atomic_inc(&net->ipv4.dev_addr_genid);
+		atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(dev_net(dev));
 		break;
 	case NETDEV_DOWN:
 		fib_del_ifaddr(ifa, NULL);
-		atomic_inc(&net->ipv4.dev_addr_genid);
+		atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 		if (ifa->ifa_dev->ifa_list == NULL) {
 			/* Last address was deleted from this interface.
 			 * Disable IP.
@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		fib_sync_up(dev);
 #endif
-		atomic_inc(&net->ipv4.dev_addr_genid);
+		atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(net);
 		break;
 	case NETDEV_DOWN:
diff -ruNp linux-3.13.11/net/ipv4/fib_semantics.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_semantics.c
--- linux-3.13.11/net/ipv4/fib_semantics.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_semantics.c	2014-07-09 12:00:15.000000000 +0200
@@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct n
 	nh->nh_saddr = inet_select_addr(nh->nh_dev,
 					nh->nh_gw,
 					nh->nh_parent->fib_scope);
-	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+	nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
 
 	return nh->nh_saddr;
 }
diff -ruNp linux-3.13.11/net/ipv4/fib_trie.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_trie.c
--- linux-3.13.11/net/ipv4/fib_trie.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/fib_trie.c	2014-07-09 12:00:15.000000000 +0200
@@ -2530,6 +2530,7 @@ static int fib_route_seq_show(struct seq
 
 			seq_setwidth(seq, 127);
 
+			/* FIXME: check for network context? */
 			if (fi)
 				seq_printf(seq,
 					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
diff -ruNp linux-3.13.11/net/ipv4/inet_connection_sock.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_connection_sock.c
--- linux-3.13.11/net/ipv4/inet_connection_sock.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_connection_sock.c	2014-07-09 12:00:15.000000000 +0200
@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "i
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-unsigned long *sysctl_local_reserved_ports;
+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
 EXPORT_SYMBOL(sysctl_local_reserved_ports);
 
 void inet_get_local_port_range(struct net *net, int *low, int *high)
@@ -45,6 +45,37 @@ void inet_get_local_port_range(struct ne
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+{
+	__be32	sk1_rcv_saddr = sk1->sk_rcv_saddr,
+		sk2_rcv_saddr = sk2->sk_rcv_saddr;
+
+	if (inet_v6_ipv6only(sk2))
+		return 0;
+
+	if (sk1_rcv_saddr &&
+	    sk2_rcv_saddr &&
+	    sk1_rcv_saddr == sk2_rcv_saddr)
+		return 1;
+
+	if (sk1_rcv_saddr &&
+	    !sk2_rcv_saddr &&
+	    v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND))
+		return 1;
+
+	if (sk2_rcv_saddr &&
+	    !sk1_rcv_saddr &&
+	    v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND))
+		return 1;
+
+	if (!sk1_rcv_saddr &&
+	    !sk2_rcv_saddr &&
+	    nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info))
+		return 1;
+
+	return 0;
+}
+
 int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb, bool relax)
 {
@@ -72,15 +103,13 @@ int inet_csk_bind_conflict(const struct
 			    (sk2->sk_state != TCP_TIME_WAIT &&
 			     !uid_eq(uid, sock_i_uid(sk2))))) {
 
-				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
-				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+				if (ipv4_rcv_saddr_equal(sk, sk2))
 					break;
 			}
 			if (!relax && reuse && sk2->sk_reuse &&
 			    sk2->sk_state != TCP_LISTEN) {
 
-				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
-				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+				if (ipv4_rcv_saddr_equal(sk, sk2))
 					break;
 			}
 		}
diff -ruNp linux-3.13.11/net/ipv4/inet_diag.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_diag.c
--- linux-3.13.11/net/ipv4/inet_diag.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_diag.c	2014-07-09 12:00:15.000000000 +0200
@@ -31,6 +31,8 @@
 
 #include <linux/inet.h>
 #include <linux/stddef.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet.h>
 
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
@@ -110,8 +112,10 @@ int inet_sk_diag_fill(struct sock *sk, s
 	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
 	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
 
-	r->id.idiag_src[0] = inet->inet_rcv_saddr;
-	r->id.idiag_dst[0] = inet->inet_daddr;
+	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info,
+		inet->inet_rcv_saddr);
+	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info,
+		inet->inet_daddr);
 
 	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
 		goto errout;
@@ -254,8 +258,8 @@ static int inet_twsk_diag_fill(struct in
 	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
 	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
 
-	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
-	r->id.idiag_dst[0]    = tw->tw_daddr;
+	r->id.idiag_src[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr);
+	r->id.idiag_dst[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr);
 
 	r->idiag_state	      = tw->tw_substate;
 	r->idiag_timer	      = 3;
@@ -298,12 +302,14 @@ int inet_diag_dump_one_icsk(struct inet_
 
 	err = -EINVAL;
 	if (req->sdiag_family == AF_INET) {
+		/* TODO: lback */
 		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
 				 req->id.idiag_dport, req->id.idiag_src[0],
 				 req->id.idiag_sport, req->id.idiag_if);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (req->sdiag_family == AF_INET6) {
+		/* TODO: lback */
 		sk = inet6_lookup(net, hashinfo,
 				  (struct in6_addr *)req->id.idiag_dst,
 				  req->id.idiag_dport,
@@ -501,6 +507,7 @@ int inet_diag_bc_sk(const struct nlattr
 	} else
 #endif
 	{
+			/* TODO: lback */
 		entry.saddr = &inet->inet_rcv_saddr;
 		entry.daddr = &inet->inet_daddr;
 	}
@@ -659,6 +666,7 @@ static int inet_twsk_diag_dump(struct so
 		} else
 #endif
 		{
+			/* TODO: lback */
 			entry.saddr = &tw->tw_rcv_saddr;
 			entry.daddr = &tw->tw_daddr;
 		}
@@ -741,8 +749,8 @@ static int inet_diag_fill_req(struct sk_
 	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
 	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
 
-	r->id.idiag_src[0] = ireq->ir_loc_addr;
-	r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->ir_loc_addr);
+	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->ir_rmt_addr);
 
 	r->idiag_expires = jiffies_to_msecs(tmo);
 	r->idiag_rqueue = 0;
@@ -806,6 +814,7 @@ static int inet_diag_dump_reqs(struct sk
 			    r->id.idiag_dport)
 				continue;
 
+			/* TODO: lback */
 			if (bc) {
 				inet_diag_req_addrs(sk, req, &entry);
 				entry.dport = ntohs(ireq->ir_rmt_port);
@@ -862,6 +871,8 @@ void inet_diag_dump_icsk(struct inet_has
 				if (!net_eq(sock_net(sk), net))
 					continue;
 
+				if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (num < s_num) {
 					num++;
 					continue;
@@ -934,6 +945,8 @@ skip_listen_ht:
 
 			if (!net_eq(sock_net(sk), net))
 				continue;
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (num < s_num)
 				goto next_normal;
 			state = (sk->sk_state == TCP_TIME_WAIT) ?
diff -ruNp linux-3.13.11/net/ipv4/inet_hashtables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_hashtables.c
--- linux-3.13.11/net/ipv4/inet_hashtables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inet_hashtables.c	2014-07-09 12:00:15.000000000 +0200
@@ -18,10 +18,12 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/security.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
 #include <net/secure_seq.h>
+#include <net/route.h>
 #include <net/ip.h>
 
 static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
@@ -49,6 +51,8 @@ static unsigned int inet_sk_ehashfn(cons
 	return inet_ehashfn(net, laddr, lport, faddr, fport);
 }
 
+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
+
 /*
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
@@ -181,6 +185,11 @@ static inline int compute_score(struct s
 			if (rcv_saddr != daddr)
 				return -1;
 			score += 4;
+		} else {
+			/* block non nx_info ips */
+			if (!v4_addr_in_nx_info(sk->sk_nx_info,
+				daddr, NXA_MASK_BIND))
+				return -1;
 		}
 		if (sk->sk_bound_dev_if) {
 			if (sk->sk_bound_dev_if != dif)
@@ -198,7 +207,6 @@ static inline int compute_score(struct s
  * wildcarded during the search since they can never be otherwise.
  */
 
-
 struct sock *__inet_lookup_listener(struct net *net,
 				    struct inet_hashinfo *hashinfo,
 				    const __be32 saddr, __be16 sport,
@@ -234,6 +242,7 @@ begin:
 			phash = next_pseudo_random32(phash);
 		}
 	}
+
 	/*
 	 * if the nulls value we got at the end of this lookup is
 	 * not the expected one, we must restart lookup.
@@ -554,6 +563,8 @@ ok:
 			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
 		spin_unlock(&head->lock);
 
+		gr_update_task_in_ip_table(current, inet_sk(sk));
+
 		if (tw) {
 			inet_twsk_deschedule(tw, death_row);
 			while (twrefcnt) {
diff -ruNp linux-3.13.11/net/ipv4/inetpeer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inetpeer.c
--- linux-3.13.11/net/ipv4/inetpeer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/inetpeer.c	2014-07-09 12:00:15.000000000 +0200
@@ -503,8 +503,8 @@ relookup:
 	if (p) {
 		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
-		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count,
+		atomic_set_unchecked(&p->rid, 0);
+		atomic_set_unchecked(&p->ip_id_count,
 				(daddr->family == AF_INET) ?
 					secure_ip_id(daddr->addr.a4) :
 					secure_ipv6_id(daddr->addr.a6));
diff -ruNp linux-3.13.11/net/ipv4/ip_fragment.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_fragment.c
--- linux-3.13.11/net/ipv4/ip_fragment.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_fragment.c	2014-07-09 12:00:15.000000000 +0200
@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct
 		return 0;
 
 	start = qp->rid;
-	end = atomic_inc_return(&peer->rid);
+	end = atomic_inc_return_unchecked(&peer->rid);
 	qp->rid = end;
 
 	rc = qp->q.fragments && (end - start) > max;
@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_ta
 
 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 	struct ctl_table_header *hdr;
 
-	table = ip4_frags_ns_ctl_table;
 	if (!net_eq(net, &init_net)) {
-		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
+		table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
 		if (table == NULL)
 			goto err_alloc;
 
@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_r
 		/* Don't export sysctls to unprivileged users */
 		if (net->user_ns != &init_user_ns)
 			table[0].procname = NULL;
-	}
+		hdr = register_net_sysctl(net, "net/ipv4", table);
+	} else
+		hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
 
-	hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_r
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
diff -ruNp linux-3.13.11/net/ipv4/ip_gre.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_gre.c
--- linux-3.13.11/net/ipv4/ip_gre.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_gre.c	2014-07-09 12:00:15.000000000 +0200
@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
+static struct rtnl_link_ops ipgre_link_ops;
 static int ipgre_tunnel_init(struct net_device *dev);
 
 static int ipgre_net_id __read_mostly;
@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_pol
 	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
 };
 
-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_link_ops = {
 	.kind		= "gre",
 	.maxtype	= IFLA_GRE_MAX,
 	.policy		= ipgre_policy,
@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_o
 	.fill_info	= ipgre_fill_info,
 };
 
-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ipgre_tap_ops = {
 	.kind		= "gretap",
 	.maxtype	= IFLA_GRE_MAX,
 	.policy		= ipgre_policy,
diff -ruNp linux-3.13.11/net/ipv4/ip_sockglue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_sockglue.c
--- linux-3.13.11/net/ipv4/ip_sockglue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_sockglue.c	2014-07-09 12:00:15.000000000 +0200
@@ -1172,7 +1172,8 @@ static int do_ip_getsockopt(struct sock
 		len = min_t(unsigned int, len, opt->optlen);
 		if (put_user(len, optlen))
 			return -EFAULT;
-		if (copy_to_user(optval, opt->__data, len))
+		if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
+		    copy_to_user(optval, opt->__data, len))
 			return -EFAULT;
 		return 0;
 	}
@@ -1303,7 +1304,7 @@ static int do_ip_getsockopt(struct sock
 		if (sk->sk_type != SOCK_STREAM)
 			return -ENOPROTOOPT;
 
-		msg.msg_control = optval;
+		msg.msg_control = (void __force_kernel *)optval;
 		msg.msg_controllen = len;
 		msg.msg_flags = flags;
 
diff -ruNp linux-3.13.11/net/ipv4/ip_vti.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_vti.c
--- linux-3.13.11/net/ipv4/ip_vti.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ip_vti.c	2014-07-09 12:00:15.000000000 +0200
@@ -44,7 +44,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
-static struct rtnl_link_ops vti_link_ops __read_mostly;
+static struct rtnl_link_ops vti_link_ops;
 
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
@@ -360,7 +360,7 @@ static const struct nla_policy vti_polic
 	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
 };
 
-static struct rtnl_link_ops vti_link_ops __read_mostly = {
+static struct rtnl_link_ops vti_link_ops = {
 	.kind		= "vti",
 	.maxtype	= IFLA_VTI_MAX,
 	.policy		= vti_policy,
diff -ruNp linux-3.13.11/net/ipv4/ipconfig.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ipconfig.c
--- linux-3.13.11/net/ipv4/ipconfig.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ipconfig.c	2014-07-09 12:00:15.000000000 +0200
@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsig
 
 	mm_segment_t oldfs = get_fs();
 	set_fs(get_ds());
-	res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+	res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
 	set_fs(oldfs);
 	return res;
 }
@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned
 
 	mm_segment_t oldfs = get_fs();
 	set_fs(get_ds());
-	res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
+	res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
 	set_fs(oldfs);
 	return res;
 }
@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigne
 
 	mm_segment_t oldfs = get_fs();
 	set_fs(get_ds());
-	res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
+	res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
 	set_fs(oldfs);
 	return res;
 }
diff -ruNp linux-3.13.11/net/ipv4/ipip.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ipip.c
--- linux-3.13.11/net/ipv4/ipip.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ipip.c	2014-07-09 12:00:16.000000000 +0200
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log pac
 static int ipip_net_id __read_mostly;
 
 static int ipip_tunnel_init(struct net_device *dev);
-static struct rtnl_link_ops ipip_link_ops __read_mostly;
+static struct rtnl_link_ops ipip_link_ops;
 
 static int ipip_err(struct sk_buff *skb, u32 info)
 {
@@ -409,7 +409,7 @@ static const struct nla_policy ipip_poli
 	[IFLA_IPTUN_PMTUDISC]		= { .type = NLA_U8 },
 };
 
-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
+static struct rtnl_link_ops ipip_link_ops = {
 	.kind		= "ipip",
 	.maxtype	= IFLA_IPTUN_MAX,
 	.policy		= ipip_policy,
diff -ruNp linux-3.13.11/net/ipv4/netfilter/arp_tables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter/arp_tables.c
--- linux-3.13.11/net/ipv4/netfilter/arp_tables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter/arp_tables.c	2014-07-09 12:00:16.000000000 +0200
@@ -885,14 +885,14 @@ static int compat_table_info(const struc
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct arpt_getinfo)) {
-		duprintf("length %u != %Zu\n", *len,
+	if (len != sizeof(struct arpt_getinfo)) {
+		duprintf("length %u != %Zu\n", len,
 			 sizeof(struct arpt_getinfo));
 		return -EINVAL;
 	}
@@ -929,7 +929,7 @@ static int get_info(struct net *net, voi
 		info.size = private->size;
 		strcpy(info.name, name);
 
-		if (copy_to_user(user, &info, *len) != 0)
+		if (copy_to_user(user, &info, len) != 0)
 			ret = -EFAULT;
 		else
 			ret = 0;
@@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct
 
 	switch (cmd) {
 	case ARPT_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 1);
+		ret = get_info(sock_net(sk), user, *len, 1);
 		break;
 	case ARPT_SO_GET_ENTRIES:
 		ret = compat_get_entries(sock_net(sk), user, len);
@@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *
 
 	switch (cmd) {
 	case ARPT_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 0);
+		ret = get_info(sock_net(sk), user, *len, 0);
 		break;
 
 	case ARPT_SO_GET_ENTRIES:
diff -ruNp linux-3.13.11/net/ipv4/netfilter/ip_tables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter/ip_tables.c
--- linux-3.13.11/net/ipv4/netfilter/ip_tables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter/ip_tables.c	2014-07-09 12:00:16.000000000 +0200
@@ -1073,14 +1073,14 @@ static int compat_table_info(const struc
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct ipt_getinfo)) {
-		duprintf("length %u != %zu\n", *len,
+	if (len != sizeof(struct ipt_getinfo)) {
+		duprintf("length %u != %zu\n", len,
 			 sizeof(struct ipt_getinfo));
 		return -EINVAL;
 	}
@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, voi
 		info.size = private->size;
 		strcpy(info.name, name);
 
-		if (copy_to_user(user, &info, *len) != 0)
+		if (copy_to_user(user, &info, len) != 0)
 			ret = -EFAULT;
 		else
 			ret = 0;
@@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, i
 
 	switch (cmd) {
 	case IPT_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 1);
+		ret = get_info(sock_net(sk), user, *len, 1);
 		break;
 	case IPT_SO_GET_ENTRIES:
 		ret = compat_get_entries(sock_net(sk), user, len);
@@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd,
 
 	switch (cmd) {
 	case IPT_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 0);
+		ret = get_info(sock_net(sk), user, *len, 0);
 		break;
 
 	case IPT_SO_GET_ENTRIES:
diff -ruNp linux-3.13.11/net/ipv4/netfilter.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter.c
--- linux-3.13.11/net/ipv4/netfilter.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/netfilter.c	2014-07-09 12:00:16.000000000 +0200
@@ -11,7 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
-#include <net/route.h>
+// #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/netfilter/nf_queue.h>
diff -ruNp linux-3.13.11/net/ipv4/ping.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ping.c
--- linux-3.13.11/net/ipv4/ping.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/ping.c	2014-07-09 12:00:16.000000000 +0200
@@ -55,7 +55,7 @@
 
 
 struct ping_table ping_table;
-struct pingv6_ops pingv6_ops;
+struct pingv6_ops *pingv6_ops;
 EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
@@ -251,23 +251,28 @@ int ping_init_sock(struct sock *sk)
 	struct group_info *group_info = get_current_groups();
 	int i, j, count = group_info->ngroups;
 	kgid_t low, high;
+	int ret = 0;
 
 	inet_get_ping_group_range_net(net, &low, &high);
 	if (gid_lte(low, group) && gid_lte(group, high))
-		return 0;
+		goto out_release_group;
 
 	for (i = 0; i < group_info->nblocks; i++) {
 		int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
 		for (j = 0; j < cp_count; j++) {
 			kgid_t gid = group_info->blocks[i][j];
 			if (gid_lte(low, gid) && gid_lte(gid, high))
-				return 0;
+				goto out_release_group;
 		}
 
 		count -= cp_count;
 	}
 
-	return -EACCES;
+	ret = -EACCES;
+
+out_release_group:
+	put_group_info(group_info);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(ping_init_sock);
 
@@ -334,7 +339,7 @@ static int ping_check_bind_addr(struct s
 				return -ENODEV;
 			}
 		}
-		has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+		has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
 						    scoped);
 		rcu_read_unlock();
 
@@ -542,7 +547,7 @@ void ping_err(struct sk_buff *skb, int o
 		}
 #if IS_ENABLED(CONFIG_IPV6)
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+		harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
 #endif
 	}
 
@@ -560,7 +565,7 @@ void ping_err(struct sk_buff *skb, int o
 				      info, (u8 *)icmph);
 #if IS_ENABLED(CONFIG_IPV6)
 		} else if (family == AF_INET6) {
-			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+			pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
 						   info, (u8 *)icmph);
 #endif
 		}
@@ -830,6 +835,8 @@ int ping_recvmsg(struct kiocb *iocb, str
 {
 	struct inet_sock *isk = inet_sk(sk);
 	int family = sk->sk_family;
+	struct sockaddr_in *sin;
+	struct sockaddr_in6 *sin6;
 	struct sk_buff *skb;
 	int copied, err;
 
@@ -839,12 +846,19 @@ int ping_recvmsg(struct kiocb *iocb, str
 	if (flags & MSG_OOB)
 		goto out;
 
+	if (addr_len) {
+		if (family == AF_INET)
+			*addr_len = sizeof(*sin);
+		else if (family == AF_INET6 && addr_len)
+			*addr_len = sizeof(*sin6);
+	}
+
 	if (flags & MSG_ERRQUEUE) {
 		if (family == AF_INET) {
 			return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
 		} else if (family == AF_INET6) {
-			return pingv6_ops.ipv6_recv_error(sk, msg, len,
+			return pingv6_ops->ipv6_recv_error(sk, msg, len,
 							  addr_len);
 #endif
 		}
@@ -876,7 +890,6 @@ int ping_recvmsg(struct kiocb *iocb, str
 			sin->sin_port = 0 /* skb->h.uh->source */;
 			sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-			*addr_len = sizeof(*sin);
 		}
 
 		if (isk->cmsg_flags)
@@ -899,11 +912,10 @@ int ping_recvmsg(struct kiocb *iocb, str
 			sin6->sin6_scope_id =
 				ipv6_iface_scope_id(&sin6->sin6_addr,
 						    IP6CB(skb)->iif);
-			*addr_len = sizeof(*sin6);
 		}
 
 		if (inet6_sk(sk)->rxopt.all)
-			pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
+			pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
 #endif
 	} else {
 		BUG();
@@ -1093,7 +1105,7 @@ static void ping_v4_format_sock(struct s
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops));
+		atomic_read_unchecked(&sp->sk_drops));
 }
 
 static int ping_v4_seq_show(struct seq_file *seq, void *v)
diff -ruNp linux-3.13.11/net/ipv4/raw.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/raw.c
--- linux-3.13.11/net/ipv4/raw.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/raw.c	2014-07-09 12:00:16.000000000 +0200
@@ -116,7 +116,7 @@ static struct sock *__raw_v4_lookup(stru
 
 		if (net_eq(sock_net(sk), net) && inet->inet_num == num	&&
 		    !(inet->inet_daddr && inet->inet_daddr != raddr) 	&&
-		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+		    v4_sock_addr_match(sk->sk_nx_info, inet, laddr)	&&
 		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
 			goto found; /* gotcha */
 	}
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk,
 int raw_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -397,6 +397,12 @@ static int raw_send_hdrinc(struct sock *
 		icmp_out_count(net, ((struct icmphdr *)
 			skb_transport_header(skb))->type);
 
+	err = -EPERM;
+	if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) &&
+		sk->sk_nx_info &&
+		!v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND))
+		goto error_free;
+
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 		      rt->dst.dev, dst_output);
 	if (err > 0)
@@ -585,6 +591,16 @@ static int raw_sendmsg(struct kiocb *ioc
 			goto done;
 	}
 
+	if (sk->sk_nx_info) {
+		rt = ip_v4_find_src(sock_net(sk), sk->sk_nx_info, &fl4);
+		if (IS_ERR(rt)) {
+			err = PTR_ERR(rt);
+			rt = NULL;
+			goto done;
+		}
+		ip_rt_put(rt);
+	}
+
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 	if (IS_ERR(rt)) {
@@ -661,17 +677,19 @@ static int raw_bind(struct sock *sk, str
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+	struct nx_v4_sock_addr nsa = { 0 };
 	int ret = -EINVAL;
 	int chk_addr_ret;
 
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
 		goto out;
-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+	v4_map_sock_addr(inet, addr, &nsa);
+	chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
 	ret = -EADDRNOTAVAIL;
-	if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+	if (nsa.saddr && chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
 		goto out;
-	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
+	v4_set_sock_addr(inet, &nsa);
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
 	sk_dst_reset(sk);
@@ -696,6 +714,9 @@ static int raw_recvmsg(struct kiocb *ioc
 	if (flags & MSG_OOB)
 		goto out;
 
+	if (addr_len)
+		*addr_len = sizeof(*sin);
+
 	if (flags & MSG_ERRQUEUE) {
 		err = ip_recv_error(sk, msg, len, addr_len);
 		goto out;
@@ -720,10 +741,10 @@ static int raw_recvmsg(struct kiocb *ioc
 	/* Copy the address. */
 	if (sin) {
 		sin->sin_family = AF_INET;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+		sin->sin_addr.s_addr =
+			nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr);
 		sin->sin_port = 0;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
 		ip_cmsg_recv(msg, skb);
@@ -748,16 +769,20 @@ static int raw_init(struct sock *sk)
 
 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
 {
+	struct icmp_filter filter;
+
 	if (optlen > sizeof(struct icmp_filter))
 		optlen = sizeof(struct icmp_filter);
-	if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
+	if (copy_from_user(&filter, optval, optlen))
 		return -EFAULT;
+	raw_sk(sk)->filter = filter;
 	return 0;
 }
 
 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
 {
 	int len, ret = -EFAULT;
+	struct icmp_filter filter;
 
 	if (get_user(len, optlen))
 		goto out;
@@ -767,8 +792,8 @@ static int raw_geticmpfilter(struct sock
 	if (len > sizeof(struct icmp_filter))
 		len = sizeof(struct icmp_filter);
 	ret = -EFAULT;
-	if (put_user(len, optlen) ||
-	    copy_to_user(optval, &raw_sk(sk)->filter, len))
+	filter = raw_sk(sk)->filter;
+	if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
 		goto out;
 	ret = 0;
 out:	return ret;
@@ -916,7 +941,8 @@ static struct sock *raw_get_first(struct
 	for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
 			++state->bucket) {
 		sk_for_each(sk, &state->h->ht[state->bucket])
-			if (sock_net(sk) == seq_file_net(seq))
+			if ((sock_net(sk) == seq_file_net(seq)) &&
+			    nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
 				goto found;
 	}
 	sk = NULL;
@@ -932,7 +958,8 @@ static struct sock *raw_get_next(struct
 		sk = sk_next(sk);
 try_again:
 		;
-	} while (sk && sock_net(sk) != seq_file_net(seq));
+	} while (sk && ((sock_net(sk) != seq_file_net(seq)) ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
 
 	if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
 		sk = sk_head(&state->h->ht[state->bucket]);
@@ -997,7 +1024,7 @@ static void raw_sock_seq_show(struct seq
 		0, 0L, 0,
 		from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
-		atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+		atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
 }
 
 static int raw_seq_show(struct seq_file *seq, void *v)
diff -ruNp linux-3.13.11/net/ipv4/route.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/route.c
--- linux-3.13.11/net/ipv4/route.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/route.c	2014-07-09 12:00:16.000000000 +0200
@@ -2063,7 +2063,7 @@ struct rtable *__ip_route_output_key(str
 
 
 	if (fl4->flowi4_oif) {
-		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
+		dev_out = dev_get_by_index_real_rcu(net, fl4->flowi4_oif);
 		rth = ERR_PTR(-ENODEV);
 		if (dev_out == NULL)
 			goto out;
@@ -2621,34 +2621,34 @@ static struct ctl_table ipv4_route_flush
 		.maxlen		= sizeof(int),
 		.mode		= 0200,
 		.proc_handler	= ipv4_sysctl_rtcache_flush,
+		.extra1		= &init_net,
 	},
 	{ },
 };
 
 static __net_init int sysctl_route_net_init(struct net *net)
 {
-	struct ctl_table *tbl;
+	ctl_table_no_const *tbl = NULL;
 
-	tbl = ipv4_route_flush_table;
 	if (!net_eq(net, &init_net)) {
-		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+		tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
 		if (tbl == NULL)
 			goto err_dup;
 
 		/* Don't export sysctls to unprivileged users */
 		if (net->user_ns != &init_user_ns)
 			tbl[0].procname = NULL;
-	}
-	tbl[0].extra1 = net;
+		tbl[0].extra1 = net;
+		net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
+	} else
+		net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
 
-	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
 	if (net->ipv4.route_hdr == NULL)
 		goto err_reg;
 	return 0;
 
 err_reg:
-	if (tbl != ipv4_route_flush_table)
-		kfree(tbl);
+	kfree(tbl);
 err_dup:
 	return -ENOMEM;
 }
@@ -2671,8 +2671,8 @@ static __net_initdata struct pernet_oper
 
 static __net_init int rt_genid_init(struct net *net)
 {
-	atomic_set(&net->ipv4.rt_genid, 0);
-	atomic_set(&net->fnhe_genid, 0);
+	atomic_set_unchecked(&net->ipv4.rt_genid, 0);
+	atomic_set_unchecked(&net->fnhe_genid, 0);
 	get_random_bytes(&net->ipv4.dev_addr_genid,
 			 sizeof(net->ipv4.dev_addr_genid));
 	return 0;
diff -ruNp linux-3.13.11/net/ipv4/sysctl_net_ipv4.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/sysctl_net_ipv4.c
--- linux-3.13.11/net/ipv4/sysctl_net_ipv4.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/sysctl_net_ipv4.c	2014-07-09 12:00:16.000000000 +0200
@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct
 		container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
 	int ret;
 	int range[2];
-	struct ctl_table tmp = {
+	ctl_table_no_const tmp = {
 		.data = &range,
 		.maxlen = sizeof(range),
 		.mode = table->mode,
@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct
 	int ret;
 	gid_t urange[2];
 	kgid_t low, high;
-	struct ctl_table tmp = {
+	ctl_table_no_const tmp = {
 		.data = &urange,
 		.maxlen = sizeof(urange),
 		.mode = table->mode,
@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(s
 				       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	char val[TCP_CA_NAME_MAX];
-	struct ctl_table tbl = {
+	ctl_table_no_const tbl = {
 		.data = val,
 		.maxlen = TCP_CA_NAME_MAX,
 	};
@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion
 						 void __user *buffer, size_t *lenp,
 						 loff_t *ppos)
 {
-	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
+	ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
 	int ret;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -185,7 +185,7 @@ static int proc_allowed_congestion_contr
 					   void __user *buffer, size_t *lenp,
 					   loff_t *ppos)
 {
-	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
+	ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
 	int ret;
 
 	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos)
 {
-	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+	ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
 	struct tcp_fastopen_context *ctxt;
 	int ret;
 	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
@@ -445,7 +445,7 @@ static struct ctl_table ipv4_table[] = {
 	},
 	{
 		.procname	= "ip_local_reserved_ports",
-		.data		= NULL, /* initialized in sysctl_ipv4_init */
+		.data		= sysctl_local_reserved_ports,
 		.maxlen		= 65536,
 		.mode		= 0644,
 		.proc_handler	= proc_do_large_bitmap,
@@ -827,13 +827,12 @@ static struct ctl_table ipv4_net_table[]
 
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 
-	table = ipv4_net_table;
 	if (!net_eq(net, &init_net)) {
 		int i;
 
-		table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
+		table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
 		if (table == NULL)
 			goto err_alloc;
 
@@ -856,15 +855,17 @@ static __net_init int ipv4_sysctl_init_n
 	net->ipv4.sysctl_local_ports.range[0] =  32768;
 	net->ipv4.sysctl_local_ports.range[1] =  61000;
 
-	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+	if (!net_eq(net, &init_net))
+		net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+	else
+		net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
 
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
@@ -886,16 +887,6 @@ static __net_initdata struct pernet_oper
 static __init int sysctl_ipv4_init(void)
 {
 	struct ctl_table_header *hdr;
-	struct ctl_table *i;
-
-	for (i = ipv4_table; i->procname; i++) {
-		if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-			i->data = sysctl_local_reserved_ports;
-			break;
-		}
-	}
-	if (!i->procname)
-		return -EINVAL;
 
 	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
 	if (hdr == NULL)
diff -ruNp linux-3.13.11/net/ipv4/tcp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp.c
--- linux-3.13.11/net/ipv4/tcp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp.c	2014-07-09 12:00:16.000000000 +0200
@@ -268,6 +268,7 @@
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/in.h>
 
 #include <net/icmp.h>
 #include <net/inet_common.h>
diff -ruNp linux-3.13.11/net/ipv4/tcp_input.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_input.c
--- linux-3.13.11/net/ipv4/tcp_input.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_input.c	2014-07-09 12:00:16.000000000 +0200
@@ -759,7 +759,7 @@ static void tcp_update_pacing_rate(struc
 	 * without any lock. We want to make sure compiler wont store
 	 * intermediate values in this location.
 	 */
-	ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+	ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
 						sk->sk_max_pacing_rate);
 }
 
@@ -4482,7 +4482,7 @@ static struct sk_buff *tcp_collapse_one(
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 	     struct sk_buff *head, struct sk_buff *tail,
 	     u32 start, u32 end)
 {
@@ -5559,6 +5559,7 @@ discard:
 	    tcp_paws_reject(&tp->rx_opt, 0))
 		goto discard_and_undo;
 
+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
 	if (th->syn) {
 		/* We see SYN without ACK. It is attempt of
 		 * simultaneous connect with crossed SYNs.
@@ -5609,6 +5610,7 @@ discard:
 		goto discard;
 #endif
 	}
+#endif
 	/* "fifth, if neither of the SYN or RST bits is set then
 	 * drop the segment and return."
 	 */
@@ -5655,7 +5657,7 @@ int tcp_rcv_state_process(struct sock *s
 			goto discard;
 
 		if (th->syn) {
-			if (th->fin)
+			if (th->fin || th->urg || th->psh)
 				goto discard;
 			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
 				return 1;
diff -ruNp linux-3.13.11/net/ipv4/tcp_ipv4.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_ipv4.c
--- linux-3.13.11/net/ipv4/tcp_ipv4.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_ipv4.c	2014-07-09 12:00:16.000000000 +0200
@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
 
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -1830,6 +1834,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
 	return 0;
 
 reset:
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	if (!grsec_enable_blackhole)
+#endif
 	tcp_v4_send_reset(rsk, skb);
 discard:
 	kfree_skb(skb);
@@ -1975,12 +1982,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->sacked	 = 0;
 
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
-	if (!sk)
+	if (!sk) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		ret = 1;
+#endif
 		goto no_tcp_socket;
-
+	}
 process:
-	if (sk->sk_state == TCP_TIME_WAIT)
+	if (sk->sk_state == TCP_TIME_WAIT) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		ret = 2;
+#endif
 		goto do_time_wait;
+	}
 
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
@@ -2034,6 +2048,10 @@ csum_error:
 bad_packet:
 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 	} else {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		if (!grsec_enable_blackhole || (ret == 1 &&
+		    (skb->dev->flags & IFF_LOOPBACK)))
+#endif
 		tcp_v4_send_reset(NULL, skb);
 	}
 
@@ -2227,6 +2245,12 @@ static void *listening_get_next(struct s
 		req = req->dl_next;
 		while (1) {
 			while (req) {
+				vxdprintk(VXD_CBIT(net, 6),
+					"sk,req: %p [#%d] (from %d)", req->sk,
+					(req->sk)?req->sk->sk_nid:0, nx_current_nid());
+				if (req->sk &&
+					!nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (req->rsk_ops->family == st->family) {
 					cur = req;
 					goto out;
@@ -2251,6 +2275,10 @@ get_req:
 	}
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
+		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
+			sk, sk->sk_nid, nx_current_nid());
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (!net_eq(sock_net(sk), net))
 			continue;
 		if (sk->sk_family == st->family) {
@@ -2325,6 +2353,11 @@ static void *established_get_first(struc
 
 		spin_lock_bh(lock);
 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
+			vxdprintk(VXD_CBIT(net, 6),
+				"sk,egf: %p [#%d] (from %d)",
+				sk, sk->sk_nid, nx_current_nid());
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (sk->sk_family != st->family ||
 			    !net_eq(sock_net(sk), net)) {
 				continue;
@@ -2351,6 +2384,11 @@ static void *established_get_next(struct
 	sk = sk_nulls_next(sk);
 
 	sk_nulls_for_each_from(sk, node) {
+		vxdprintk(VXD_CBIT(net, 6),
+			"sk,egn: %p [#%d] (from %d)",
+			sk, sk->sk_nid, nx_current_nid());
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
 			return sk;
 	}
@@ -2549,9 +2587,9 @@ static void get_openreq4(const struct so
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
 		i,
-		ireq->ir_loc_addr,
+		nx_map_sock_lback(current_nx_info(), ireq->ir_loc_addr),
 		ntohs(inet_sk(sk)->inet_sport),
-		ireq->ir_rmt_addr,
+		nx_map_sock_lback(current_nx_info(), ireq->ir_rmt_addr),
 		ntohs(ireq->ir_rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
@@ -2573,8 +2611,8 @@ static void get_tcp4_sock(struct sock *s
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
-	__be32 dest = inet->inet_daddr;
-	__be32 src = inet->inet_rcv_saddr;
+	__be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr);
+	__be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr);
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
 	int rx_queue;
@@ -2631,8 +2669,8 @@ static void get_timewait4_sock(const str
 	__u16 destp, srcp;
 	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
-	dest  = tw->tw_daddr;
-	src   = tw->tw_rcv_saddr;
+	dest  = nx_map_sock_lback(current_nx_info(), tw->tw_daddr);
+	src   = nx_map_sock_lback(current_nx_info(), tw->tw_rcv_saddr);
 	destp = ntohs(tw->tw_dport);
 	srcp  = ntohs(tw->tw_sport);
 
diff -ruNp linux-3.13.11/net/ipv4/tcp_minisocks.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_minisocks.c
--- linux-3.13.11/net/ipv4/tcp_minisocks.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_minisocks.c	2014-07-09 12:00:16.000000000 +0200
@@ -23,10 +23,17 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/workqueue.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_context.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);
 
@@ -290,6 +297,11 @@ void tcp_time_wait(struct sock *sk, int
 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 		tcptw->tw_ts_offset	= tp->tsoffset;
 
+		tw->tw_xid		= sk->sk_xid;
+		tw->tw_vx_info		= NULL;
+		tw->tw_nid		= sk->sk_nid;
+		tw->tw_nx_info		= NULL;
+
 #if IS_ENABLED(CONFIG_IPV6)
 		if (tw->tw_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
@@ -708,7 +720,10 @@ embryonic_reset:
 		 * avoid becoming vulnerable to outside attack aiming at
 		 * resetting legit local connections.
 		 */
-		req->rsk_ops->send_reset(sk, skb);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		if (!grsec_enable_blackhole)
+#endif
+			req->rsk_ops->send_reset(sk, skb);
 	} else if (fastopen) { /* received a valid RST pkt */
 		reqsk_fastopen_remove(sk, req, true);
 		tcp_reset(sk);
diff -ruNp linux-3.13.11/net/ipv4/tcp_probe.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_probe.c
--- linux-3.13.11/net/ipv4/tcp_probe.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_probe.c	2014-07-09 12:00:16.000000000 +0200
@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file
 		if (cnt + width >= len)
 			break;
 
-		if (copy_to_user(buf + cnt, tbuf, width))
+		if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
 			return -EFAULT;
 		cnt += width;
 	}
diff -ruNp linux-3.13.11/net/ipv4/tcp_timer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_timer.c
--- linux-3.13.11/net/ipv4/tcp_timer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/tcp_timer.c	2014-07-09 12:00:16.000000000 +0200
@@ -22,6 +22,10 @@
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_lastack_retries;
+#endif
+
 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock
 		}
 	}
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	if ((sk->sk_state == TCP_LAST_ACK) &&
+	    (grsec_lastack_retries > 0) &&
+	    (grsec_lastack_retries < retry_until))
+		retry_until = grsec_lastack_retries;
+#endif
+
 	if (retransmits_timed_out(sk, retry_until,
 				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
 		/* Has it gone just too far? */
diff -ruNp linux-3.13.11/net/ipv4/udp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/udp.c
--- linux-3.13.11/net/ipv4/udp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/udp.c	2014-07-09 12:00:16.000000000 +0200
@@ -87,6 +87,7 @@
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
+#include <linux/security.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/igmp.h>
@@ -113,6 +114,10 @@
 #include <net/busy_poll.h>
 #include "udp_impl.h"
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
 
@@ -308,14 +313,7 @@ fail:
 }
 EXPORT_SYMBOL(udp_lib_get_port);
 
-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
-{
-	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
-
-	return 	(!ipv6_only_sock(sk2)  &&
-		 (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
-		   inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
-}
+extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
 
 static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
 				       unsigned int port)
@@ -350,6 +348,11 @@ static inline int compute_score(struct s
 			if (inet->inet_rcv_saddr != daddr)
 				return -1;
 			score += 4;
+		} else {
+			/* block non nx_info ips */
+			if (!v4_addr_in_nx_info(sk->sk_nx_info,
+				daddr, NXA_MASK_BIND))
+				return -1;
 		}
 		if (inet->inet_daddr) {
 			if (inet->inet_daddr != saddr)
@@ -472,6 +475,7 @@ begin:
 	return result;
 }
 
+
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
@@ -518,6 +522,11 @@ begin:
 	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 		score = compute_score(sk, net, saddr, hnum, sport,
 				      daddr, dport, dif);
+		/* FIXME: disabled?
+		if (score == 9) {
+			result = sk;
+			break;
+		} else */
 		if (score > badness) {
 			result = sk;
 			badness = score;
@@ -542,6 +551,7 @@ begin:
 	if (get_nulls_value(node) != slot)
 		goto begin;
 
+
 	if (result) {
 		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
@@ -551,6 +561,7 @@ begin:
 			goto begin;
 		}
 	}
+
 	rcu_read_unlock();
 	return result;
 }
@@ -585,7 +596,7 @@ static inline bool __udp_is_mcast_sock(s
 	    udp_sk(sk)->udp_port_hash != hnum ||
 	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
 	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
-	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
+	    !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr)	||
 	    ipv6_only_sock(sk) ||
 	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
 		return false;
@@ -615,6 +626,9 @@ found:
 	return s;
 }
 
+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
+
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -914,9 +928,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
 		dport = usin->sin_port;
 		if (dport == 0)
 			return -EINVAL;
+
+		err = gr_search_udp_sendmsg(sk, usin);
+		if (err)
+			return err;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
+
+		err = gr_search_udp_sendmsg(sk, NULL);
+		if (err)
+			return err;
+
 		daddr = inet->inet_daddr;
 		dport = inet->inet_dport;
 		/* Open fast path for connected socket.
@@ -989,6 +1012,16 @@ int udp_sendmsg(struct kiocb *iocb, stru
 				   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
 				   faddr, saddr, dport, inet->inet_sport);
 
+		if (sk->sk_nx_info) {
+			rt = ip_v4_find_src(net, sk->sk_nx_info, fl4);
+			if (IS_ERR(rt)) {
+				err = PTR_ERR(rt);
+				rt = NULL;
+				goto out;
+			}
+			ip_rt_put(rt);
+		}
+
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 		rt = ip_route_output_flow(net, fl4, sk);
 		if (IS_ERR(rt)) {
@@ -1163,7 +1196,7 @@ static unsigned int first_packet_length(
 				 IS_UDPLITE(sk));
 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
 				 IS_UDPLITE(sk));
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		__skb_unlink(skb, rcvq);
 		__skb_queue_tail(&list_kill, skb);
 	}
@@ -1234,6 +1267,12 @@ int udp_recvmsg(struct kiocb *iocb, stru
 	int is_udplite = IS_UDPLITE(sk);
 	bool slow;
 
+	/*
+	 *	Check any passed addresses
+	 */
+	if (addr_len)
+		*addr_len = sizeof(*sin);
+
 	if (flags & MSG_ERRQUEUE)
 		return ip_recv_error(sk, msg, len, addr_len);
 
@@ -1243,6 +1282,10 @@ try_again:
 	if (!skb)
 		goto out;
 
+	err = gr_search_udp_recvmsg(sk, skb);
+	if (err)
+		goto out_free;
+
 	ulen = skb->len - sizeof(struct udphdr);
 	copied = len;
 	if (copied > ulen)
@@ -1276,7 +1319,7 @@ try_again:
 	if (unlikely(err)) {
 		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			UDP_INC_STATS_USER(sock_net(sk),
 					   UDP_MIB_INERRORS, is_udplite);
 		}
@@ -1293,9 +1336,9 @@ try_again:
 	if (sin) {
 		sin->sin_family = AF_INET;
 		sin->sin_port = udp_hdr(skb)->source;
-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+		sin->sin_addr.s_addr = nx_map_sock_lback(
+			skb->sk->sk_nx_info, ip_hdr(skb)->saddr);
 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
 		ip_cmsg_recv(msg, skb);
@@ -1566,7 +1609,7 @@ csum_error:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	atomic_inc(&sk->sk_drops);
+	atomic_inc_unchecked(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
@@ -1585,7 +1628,7 @@ static void flush_stack(struct sock **st
 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
 		if (!skb1) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					 IS_UDPLITE(sk));
 			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
@@ -1786,6 +1829,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
 		goto csum_error;
 
 	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	/*
@@ -2223,6 +2269,8 @@ static struct sock *udp_get_first(struct
 		sk_nulls_for_each(sk, node, &hslot->head) {
 			if (!net_eq(sock_net(sk), net))
 				continue;
+			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+				continue;
 			if (sk->sk_family == state->family)
 				goto found;
 		}
@@ -2240,7 +2288,9 @@ static struct sock *udp_get_next(struct
 
 	do {
 		sk = sk_nulls_next(sk);
-	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
+	} while (sk && (!net_eq(sock_net(sk), net) ||
+		sk->sk_family != state->family ||
+		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
 
 	if (!sk) {
 		if (state->bucket <= state->udp_table->mask)
@@ -2336,8 +2386,8 @@ static void udp4_format_sock(struct sock
 		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
-	__be32 dest = inet->inet_daddr;
-	__be32 src  = inet->inet_rcv_saddr;
+	__be32 dest = nx_map_sock_lback(current_nx_info(), inet->inet_daddr);
+	__be32 src = nx_map_sock_lback(current_nx_info(), inet->inet_rcv_saddr);
 	__u16 destp	  = ntohs(inet->inet_dport);
 	__u16 srcp	  = ntohs(inet->inet_sport);
 
@@ -2350,7 +2400,7 @@ static void udp4_format_sock(struct sock
 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
 		0, sock_i_ino(sp),
 		atomic_read(&sp->sk_refcnt), sp,
-		atomic_read(&sp->sk_drops));
+		atomic_read_unchecked(&sp->sk_drops));
 }
 
 int udp4_seq_show(struct seq_file *seq, void *v)
diff -ruNp linux-3.13.11/net/ipv4/xfrm4_policy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/xfrm4_policy.c
--- linux-3.13.11/net/ipv4/xfrm4_policy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv4/xfrm4_policy.c	2014-07-09 12:00:16.000000000 +0200
@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, st
 	fl4->flowi4_tos = iph->tos;
 }
 
-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
+static int xfrm4_garbage_collect(struct dst_ops *ops)
 {
 	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
 
-	xfrm4_policy_afinfo.garbage_collect(net);
+	xfrm_garbage_collect_deferred(net);
 	return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
 }
 
@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_tab
 
 static int __net_init xfrm4_net_init(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 	struct ctl_table_header *hdr;
 
-	table = xfrm4_policy_table;
 	if (!net_eq(net, &init_net)) {
-		table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
+		table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
 		if (!table)
 			goto err_alloc;
 
 		table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
-	}
-
-	hdr = register_net_sysctl(net, "net/ipv4", table);
+		hdr = register_net_sysctl(net, "net/ipv4", table);
+	} else
+		hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
 	if (!hdr)
 		goto err_reg;
 
@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(str
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
diff -ruNp linux-3.13.11/net/ipv6/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/Kconfig
--- linux-3.13.11/net/ipv6/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/Kconfig	2014-07-09 12:00:16.000000000 +0200
@@ -4,8 +4,8 @@
 
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
-	tristate "The IPv6 protocol"
-	default m
+	bool "The IPv6 protocol"
+	default n
 	---help---
 	  This is complemental support for the IP version 6.
 	  You will still be able to do traditional IPv4 networking as well.
diff -ruNp linux-3.13.11/net/ipv6/addrconf.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/addrconf.c
--- linux-3.13.11/net/ipv6/addrconf.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/addrconf.c	2014-07-09 12:00:16.000000000 +0200
@@ -90,6 +90,8 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/export.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet6.h>
 
 /* Set to 3 to get tracing... */
 #define ACONF_DEBUG 2
@@ -589,7 +591,7 @@ static int inet6_netconf_dump_devconf(st
 		idx = 0;
 		head = &net->dev_index_head[h];
 		rcu_read_lock();
-		cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+		cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
 			  net->dev_base_seq;
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
@@ -1284,7 +1286,7 @@ out:
 
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
 		       const struct in6_addr *daddr, unsigned int prefs,
-		       struct in6_addr *saddr)
+		       struct in6_addr *saddr, struct nx_info *nxi)
 {
 	struct ipv6_saddr_score scores[2],
 				*score = &scores[0], *hiscore = &scores[1];
@@ -1356,6 +1358,8 @@ int ipv6_dev_get_saddr(struct net *net,
 					       dev->name);
 				continue;
 			}
+			if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1))
+				continue;
 
 			score->rule = -1;
 			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
@@ -2337,7 +2341,7 @@ int addrconf_set_dstaddr(struct net *net
 		p.iph.ihl = 5;
 		p.iph.protocol = IPPROTO_IPV6;
 		p.iph.ttl = 64;
-		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
+		ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
 
 		if (ops->ndo_do_ioctl) {
 			mm_segment_t oldfs = get_fs();
@@ -3371,7 +3375,10 @@ static void if6_seq_stop(struct seq_file
 static int if6_seq_show(struct seq_file *seq, void *v)
 {
 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
-	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
+
+	if (nx_check(0, VS_ADMIN|VS_WATCH) ||
+	    v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1))
+		seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
 		   &ifp->addr,
 		   ifp->idev->dev->ifindex,
 		   ifp->prefix_len,
@@ -3892,6 +3899,11 @@ static int in6_dump_addrs(struct inet6_d
 	struct ifacaddr6 *ifaca;
 	int err = 1;
 	int ip_idx = *p_ip_idx;
+	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
+
+	/* disable ipv6 on non v6 guests */
+	if (nxi && !nx_info_has_v6(nxi))
+		return skb->len;
 
 	read_lock_bh(&idev->lock);
 	switch (type) {
@@ -3902,6 +3914,8 @@ static int in6_dump_addrs(struct inet6_d
 		list_for_each_entry(ifa, &idev->addr_list, if_list) {
 			if (++ip_idx < s_ip_idx)
 				continue;
+				if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1))
+					continue;
 			err = inet6_fill_ifaddr(skb, ifa,
 						NETLINK_CB(cb->skb).portid,
 						cb->nlh->nlmsg_seq,
@@ -3919,6 +3933,8 @@ static int in6_dump_addrs(struct inet6_d
 		     ifmca = ifmca->next, ip_idx++) {
 			if (ip_idx < s_ip_idx)
 				continue;
+				if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1))
+					continue;
 			err = inet6_fill_ifmcaddr(skb, ifmca,
 						  NETLINK_CB(cb->skb).portid,
 						  cb->nlh->nlmsg_seq,
@@ -3934,6 +3950,8 @@ static int in6_dump_addrs(struct inet6_d
 		     ifaca = ifaca->aca_next, ip_idx++) {
 			if (ip_idx < s_ip_idx)
 				continue;
+				if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1))
+					continue;
 			err = inet6_fill_ifacaddr(skb, ifaca,
 						  NETLINK_CB(cb->skb).portid,
 						  cb->nlh->nlmsg_seq,
@@ -3962,12 +3980,16 @@ static int inet6_dump_addr(struct sk_buf
 	struct inet6_dev *idev;
 	struct hlist_head *head;
 
+	/* FIXME: maybe disable ipv6 on non v6 guests?
+	if (skb->sk && skb->sk->sk_vx_info)
+		return skb->len; */
+
 	s_h = cb->args[0];
 	s_idx = idx = cb->args[1];
 	s_ip_idx = ip_idx = cb->args[2];
 
 	rcu_read_lock();
-	cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+	cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -4404,6 +4426,7 @@ static int inet6_dump_ifinfo(struct sk_b
 	struct net_device *dev;
 	struct inet6_dev *idev;
 	struct hlist_head *head;
+	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
 
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
@@ -4415,6 +4438,8 @@ static int inet6_dump_ifinfo(struct sk_b
 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
+			if (!v6_dev_in_nx_info(dev, nxi))
+				goto cont;
 			idev = __in6_dev_get(dev);
 			if (!idev)
 				goto cont;
@@ -4574,7 +4599,7 @@ static void __ipv6_ifa_notify(int event,
 			dst_free(&ifp->rt->dst);
 		break;
 	}
-	atomic_inc(&net->ipv6.dev_addr_genid);
+	atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
 	rt_genid_bump_ipv6(net);
 }
 
@@ -4595,7 +4620,7 @@ int addrconf_sysctl_forward(struct ctl_t
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
-	struct ctl_table lctl;
+	ctl_table_no_const lctl;
 	int ret;
 
 	/*
@@ -4680,7 +4705,7 @@ int addrconf_sysctl_disable(struct ctl_t
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
-	struct ctl_table lctl;
+	ctl_table_no_const lctl;
 	int ret;
 
 	/*
diff -ruNp linux-3.13.11/net/ipv6/af_inet6.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/af_inet6.c
--- linux-3.13.11/net/ipv6/af_inet6.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/af_inet6.c	2014-07-09 12:00:16.000000000 +0200
@@ -43,6 +43,8 @@
 #include <linux/netdevice.h>
 #include <linux/icmpv6.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -156,10 +158,13 @@ lookup_protocol:
 	}
 
 	err = -EPERM;
+	if ((protocol == IPPROTO_ICMPV6) &&
+		nx_capable(CAP_NET_RAW, NXC_RAW_ICMP))
+		goto override;
 	if (sock->type == SOCK_RAW && !kern &&
 	    !ns_capable(net->user_ns, CAP_NET_RAW))
 		goto out_rcu_unlock;
-
+override:
 	sock->ops = answer->ops;
 	answer_prot = answer->prot;
 	answer_no_check = answer->no_check;
@@ -259,6 +264,7 @@ int inet6_bind(struct socket *sock, stru
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct net *net = sock_net(sk);
+	struct nx_v6_sock_addr nsa;
 	__be32 v4addr = 0;
 	unsigned short snum;
 	int addr_type = 0;
@@ -274,6 +280,10 @@ int inet6_bind(struct socket *sock, stru
 	if (addr->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
+	err = v6_map_sock_addr(inet, addr, &nsa);
+	if (err)
+		return err;
+
 	addr_type = ipv6_addr_type(&addr->sin6_addr);
 	if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
 		return -EINVAL;
@@ -305,6 +315,7 @@ int inet6_bind(struct socket *sock, stru
 		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
 		chk_addr_ret = inet_addr_type(net, v4addr);
+
 		if (!sysctl_ip_nonlocal_bind &&
 		    !(inet->freebind || inet->transparent) &&
 		    v4addr != htonl(INADDR_ANY) &&
@@ -314,6 +325,10 @@ int inet6_bind(struct socket *sock, stru
 			err = -EADDRNOTAVAIL;
 			goto out;
 		}
+		if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) {
+			err = -EADDRNOTAVAIL;
+			goto out;
+		}
 	} else {
 		if (addr_type != IPV6_ADDR_ANY) {
 			struct net_device *dev = NULL;
@@ -340,6 +355,11 @@ int inet6_bind(struct socket *sock, stru
 				}
 			}
 
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
+				err = -EADDRNOTAVAIL;
+				goto out_unlock;
+			}
+
 			/* ipv4 addr of the socket is invalid.  Only the
 			 * unspecified and mapped address have a v4 equivalent.
 			 */
@@ -356,6 +376,9 @@ int inet6_bind(struct socket *sock, stru
 		}
 	}
 
+	/* what's that for? */
+	v6_set_sock_addr(inet, &nsa);
+
 	inet->inet_rcv_saddr = v4addr;
 	inet->inet_saddr = v4addr;
 
@@ -457,9 +480,11 @@ int inet6_getname(struct socket *sock, s
 			return -ENOTCONN;
 		sin->sin6_port = inet->inet_dport;
 		sin->sin6_addr = sk->sk_v6_daddr;
+		/* FIXME: remap lback? */
 		if (np->sndflow)
 			sin->sin6_flowinfo = np->flow_label;
 	} else {
+		/* FIXME: remap lback? */
 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
 			sin->sin6_addr = np->saddr;
 		else
@@ -776,7 +801,7 @@ static int __net_init inet6_net_init(str
 
 	net->ipv6.sysctl.bindv6only = 0;
 	net->ipv6.sysctl.icmpv6_time = 1*HZ;
-	atomic_set(&net->ipv6.rt_genid, 0);
+	atomic_set_unchecked(&net->ipv6.rt_genid, 0);
 
 	err = ipv6_init_mibs(net);
 	if (err)
diff -ruNp linux-3.13.11/net/ipv6/datagram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/datagram.c
--- linux-3.13.11/net/ipv6/datagram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/datagram.c	2014-07-09 12:00:16.000000000 +0200
@@ -655,7 +655,7 @@ int ip6_datagram_send_ctl(struct net *ne
 
 			rcu_read_lock();
 			if (fl6->flowi6_oif) {
-				dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
+				dev = dev_get_by_index_real_rcu(net, fl6->flowi6_oif);
 				if (!dev) {
 					rcu_read_unlock();
 					return -ENODEV;
@@ -906,5 +906,5 @@ void ip6_dgram_sock_seq_show(struct seq_
 		   0,
 		   sock_i_ino(sp),
 		   atomic_read(&sp->sk_refcnt), sp,
-		   atomic_read(&sp->sk_drops));
+		   atomic_read_unchecked(&sp->sk_drops));
 }
diff -ruNp linux-3.13.11/net/ipv6/fib6_rules.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/fib6_rules.c
--- linux-3.13.11/net/ipv6/fib6_rules.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/fib6_rules.c	2014-07-09 12:00:16.000000000 +0200
@@ -97,7 +97,7 @@ static int fib6_rule_action(struct fib_r
 					       ip6_dst_idev(&rt->dst)->dev,
 					       &flp6->daddr,
 					       rt6_flags2srcprefs(flags),
-					       &saddr))
+					       &saddr, NULL))
 				goto again;
 			if (!ipv6_prefix_equal(&saddr, &r->src.addr,
 					       r->src.plen))
diff -ruNp linux-3.13.11/net/ipv6/icmp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/icmp.c
--- linux-3.13.11/net/ipv6/icmp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/icmp.c	2014-07-09 12:00:16.000000000 +0200
@@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_templat
 
 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(ipv6_icmp_table_template,
 			sizeof(ipv6_icmp_table_template),
diff -ruNp linux-3.13.11/net/ipv6/inet6_hashtables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/inet6_hashtables.c
--- linux-3.13.11/net/ipv6/inet6_hashtables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/inet6_hashtables.c	2014-07-09 12:00:16.000000000 +0200
@@ -16,6 +16,7 @@
 
 #include <linux/module.h>
 #include <linux/random.h>
+#include <linux/vs_inet6.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -116,7 +117,6 @@ struct sock *__inet6_lookup_established(
 	unsigned int slot = hash & hashinfo->ehash_mask;
 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 
-
 	rcu_read_lock();
 begin:
 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
@@ -158,6 +158,9 @@ static inline int compute_score(struct s
 			if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
 				return -1;
 			score++;
+		} else {
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+				return -1;
 		}
 		if (sk->sk_bound_dev_if) {
 			if (sk->sk_bound_dev_if != dif)
diff -ruNp linux-3.13.11/net/ipv6/ip6_fib.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_fib.c
--- linux-3.13.11/net/ipv6/ip6_fib.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_fib.c	2014-07-09 12:00:16.000000000 +0200
@@ -1779,6 +1779,7 @@ static int ipv6_route_seq_show(struct se
 	struct rt6_info *rt = v;
 	struct ipv6_route_iter *iter = seq->private;
 
+	/* FIXME: check for network context? */
 	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
 
 #ifdef CONFIG_IPV6_SUBTREES
diff -ruNp linux-3.13.11/net/ipv6/ip6_gre.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_gre.c
--- linux-3.13.11/net/ipv6/ip6_gre.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_gre.c	2014-07-09 12:00:16.000000000 +0200
@@ -74,7 +74,7 @@ struct ip6gre_net {
 	struct net_device *fb_tunnel_dev;
 };
 
-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_link_ops;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -1294,7 +1294,7 @@ static void ip6gre_fb_tunnel_init(struct
 }
 
 
-static struct inet6_protocol ip6gre_protocol __read_mostly = {
+static struct inet6_protocol ip6gre_protocol = {
 	.handler     = ip6gre_rcv,
 	.err_handler = ip6gre_err,
 	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
@@ -1637,7 +1637,7 @@ static const struct nla_policy ip6gre_po
 	[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
 };
 
-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_link_ops = {
 	.kind		= "ip6gre",
 	.maxtype	= IFLA_GRE_MAX,
 	.policy		= ip6gre_policy,
@@ -1650,7 +1650,7 @@ static struct rtnl_link_ops ip6gre_link_
 	.fill_info	= ip6gre_fill_info,
 };
 
-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
+static struct rtnl_link_ops ip6gre_tap_ops = {
 	.kind		= "ip6gretap",
 	.maxtype	= IFLA_GRE_MAX,
 	.policy		= ip6gre_policy,
diff -ruNp linux-3.13.11/net/ipv6/ip6_output.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_output.c
--- linux-3.13.11/net/ipv6/ip6_output.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_output.c	2014-07-09 12:00:16.000000000 +0200
@@ -872,7 +872,8 @@ static int ip6_dst_lookup_tail(struct so
 		struct rt6_info *rt = (struct rt6_info *) *dst;
 		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 					  sk ? inet6_sk(sk)->srcprefs : 0,
-					  &fl6->saddr);
+					  &fl6->saddr,
+					  sk ? sk->sk_nx_info : NULL);
 		if (err)
 			goto out_err_release;
 	}
diff -ruNp linux-3.13.11/net/ipv6/ip6_tunnel.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_tunnel.c
--- linux-3.13.11/net/ipv6/ip6_tunnel.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_tunnel.c	2014-07-09 12:00:16.000000000 +0200
@@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *a
 
 static int ip6_tnl_dev_init(struct net_device *dev);
 static void ip6_tnl_dev_setup(struct net_device *dev);
-static struct rtnl_link_ops ip6_link_ops __read_mostly;
+static struct rtnl_link_ops ip6_link_ops;
 
 static int ip6_tnl_net_id __read_mostly;
 struct ip6_tnl_net {
@@ -1717,7 +1717,7 @@ static const struct nla_policy ip6_tnl_p
 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
 };
 
-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
+static struct rtnl_link_ops ip6_link_ops = {
 	.kind		= "ip6tnl",
 	.maxtype	= IFLA_IPTUN_MAX,
 	.policy		= ip6_tnl_policy,
diff -ruNp linux-3.13.11/net/ipv6/ip6_vti.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_vti.c
--- linux-3.13.11/net/ipv6/ip6_vti.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ip6_vti.c	2014-07-09 12:00:16.000000000 +0200
@@ -63,7 +63,7 @@ static u32 HASH(const struct in6_addr *a
 
 static int vti6_dev_init(struct net_device *dev);
 static void vti6_dev_setup(struct net_device *dev);
-static struct rtnl_link_ops vti6_link_ops __read_mostly;
+static struct rtnl_link_ops vti6_link_ops;
 
 static int vti6_net_id __read_mostly;
 struct vti6_net {
@@ -902,7 +902,7 @@ static const struct nla_policy vti6_poli
 	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
 };
 
-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+static struct rtnl_link_ops vti6_link_ops = {
 	.kind		= "vti6",
 	.maxtype	= IFLA_VTI_MAX,
 	.policy		= vti6_policy,
diff -ruNp linux-3.13.11/net/ipv6/ipv6_sockglue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ipv6_sockglue.c
--- linux-3.13.11/net/ipv6/ipv6_sockglue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ipv6_sockglue.c	2014-07-09 12:00:16.000000000 +0200
@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct soc
 		if (sk->sk_type != SOCK_STREAM)
 			return -ENOPROTOOPT;
 
-		msg.msg_control = optval;
+		msg.msg_control = (void __force_kernel *)optval;
 		msg.msg_controllen = len;
 		msg.msg_flags = flags;
 
diff -ruNp linux-3.13.11/net/ipv6/ndisc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ndisc.c
--- linux-3.13.11/net/ipv6/ndisc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ndisc.c	2014-07-09 12:00:16.000000000 +0200
@@ -486,7 +486,7 @@ void ndisc_send_na(struct net_device *de
 	} else {
 		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
 				       inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
-				       &tmpaddr))
+				       &tmpaddr, NULL))
 			return;
 		src_addr = &tmpaddr;
 	}
diff -ruNp linux-3.13.11/net/ipv6/netfilter/ip6_tables.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/ip6_tables.c
--- linux-3.13.11/net/ipv6/netfilter/ip6_tables.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/ip6_tables.c	2014-07-09 12:00:16.000000000 +0200
@@ -1083,14 +1083,14 @@ static int compat_table_info(const struc
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+                    int len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct ip6t_getinfo)) {
-		duprintf("length %u != %zu\n", *len,
+	if (len != sizeof(struct ip6t_getinfo)) {
+		duprintf("length %u != %zu\n", len,
 			 sizeof(struct ip6t_getinfo));
 		return -EINVAL;
 	}
@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, voi
 		info.size = private->size;
 		strcpy(info.name, name);
 
-		if (copy_to_user(user, &info, *len) != 0)
+		if (copy_to_user(user, &info, len) != 0)
 			ret = -EFAULT;
 		else
 			ret = 0;
@@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk,
 
 	switch (cmd) {
 	case IP6T_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 1);
+		ret = get_info(sock_net(sk), user, *len, 1);
 		break;
 	case IP6T_SO_GET_ENTRIES:
 		ret = compat_get_entries(sock_net(sk), user, len);
@@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd
 
 	switch (cmd) {
 	case IP6T_SO_GET_INFO:
-		ret = get_info(sock_net(sk), user, len, 0);
+		ret = get_info(sock_net(sk), user, *len, 0);
 		break;
 
 	case IP6T_SO_GET_ENTRIES:
diff -ruNp linux-3.13.11/net/ipv6/netfilter/ip6t_MASQUERADE.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/ip6t_MASQUERADE.c
--- linux-3.13.11/net/ipv6/netfilter/ip6t_MASQUERADE.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/ip6t_MASQUERADE.c	2014-07-09 12:00:16.000000000 +0200
@@ -34,7 +34,7 @@ masquerade_tg6(struct sk_buff *skb, cons
 			    ctinfo == IP_CT_RELATED_REPLY));
 
 	if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
-			       &ipv6_hdr(skb)->daddr, 0, &src) < 0)
+			       &ipv6_hdr(skb)->daddr, 0, &src, NULL) < 0)
 		return NF_DROP;
 
 	nfct_nat(ct)->masq_index = par->out->ifindex;
diff -ruNp linux-3.13.11/net/ipv6/netfilter/nf_conntrack_reasm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/nf_conntrack_reasm.c
--- linux-3.13.11/net/ipv6/netfilter/nf_conntrack_reasm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/netfilter/nf_conntrack_reasm.c	2014-07-09 12:00:16.000000000 +0200
@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysc
 
 static int nf_ct_frag6_sysctl_register(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 	struct ctl_table_header *hdr;
 
-	table = nf_ct_frag6_sysctl_table;
 	if (!net_eq(net, &init_net)) {
-		table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
+		table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
 				GFP_KERNEL);
 		if (table == NULL)
 			goto err_alloc;
@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(s
 		table[0].data = &net->nf_frag.frags.timeout;
 		table[1].data = &net->nf_frag.frags.low_thresh;
 		table[2].data = &net->nf_frag.frags.high_thresh;
-	}
-
-	hdr = register_net_sysctl(net, "net/netfilter", table);
+		hdr = register_net_sysctl(net, "net/netfilter", table);
+	} else
+		hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(s
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
diff -ruNp linux-3.13.11/net/ipv6/output_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/output_core.c
--- linux-3.13.11/net/ipv6/output_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/output_core.c	2014-07-09 12:00:16.000000000 +0200
@@ -9,8 +9,8 @@
 
 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 {
-	static atomic_t ipv6_fragmentation_id;
-	int old, new;
+	static atomic_unchecked_t ipv6_fragmentation_id;
+	int id;
 
 #if IS_ENABLED(CONFIG_IPV6)
 	if (rt && !(rt->dst.flags & DST_NOPEER)) {
@@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *
 		}
 	}
 #endif
-	do {
-		old = atomic_read(&ipv6_fragmentation_id);
-		new = old + 1;
-		if (!new)
-			new = 1;
-	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
-	fhdr->identification = htonl(new);
+	id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
+	if (!id)
+		id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
+	fhdr->identification = htonl(id);
 }
 EXPORT_SYMBOL(ipv6_select_ident);
 
diff -ruNp linux-3.13.11/net/ipv6/ping.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ping.c
--- linux-3.13.11/net/ipv6/ping.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/ping.c	2014-07-09 12:00:16.000000000 +0200
@@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_
 };
 #endif
 
+static struct pingv6_ops real_pingv6_ops = {
+	.ipv6_recv_error	= ipv6_recv_error,
+	.ip6_datagram_recv_ctl	= ip6_datagram_recv_ctl,
+	.icmpv6_err_convert	= icmpv6_err_convert,
+	.ipv6_icmp_error	= ipv6_icmp_error,
+	.ipv6_chk_addr		= ipv6_chk_addr,
+};
+
+static struct pingv6_ops dummy_pingv6_ops = {
+	.ipv6_recv_error	= dummy_ipv6_recv_error,
+	.ip6_datagram_recv_ctl	= dummy_ip6_datagram_recv_ctl,
+	.icmpv6_err_convert	= dummy_icmpv6_err_convert,
+	.ipv6_icmp_error	= dummy_ipv6_icmp_error,
+	.ipv6_chk_addr		= dummy_ipv6_chk_addr,
+};
+
 int __init pingv6_init(void)
 {
 #ifdef CONFIG_PROC_FS
@@ -253,11 +269,7 @@ int __init pingv6_init(void)
 	if (ret)
 		return ret;
 #endif
-	pingv6_ops.ipv6_recv_error = ipv6_recv_error;
-	pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
-	pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
-	pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
-	pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
+	pingv6_ops = &real_pingv6_ops;
 	return inet6_register_protosw(&pingv6_protosw);
 }
 
@@ -266,11 +278,7 @@ int __init pingv6_init(void)
  */
 void pingv6_exit(void)
 {
-	pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
-	pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
-	pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
-	pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
-	pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
+	pingv6_ops = &dummy_pingv6_ops;
 #ifdef CONFIG_PROC_FS
 	unregister_pernet_subsys(&ping_v6_net_ops);
 #endif
diff -ruNp linux-3.13.11/net/ipv6/raw.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/raw.c
--- linux-3.13.11/net/ipv6/raw.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/raw.c	2014-07-09 12:00:16.000000000 +0200
@@ -30,6 +30,7 @@
 #include <linux/icmpv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
+#include <linux/vs_inet6.h>
 #include <linux/skbuff.h>
 #include <linux/compat.h>
 #include <asm/uaccess.h>
@@ -287,6 +288,13 @@ static int rawv6_bind(struct sock *sk, s
 				goto out_unlock;
 		}
 
+		if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
+			err = -EADDRNOTAVAIL;
+			if (dev)
+				dev_put(dev);
+			goto out;
+		}
+
 		/* ipv4 addr of the socket is invalid.  Only the
 		 * unspecified and mapped address have a v4 equivalent.
 		 */
@@ -384,7 +392,7 @@ static inline int rawv6_rcv_skb(struct s
 {
 	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
 	    skb_checksum_complete(skb)) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -412,7 +420,7 @@ int rawv6_rcv(struct sock *sk, struct sk
 	struct raw6_sock *rp = raw6_sk(sk);
 
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
-		atomic_inc(&sk->sk_drops);
+		atomic_inc_unchecked(&sk->sk_drops);
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
@@ -436,7 +444,7 @@ int rawv6_rcv(struct sock *sk, struct sk
 
 	if (inet->hdrincl) {
 		if (skb_checksum_complete(skb)) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			kfree_skb(skb);
 			return NET_RX_DROP;
 		}
@@ -465,6 +473,9 @@ static int rawv6_recvmsg(struct kiocb *i
 	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
+	if (addr_len)
+		*addr_len=sizeof(*sin6);
+
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -503,7 +514,6 @@ static int rawv6_recvmsg(struct kiocb *i
 		sin6->sin6_flowinfo = 0;
 		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
 							  IP6CB(skb)->iif);
-		*addr_len = sizeof(*sin6);
 	}
 
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -606,7 +616,7 @@ out:
 	return err;
 }
 
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
 			struct flowi6 *fl6, struct dst_entry **dstp,
 			unsigned int flags)
 {
@@ -918,12 +928,15 @@ do_confirm:
 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
 			       char __user *optval, int optlen)
 {
+	struct icmp6_filter filter;
+
 	switch (optname) {
 	case ICMPV6_FILTER:
 		if (optlen > sizeof(struct icmp6_filter))
 			optlen = sizeof(struct icmp6_filter);
-		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
+		if (copy_from_user(&filter, optval, optlen))
 			return -EFAULT;
+		raw6_sk(sk)->filter = filter;
 		return 0;
 	default:
 		return -ENOPROTOOPT;
@@ -936,6 +949,7 @@ static int rawv6_geticmpfilter(struct so
 			       char __user *optval, int __user *optlen)
 {
 	int len;
+	struct icmp6_filter filter;
 
 	switch (optname) {
 	case ICMPV6_FILTER:
@@ -947,7 +961,8 @@ static int rawv6_geticmpfilter(struct so
 			len = sizeof(struct icmp6_filter);
 		if (put_user(len, optlen))
 			return -EFAULT;
-		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
+		filter = raw6_sk(sk)->filter;
+		if (len > sizeof filter || copy_to_user(optval, &filter, len))
 			return -EFAULT;
 		return 0;
 	default:
diff -ruNp linux-3.13.11/net/ipv6/reassembly.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/reassembly.c
--- linux-3.13.11/net/ipv6/reassembly.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/reassembly.c	2014-07-09 12:00:16.000000000 +0200
@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_ta
 
 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 	struct ctl_table_header *hdr;
 
-	table = ip6_frags_ns_ctl_table;
 	if (!net_eq(net, &init_net)) {
-		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
+		table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
 		if (table == NULL)
 			goto err_alloc;
 
@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysct
 		/* Don't export sysctls to unprivileged users */
 		if (net->user_ns != &init_user_ns)
 			table[0].procname = NULL;
-	}
+		hdr = register_net_sysctl(net, "net/ipv6", table);
+	} else
+		hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
 
-	hdr = register_net_sysctl(net, "net/ipv6", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysct
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
diff -ruNp linux-3.13.11/net/ipv6/route.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/route.c
--- linux-3.13.11/net/ipv6/route.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/route.c	2014-07-09 12:00:16.000000000 +0200
@@ -58,6 +58,7 @@
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <linux/vs_inet6.h>
 
 #include <asm/uaccess.h>
 
@@ -2196,15 +2197,17 @@ int ip6_route_get_saddr(struct net *net,
 			struct rt6_info *rt,
 			const struct in6_addr *daddr,
 			unsigned int prefs,
-			struct in6_addr *saddr)
+			struct in6_addr *saddr,
+			struct nx_info *nxi)
 {
 	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
 	int err = 0;
-	if (rt->rt6i_prefsrc.plen)
+	if (rt->rt6i_prefsrc.plen && (!nxi ||
+	    v6_addr_in_nx_info(nxi, &rt->rt6i_prefsrc.addr, NXA_TYPE_ADDR)))
 		*saddr = rt->rt6i_prefsrc.addr;
 	else
 		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
-					 daddr, prefs, saddr);
+					 daddr, prefs, saddr, nxi);
 	return err;
 }
 
@@ -2624,7 +2627,8 @@ static int rt6_fill_node(struct net *net
 				goto nla_put_failure;
 	} else if (dst) {
 		struct in6_addr saddr_buf;
-		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
+		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf,
+		    (skb->sk ? skb->sk->sk_nx_info : NULL)) == 0 &&
 		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
 			goto nla_put_failure;
 	}
@@ -2954,7 +2958,7 @@ struct ctl_table ipv6_route_table_templa
 
 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(ipv6_route_table_template,
 			sizeof(ipv6_route_table_template),
diff -ruNp linux-3.13.11/net/ipv6/sit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/sit.c
--- linux-3.13.11/net/ipv6/sit.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/sit.c	2014-07-09 12:00:16.000000000 +0200
@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct ne
 static void ipip6_dev_free(struct net_device *dev);
 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
 		      __be32 *v4dst);
-static struct rtnl_link_ops sit_link_ops __read_mostly;
+static struct rtnl_link_ops sit_link_ops;
 
 static int sit_net_id __read_mostly;
 struct sit_net {
@@ -1664,7 +1664,7 @@ static void ipip6_dellink(struct net_dev
 		unregister_netdevice_queue(dev, head);
 }
 
-static struct rtnl_link_ops sit_link_ops __read_mostly = {
+static struct rtnl_link_ops sit_link_ops = {
 	.kind		= "sit",
 	.maxtype	= IFLA_IPTUN_MAX,
 	.policy		= ipip6_policy,
diff -ruNp linux-3.13.11/net/ipv6/sysctl_net_ipv6.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/sysctl_net_ipv6.c
--- linux-3.13.11/net/ipv6/sysctl_net_ipv6.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/sysctl_net_ipv6.c	2014-07-09 12:00:16.000000000 +0200
@@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] =
 
 static int __net_init ipv6_sysctl_net_init(struct net *net)
 {
-	struct ctl_table *ipv6_table;
+	ctl_table_no_const *ipv6_table;
 	struct ctl_table *ipv6_route_table;
 	struct ctl_table *ipv6_icmp_table;
 	int err;
diff -ruNp linux-3.13.11/net/ipv6/tcp_ipv6.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/tcp_ipv6.c
--- linux-3.13.11/net/ipv6/tcp_ipv6.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/tcp_ipv6.c	2014-07-09 12:00:16.000000000 +0200
@@ -72,6 +72,7 @@
 
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
+#include <linux/vs_inet6.h>
 
 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
@@ -104,6 +105,10 @@ static void inet6_sk_rx_dst_set(struct s
 		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
 }
 
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
 static void tcp_v6_hash(struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
@@ -164,8 +169,15 @@ static int tcp_v6_connect(struct sock *s
 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
 	 */
 
-	if(ipv6_addr_any(&usin->sin6_addr))
-		usin->sin6_addr.s6_addr[15] = 0x1;
+	if(ipv6_addr_any(&usin->sin6_addr)) {
+		struct nx_info *nxi =  sk->sk_nx_info;
+
+		if (nxi && nx_info_has_v6(nxi))
+			/* FIXME: remap lback? */
+			usin->sin6_addr = nxi->v6.ip;
+		else
+			usin->sin6_addr.s6_addr[15] = 0x1;
+	}
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -1397,6 +1409,9 @@ static int tcp_v6_do_rcv(struct sock *sk
 	return 0;
 
 reset:
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	if (!grsec_enable_blackhole)
+#endif
 	tcp_v6_send_reset(sk, skb);
 discard:
 	if (opt_skb)
@@ -1479,12 +1494,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
-	if (!sk)
+	if (!sk) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		ret = 1;
+#endif
 		goto no_tcp_socket;
+	}
 
 process:
-	if (sk->sk_state == TCP_TIME_WAIT)
+	if (sk->sk_state == TCP_TIME_WAIT) {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		ret = 2;
+#endif
 		goto do_time_wait;
+	}
 
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
@@ -1536,6 +1559,10 @@ csum_error:
 bad_packet:
 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 	} else {
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+		if (!grsec_enable_blackhole || (ret == 1 &&
+		    (skb->dev->flags & IFF_LOOPBACK)))
+#endif
 		tcp_v6_send_reset(NULL, skb);
 	}
 
diff -ruNp linux-3.13.11/net/ipv6/udp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/udp.c
--- linux-3.13.11/net/ipv6/udp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/udp.c	2014-07-09 12:00:16.000000000 +0200
@@ -47,6 +47,7 @@
 #include <net/xfrm.h>
 #include <net/inet6_hashtables.h>
 #include <net/busy_poll.h>
+#include <linux/vs_inet6.h>
 
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -76,33 +77,65 @@ static unsigned int udp6_ehashfn(struct
 			       udp_ipv6_hash_secret + net_hash_mix(net));
 }
 
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+extern int grsec_enable_blackhole;
+#endif
+
+int ipv6_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
 {
+	const struct in6_addr *sk1_rcv_saddr6 = inet6_rcv_saddr(sk1);
 	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-	int sk_ipv6only = ipv6_only_sock(sk);
+	__be32 sk1_rcv_saddr = sk1->sk_rcv_saddr;
+	__be32 sk2_rcv_saddr = sk2->sk_rcv_saddr;
+	int sk1_ipv6only = ipv6_only_sock(sk1);
 	int sk2_ipv6only = inet_v6_ipv6only(sk2);
-	int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+	int addr_type1 = ipv6_addr_type(sk1_rcv_saddr6);
 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
 	/* if both are mapped, treat as IPv4 */
-	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
-		return (!sk2_ipv6only &&
-			(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
-			  sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
+	if (addr_type1 == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
+		if (!sk2_ipv6only &&
+			(!sk1->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+			  sk1->sk_rcv_saddr == sk2->sk_rcv_saddr))
+			goto vs_v4;
+		else
+			return 0;
+	}
 
 	if (addr_type2 == IPV6_ADDR_ANY &&
-	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
-		return 1;
+	    !(sk2_ipv6only && addr_type1 == IPV6_ADDR_MAPPED))
+		goto vs;
 
-	if (addr_type == IPV6_ADDR_ANY &&
-	    !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
-		return 1;
+	if (addr_type1 == IPV6_ADDR_ANY &&
+	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+		goto vs;
 
 	if (sk2_rcv_saddr6 &&
-	    ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
-		return 1;
+	    ipv6_addr_equal(&sk1->sk_v6_rcv_saddr, sk2_rcv_saddr6))
+		goto vs;
 
 	return 0;
+
+vs_v4:
+	if (!sk1_rcv_saddr && !sk2_rcv_saddr)
+		return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
+	if (!sk2_rcv_saddr)
+		return v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, -1);
+	if (!sk1_rcv_saddr)
+		return v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, -1);
+	return 1;
+vs:
+	if (addr_type2 == IPV6_ADDR_ANY && addr_type1 == IPV6_ADDR_ANY)
+		return nx_v6_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
+	else if (addr_type2 == IPV6_ADDR_ANY)
+		return v6_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr6, -1);
+	else if (addr_type1 == IPV6_ADDR_ANY) {
+		if (addr_type2 == IPV6_ADDR_MAPPED)
+			return nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info);
+		else
+			return v6_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr6, -1);
+	}
+	return 1;
 }
 
 static unsigned int udp6_portaddr_hash(struct net *net,
@@ -160,6 +193,10 @@ static inline int compute_score(struct s
 			if (inet->inet_dport != sport)
 				return -1;
 			score++;
+		} else {
+			/* block non nx_info ips */
+			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+				return -1;
 		}
 		if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
 			if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
@@ -392,6 +429,9 @@ int udpv6_recvmsg(struct kiocb *iocb, st
 	int is_udp4;
 	bool slow;
 
+	if (addr_len)
+		*addr_len = sizeof(struct sockaddr_in6);
+
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len, addr_len);
 
@@ -435,7 +475,7 @@ try_again:
 	if (unlikely(err)) {
 		trace_kfree_skb(skb, udpv6_recvmsg);
 		if (!peeked) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			if (is_udp4)
 				UDP_INC_STATS_USER(sock_net(sk),
 						   UDP_MIB_INERRORS,
@@ -477,7 +517,7 @@ try_again:
 				ipv6_iface_scope_id(&sin6->sin6_addr,
 						    IP6CB(skb)->iif);
 		}
-		*addr_len = sizeof(*sin6);
+
 	}
 	if (is_udp4) {
 		if (inet->cmsg_flags)
@@ -685,7 +725,7 @@ csum_error:
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	atomic_inc(&sk->sk_drops);
+	atomic_inc_unchecked(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
@@ -742,7 +782,7 @@ static void flush_stack(struct sock **st
 		if (likely(skb1 == NULL))
 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 		if (!skb1) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					  IS_UDPLITE(sk));
 			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
@@ -881,6 +921,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
 		goto csum_error;
 
 	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
+	if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
+#endif
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
 	kfree_skb(skb);
diff -ruNp linux-3.13.11/net/ipv6/xfrm6_policy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/xfrm6_policy.c
--- linux-3.13.11/net/ipv6/xfrm6_policy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/ipv6/xfrm6_policy.c	2014-07-09 12:00:16.000000000 +0200
@@ -63,7 +63,7 @@ static int xfrm6_get_saddr(struct net *n
 	dev = ip6_dst_idev(dst)->dev;
 	ipv6_dev_get_saddr(dev_net(dev), dev,
 			   (struct in6_addr *)&daddr->a6, 0,
-			   (struct in6_addr *)&saddr->a6);
+			   (struct in6_addr *)&saddr->a6, NULL);
 	dst_release(dst);
 	return 0;
 }
@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, st
 	}
 }
 
-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
+static int xfrm6_garbage_collect(struct dst_ops *ops)
 {
 	struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
 
-	xfrm6_policy_afinfo.garbage_collect(net);
+	xfrm_garbage_collect_deferred(net);
 	return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
 }
 
@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_tab
 
 static int __net_init xfrm6_net_init(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table = NULL;
 	struct ctl_table_header *hdr;
 
-	table = xfrm6_policy_table;
 	if (!net_eq(net, &init_net)) {
-		table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
+		table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
 		if (!table)
 			goto err_alloc;
 
 		table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
-	}
+		hdr = register_net_sysctl(net, "net/ipv6", table);
+	} else
+		hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
 
-	hdr = register_net_sysctl(net, "net/ipv6", table);
 	if (!hdr)
 		goto err_reg;
 
@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(str
 	return 0;
 
 err_reg:
-	if (!net_eq(net, &init_net))
-		kfree(table);
+	kfree(table);
 err_alloc:
 	return -ENOMEM;
 }
diff -ruNp linux-3.13.11/net/irda/ircomm/ircomm_tty.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/irda/ircomm/ircomm_tty.c
--- linux-3.13.11/net/irda/ircomm/ircomm_tty.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/irda/ircomm/ircomm_tty.c	2014-07-09 12:00:16.000000000 +0200
@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(st
 	add_wait_queue(&port->open_wait, &wait);
 
 	IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
-	      __FILE__, __LINE__, tty->driver->name, port->count);
+	      __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
 	spin_lock_irqsave(&port->lock, flags);
 	if (!tty_hung_up_p(filp))
-		port->count--;
+		atomic_dec(&port->count);
 	port->blocked_open++;
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(st
 		}
 
 		IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
-		      __FILE__, __LINE__, tty->driver->name, port->count);
+		      __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
 		schedule();
 	}
@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(st
 
 	spin_lock_irqsave(&port->lock, flags);
 	if (!tty_hung_up_p(filp))
-		port->count++;
+		atomic_inc(&port->count);
 	port->blocked_open--;
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
-	      __FILE__, __LINE__, tty->driver->name, port->count);
+	      __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
 
 	if (!retval)
 		port->flags |= ASYNC_NORMAL_ACTIVE;
@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_st
 
 	/* ++ is not atomic, so this should be protected - Jean II */
 	spin_lock_irqsave(&self->port.lock, flags);
-	self->port.count++;
+	atomic_inc(&self->port.count);
 	spin_unlock_irqrestore(&self->port.lock, flags);
 	tty_port_tty_set(&self->port, tty);
 
 	IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
-		   self->line, self->port.count);
+		   self->line, atomic_read(&self->port.count));
 
 	/* Not really used by us, but lets do it anyway */
 	self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty
 		tty_kref_put(port->tty);
 	}
 	port->tty = NULL;
-	port->count = 0;
+	atomic_set(&port->count, 0);
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	wake_up_interruptible(&port->open_wait);
@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct
 	seq_putc(m, '\n');
 
 	seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
-	seq_printf(m, "Open count: %d\n", self->port.count);
+	seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
 	seq_printf(m, "Max data size: %d\n", self->max_data_size);
 	seq_printf(m, "Max header size: %d\n", self->max_header_size);
 
diff -ruNp linux-3.13.11/net/iucv/af_iucv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/iucv/af_iucv.c
--- linux-3.13.11/net/iucv/af_iucv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/iucv/af_iucv.c	2014-07-09 12:00:16.000000000 +0200
@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct soc
 
 	write_lock_bh(&iucv_sk_list.lock);
 
-	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+	sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
 	while (__iucv_get_sock_by_name(name)) {
 		sprintf(name, "%08x",
-			atomic_inc_return(&iucv_sk_list.autobind_name));
+			atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
 	}
 
 	write_unlock_bh(&iucv_sk_list.lock);
diff -ruNp linux-3.13.11/net/iucv/iucv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/iucv/iucv.c
--- linux-3.13.11/net/iucv/iucv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/iucv/iucv.c	2014-07-09 12:00:16.000000000 +0200
@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifi
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata iucv_cpu_notifier = {
+static struct notifier_block iucv_cpu_notifier = {
 	.notifier_call = iucv_cpu_notify,
 };
 
diff -ruNp linux-3.13.11/net/key/af_key.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/key/af_key.c
--- linux-3.13.11/net/key/af_key.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/key/af_key.c	2014-07-09 12:00:16.000000000 +0200
@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(stru
 static u32 get_acqseq(void)
 {
 	u32 res;
-	static atomic_t acqseq;
+	static atomic_unchecked_t acqseq;
 
 	do {
-		res = atomic_inc_return(&acqseq);
+		res = atomic_inc_return_unchecked(&acqseq);
 	} while (!res);
 	return res;
 }
diff -ruNp linux-3.13.11/net/l2tp/l2tp_ip.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/l2tp/l2tp_ip.c
--- linux-3.13.11/net/l2tp/l2tp_ip.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/l2tp/l2tp_ip.c	2014-07-09 12:00:16.000000000 +0200
@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb
 	if (flags & MSG_OOB)
 		goto out;
 
+	if (addr_len)
+		*addr_len = sizeof(*sin);
+
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
 		goto out;
@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb
 		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
 		sin->sin_port = 0;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
-		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
 		ip_cmsg_recv(msg, skb);
diff -ruNp linux-3.13.11/net/mac80211/cfg.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/cfg.c
--- linux-3.13.11/net/mac80211/cfg.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/cfg.c	2014-07-09 12:00:16.000000000 +0200
@@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel
 			ret = ieee80211_vif_use_channel(sdata, chandef,
 					IEEE80211_CHANCTX_EXCLUSIVE);
 		}
-	} else if (local->open_count == local->monitors) {
+	} else if (local_read(&local->open_count) == local->monitors) {
 		local->_oper_chandef = *chandef;
 		ieee80211_hw_config(local, 0);
 	}
@@ -3311,7 +3311,7 @@ static void ieee80211_mgmt_frame_registe
 		else
 			local->probe_req_reg--;
 
-		if (!local->open_count)
+		if (!local_read(&local->open_count))
 			break;
 
 		ieee80211_queue_work(&local->hw, &local->reconfig_filter);
@@ -3774,8 +3774,8 @@ static int ieee80211_cfg_get_channel(str
 	if (chanctx_conf) {
 		*chandef = chanctx_conf->def;
 		ret = 0;
-	} else if (local->open_count > 0 &&
-		   local->open_count == local->monitors &&
+	} else if (local_read(&local->open_count) > 0 &&
+		   local_read(&local->open_count) == local->monitors &&
 		   sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 		if (local->use_chanctx)
 			*chandef = local->monitor_chandef;
diff -ruNp linux-3.13.11/net/mac80211/ieee80211_i.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/ieee80211_i.h
--- linux-3.13.11/net/mac80211/ieee80211_i.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/ieee80211_i.h	2014-07-09 12:00:16.000000000 +0200
@@ -28,6 +28,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
 #include <net/mac80211.h>
+#include <asm/local.h>
 #include "key.h"
 #include "sta_info.h"
 #include "debug.h"
@@ -961,7 +962,7 @@ struct ieee80211_local {
 	/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
 	spinlock_t queue_stop_reason_lock;
 
-	int open_count;
+	local_t open_count;
 	int monitors, cooked_mntrs;
 	/* number of interfaces with corresponding FIF_ flags */
 	int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
diff -ruNp linux-3.13.11/net/mac80211/iface.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/iface.c
--- linux-3.13.11/net/mac80211/iface.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/iface.c	2014-07-09 12:00:16.000000000 +0200
@@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_de
 		break;
 	}
 
-	if (local->open_count == 0) {
+	if (local_read(&local->open_count) == 0) {
 		res = drv_start(local);
 		if (res)
 			goto err_del_bss;
@@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_de
 			res = drv_add_interface(local, sdata);
 			if (res)
 				goto err_stop;
-		} else if (local->monitors == 0 && local->open_count == 0) {
+		} else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
 			res = ieee80211_add_virtual_monitor(local);
 			if (res)
 				goto err_stop;
@@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_de
 		atomic_inc(&local->iff_promiscs);
 
 	if (coming_up)
-		local->open_count++;
+		local_inc(&local->open_count);
 
 	if (hw_reconf_flags)
 		ieee80211_hw_config(local, hw_reconf_flags);
@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_de
  err_del_interface:
 	drv_remove_interface(local, sdata);
  err_stop:
-	if (!local->open_count)
+	if (!local_read(&local->open_count))
 		drv_stop(local);
  err_del_bss:
 	sdata->bss = NULL;
@@ -856,7 +856,7 @@ static void ieee80211_do_stop(struct iee
 	}
 
 	if (going_down)
-		local->open_count--;
+		local_dec(&local->open_count);
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
@@ -923,7 +923,7 @@ static void ieee80211_do_stop(struct iee
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
-	if (local->open_count == 0)
+	if (local_read(&local->open_count) == 0)
 		ieee80211_clear_tx_pending(local);
 
 	/*
@@ -963,7 +963,7 @@ static void ieee80211_do_stop(struct iee
 
 	ieee80211_recalc_ps(local, -1);
 
-	if (local->open_count == 0) {
+	if (local_read(&local->open_count) == 0) {
 		ieee80211_stop_device(local);
 
 		/* no reconfiguring after stop! */
@@ -974,7 +974,7 @@ static void ieee80211_do_stop(struct iee
 	ieee80211_configure_filter(local);
 	ieee80211_hw_config(local, hw_reconf_flags);
 
-	if (local->monitors == local->open_count)
+	if (local->monitors == local_read(&local->open_count))
 		ieee80211_add_virtual_monitor(local);
 }
 
diff -ruNp linux-3.13.11/net/mac80211/main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/main.c
--- linux-3.13.11/net/mac80211/main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/main.c	2014-07-09 12:00:16.000000000 +0200
@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211
 		changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
 			     IEEE80211_CONF_CHANGE_POWER);
 
-	if (changed && local->open_count) {
+	if (changed && local_read(&local->open_count)) {
 		ret = drv_config(local, changed);
 		/*
 		 * Goal:
diff -ruNp linux-3.13.11/net/mac80211/pm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/pm.c
--- linux-3.13.11/net/mac80211/pm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/pm.c	2014-07-09 12:00:16.000000000 +0200
@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
 
-	if (!local->open_count)
+	if (!local_read(&local->open_count))
 		goto suspend;
 
 	ieee80211_scan_cancel(local);
@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211
 	cancel_work_sync(&local->dynamic_ps_enable_work);
 	del_timer_sync(&local->dynamic_ps_timer);
 
-	local->wowlan = wowlan && local->open_count;
+	local->wowlan = wowlan && local_read(&local->open_count);
 	if (local->wowlan) {
 		int err = drv_suspend(local, wowlan);
 		if (err < 0) {
@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211
 	WARN_ON(!list_empty(&local->chanctx_list));
 
 	/* stop hardware - this must stop RX */
-	if (local->open_count)
+	if (local_read(&local->open_count))
 		ieee80211_stop_device(local);
 
  suspend:
diff -ruNp linux-3.13.11/net/mac80211/rate.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/rate.c
--- linux-3.13.11/net/mac80211/rate.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/rate.c	2014-07-09 12:00:16.000000000 +0200
@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct
 
 	ASSERT_RTNL();
 
-	if (local->open_count)
+	if (local_read(&local->open_count))
 		return -EBUSY;
 
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
diff -ruNp linux-3.13.11/net/mac80211/rc80211_pid_debugfs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/rc80211_pid_debugfs.c
--- linux-3.13.11/net/mac80211/rc80211_pid_debugfs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/rc80211_pid_debugfs.c	2014-07-09 12:00:16.000000000 +0200
@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_r
 
 	spin_unlock_irqrestore(&events->lock, status);
 
-	if (copy_to_user(buf, pb, p))
+	if (p > sizeof(pb) || copy_to_user(buf, pb, p))
 		return -EFAULT;
 
 	return p;
diff -ruNp linux-3.13.11/net/mac80211/util.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/util.c
--- linux-3.13.11/net/mac80211/util.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/mac80211/util.c	2014-07-09 12:00:16.000000000 +0200
@@ -1474,7 +1474,7 @@ int ieee80211_reconfig(struct ieee80211_
 	}
 #endif
 	/* everything else happens only if HW was up & running */
-	if (!local->open_count)
+	if (!local_read(&local->open_count))
 		goto wake_up;
 
 	/*
@@ -1699,7 +1699,7 @@ int ieee80211_reconfig(struct ieee80211_
 	local->in_reconfig = false;
 	barrier();
 
-	if (local->monitors == local->open_count && local->monitors > 0)
+	if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
 		ieee80211_add_virtual_monitor(local);
 
 	/*
diff -ruNp linux-3.13.11/net/netfilter/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/Kconfig
--- linux-3.13.11/net/netfilter/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/Kconfig	2014-07-09 12:00:16.000000000 +0200
@@ -1002,6 +1002,16 @@ config NETFILTER_XT_MATCH_ESP
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_GRADM
+	tristate '"gradm" match support'
+	depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
+	depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
+	---help---
+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
+	  It is useful when iptables rules are applied early on bootup to
+	  prevent connections to the machine (except from a trusted host)
+	  while the RBAC system is disabled.
+
 config NETFILTER_XT_MATCH_HASHLIMIT
 	tristate '"hashlimit" match support'
 	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
diff -ruNp linux-3.13.11/net/netfilter/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/Makefile
--- linux-3.13.11/net/netfilter/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/Makefile	2014-07-09 12:00:16.000000000 +0200
@@ -130,6 +130,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
diff -ruNp linux-3.13.11/net/netfilter/ipset/ip_set_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipset/ip_set_core.c
--- linux-3.13.11/net/netfilter/ipset/ip_set_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipset/ip_set_core.c	2014-07-09 12:00:16.000000000 +0200
@@ -1950,7 +1950,7 @@ done:
 	return ret;
 }
 
-static struct nf_sockopt_ops so_set __read_mostly = {
+static struct nf_sockopt_ops so_set = {
 	.pf		= PF_INET,
 	.get_optmin	= SO_IP_SET,
 	.get_optmax	= SO_IP_SET + 1,
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_conn.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_conn.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_conn.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_conn.c	2014-07-09 12:00:16.000000000 +0200
@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
 	/* Increase the refcnt counter of the dest */
 	ip_vs_dest_hold(dest);
 
-	conn_flags = atomic_read(&dest->conn_flags);
+	conn_flags = atomic_read_unchecked(&dest->conn_flags);
 	if (cp->protocol != IPPROTO_UDP)
 		conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
 	flags = cp->flags;
@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
 
 	cp->control = NULL;
 	atomic_set(&cp->n_control, 0);
-	atomic_set(&cp->in_pkts, 0);
+	atomic_set_unchecked(&cp->in_pkts, 0);
 
 	cp->packet_xmit = NULL;
 	cp->app = NULL;
@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip
 
 	/* Don't drop the entry if its number of incoming packets is not
 	   located in [0, 8] */
-	i = atomic_read(&cp->in_pkts);
+	i = atomic_read_unchecked(&cp->in_pkts);
 	if (i > 8 || i < 0) return 0;
 
 	if (!todrop_rate[i]) return 0;
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_core.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_core.c	2014-07-09 12:00:16.000000000 +0200
@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *sv
 		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
 		/* do not touch skb anymore */
 
-		atomic_inc(&cp->in_pkts);
+		atomic_inc_unchecked(&cp->in_pkts);
 		ip_vs_conn_put(cp);
 		return ret;
 	}
@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk
 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
 		pkts = sysctl_sync_threshold(ipvs);
 	else
-		pkts = atomic_add_return(1, &cp->in_pkts);
+		pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
 		ip_vs_sync_conn(net, cp, pkts);
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_ctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_ctl.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_ctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_ctl.c	2014-07-09 12:00:16.000000000 +0200
@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service
 		 */
 		ip_vs_rs_hash(ipvs, dest);
 	}
-	atomic_set(&dest->conn_flags, conn_flags);
+	atomic_set_unchecked(&dest->conn_flags, conn_flags);
 
 	/* bind the service */
 	old_svc = rcu_dereference_protected(dest->svc, 1);
@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *tab
  *	align with netns init in ip_vs_control_net_init()
  */
 
-static struct ctl_table vs_vars[] = {
+static ctl_table_no_const vs_vars[] __read_only = {
 	{
 		.procname	= "amemthresh",
 		.maxlen		= sizeof(int),
@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct se
 					   "      %-7s %-6d %-10d %-10d\n",
 					   &dest->addr.in6,
 					   ntohs(dest->port),
-					   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+					   ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
 					   atomic_read(&dest->weight),
 					   atomic_read(&dest->activeconns),
 					   atomic_read(&dest->inactconns));
@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct se
 					   "%-7s %-6d %-10d %-10d\n",
 					   ntohl(dest->addr.ip),
 					   ntohs(dest->port),
-					   ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+					   ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
 					   atomic_read(&dest->weight),
 					   atomic_read(&dest->activeconns),
 					   atomic_read(&dest->inactconns));
@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net
 
 			entry.addr = dest->addr.ip;
 			entry.port = dest->port;
-			entry.conn_flags = atomic_read(&dest->conn_flags);
+			entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
 			entry.weight = atomic_read(&dest->weight);
 			entry.u_threshold = dest->u_threshold;
 			entry.l_threshold = dest->l_threshold;
@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct s
 	if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
 	    nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-			(atomic_read(&dest->conn_flags) &
+			(atomic_read_unchecked(&dest->conn_flags) &
 			 IP_VS_CONN_F_FWD_MASK)) ||
 	    nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
 			atomic_read(&dest->weight)) ||
@@ -3580,7 +3580,7 @@ out:
 }
 
 
-static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
+static const struct genl_ops ip_vs_genl_ops[] = {
 	{
 		.cmd	= IPVS_CMD_NEW_SERVICE,
 		.flags	= GENL_ADMIN_PERM,
@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_
 {
 	int idx;
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ctl_table *tbl;
+	ctl_table_no_const *tbl;
 
 	atomic_set(&ipvs->dropentry, 0);
 	spin_lock_init(&ipvs->dropentry_lock);
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_lblc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_lblc.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_lblc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_lblc.c	2014-07-09 12:00:16.000000000 +0200
@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
  *      IPVS LBLC sysctl table
  */
 #ifdef CONFIG_SYSCTL
-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
 	{
 		.procname	= "lblc_expiration",
 		.data		= NULL,
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_lblcr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_lblcr.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_lblcr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_lblcr.c	2014-07-09 12:00:16.000000000 +0200
@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
  *      IPVS LBLCR sysctl table
  */
 
-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
 	{
 		.procname	= "lblcr_expiration",
 		.data		= NULL,
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_sync.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_sync.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_sync.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_sync.c	2014-07-09 12:00:16.000000000 +0200
@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct ne
 	cp = cp->control;
 	if (cp) {
 		if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-			pkts = atomic_add_return(1, &cp->in_pkts);
+			pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
 		ip_vs_sync_conn(net, cp->control, pkts);
@@ -771,7 +771,7 @@ control:
 	if (!cp)
 		return;
 	if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-		pkts = atomic_add_return(1, &cp->in_pkts);
+		pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 	else
 		pkts = sysctl_sync_threshold(ipvs);
 	goto sloop;
@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *
 
 	if (opt)
 		memcpy(&cp->in_seq, opt, sizeof(*opt));
-	atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+	atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
 	cp->state = state;
 	cp->old_state = cp->state;
 	/*
diff -ruNp linux-3.13.11/net/netfilter/ipvs/ip_vs_xmit.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_xmit.c
--- linux-3.13.11/net/netfilter/ipvs/ip_vs_xmit.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/ipvs/ip_vs_xmit.c	2014-07-09 12:00:16.000000000 +0200
@@ -316,7 +316,7 @@ __ip_vs_route_output_v6(struct net *net,
 		return dst;
 	if (ipv6_addr_any(&fl6.saddr) &&
 	    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
-			       &fl6.daddr, 0, &fl6.saddr) < 0)
+			       &fl6.daddr, 0, &fl6.saddr, NULL) < 0)
 		goto out_err;
 	if (do_xfrm) {
 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
 		else
 			rc = NF_ACCEPT;
 		/* do not touch skb anymore */
-		atomic_inc(&cp->in_pkts);
+		atomic_inc_unchecked(&cp->in_pkts);
 		goto out;
 	}
 
@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
 		else
 			rc = NF_ACCEPT;
 		/* do not touch skb anymore */
-		atomic_inc(&cp->in_pkts);
+		atomic_inc_unchecked(&cp->in_pkts);
 		goto out;
 	}
 
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_acct.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_acct.c
--- linux-3.13.11/net/netfilter/nf_conntrack_acct.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_acct.c	2014-07-09 12:00:16.000000000 +0200
@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_acct_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
 			GFP_KERNEL);
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_core.c
--- linux-3.13.11/net/netfilter/nf_conntrack_core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_core.c	2014-07-09 12:00:16.000000000 +0200
@@ -1605,6 +1605,10 @@ void nf_conntrack_init_end(void)
 #define DYING_NULLS_VAL		((1<<30)+1)
 #define TEMPLATE_NULLS_VAL	((1<<30)+2)
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
+#endif
+
 int nf_conntrack_init_net(struct net *net)
 {
 	int ret;
@@ -1619,7 +1623,11 @@ int nf_conntrack_init_net(struct net *ne
 		goto err_stat;
 	}
 
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
+#else
 	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+#endif
 	if (!net->ct.slabname) {
 		ret = -ENOMEM;
 		goto err_slabname;
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_ecache.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_ecache.c
--- linux-3.13.11/net/netfilter/nf_conntrack_ecache.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_ecache.c	2014-07-09 12:00:16.000000000 +0200
@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_exten
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_event_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
 			GFP_KERNEL);
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_helper.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_helper.c
--- linux-3.13.11/net/netfilter/nf_conntrack_helper.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_helper.c	2014-07-09 12:00:16.000000000 +0200
@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_ta
 
 static int nf_conntrack_helper_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
 			GFP_KERNEL);
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_proto.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_proto.c
--- linux-3.13.11/net/netfilter/nf_conntrack_proto.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_proto.c	2014-07-09 12:00:16.000000000 +0200
@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
 
 static void
 nf_ct_unregister_sysctl(struct ctl_table_header **header,
-			struct ctl_table **table,
+			ctl_table_no_const **table,
 			unsigned int users)
 {
 	if (users > 0)
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_proto_dccp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_proto_dccp.c
--- linux-3.13.11/net/netfilter/nf_conntrack_proto_dccp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_proto_dccp.c	2014-07-09 12:00:16.000000000 +0200
@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct,
 out_invalid:
 	if (LOG_INVALID(net, IPPROTO_DCCP))
 		nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
-			      NULL, msg);
+			      NULL, "%s", msg);
 	return false;
 }
 
@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, s
 
 out_invalid:
 	if (LOG_INVALID(net, IPPROTO_DCCP))
-		nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
+		nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
 	return -NF_ACCEPT;
 }
 
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_standalone.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_standalone.c
--- linux-3.13.11/net/netfilter/nf_conntrack_standalone.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_standalone.c	2014-07-09 12:00:16.000000000 +0200
@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_
 
 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
 			GFP_KERNEL);
diff -ruNp linux-3.13.11/net/netfilter/nf_conntrack_timestamp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_timestamp.c
--- linux-3.13.11/net/netfilter/nf_conntrack_timestamp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_conntrack_timestamp.c	2014-07-09 12:00:16.000000000 +0200
@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_exte
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
 			GFP_KERNEL);
diff -ruNp linux-3.13.11/net/netfilter/nf_log.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_log.c
--- linux-3.13.11/net/netfilter/nf_log.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_log.c	2014-07-09 12:00:16.000000000 +0200
@@ -243,7 +243,7 @@ static const struct file_operations nflo
 
 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct c
 		rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
 		mutex_unlock(&nf_log_mutex);
 	} else {
+		ctl_table_no_const nf_log_table = *table;
+
 		mutex_lock(&nf_log_mutex);
 		logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
 						   lockdep_is_held(&nf_log_mutex));
 		if (!logger)
-			table->data = "NONE";
+			nf_log_table.data = "NONE";
 		else
-			table->data = logger->name;
-		r = proc_dostring(table, write, buffer, lenp, ppos);
+			nf_log_table.data = logger->name;
+		r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
 		mutex_unlock(&nf_log_mutex);
 	}
 
diff -ruNp linux-3.13.11/net/netfilter/nf_sockopt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_sockopt.c
--- linux-3.13.11/net/netfilter/nf_sockopt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_sockopt.c	2014-07-09 12:00:16.000000000 +0200
@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockop
 		}
 	}
 
-	list_add(&reg->list, &nf_sockopts);
+	pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
 out:
 	mutex_unlock(&nf_sockopt_mutex);
 	return ret;
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
 	mutex_lock(&nf_sockopt_mutex);
-	list_del(&reg->list);
+	pax_list_del((struct list_head *)&reg->list);
 	mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
diff -ruNp linux-3.13.11/net/netfilter/nf_tables_api.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_tables_api.c
--- linux-3.13.11/net/netfilter/nf_tables_api.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nf_tables_api.c	2014-07-09 12:00:16.000000000 +0200
@@ -148,8 +148,8 @@ static int nf_tables_chain_type_lookup(c
 #ifdef CONFIG_MODULES
 	if (type < 0 && autoload) {
 		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-		request_module("nft-chain-%u-%*.s", afi->family,
-			       nla_len(nla)-1, (const char *)nla_data(nla));
+		request_module("nft-chain-%u-%.*s", afi->family,
+			       nla_len(nla), (const char *)nla_data(nla));
 		nfnl_lock(NFNL_SUBSYS_NFTABLES);
 		type = __nf_tables_chain_type_lookup(afi->family, nla);
 	}
@@ -1916,7 +1916,8 @@ static const struct nft_set_ops *nft_sel
 
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
 	[NFTA_SET_TABLE]		= { .type = NLA_STRING },
-	[NFTA_SET_NAME]			= { .type = NLA_STRING },
+	[NFTA_SET_NAME]			= { .type = NLA_STRING,
+					    .len = IFNAMSIZ - 1 },
 	[NFTA_SET_FLAGS]		= { .type = NLA_U32 },
 	[NFTA_SET_KEY_TYPE]		= { .type = NLA_U32 },
 	[NFTA_SET_KEY_LEN]		= { .type = NLA_U32 },
diff -ruNp linux-3.13.11/net/netfilter/nfnetlink_log.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nfnetlink_log.c
--- linux-3.13.11/net/netfilter/nfnetlink_log.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nfnetlink_log.c	2014-07-09 12:00:16.000000000 +0200
@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly
 struct nfnl_log_net {
 	spinlock_t instances_lock;
 	struct hlist_head instance_table[INSTANCE_BUCKETS];
-	atomic_t global_seq;
+	atomic_unchecked_t global_seq;
 };
 
 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_n
 	/* global sequence number */
 	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
 	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
-			 htonl(atomic_inc_return(&log->global_seq))))
+			 htonl(atomic_inc_return_unchecked(&log->global_seq))))
 		goto nla_put_failure;
 
 	if (data_len) {
diff -ruNp linux-3.13.11/net/netfilter/nft_compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nft_compat.c
--- linux-3.13.11/net/netfilter/nft_compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/nft_compat.c	2014-07-09 12:00:16.000000000 +0200
@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, co
 		/* We want to reuse existing compat_to_user */
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
-		t->compat_to_user(out, in);
+		t->compat_to_user((void __force_user *)out, in);
 		set_fs(old_fs);
 		ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
 		kfree(out);
@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, con
 		/* We want to reuse existing compat_to_user */
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
-		m->compat_to_user(out, in);
+		m->compat_to_user((void __force_user *)out, in);
 		set_fs(old_fs);
 		ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
 		kfree(out);
diff -ruNp linux-3.13.11/net/netfilter/xt_gradm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/xt_gradm.c
--- linux-3.13.11/net/netfilter/xt_gradm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/xt_gradm.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,51 @@
+/*
+ *	gradm match for netfilter
+ *	Copyright © Zbigniew Krzystolik, 2010
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either version
+ *	2 or 3 as published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/grsecurity.h>
+#include <linux/netfilter/xt_gradm.h>
+
+static bool
+gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_gradm_mtinfo *info = par->matchinfo;
+	bool retval = false;
+	if (gr_acl_is_enabled())
+		retval = true;
+	return retval ^ info->invflags;
+}
+
+static struct xt_match gradm_mt_reg __read_mostly = {
+		.name       = "gradm",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.match      = gradm_mt,
+		.matchsize  = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
+		.me         = THIS_MODULE,
+};
+
+static int __init gradm_mt_init(void)
+{
+	return xt_register_match(&gradm_mt_reg);
+}
+
+static void __exit gradm_mt_exit(void)
+{
+	xt_unregister_match(&gradm_mt_reg);
+}
+
+module_init(gradm_mt_init);
+module_exit(gradm_mt_exit);
+MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
+MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_gradm");
+MODULE_ALIAS("ip6t_gradm");
diff -ruNp linux-3.13.11/net/netfilter/xt_statistic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/xt_statistic.c
--- linux-3.13.11/net/netfilter/xt_statistic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netfilter/xt_statistic.c	2014-07-09 12:00:16.000000000 +0200
@@ -19,7 +19,7 @@
 #include <linux/module.h>
 
 struct xt_statistic_priv {
-	atomic_t count;
+	atomic_unchecked_t count;
 } ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb,
 		break;
 	case XT_STATISTIC_MODE_NTH:
 		do {
-			oval = atomic_read(&info->master->count);
+			oval = atomic_read_unchecked(&info->master->count);
 			nval = (oval == info->u.nth.every) ? 0 : oval + 1;
-		} while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+		} while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
 		if (nval == 0)
 			ret = !ret;
 		break;
@@ -64,7 +64,7 @@ static int statistic_mt_check(const stru
 	info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
 	if (info->master == NULL)
 		return -ENOMEM;
-	atomic_set(&info->master->count, info->u.nth.count);
+	atomic_set_unchecked(&info->master->count, info->u.nth.count);
 
 	return 0;
 }
diff -ruNp linux-3.13.11/net/netlink/af_netlink.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netlink/af_netlink.c
--- linux-3.13.11/net/netlink/af_netlink.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netlink/af_netlink.c	2014-07-09 12:00:16.000000000 +0200
@@ -58,6 +58,9 @@
 #include <linux/mutex.h>
 #include <linux/vmalloc.h>
 #include <linux/if_arp.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
 #include <asm/cacheflush.h>
 
 #include <net/net_namespace.h>
@@ -249,7 +252,7 @@ static void netlink_overrun(struct sock
 			sk->sk_error_report(sk);
 		}
 	}
-	atomic_inc(&sk->sk_drops);
+	atomic_inc_unchecked(&sk->sk_drops);
 }
 
 static void netlink_rcv_wake(struct sock *sk)
@@ -1481,8 +1484,8 @@ static int netlink_connect(struct socket
 	if (addr->sa_family != AF_NETLINK)
 		return -EINVAL;
 
-	/* Only superuser is allowed to send multicasts */
-	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+	if ((nladdr->nl_groups || nladdr->nl_pid) &&
+	    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
 		return -EPERM;
 
 	if (!nlk->portid)
@@ -2850,6 +2853,8 @@ static struct sock *netlink_seq_socket_i
 			sk_for_each(s, &hash->table[j]) {
 				if (sock_net(s) != seq_file_net(seq))
 					continue;
+				if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+					continue;
 				if (off == pos) {
 					iter->link = i;
 					iter->hash_idx = j;
@@ -2886,7 +2891,8 @@ static void *netlink_seq_next(struct seq
 	s = v;
 	do {
 		s = sk_next(s);
-	} while (s && !nl_table[s->sk_protocol].compare(net, s));
+	} while (s && (!nl_table[s->sk_protocol].compare(net, s) ||
+		!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
 	if (s)
 		return s;
 
@@ -2899,7 +2905,8 @@ static void *netlink_seq_next(struct seq
 		for (; j <= hash->mask; j++) {
 			s = sk_head(&hash->table[j]);
 
-			while (s && !nl_table[s->sk_protocol].compare(net, s))
+			while (s && (!nl_table[s->sk_protocol].compare(net, s) ||
+				!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
 				s = sk_next(s);
 			if (s) {
 				iter->link = i;
@@ -2940,7 +2947,7 @@ static int netlink_seq_show(struct seq_f
 			   sk_wmem_alloc_get(s),
 			   nlk->cb_running,
 			   atomic_read(&s->sk_refcnt),
-			   atomic_read(&s->sk_drops),
+			   atomic_read_unchecked(&s->sk_drops),
 			   sock_i_ino(s)
 			);
 
diff -ruNp linux-3.13.11/net/netrom/af_netrom.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netrom/af_netrom.c
--- linux-3.13.11/net/netrom/af_netrom.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/netrom/af_netrom.c	2014-07-09 12:00:16.000000000 +0200
@@ -850,7 +850,6 @@ static int nr_getname(struct socket *soc
 		*uaddr_len = sizeof(struct full_sockaddr_ax25);
 	} else {
 		sax->fsa_ax25.sax25_family = AF_NETROM;
-		sax->fsa_ax25.sax25_ndigis = 0;
 		sax->fsa_ax25.sax25_call   = nr->source_addr;
 		*uaddr_len = sizeof(struct sockaddr_ax25);
 	}
diff -ruNp linux-3.13.11/net/packet/af_packet.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/packet/af_packet.c
--- linux-3.13.11/net/packet/af_packet.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/packet/af_packet.c	2014-07-09 12:00:16.000000000 +0200
@@ -1720,7 +1720,7 @@ static int packet_rcv(struct sk_buff *sk
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.stats1.tp_packets++;
-	skb->dropcount = atomic_read(&sk->sk_drops);
+	skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -1729,7 +1729,7 @@ static int packet_rcv(struct sk_buff *sk
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.stats1.tp_drops++;
-	atomic_inc(&sk->sk_drops);
+	atomic_inc_unchecked(&sk->sk_drops);
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -3275,7 +3275,7 @@ static int packet_getsockopt(struct sock
 	case PACKET_HDRLEN:
 		if (len > sizeof(int))
 			len = sizeof(int);
-		if (copy_from_user(&val, optval, len))
+		if (len > sizeof(val) || copy_from_user(&val, optval, len))
 			return -EFAULT;
 		switch (val) {
 		case TPACKET_V1:
@@ -3318,7 +3318,7 @@ static int packet_getsockopt(struct sock
 		len = lv;
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, data, len))
+	if (len > sizeof(st) || copy_to_user(optval, data, len))
 		return -EFAULT;
 	return 0;
 }
diff -ruNp linux-3.13.11/net/phonet/datagram.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/datagram.c
--- linux-3.13.11/net/phonet/datagram.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/datagram.c	2014-07-09 12:00:16.000000000 +0200
@@ -139,6 +139,9 @@ static int pn_recvmsg(struct kiocb *iocb
 			MSG_CMSG_COMPAT))
 		goto out_nofree;
 
+	if (addr_len)
+		*addr_len = sizeof(sa);
+
 	skb = skb_recv_datagram(sk, flags, noblock, &rval);
 	if (skb == NULL)
 		goto out_nofree;
@@ -159,10 +162,8 @@ static int pn_recvmsg(struct kiocb *iocb
 
 	rval = (flags & MSG_TRUNC) ? skb->len : copylen;
 
-	if (msg->msg_name != NULL) {
-		memcpy(msg->msg_name, &sa, sizeof(sa));
-		*addr_len = sizeof(sa);
-	}
+	if (msg->msg_name != NULL)
+		memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
 
 out:
 	skb_free_datagram(sk, skb);
diff -ruNp linux-3.13.11/net/phonet/pep.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/pep.c
--- linux-3.13.11/net/phonet/pep.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/pep.c	2014-07-09 12:00:16.000000000 +0200
@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk,
 
 	case PNS_PEP_CTRL_REQ:
 		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			break;
 		}
 		__skb_pull(skb, 4);
@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk,
 		}
 
 		if (pn->rx_credits == 0) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			err = -ENOBUFS;
 			break;
 		}
@@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct so
 		}
 
 		if (pn->rx_credits == 0) {
-			atomic_inc(&sk->sk_drops);
+			atomic_inc_unchecked(&sk->sk_drops);
 			err = NET_RX_DROP;
 			break;
 		}
diff -ruNp linux-3.13.11/net/phonet/socket.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/socket.c
--- linux-3.13.11/net/phonet/socket.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/socket.c	2014-07-09 12:00:16.000000000 +0200
@@ -611,7 +611,7 @@ static int pn_sock_seq_show(struct seq_f
 			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
 			sock_i_ino(sk),
 			atomic_read(&sk->sk_refcnt), sk,
-			atomic_read(&sk->sk_drops));
+			atomic_read_unchecked(&sk->sk_drops));
 	}
 	seq_pad(seq, '\n');
 	return 0;
diff -ruNp linux-3.13.11/net/phonet/sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/sysctl.c
--- linux-3.13.11/net/phonet/sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/phonet/sysctl.c	2014-07-09 12:00:16.000000000 +0200
@@ -67,7 +67,7 @@ static int proc_local_port_range(struct
 {
 	int ret;
 	int range[2] = {local_port_range[0], local_port_range[1]};
-	struct ctl_table tmp = {
+	ctl_table_no_const tmp = {
 		.data = &range,
 		.maxlen = sizeof(range),
 		.mode = table->mode,
diff -ruNp linux-3.13.11/net/rds/cong.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/cong.c
--- linux-3.13.11/net/rds/cong.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/cong.c	2014-07-09 12:00:16.000000000 +0200
@@ -78,7 +78,7 @@
  * finds that the saved generation number is smaller than the global generation
  * number, it wakes up the process.
  */
-static atomic_t		rds_cong_generation = ATOMIC_INIT(0);
+static atomic_unchecked_t		rds_cong_generation = ATOMIC_INIT(0);
 
 /*
  * Congestion monitoring
@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_con
 	rdsdebug("waking map %p for %pI4\n",
 	  map, &map->m_addr);
 	rds_stats_inc(s_cong_update_received);
-	atomic_inc(&rds_cong_generation);
+	atomic_inc_unchecked(&rds_cong_generation);
 	if (waitqueue_active(&map->m_waitq))
 		wake_up(&map->m_waitq);
 	if (waitqueue_active(&rds_poll_waitq))
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
 
 int rds_cong_updated_since(unsigned long *recent)
 {
-	unsigned long gen = atomic_read(&rds_cong_generation);
+	unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
 
 	if (likely(*recent == gen))
 		return 0;
diff -ruNp linux-3.13.11/net/rds/ib.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib.h
--- linux-3.13.11/net/rds/ib.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib.h	2014-07-09 12:00:16.000000000 +0200
@@ -128,7 +128,7 @@ struct rds_ib_connection {
 	/* sending acks */
 	unsigned long		i_ack_flags;
 #ifdef KERNEL_HAS_ATOMIC64
-	atomic64_t		i_ack_next;	/* next ACK to send */
+	atomic64_unchecked_t	i_ack_next;	/* next ACK to send */
 #else
 	spinlock_t		i_ack_lock;	/* protect i_ack_next */
 	u64			i_ack_next;	/* next ACK to send */
diff -ruNp linux-3.13.11/net/rds/ib_cm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib_cm.c
--- linux-3.13.11/net/rds/ib_cm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib_cm.c	2014-07-09 12:00:16.000000000 +0200
@@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_con
 	/* Clear the ACK state */
 	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 #ifdef KERNEL_HAS_ATOMIC64
-	atomic64_set(&ic->i_ack_next, 0);
+	atomic64_set_unchecked(&ic->i_ack_next, 0);
 #else
 	ic->i_ack_next = 0;
 #endif
diff -ruNp linux-3.13.11/net/rds/ib_recv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib_recv.c
--- linux-3.13.11/net/rds/ib_recv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/ib_recv.c	2014-07-09 12:00:16.000000000 +0200
@@ -596,7 +596,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
 				int ack_required)
 {
-	atomic64_set(&ic->i_ack_next, seq);
+	atomic64_set_unchecked(&ic->i_ack_next, seq);
 	if (ack_required) {
 		smp_mb__before_clear_bit();
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -608,7 +608,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
 	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 	smp_mb__after_clear_bit();
 
-	return atomic64_read(&ic->i_ack_next);
+	return atomic64_read_unchecked(&ic->i_ack_next);
 }
 #endif
 
diff -ruNp linux-3.13.11/net/rds/iw.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw.h
--- linux-3.13.11/net/rds/iw.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw.h	2014-07-09 12:00:16.000000000 +0200
@@ -134,7 +134,7 @@ struct rds_iw_connection {
 	/* sending acks */
 	unsigned long		i_ack_flags;
 #ifdef KERNEL_HAS_ATOMIC64
-	atomic64_t		i_ack_next;	/* next ACK to send */
+	atomic64_unchecked_t	i_ack_next;	/* next ACK to send */
 #else
 	spinlock_t		i_ack_lock;	/* protect i_ack_next */
 	u64			i_ack_next;	/* next ACK to send */
diff -ruNp linux-3.13.11/net/rds/iw_cm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw_cm.c
--- linux-3.13.11/net/rds/iw_cm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw_cm.c	2014-07-09 12:00:16.000000000 +0200
@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_con
 	/* Clear the ACK state */
 	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
 #ifdef KERNEL_HAS_ATOMIC64
-	atomic64_set(&ic->i_ack_next, 0);
+	atomic64_set_unchecked(&ic->i_ack_next, 0);
 #else
 	ic->i_ack_next = 0;
 #endif
diff -ruNp linux-3.13.11/net/rds/iw_recv.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw_recv.c
--- linux-3.13.11/net/rds/iw_recv.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/iw_recv.c	2014-07-09 12:00:16.000000000 +0200
@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
 				int ack_required)
 {
-	atomic64_set(&ic->i_ack_next, seq);
+	atomic64_set_unchecked(&ic->i_ack_next, seq);
 	if (ack_required) {
 		smp_mb__before_clear_bit();
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
 	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 	smp_mb__after_clear_bit();
 
-	return atomic64_read(&ic->i_ack_next);
+	return atomic64_read_unchecked(&ic->i_ack_next);
 }
 #endif
 
diff -ruNp linux-3.13.11/net/rds/rds.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/rds.h
--- linux-3.13.11/net/rds/rds.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/rds.h	2014-07-09 12:00:16.000000000 +0200
@@ -449,7 +449,7 @@ struct rds_transport {
 	void (*sync_mr)(void *trans_private, int direction);
 	void (*free_mr)(void *trans_private, int invalidate);
 	void (*flush_mrs)(void);
-};
+} __do_const;
 
 struct rds_sock {
 	struct sock		rs_sk;
diff -ruNp linux-3.13.11/net/rds/tcp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/tcp.c
--- linux-3.13.11/net/rds/tcp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/tcp.c	2014-07-09 12:00:16.000000000 +0200
@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock
 	int val = 1;
 
 	set_fs(KERNEL_DS);
-	sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
+	sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
 			      sizeof(val));
 	set_fs(oldfs);
 }
diff -ruNp linux-3.13.11/net/rds/tcp_send.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/tcp_send.c
--- linux-3.13.11/net/rds/tcp_send.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rds/tcp_send.c	2014-07-09 12:00:16.000000000 +0200
@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *
 
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
-	sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
+	sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
 			      sizeof(val));
 	set_fs(oldfs);
 }
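
The two rds hunks above only change an annotation: the buffers handed to sock->ops->setsockopt() live in kernel memory, and with set_fs(KERNEL_DS) in effect the plain "__user __force" cast becomes grsec's "__force_user". Below is a minimal sketch of how such sparse address-space annotations are typically wired up; the macro names and attribute values are my assumptions for illustration, not taken from this patch, and outside sparse they compile away entirely.

#include <stddef.h>

#ifdef __CHECKER__
# define __user      __attribute__((noderef, address_space(1)))
# define __force     __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

/* An interface that, like setsockopt, expects a user-space pointer. */
static size_t fake_setsockopt(const char __user *optval, size_t optlen)
{
	(void)optval;
	return optlen;
}

int main(void)
{
	int val = 1;

	/* Kernel-side buffer handed to a __user-annotated interface; the
	 * cast documents the intent, which is what the patched
	 * rds_tcp_nonagle()/rds_tcp_cork() calls do under KERNEL_DS. */
	return fake_setsockopt((const char __force_user *)&val, sizeof(val)) == sizeof(val) ? 0 : 1;
}
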
diff -ruNp linux-3.13.11/net/rxrpc/af_rxrpc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/af_rxrpc.c
--- linux-3.13.11/net/rxrpc/af_rxrpc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/af_rxrpc.c	2014-07-09 12:00:16.000000000 +0200
@@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_
 __be32 rxrpc_epoch;
 
 /* current debugging ID */
-atomic_t rxrpc_debug_id;
+atomic_unchecked_t rxrpc_debug_id;
 
 /* count of skbs currently in use */
 atomic_t rxrpc_n_skbs;
diff -ruNp linux-3.13.11/net/rxrpc/ar-ack.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-ack.c
--- linux-3.13.11/net/rxrpc/ar-ack.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-ack.c	2014-07-09 12:00:16.000000000 +0200
@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
 
 	_enter("{%d,%d,%d,%d},",
 	       call->acks_hard, call->acks_unacked,
-	       atomic_read(&call->sequence),
+	       atomic_read_unchecked(&call->sequence),
 	       CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
 
 	stop = 0;
@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
 
 			/* each Tx packet has a new serial number */
 			sp->hdr.serial =
-				htonl(atomic_inc_return(&call->conn->serial));
+				htonl(atomic_inc_return_unchecked(&call->conn->serial));
 
 			hdr = (struct rxrpc_header *) txb->head;
 			hdr->serial = sp->hdr.serial;
@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struc
  */
 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
 {
-	rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
+	rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
 }
 
 /*
@@ -629,7 +629,7 @@ process_further:
 
 		latest = ntohl(sp->hdr.serial);
 		hard = ntohl(ack.firstPacket);
-		tx = atomic_read(&call->sequence);
+		tx = atomic_read_unchecked(&call->sequence);
 
 		_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
 		       latest,
@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
 	goto maybe_reschedule;
 
 send_ACK_with_skew:
-	ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
+	ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
 			    ntohl(ack.serial));
 send_ACK:
 	mtu = call->conn->trans->peer->if_mtu;
@@ -1173,7 +1173,7 @@ send_ACK:
 	ackinfo.rxMTU	= htonl(5692);
 	ackinfo.jumbo_max = htonl(4);
 
-	hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+	hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
 	_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
 	       ntohl(hdr.serial),
 	       ntohs(ack.maxSkew),
@@ -1191,7 +1191,7 @@ send_ACK:
 send_message:
 	_debug("send message");
 
-	hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
+	hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
 	_proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
 send_message_2:
 
diff -ruNp linux-3.13.11/net/rxrpc/ar-call.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-call.c
--- linux-3.13.11/net/rxrpc/ar-call.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-call.c	2014-07-09 12:00:16.000000000
+0200
@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
 	spin_lock_init(&call->lock);
 	rwlock_init(&call->state_lock);
 	atomic_set(&call->usage, 1);
-	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+	call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
 
 	memset(&call->sock_node, 0xed, sizeof(call->sock_node));
diff -ruNp linux-3.13.11/net/rxrpc/ar-connection.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-connection.c
--- linux-3.13.11/net/rxrpc/ar-connection.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-connection.c	2014-07-09 12:00:16.000000000 +0200
@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
 		rwlock_init(&conn->lock);
 		spin_lock_init(&conn->state_lock);
 		atomic_set(&conn->usage, 1);
-		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
+		conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 		conn->avail_calls = RXRPC_MAXCALLS;
 		conn->size_align = 4;
 		conn->header_size = sizeof(struct rxrpc_header);
diff -ruNp linux-3.13.11/net/rxrpc/ar-connevent.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-connevent.c
--- linux-3.13.11/net/rxrpc/ar-connevent.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-connevent.c	2014-07-09 12:00:16.000000000 +0200
@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
 
 	len = iov[0].iov_len + iov[1].iov_len;
 
-	hdr.serial = htonl(atomic_inc_return(&conn->serial));
+	hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
 	_proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
 
 	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
diff -ruNp linux-3.13.11/net/rxrpc/ar-input.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-input.c
--- linux-3.13.11/net/rxrpc/ar-input.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-input.c	2014-07-09 12:00:16.000000000 +0200
@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
 	/* track the latest serial number on this connection for ACK packet
 	 * information */
 	serial = ntohl(sp->hdr.serial);
-	hi_serial = atomic_read(&call->conn->hi_serial);
+	hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
 	while (serial > hi_serial)
-		hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
+		hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
 					   serial);
 
 	/* request ACK generation for any ACK or DATA packet that requests
diff -ruNp linux-3.13.11/net/rxrpc/ar-internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-internal.h
--- linux-3.13.11/net/rxrpc/ar-internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-internal.h	2014-07-09 12:00:16.000000000 +0200
@@ -272,8 +272,8 @@ struct rxrpc_connection {
 	int			error;		/* error code for local abort */
 	int			debug_id;	/* debug ID for printks */
 	unsigned int		call_counter;	/* call ID counter */
-	atomic_t		serial;		/* packet serial number counter */
-	atomic_t		hi_serial;	/* highest serial number received */
+	atomic_unchecked_t	serial;		/* packet serial number counter */
+	atomic_unchecked_t	hi_serial;	/* highest serial number received */
 	u8			avail_calls;	/* number of calls available */
 	u8			size_align;	/* data size alignment (for security) */
 	u8			header_size;	/* rxrpc + security header size */
@@ -346,7 +346,7 @@ struct rxrpc_call {
 	spinlock_t		lock;
 	rwlock_t		state_lock;	/* lock for state transition */
 	atomic_t		usage;
-	atomic_t		sequence;	/* Tx data packet sequence counter */
+	atomic_unchecked_t	sequence;	/* Tx data packet sequence counter */
 	u32			abort_code;	/* local/remote abort code */
 	enum {					/* current state of call */
 		RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
  */
 extern atomic_t rxrpc_n_skbs;
 extern __be32 rxrpc_epoch;
-extern atomic_t rxrpc_debug_id;
+extern atomic_unchecked_t rxrpc_debug_id;
 extern struct workqueue_struct *rxrpc_workqueue;
 
 /*
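
Throughout the rxrpc hunks above (and the rds ones earlier) grsec/PaX converts counters that are expected to wrap, such as the debug IDs and the per-connection serial/sequence numbers, from atomic_t to atomic_unchecked_t so the REFCOUNT overflow instrumentation does not fire on them. A rough userspace sketch of the idea follows, using C11 atomics purely as a stand-in for the real PaX types.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for atomic_unchecked_t: a counter that is allowed to wrap
 * silently, with none of the reference-count overflow checking. */
typedef struct { atomic_uint counter; } atomic_unchecked_t;

static unsigned int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* Wraps modulo 2^32 by design; no saturation, no overflow report. */
	return atomic_fetch_add_explicit(&v->counter, 1u, memory_order_relaxed) + 1u;
}

int main(void)
{
	atomic_unchecked_t serial = { 0 };

	for (int i = 0; i < 3; i++)
		printf("serial %u\n", atomic_inc_return_unchecked(&serial));
	return 0;
}
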
diff -ruNp linux-3.13.11/net/rxrpc/ar-local.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-local.c
--- linux-3.13.11/net/rxrpc/ar-local.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-local.c	2014-07-09 12:00:16.000000000 +0200
@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
 		spin_lock_init(&local->lock);
 		rwlock_init(&local->services_lock);
 		atomic_set(&local->usage, 1);
-		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
+		local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 	}
 
diff -ruNp linux-3.13.11/net/rxrpc/ar-output.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-output.c
--- linux-3.13.11/net/rxrpc/ar-output.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-output.c	2014-07-09 12:00:16.000000000 +0200
@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb
 			sp->hdr.cid = call->cid;
 			sp->hdr.callNumber = call->call_id;
 			sp->hdr.seq =
-				htonl(atomic_inc_return(&call->sequence));
+				htonl(atomic_inc_return_unchecked(&call->sequence));
 			sp->hdr.serial =
-				htonl(atomic_inc_return(&conn->serial));
+				htonl(atomic_inc_return_unchecked(&conn->serial));
 			sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
 			sp->hdr.userStatus = 0;
 			sp->hdr.securityIndex = conn->security_ix;
diff -ruNp linux-3.13.11/net/rxrpc/ar-peer.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-peer.c
--- linux-3.13.11/net/rxrpc/ar-peer.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-peer.c	2014-07-09 12:00:16.000000000 +0200
@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
 		INIT_LIST_HEAD(&peer->error_targets);
 		spin_lock_init(&peer->lock);
 		atomic_set(&peer->usage, 1);
-		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
+		peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 		memcpy(&peer->srx, srx, sizeof(*srx));
 
 		rxrpc_assess_MTU_size(peer);
diff -ruNp linux-3.13.11/net/rxrpc/ar-proc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-proc.c
--- linux-3.13.11/net/rxrpc/ar-proc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-proc.c	2014-07-09 12:00:16.000000000 +0200
@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
 		   atomic_read(&conn->usage),
 		   rxrpc_conn_states[conn->state],
 		   key_serial(conn->key),
-		   atomic_read(&conn->serial),
-		   atomic_read(&conn->hi_serial));
+		   atomic_read_unchecked(&conn->serial),
+		   atomic_read_unchecked(&conn->hi_serial));
 
 	return 0;
 }
diff -ruNp linux-3.13.11/net/rxrpc/ar-transport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-transport.c
--- linux-3.13.11/net/rxrpc/ar-transport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/ar-transport.c	2014-07-09 12:00:16.000000000 +0200
@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
 		spin_lock_init(&trans->client_lock);
 		rwlock_init(&trans->conn_lock);
 		atomic_set(&trans->usage, 1);
-		trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
+		trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
 
 		if (peer->srx.transport.family == AF_INET) {
 			switch (peer->srx.transport_type) {
diff -ruNp linux-3.13.11/net/rxrpc/rxkad.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/rxkad.c
--- linux-3.13.11/net/rxrpc/rxkad.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/rxrpc/rxkad.c	2014-07-09 12:00:16.000000000 +0200
@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct
 
 	len = iov[0].iov_len + iov[1].iov_len;
 
-	hdr.serial = htonl(atomic_inc_return(&conn->serial));
+	hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
 	_proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
 
 	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rx
 
 	len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
 
-	hdr->serial = htonl(atomic_inc_return(&conn->serial));
+	hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
 	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
 
 	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
diff -ruNp linux-3.13.11/net/sctp/ipv6.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/ipv6.c
--- linux-3.13.11/net/sctp/ipv6.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/ipv6.c	2014-07-09 12:00:16.000000000 +0200
@@ -964,7 +964,7 @@ static const struct inet6_protocol sctpv
 	.flags        = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
 };
 
-static struct sctp_af sctp_af_inet6 = {
+static struct sctp_af sctp_af_inet6 __read_only = {
 	.sa_family	   = AF_INET6,
 	.sctp_xmit	   = sctp_v6_xmit,
 	.setsockopt	   = ipv6_setsockopt,
@@ -996,7 +996,7 @@ static struct sctp_af sctp_af_inet6 = {
 #endif
 };
 
-static struct sctp_pf sctp_pf_inet6 = {
+static struct sctp_pf sctp_pf_inet6 __read_only = {
 	.event_msgname = sctp_inet6_event_msgname,
 	.skb_msgname   = sctp_inet6_skb_msgname,
 	.af_supported  = sctp_inet6_af_supported,
@@ -1021,7 +1021,7 @@ void sctp_v6_pf_init(void)
 
 void sctp_v6_pf_exit(void)
 {
-	list_del(&sctp_af_inet6.list);
+	pax_list_del(&sctp_af_inet6.list);
 }
 
 /* Initialize IPv6 support and register with socket layer.  */
diff -ruNp linux-3.13.11/net/sctp/protocol.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/protocol.c
--- linux-3.13.11/net/sctp/protocol.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/protocol.c	2014-07-09 12:00:16.000000000 +0200
@@ -832,8 +832,10 @@ int sctp_register_af(struct sctp_af *af)
 		return 0;
 	}
 
+	pax_open_kernel();
 	INIT_LIST_HEAD(&af->list);
-	list_add_tail(&af->list, &sctp_address_families);
+	pax_close_kernel();
+	pax_list_add_tail(&af->list, &sctp_address_families);
 	return 1;
 }
 
@@ -963,7 +965,7 @@ static inline int sctp_v4_xmit(struct sk
 
 static struct sctp_af sctp_af_inet;
 
-static struct sctp_pf sctp_pf_inet = {
+static struct sctp_pf sctp_pf_inet __read_only = {
 	.event_msgname = sctp_inet_event_msgname,
 	.skb_msgname   = sctp_inet_skb_msgname,
 	.af_supported  = sctp_inet_af_supported,
@@ -1034,7 +1036,7 @@ static const struct net_protocol sctp_pr
 };
 
 /* IPv4 address related functions.  */
-static struct sctp_af sctp_af_inet = {
+static struct sctp_af sctp_af_inet __read_only = {
 	.sa_family	   = AF_INET,
 	.sctp_xmit	   = sctp_v4_xmit,
 	.setsockopt	   = ip_setsockopt,
@@ -1119,7 +1121,7 @@ static void sctp_v4_pf_init(void)
 
 static void sctp_v4_pf_exit(void)
 {
-	list_del(&sctp_af_inet.list);
+	pax_list_del(&sctp_af_inet.list);
 }
 
 static int sctp_v4_protosw_init(void)
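
The sctp hunks above mark the address-family and protocol-family tables __read_only and route list updates through pax_list_add_tail()/pax_list_del(), with the direct field write wrapped in pax_open_kernel()/pax_close_kernel(). The pattern is: keep rarely-written kernel data read-only and open a short write window only for the update. Below is a userspace analogue of that window using mprotect() purely for illustration; the helper names in the patch are PaX's, the code here is not.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct af_entry { int family; char name[16]; };

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	/* The table gets its own page so its protection can be toggled. */
	struct af_entry *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	table[0].family = 2;  strcpy(table[0].name, "inet");
	mprotect(table, pagesz, PROT_READ);			/* "__read_only" */

	mprotect(table, pagesz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	table[1].family = 10; strcpy(table[1].name, "inet6");
	mprotect(table, pagesz, PROT_READ);			/* pax_close_kernel() */

	printf("registered: %s %s\n", table[0].name, table[1].name);
	return 0;
}
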
diff -ruNp linux-3.13.11/net/sctp/sm_sideeffect.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/sm_sideeffect.c
--- linux-3.13.11/net/sctp/sm_sideeffect.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/sm_sideeffect.c	2014-07-09 12:00:16.000000000 +0200
@@ -440,7 +440,7 @@ static void sctp_generate_sack_event(uns
 	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
 }
 
-sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
+sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
 	NULL,
 	sctp_generate_t1_cookie_event,
 	sctp_generate_t1_init_event,
diff -ruNp linux-3.13.11/net/sctp/socket.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/socket.c
--- linux-3.13.11/net/sctp/socket.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/socket.c	2014-07-09 12:00:16.000000000 +0200
@@ -2176,11 +2176,13 @@ static int sctp_setsockopt_events(struct
 {
 	struct sctp_association *asoc;
 	struct sctp_ulpevent *event;
+	struct sctp_event_subscribe subscribe;
 
 	if (optlen > sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
-	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
+	if (copy_from_user(&subscribe, optval, optlen))
 		return -EFAULT;
+	sctp_sk(sk)->subscribe = subscribe;
 
 	/*
 	 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
@@ -4252,13 +4254,16 @@ static int sctp_getsockopt_disable_fragm
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
+	struct sctp_event_subscribe subscribe;
+
 	if (len <= 0)
 		return -EINVAL;
 	if (len > sizeof(struct sctp_event_subscribe))
 		len = sizeof(struct sctp_event_subscribe);
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
+	subscribe = sctp_sk(sk)->subscribe;
+	if (copy_to_user(optval, &subscribe, len))
 		return -EFAULT;
 	return 0;
 }
@@ -4276,6 +4281,8 @@ static int sctp_getsockopt_events(struct
  */
 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
+	__u32 autoclose;
+
 	/* Applicable to UDP-style socket only */
 	if (sctp_style(sk, TCP))
 		return -EOPNOTSUPP;
@@ -4284,7 +4291,8 @@ static int sctp_getsockopt_autoclose(str
 	len = sizeof(int);
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
+	autoclose = sctp_sk(sk)->autoclose;
+	if (copy_to_user(optval, &autoclose, sizeof(int)))
 		return -EFAULT;
 	return 0;
 }
@@ -4656,12 +4664,15 @@ static int sctp_getsockopt_delayed_ack(s
  */
 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
+	struct sctp_initmsg initmsg;
+
 	if (len < sizeof(struct sctp_initmsg))
 		return -EINVAL;
 	len = sizeof(struct sctp_initmsg);
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
+	initmsg = sctp_sk(sk)->initmsg;
+	if (copy_to_user(optval, &initmsg, len))
 		return -EFAULT;
 	return 0;
 }
@@ -4702,6 +4713,8 @@ static int sctp_getsockopt_peer_addrs(st
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 		if (space_left < addrlen)
 			return -ENOMEM;
+		if (addrlen > sizeof(temp) || addrlen < 0)
+			return -EFAULT;
 		if (copy_to_user(to, &temp, addrlen))
 			return -EFAULT;
 		to += addrlen;
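
The sctp_setsockopt_events()/sctp_getsockopt_*() hunks above stop copying user data directly into and out of fields embedded in struct sctp_sock and instead go through an on-stack copy, so a bad length or a faulting copy can no longer touch or leak neighbouring socket state. Below is a small userspace analogue of that bounce-buffer pattern; copy_from_user() is emulated with memcpy and every name is illustrative.

#include <stdio.h>
#include <string.h>

struct event_subscribe { unsigned char data_io; unsigned char association; };
struct fake_sock { int refcnt; struct event_subscribe subscribe; };

/* memcpy stands in for copy_from_user(); 0 means success, as in the kernel. */
static int emulated_copy_from_user(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static int setsockopt_events(struct fake_sock *sk, const void *optval, size_t optlen)
{
	struct event_subscribe subscribe = { 0 };	/* local bounce buffer */

	if (optlen > sizeof(subscribe))
		return -1;
	if (emulated_copy_from_user(&subscribe, optval, optlen))
		return -1;
	sk->subscribe = subscribe;	/* commit only after a clean copy */
	return 0;
}

int main(void)
{
	struct fake_sock sk = { .refcnt = 1 };
	struct event_subscribe req = { .data_io = 1 };
	int rc = setsockopt_events(&sk, &req, sizeof(req));

	printf("rc=%d data_io=%d\n", rc, sk.subscribe.data_io);
	return 0;
}
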
diff -ruNp linux-3.13.11/net/sctp/sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/sysctl.c
--- linux-3.13.11/net/sctp/sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sctp/sysctl.c	2014-07-09 12:00:16.000000000 +0200
@@ -305,7 +305,7 @@ static int proc_sctp_do_hmac_alg(struct
 {
 	struct net *net = current->nsproxy->net_ns;
 	char tmp[8];
-	struct ctl_table tbl;
+	ctl_table_no_const tbl;
 	int ret;
 	int changed = 0;
 	char *none = "none";
@@ -352,7 +352,7 @@ static int proc_sctp_do_rto_min(struct c
 {
 	struct net *net = current->nsproxy->net_ns;
 	int new_value;
-	struct ctl_table tbl;
+	ctl_table_no_const tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
 	int ret;
@@ -379,7 +379,7 @@ static int proc_sctp_do_rto_max(struct c
 {
 	struct net *net = current->nsproxy->net_ns;
 	int new_value;
-	struct ctl_table tbl;
+	ctl_table_no_const tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
 	int ret;
@@ -402,7 +402,7 @@ static int proc_sctp_do_rto_max(struct c
 
 int sctp_sysctl_net_register(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 	int i;
 
 	table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
diff -ruNp linux-3.13.11/net/socket.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/socket.c
--- linux-3.13.11/net/socket.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/socket.c	2014-07-09 12:00:16.000000000 +0200
@@ -88,6 +88,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/in.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -98,6 +99,9 @@
 
 #include <net/sock.h>
 #include <linux/netfilter.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
 
 #include <linux/if_tun.h>
 #include <linux/ipv6_route.h>
@@ -111,6 +115,8 @@ unsigned int sysctl_net_busy_read __read
 unsigned int sysctl_net_busy_poll __read_mostly;
 #endif
 
+#include <linux/grsock.h>
+
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			 unsigned long nr_segs, loff_t pos);
@@ -162,7 +168,7 @@ static const struct file_operations sock
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
+const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *	Statistics counters of the socket lists
@@ -328,7 +334,7 @@ static struct dentry *sockfs_mount(struc
 		&sockfs_dentry_operations, SOCKFS_MAGIC);
 }
 
-static struct vfsmount *sock_mnt __read_mostly;
+struct vfsmount *sock_mnt __read_mostly;
 
 static struct file_system_type sock_fs_type = {
 	.name =		"sockfs",
@@ -623,13 +629,29 @@ static inline int __sock_sendmsg_nosec(s
 				       struct msghdr *msg, size_t size)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+	size_t len;
 
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
 	si->size = size;
 
-	return sock->ops->sendmsg(iocb, sock, msg, size);
+	len = sock->ops->sendmsg(iocb, sock, msg, size);
+	if (sock->sk) {
+		if (len == size)
+			vx_sock_send(sock->sk, size);
+		else
+			vx_sock_fail(sock->sk, size);
+	}
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%zu",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
@@ -773,6 +795,7 @@ static inline int __sock_recvmsg_nosec(s
 				       struct msghdr *msg, size_t size, int flags)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+	int len;
 
 	si->sock = sock;
 	si->scm = NULL;
@@ -780,7 +803,18 @@ static inline int __sock_recvmsg_nosec(s
 	si->size = size;
 	si->flags = flags;
 
-	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
+	if ((len >= 0) && sock->sk)
+		vx_sock_recv(sock->sk, len);
+	vxdprintk(VXD_CBIT(net, 7),
+		"__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+		sock, sock->sk,
+		(sock->sk)?sock->sk->sk_nx_info:0,
+		(sock->sk)?sock->sk->sk_vx_info:0,
+		(sock->sk)?sock->sk->sk_xid:0,
+		(sock->sk)?sock->sk->sk_nid:0,
+		(unsigned int)size, len);
+	return len;
 }
 
 static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1255,6 +1289,15 @@ int __sock_create(struct net *net, int f
 		return -EAFNOSUPPORT;
 	if (type < 0 || type >= SOCK_MAX)
 		return -EINVAL;
+	if (protocol < 0)
+		return -EINVAL;
+
+	if (!nx_check(0, VS_ADMIN)) {
+		if (family == PF_INET && !current_nx_info_has_v4())
+			return -EAFNOSUPPORT;
+		if (family == PF_INET6 && !current_nx_info_has_v6())
+			return -EAFNOSUPPORT;
+	}
 
 	/* Compatibility.
 
@@ -1275,6 +1318,20 @@ int __sock_create(struct net *net, int f
 	if (err)
 		return err;
 
+	if(!kern && !gr_search_socket(family, type, protocol)) {
+		if (rcu_access_pointer(net_families[family]) == NULL)
+			return -EAFNOSUPPORT;
+		else
+			return -EACCES;
+	}
+
+	if (!kern && gr_handle_sock_all(family, type, protocol)) {
+		if (rcu_access_pointer(net_families[family]) == NULL)
+			return -EAFNOSUPPORT;
+		else
+			return -EACCES;
+	}
+
 	/*
 	 *	Allocate the socket and allow the family to set things up. if
 	 *	the protocol is 0, the family is instructed to select an appropriate
@@ -1390,6 +1447,7 @@ SYSCALL_DEFINE3(socket, int, family, int
 	if (retval < 0)
 		goto out;
 
+	set_bit(SOCK_USER_SOCKET, &sock->flags);
 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
 	if (retval < 0)
 		goto out_release;
@@ -1431,10 +1489,12 @@ SYSCALL_DEFINE4(socketpair, int, family,
 	err = sock_create(family, type, protocol, &sock1);
 	if (err < 0)
 		goto out;
+	set_bit(SOCK_USER_SOCKET, &sock1->flags);
 
 	err = sock_create(family, type, protocol, &sock2);
 	if (err < 0)
 		goto out_release_1;
+	set_bit(SOCK_USER_SOCKET, &sock2->flags);
 
 	err = sock1->ops->socketpair(sock1, sock2);
 	if (err < 0)
@@ -1513,6 +1573,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
 	if (sock) {
 		err = move_addr_to_kernel(umyaddr, addrlen, &address);
 		if (err >= 0) {
+			if (gr_handle_sock_server((struct sockaddr *)&address)) {
+				err = -EACCES;
+				goto error;
+			}
+			err = gr_search_bind(sock, (struct sockaddr_in *)&address);
+			if (err)
+				goto error;
+
 			err = security_socket_bind(sock,
 						   (struct sockaddr *)&address,
 						   addrlen);
@@ -1521,6 +1589,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
 						      (struct sockaddr *)
 						      &address, addrlen);
 		}
+error:
 		fput_light(sock->file, fput_needed);
 	}
 	return err;
@@ -1544,10 +1613,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
 		if ((unsigned int)backlog > somaxconn)
 			backlog = somaxconn;
 
+		if (gr_handle_sock_server_other(sock->sk)) {
+			err = -EPERM;
+			goto error;
+		}
+
+		err = gr_search_listen(sock);
+		if (err)
+			goto error;
+
 		err = security_socket_listen(sock, backlog);
 		if (!err)
 			err = sock->ops->listen(sock, backlog);
 
+error:
 		fput_light(sock->file, fput_needed);
 	}
 	return err;
@@ -1591,6 +1670,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
 	newsock->type = sock->type;
 	newsock->ops = sock->ops;
 
+	if (gr_handle_sock_server_other(sock->sk)) {
+		err = -EPERM;
+		sock_release(newsock);
+		goto out_put;
+	}
+
+	err = gr_search_accept(sock);
+	if (err) {
+		sock_release(newsock);
+		goto out_put;
+	}
+
 	/*
 	 * We don't need try_module_get here, as the listening socket (sock)
 	 * has the protocol module (sock->ops->owner) held.
@@ -1636,6 +1727,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
 	fd_install(newfd, newfile);
 	err = newfd;
 
+	gr_attach_curr_ip(newsock->sk);
+
 out_put:
 	fput_light(sock->file, fput_needed);
 out:
@@ -1668,6 +1761,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
 		int, addrlen)
 {
 	struct socket *sock;
+	struct sockaddr *sck;
 	struct sockaddr_storage address;
 	int err, fput_needed;
 
@@ -1678,6 +1772,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
 	if (err < 0)
 		goto out_put;
 
+	sck = (struct sockaddr *)&address;
+
+	if (gr_handle_sock_client(sck)) {
+		err = -EACCES;
+		goto out_put;
+	}
+
+	err = gr_search_connect(sock, (struct sockaddr_in *)sck);
+	if (err)
+		goto out_put;
+
 	err =
 	    security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
 	if (err)
@@ -1759,6 +1864,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, st
  *	the protocol.
  */
 
+asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int);
+
 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
 		unsigned int, flags, struct sockaddr __user *, addr,
 		int, addr_len)
@@ -1825,7 +1932,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void
 	struct socket *sock;
 	struct iovec iov;
 	struct msghdr msg;
-	struct sockaddr_storage address;
+	struct sockaddr_storage address = { };
 	int err, err2;
 	int fput_needed;
 
@@ -2051,7 +2158,7 @@ static int ___sys_sendmsg(struct socket
 		 * checking falls down on this.
 		 */
 		if (copy_from_user(ctl_buf,
-				   (void __user __force *)msg_sys->msg_control,
+				   (void __force_user *)msg_sys->msg_control,
 				   ctl_len))
 			goto out_freectl;
 		msg_sys->msg_control = ctl_buf;
@@ -2202,7 +2309,7 @@ static int ___sys_recvmsg(struct socket
 	int err, total_len, len;
 
 	/* kernel mode address */
-	struct sockaddr_storage addr;
+	struct sockaddr_storage addr = { };
 
 	/* user mode address pointers */
 	struct sockaddr __user *uaddr;
@@ -2231,7 +2338,7 @@ static int ___sys_recvmsg(struct socket
 	/* Save the user-mode address (verify_iovec will change the
 	 * kernel msghdr to use the kernel address space)
 	 */
-	uaddr = (__force void __user *)msg_sys->msg_name;
+	uaddr = (void __force_user *)msg_sys->msg_name;
 	uaddr_len = COMPAT_NAMELEN(msg);
 	if (MSG_CMSG_COMPAT & flags)
 		err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
@@ -2875,7 +2982,7 @@ static int ethtool_ioctl(struct net *net
 	ifr = compat_alloc_user_space(buf_size);
 	rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
 
-	if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+	if (copy_in_user(ifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
 		return -EFAULT;
 
 	if (put_user(convert_in ? rxnfc : compat_ptr(data),
@@ -2989,14 +3096,14 @@ static int bond_ioctl(struct net *net, u
 		old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		err = dev_ioctl(net, cmd,
-				(struct ifreq __user __force *) &kifr);
+				(struct ifreq __force_user *) &kifr);
 		set_fs(old_fs);
 
 		return err;
 	case SIOCBONDSLAVEINFOQUERY:
 	case SIOCBONDINFOQUERY:
 		uifr = compat_alloc_user_space(sizeof(*uifr));
-		if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+		if (copy_in_user(uifr->ifr_name, ifr32->ifr_name, IFNAMSIZ))
 			return -EFAULT;
 
 		if (get_user(data, &ifr32->ifr_ifru.ifru_data))
@@ -3098,7 +3205,7 @@ static int compat_sioc_ifmap(struct net
 
 	old_fs = get_fs();
 	set_fs(KERNEL_DS);
-	err = dev_ioctl(net, cmd, (void  __user __force *)&ifr);
+	err = dev_ioctl(net, cmd, (void  __force_user *)&ifr);
 	set_fs(old_fs);
 
 	if (cmd == SIOCGIFMAP && !err) {
@@ -3203,7 +3310,7 @@ static int routing_ioctl(struct net *net
 		ret |= get_user(rtdev, &(ur4->rt_dev));
 		if (rtdev) {
 			ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
-			r4.rt_dev = (char __user __force *)devname;
+			r4.rt_dev = (char __force_user *)devname;
 			devname[15] = 0;
 		} else
 			r4.rt_dev = NULL;
@@ -3429,8 +3536,8 @@ int kernel_getsockopt(struct socket *soc
 	int __user *uoptlen;
 	int err;
 
-	uoptval = (char __user __force *) optval;
-	uoptlen = (int __user __force *) optlen;
+	uoptval = (char __force_user *) optval;
+	uoptlen = (int __force_user *) optlen;
 
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
@@ -3450,7 +3557,7 @@ int kernel_setsockopt(struct socket *soc
 	char __user *uoptval;
 	int err;
 
-	uoptval = (char __user __force *) optval;
+	uoptval = (char __force_user *) optval;
 
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
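
Besides the grsec socket hooks, the net/socket.c changes above wrap __sock_sendmsg_nosec()/__sock_recvmsg_nosec() so Linux-VServer can charge traffic to the owning context: account the bytes on full success, record the failure otherwise. Here is a stripped-down sketch of that wrapper shape; the vx_* hooks are replaced by plain counters and none of this is the real VServer API.

#include <stdio.h>
#include <string.h>

struct ctx_acct { unsigned long sent_bytes; unsigned long failed_sends; };

/* Pretend transport: reports the whole message as sent. */
static long real_sendmsg(const char *buf, size_t len)
{
	(void)buf;
	return (long)len;
}

static long accounted_sendmsg(struct ctx_acct *acct, const char *buf, size_t len)
{
	long ret = real_sendmsg(buf, len);

	if (ret == (long)len)
		acct->sent_bytes += len;	/* vx_sock_send() analogue */
	else
		acct->failed_sends++;		/* vx_sock_fail() analogue */
	return ret;
}

int main(void)
{
	struct ctx_acct acct = { 0, 0 };
	const char msg[] = "ping";

	accounted_sendmsg(&acct, msg, strlen(msg));
	printf("sent=%lu failed=%lu\n", acct.sent_bytes, acct.failed_sends);
	return 0;
}
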
diff -ruNp linux-3.13.11/net/sunrpc/auth.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth.c
--- linux-3.13.11/net/sunrpc/auth.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth.c	2014-07-09 12:00:16.000000000 +0200
@@ -15,6 +15,7 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
 #include <linux/spinlock.h>
+#include <linux/vs_tag.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY	RPCDBG_AUTH
@@ -586,6 +587,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
 	memset(&acred, 0, sizeof(acred));
 	acred.uid = cred->fsuid;
 	acred.gid = cred->fsgid;
+	acred.tag = make_ktag(&init_user_ns, dx_current_tag());
 	acred.group_info = get_group_info(((struct cred *)cred)->group_info);
 
 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
@@ -626,6 +628,7 @@ rpcauth_bind_root_cred(struct rpc_task *
 	struct auth_cred acred = {
 		.uid = GLOBAL_ROOT_UID,
 		.gid = GLOBAL_ROOT_GID,
+		.tag = KTAGT_INIT(dx_current_tag()),
 	};
 
 	dprintk("RPC: %5u looking up %s cred\n",
diff -ruNp linux-3.13.11/net/sunrpc/auth_gss/svcauth_gss.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth_gss/svcauth_gss.c
--- linux-3.13.11/net/sunrpc/auth_gss/svcauth_gss.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth_gss/svcauth_gss.c	2014-07-09 12:00:16.000000000 +0200
@@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cac
 				uint64_t *handle)
 {
 	struct rsc rsci, *rscp = NULL;
-	static atomic64_t ctxhctr;
+	static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0);
 	long long ctxh;
 	struct gss_api_mech *gm = NULL;
 	time_t expiry;
@@ -1151,7 +1151,7 @@ static int gss_proxy_save_rsc(struct cac
 	status = -ENOMEM;
 	/* the handle needs to be just a unique id,
 	 * use a static counter */
-	ctxh = atomic64_inc_return(&ctxhctr);
+	ctxh = atomic64_inc_return_unchecked(&ctxhctr);
 
 	/* make a copy for the caller */
 	*handle = ctxh;
diff -ruNp linux-3.13.11/net/sunrpc/auth_unix.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth_unix.c
--- linux-3.13.11/net/sunrpc/auth_unix.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/auth_unix.c	2014-07-09 12:00:16.000000000 +0200
@@ -13,11 +13,13 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/auth.h>
 #include <linux/user_namespace.h>
+#include <linux/vs_tag.h>
 
 #define NFS_NGROUPS	16
 
 struct unx_cred {
 	struct rpc_cred		uc_base;
+	ktag_t			uc_tag;
 	kgid_t			uc_gid;
 	kgid_t			uc_gids[NFS_NGROUPS];
 };
@@ -80,6 +82,7 @@ unx_create_cred(struct rpc_auth *auth, s
 		groups = NFS_NGROUPS;
 
 	cred->uc_gid = acred->gid;
+	cred->uc_tag = acred->tag;
 	for (i = 0; i < groups; i++)
 		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
 	if (i < NFS_NGROUPS)
@@ -121,7 +124,9 @@ unx_match(struct auth_cred *acred, struc
 	unsigned int i;
 
 
-	if (!uid_eq(cred->uc_uid, acred->uid) || !gid_eq(cred->uc_gid, acred->gid))
+	if (!uid_eq(cred->uc_uid, acred->uid) ||
+	    !gid_eq(cred->uc_gid, acred->gid) ||
+	    !tag_eq(cred->uc_tag, acred->tag))
 		return 0;
 
 	if (acred->group_info != NULL)
@@ -146,7 +151,7 @@ unx_marshal(struct rpc_task *task, __be3
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct unx_cred	*cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
 	__be32		*base, *hold;
-	int		i;
+	int		i, tag;
 
 	*p++ = htonl(RPC_AUTH_UNIX);
 	base = p++;
@@ -157,8 +162,11 @@ unx_marshal(struct rpc_task *task, __be3
 	 */
 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
 
-	*p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid));
-	*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid));
+	tag = task->tk_client->cl_tag;
+	*p++ = htonl((u32) from_kuid(&init_user_ns,
+		TAGINO_KUID(tag, cred->uc_uid, cred->uc_tag)));
+	*p++ = htonl((u32) from_kgid(&init_user_ns,
+		TAGINO_KGID(tag, cred->uc_gid, cred->uc_tag)));
 	hold = p++;
 	for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++)
 		*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i]));
diff -ruNp linux-3.13.11/net/sunrpc/backchannel_rqst.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/backchannel_rqst.c
--- linux-3.13.11/net/sunrpc/backchannel_rqst.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/backchannel_rqst.c	2014-07-09 12:00:16.000000000 +0200
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct
 	free_page((unsigned long)xbufp->head[0].iov_base);
 	xbufp = &req->rq_snd_buf;
 	free_page((unsigned long)xbufp->head[0].iov_base);
-	list_del(&req->rq_bc_pa_list);
 	kfree(req);
 }
 
@@ -168,8 +167,10 @@ out_free:
 	/*
 	 * Memory allocation failed, free the temporary list
 	 */
-	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+	list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
+	}
 
 	dprintk("RPC:       setup backchannel transport failed\n");
 	return -ENOMEM;
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc
 	xprt_dec_alloc_count(xprt, max_reqs);
 	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
 		dprintk("RPC:        req=%p\n", req);
+		list_del(&req->rq_bc_pa_list);
 		xprt_free_allocation(req);
 		if (--max_reqs == 0)
 			break;
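
The backchannel_rqst.c hunks above move the list_del() out of xprt_free_allocation() and into the two list_for_each_entry_safe() walks that call it, so the free helper no longer unlinks a node its caller may never have queued. A userspace sketch of that ownership split follows, with a tiny intrusive list standing in for <linux/list.h>; names are illustrative only.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->next = n->prev = NULL;
}

struct rpc_rqst { int id; struct list_head pa_list; };

static void free_allocation(struct rpc_rqst *req)
{
	/* No list_del() in here any more: the caller owns the unlink. */
	printf("freeing req %d\n", req->id);
	free(req);
}

int main(void)
{
	struct list_head tmp_list;
	list_init(&tmp_list);

	for (int i = 0; i < 3; i++) {
		struct rpc_rqst *req = malloc(sizeof(*req));
		if (!req)
			return 1;
		req->id = i;
		list_add_tail(&req->pa_list, &tmp_list);
	}

	/* list_for_each_entry_safe() analogue: save the next node before the
	 * current one is unlinked and freed. */
	for (struct list_head *p = tmp_list.next, *n = p->next;
	     p != &tmp_list; p = n, n = p->next) {
		struct rpc_rqst *req = (struct rpc_rqst *)
			((char *)p - offsetof(struct rpc_rqst, pa_list));
		list_del(&req->pa_list);
		free_allocation(req);
	}
	return 0;
}
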
diff -ruNp linux-3.13.11/net/sunrpc/clnt.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/clnt.c
--- linux-3.13.11/net/sunrpc/clnt.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/clnt.c	2014-07-09 12:00:16.000000000 +0200
@@ -31,6 +31,7 @@
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/un.h>
+#include <linux/vs_cvirt.h>
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/addr.h>
@@ -538,6 +539,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
 		clnt->cl_chatty = 1;
 
+	/* TODO: handle RPC_CLNT_CREATE_TAGGED
+	if (args->flags & RPC_CLNT_CREATE_TAGGED)
+		clnt->cl_tag = 1; */
 	return clnt;
 }
 EXPORT_SYMBOL_GPL(rpc_create);
@@ -1415,7 +1419,9 @@ call_start(struct rpc_task *task)
 			(RPC_IS_ASYNC(task) ? "async" : "sync"));
 
 	/* Increment call count */
-	task->tk_msg.rpc_proc->p_count++;
+	pax_open_kernel();
+	(*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
+	pax_close_kernel();
 	clnt->cl_stats->rpccnt++;
 	task->tk_action = call_reserve;
 }
diff -ruNp linux-3.13.11/net/sunrpc/sched.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/sched.c
--- linux-3.13.11/net/sunrpc/sched.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/sched.c	2014-07-09 12:00:16.000000000 +0200
@@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *w
 #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
 static void rpc_task_set_debuginfo(struct rpc_task *task)
 {
-	static atomic_t rpc_pid;
+	static atomic_unchecked_t rpc_pid;
 
-	task->tk_pid = atomic_inc_return(&rpc_pid);
+	task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
 }
 #else
 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
diff -ruNp linux-3.13.11/net/sunrpc/svc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/svc.c
--- linux-3.13.11/net/sunrpc/svc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/svc.c	2014-07-09 12:00:16.000000000 +0200
@@ -1158,7 +1158,9 @@ svc_process_common(struct svc_rqst *rqst
 	svc_putnl(resv, RPC_SUCCESS);
 
 	/* Bump per-procedure stats counter */
-	procp->pc_count++;
+	pax_open_kernel();
+	(*(unsigned int *)&procp->pc_count)++;
+	pax_close_kernel();
 
 	/* Initialize storage for argp and resp */
 	memset(rqstp->rq_argp, 0, procp->pc_argsize);
diff -ruNp linux-3.13.11/net/sunrpc/svcauth_unix.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/svcauth_unix.c
--- linux-3.13.11/net/sunrpc/svcauth_unix.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/svcauth_unix.c	2014-07-09 12:00:16.000000000 +0200
@@ -414,7 +414,7 @@ struct unix_gid {
 	struct group_info	*gi;
 };
 
-static int unix_gid_hash(kuid_t uid)
+static int __intentional_overflow(-1) unix_gid_hash(kuid_t uid)
 {
 	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
 }
@@ -470,7 +470,7 @@ static void unix_gid_request(struct cach
 	(*bpp)[-1] = '\n';
 }
 
-static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
+static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
 
 static int unix_gid_parse(struct cache_detail *cd,
 			char *mesg, int mlen)
diff -ruNp linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma.c
--- linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma.c	2014-07-09 12:00:16.000000000 +0200
@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCR
 static unsigned int min_max_inline = 4096;
 static unsigned int max_max_inline = 65536;
 
-atomic_t rdma_stat_recv;
-atomic_t rdma_stat_read;
-atomic_t rdma_stat_write;
-atomic_t rdma_stat_sq_starve;
-atomic_t rdma_stat_rq_starve;
-atomic_t rdma_stat_rq_poll;
-atomic_t rdma_stat_rq_prod;
-atomic_t rdma_stat_sq_poll;
-atomic_t rdma_stat_sq_prod;
+atomic_unchecked_t rdma_stat_recv;
+atomic_unchecked_t rdma_stat_read;
+atomic_unchecked_t rdma_stat_write;
+atomic_unchecked_t rdma_stat_sq_starve;
+atomic_unchecked_t rdma_stat_rq_starve;
+atomic_unchecked_t rdma_stat_rq_poll;
+atomic_unchecked_t rdma_stat_rq_prod;
+atomic_unchecked_t rdma_stat_sq_poll;
+atomic_unchecked_t rdma_stat_sq_prod;
 
 /* Temporary NFS request map and context caches */
 struct kmem_cache *svc_rdma_map_cachep;
@@ -110,7 +110,7 @@ static int read_reset_stat(struct ctl_ta
 		len -= *ppos;
 		if (len > *lenp)
 			len = *lenp;
-		if (len && copy_to_user(buffer, str_buf, len))
+		if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
 			return -EFAULT;
 		*lenp = len;
 		*ppos += len;
@@ -151,63 +151,63 @@ static struct ctl_table svcrdma_parm_tab
 	{
 		.procname	= "rdma_stat_read",
 		.data		= &rdma_stat_read,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_recv",
 		.data		= &rdma_stat_recv,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_write",
 		.data		= &rdma_stat_write,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_sq_starve",
 		.data		= &rdma_stat_sq_starve,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_rq_starve",
 		.data		= &rdma_stat_rq_starve,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_rq_poll",
 		.data		= &rdma_stat_rq_poll,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_rq_prod",
 		.data		= &rdma_stat_rq_prod,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_sq_poll",
 		.data		= &rdma_stat_sq_poll,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
 	{
 		.procname	= "rdma_stat_sq_prod",
 		.data		= &rdma_stat_sq_prod,
-		.maxlen		= sizeof(atomic_t),
+		.maxlen		= sizeof(atomic_unchecked_t),
 		.mode		= 0644,
 		.proc_handler	= read_reset_stat,
 	},
diff -ruNp linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
--- linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	2014-07-09 12:00:16.000000000 +0200
@@ -501,7 +501,7 @@ next_sge:
 			svc_rdma_put_context(ctxt, 0);
 			goto out;
 		}
-		atomic_inc(&rdma_stat_read);
+		atomic_inc_unchecked(&rdma_stat_read);
 
 		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
 			chl_map->ch[ch_no].count -= read_wr.num_sge;
@@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
 				  dto_q);
 		list_del_init(&ctxt->dto_q);
 	} else {
-		atomic_inc(&rdma_stat_rq_starve);
+		atomic_inc_unchecked(&rdma_stat_rq_starve);
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
 		ctxt = NULL;
 	}
@@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
 	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
 		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
 	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
-	atomic_inc(&rdma_stat_recv);
+	atomic_inc_unchecked(&rdma_stat_recv);
 
 	/* Build up the XDR from the receive buffers. */
 	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
diff -ruNp linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_sendto.c
--- linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_sendto.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_sendto.c	2014-07-09 12:00:16.000000000 +0200
@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
 	write_wr.wr.rdma.remote_addr = to;
 
 	/* Post It */
-	atomic_inc(&rdma_stat_write);
+	atomic_inc_unchecked(&rdma_stat_write);
 	if (svc_rdma_send(xprt, &write_wr))
 		goto err;
 	return 0;
diff -ruNp linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_transport.c
--- linux-3.13.11/net/sunrpc/xprtrdma/svc_rdma_transport.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sunrpc/xprtrdma/svc_rdma_transport.c	2014-07-09 12:00:16.000000000 +0200
@@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
 		return;
 
 	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-	atomic_inc(&rdma_stat_rq_poll);
+	atomic_inc_unchecked(&rdma_stat_rq_poll);
 
 	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
 		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
@@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
 	}
 
 	if (ctxt)
-		atomic_inc(&rdma_stat_rq_prod);
+		atomic_inc_unchecked(&rdma_stat_rq_prod);
 
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 	/*
@@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
 		return;
 
 	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-	atomic_inc(&rdma_stat_sq_poll);
+	atomic_inc_unchecked(&rdma_stat_sq_poll);
 	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
 		if (wc.status != IB_WC_SUCCESS)
 			/* Close the transport */
@@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
 	}
 
 	if (ctxt)
-		atomic_inc(&rdma_stat_sq_prod);
+		atomic_inc_unchecked(&rdma_stat_sq_prod);
 }
 
 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
 		spin_lock_bh(&xprt->sc_lock);
 		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
 			spin_unlock_bh(&xprt->sc_lock);
-			atomic_inc(&rdma_stat_sq_starve);
+			atomic_inc_unchecked(&rdma_stat_sq_starve);
 
 			/* See if we can opportunistically reap SQ WR to make room */
 			sq_cq_reap(xprt);
diff -ruNp linux-3.13.11/net/sysctl_net.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sysctl_net.c
--- linux-3.13.11/net/sysctl_net.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/sysctl_net.c	2014-07-09 12:00:16.000000000 +0200
@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
 	kgid_t root_gid = make_kgid(net->user_ns, 0);
 
 	/* Allow network administrator to have same access as root. */
-	if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
+	if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
 	    uid_eq(root_uid, current_euid())) {
 		int mode = (table->mode >> 6) & 7;
 		return (mode << 6) | (mode << 3) | mode;
diff -ruNp linux-3.13.11/net/tipc/subscr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/tipc/subscr.c
--- linux-3.13.11/net/tipc/subscr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/tipc/subscr.c	2014-07-09 12:00:16.000000000 +0200
@@ -97,7 +97,7 @@ static void subscr_send_event(struct tip
 	struct tipc_subscriber *subscriber = sub->subscriber;
 	struct kvec msg_sect;
 
-	msg_sect.iov_base = (void *)&sub->evt;
+	msg_sect.iov_base = &sub->evt;
 	msg_sect.iov_len = sizeof(struct tipc_event);
 	sub->evt.event = htohl(event, sub->swap);
 	sub->evt.found_lower = htohl(found_lower, sub->swap);
diff -ruNp linux-3.13.11/net/unix/af_unix.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/unix/af_unix.c
--- linux-3.13.11/net/unix/af_unix.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/unix/af_unix.c	2014-07-09 12:00:16.000000000 +0200
@@ -115,6 +115,8 @@
 #include <net/checksum.h>
 #include <linux/security.h>
 #include <linux/freezer.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -270,6 +272,8 @@ static struct sock *__unix_find_socket_b
 		if (!net_eq(sock_net(s), net))
 			continue;
 
+		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (u->addr->len == len &&
 		    !memcmp(u->addr->name, sunname, len))
 			goto found;
@@ -789,6 +793,12 @@ static struct sock *unix_find_other(stru
 		err = -ECONNREFUSED;
 		if (!S_ISSOCK(inode->i_mode))
 			goto put_fail;
+
+		if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
+			err = -EACCES;
+			goto put_fail;
+		}
+
 		u = unix_find_socket_byinode(inode);
 		if (!u)
 			goto put_fail;
@@ -809,6 +819,13 @@ static struct sock *unix_find_other(stru
 		if (u) {
 			struct dentry *dentry;
 			dentry = unix_sk(u)->path.dentry;
+
+			if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
+				err = -EPERM;
+				sock_put(u);
+				goto fail;
+			}
+
 			if (dentry)
 				touch_atime(&unix_sk(u)->path);
 		} else
@@ -842,12 +859,18 @@ static int unix_mknod(const char *sun_pa
 	 */
 	err = security_path_mknod(&path, dentry, mode, 0);
 	if (!err) {
+		if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
+			err = -EACCES;
+			goto out;
+		}
 		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
 		if (!err) {
 			res->mnt = mntget(path.mnt);
 			res->dentry = dget(dentry);
+			gr_handle_create(dentry, path.mnt);
 		}
 	}
+out:
 	done_path_create(&path, dentry);
 	return err;
 }
@@ -2275,6 +2298,8 @@ static struct sock *unix_from_bucket(str
 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
 		if (sock_net(sk) != seq_file_net(seq))
 			continue;
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (++count == offset)
 			break;
 	}
@@ -2292,6 +2317,8 @@ static struct sock *unix_next_socket(str
 		sk = sk_next(sk);
 		if (!sk)
 			goto next_bucket;
+		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+			continue;
 		if (sock_net(sk) == seq_file_net(seq))
 			return sk;
 	}
@@ -2342,9 +2369,13 @@ static int unix_seq_show(struct seq_file
 		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
 			 "Inode Path\n");
 	else {
-		struct sock *s = v;
+		struct sock *s = v, *peer;
 		struct unix_sock *u = unix_sk(s);
 		unix_state_lock(s);
+		peer = unix_peer(s);
+		unix_state_unlock(s);
+
+		unix_state_double_lock(s, peer);
 
 		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
 			s,
@@ -2371,8 +2402,10 @@ static int unix_seq_show(struct seq_file
 			}
 			for ( ; i < len; i++)
 				seq_putc(seq, u->addr->name->sun_path[i]);
-		}
-		unix_state_unlock(s);
+		} else if (peer)
+			seq_printf(seq, " P%lu", sock_i_ino(peer));
+
+		unix_state_double_unlock(s, peer);
 		seq_putc(seq, '\n');
 	}
 
diff -ruNp linux-3.13.11/net/unix/sysctl_net_unix.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/unix/sysctl_net_unix.c
--- linux-3.13.11/net/unix/sysctl_net_unix.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/unix/sysctl_net_unix.c	2014-07-09 12:00:16.000000000 +0200
@@ -28,7 +28,7 @@ static struct ctl_table unix_table[] = {
 
 int __net_init unix_sysctl_register(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
 	if (table == NULL)
diff -ruNp linux-3.13.11/net/vmw_vsock/vmci_transport_notify.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/vmw_vsock/vmci_transport_notify.c
--- linux-3.13.11/net/vmw_vsock/vmci_transport_notify.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/vmw_vsock/vmci_transport_notify.c	2014-07-09 12:00:16.000000000 +0200
@@ -662,19 +662,19 @@ static void vmci_transport_notify_pkt_pr
 
 /* Socket control packet based operations. */
 struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
-	vmci_transport_notify_pkt_socket_init,
-	vmci_transport_notify_pkt_socket_destruct,
-	vmci_transport_notify_pkt_poll_in,
-	vmci_transport_notify_pkt_poll_out,
-	vmci_transport_notify_pkt_handle_pkt,
-	vmci_transport_notify_pkt_recv_init,
-	vmci_transport_notify_pkt_recv_pre_block,
-	vmci_transport_notify_pkt_recv_pre_dequeue,
-	vmci_transport_notify_pkt_recv_post_dequeue,
-	vmci_transport_notify_pkt_send_init,
-	vmci_transport_notify_pkt_send_pre_block,
-	vmci_transport_notify_pkt_send_pre_enqueue,
-	vmci_transport_notify_pkt_send_post_enqueue,
-	vmci_transport_notify_pkt_process_request,
-	vmci_transport_notify_pkt_process_negotiate,
+	.socket_init = vmci_transport_notify_pkt_socket_init,
+	.socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+	.poll_in = vmci_transport_notify_pkt_poll_in,
+	.poll_out = vmci_transport_notify_pkt_poll_out,
+	.handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+	.recv_init = vmci_transport_notify_pkt_recv_init,
+	.recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+	.recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+	.recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+	.send_init = vmci_transport_notify_pkt_send_init,
+	.send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+	.send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+	.send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+	.process_request = vmci_transport_notify_pkt_process_request,
+	.process_negotiate = vmci_transport_notify_pkt_process_negotiate,
 };
diff -ruNp linux-3.13.11/net/vmw_vsock/vmci_transport_notify_qstate.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/vmw_vsock/vmci_transport_notify_qstate.c
--- linux-3.13.11/net/vmw_vsock/vmci_transport_notify_qstate.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/vmw_vsock/vmci_transport_notify_qstate.c	2014-07-09 12:00:16.000000000 +0200
@@ -420,19 +420,19 @@ vmci_transport_notify_pkt_send_pre_enque
 
 /* Socket always on control packet based operations. */
 struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
-	vmci_transport_notify_pkt_socket_init,
-	vmci_transport_notify_pkt_socket_destruct,
-	vmci_transport_notify_pkt_poll_in,
-	vmci_transport_notify_pkt_poll_out,
-	vmci_transport_notify_pkt_handle_pkt,
-	vmci_transport_notify_pkt_recv_init,
-	vmci_transport_notify_pkt_recv_pre_block,
-	vmci_transport_notify_pkt_recv_pre_dequeue,
-	vmci_transport_notify_pkt_recv_post_dequeue,
-	vmci_transport_notify_pkt_send_init,
-	vmci_transport_notify_pkt_send_pre_block,
-	vmci_transport_notify_pkt_send_pre_enqueue,
-	vmci_transport_notify_pkt_send_post_enqueue,
-	vmci_transport_notify_pkt_process_request,
-	vmci_transport_notify_pkt_process_negotiate,
+	.socket_init = vmci_transport_notify_pkt_socket_init,
+	.socket_destruct = vmci_transport_notify_pkt_socket_destruct,
+	.poll_in = vmci_transport_notify_pkt_poll_in,
+	.poll_out = vmci_transport_notify_pkt_poll_out,
+	.handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
+	.recv_init = vmci_transport_notify_pkt_recv_init,
+	.recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
+	.recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
+	.recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
+	.send_init = vmci_transport_notify_pkt_send_init,
+	.send_pre_block = vmci_transport_notify_pkt_send_pre_block,
+	.send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
+	.send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
+	.process_request = vmci_transport_notify_pkt_process_request,
+	.process_negotiate = vmci_transport_notify_pkt_process_negotiate,
 };
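
The two vmw_vsock tables above are rewritten from positional to designated initializers, so each callback is bound to a named field rather than a slot; that keeps the tables correct if the ops structure gains members or is reordered (which the grsec constify/randomize_layout plugins may cause). A minimal illustration with a made-up ops struct:

#include <stdio.h>

struct notify_ops {
	void (*socket_init)(void);
	int  (*poll_in)(void);
	int  (*poll_out)(void);
};

static void pkt_socket_init(void) { puts("socket_init"); }
static int  pkt_poll_in(void)     { return 1; }
static int  pkt_poll_out(void)    { return 0; }

/* Designated form: each callback is tied to a field name, not a position. */
static const struct notify_ops pkt_ops = {
	.socket_init = pkt_socket_init,
	.poll_in     = pkt_poll_in,
	.poll_out    = pkt_poll_out,
};

int main(void)
{
	pkt_ops.socket_init();
	printf("in=%d out=%d\n", pkt_ops.poll_in(), pkt_ops.poll_out());
	return 0;
}
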
diff -ruNp linux-3.13.11/net/wireless/wext-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/wireless/wext-core.c
--- linux-3.13.11/net/wireless/wext-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/wireless/wext-core.c	2014-07-09 12:00:16.000000000 +0200
@@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struc
 		 */
 
 		/* Support for very large requests */
-		if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
-		    (user_length > descr->max_tokens)) {
+		if (user_length > descr->max_tokens) {
 			/* Allow userspace to GET more than max so
 			 * we can support any size GET requests.
 			 * There is still a limit : -ENOMEM.
@@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struc
 		}
 	}
 
-	if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
-		/*
-		 * If this is a GET, but not NOMAX, it means that the extra
-		 * data is not bounded by userspace, but by max_tokens. Thus
-		 * set the length to max_tokens. This matches the extra data
-		 * allocation.
-		 * The driver should fill it with the number of tokens it
-		 * provided, and it may check iwp->length rather than having
-		 * knowledge of max_tokens. If the driver doesn't change the
-		 * iwp->length, this ioctl just copies back max_token tokens
-		 * filled with zeroes. Hopefully the driver isn't claiming
-		 * them to be valid data.
-		 */
-		iwp->length = descr->max_tokens;
-	}
-
 	err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
 	iwp->length += essid_compat;
diff -ruNp linux-3.13.11/net/x25/sysctl_net_x25.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/x25/sysctl_net_x25.c
--- linux-3.13.11/net/x25/sysctl_net_x25.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/x25/sysctl_net_x25.c	2014-07-09 12:00:16.000000000 +0200
@@ -70,7 +70,7 @@ static struct ctl_table x25_table[] = {
 		.mode = 	0644,
 		.proc_handler = proc_dointvec,
 	},
-	{ 0, },
+	{ },
 };
 
 void __init x25_register_sysctl(void)
diff -ruNp linux-3.13.11/net/xfrm/xfrm_policy.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_policy.c
--- linux-3.13.11/net/xfrm/xfrm_policy.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_policy.c	2014-07-09 12:00:16.000000000 +0200
@@ -332,7 +332,7 @@ static void xfrm_policy_kill(struct xfrm
 {
 	policy->walk.dead = 1;
 
-	atomic_inc(&policy->genid);
+	atomic_inc_unchecked(&policy->genid);
 
 	if (del_timer(&policy->polq.hold_timer))
 		xfrm_pol_put(policy);
@@ -660,7 +660,7 @@ int xfrm_policy_insert(int dir, struct x
 		hlist_add_head(&policy->bydst, chain);
 	xfrm_pol_hold(policy);
 	net->xfrm.policy_count[dir]++;
-	atomic_inc(&flow_cache_genid);
+	atomic_inc_unchecked(&flow_cache_genid);
 
 	/* After previous checking, family can either be AF_INET or AF_INET6 */
 	if (policy->family == AF_INET)
@@ -1636,7 +1636,7 @@ free_dst:
 	goto out;
 }
 
-static int inline
+static inline int
 xfrm_dst_alloc_copy(void **target, const void *src, int size)
 {
 	if (!*target) {
@@ -1648,7 +1648,7 @@ xfrm_dst_alloc_copy(void **target, const
 	return 0;
 }
 
-static int inline
+static inline int
 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1660,7 +1660,7 @@ xfrm_dst_update_parent(struct dst_entry
 #endif
 }
 
-static int inline
+static inline int
 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
 {
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1754,7 +1754,7 @@ xfrm_resolve_and_create_bundle(struct xf
 
 	xdst->num_pols = num_pols;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
-	xdst->policy_genid = atomic_read(&pols[0]->genid);
+	xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
 
 	return xdst;
 }
@@ -2582,11 +2582,12 @@ void xfrm_garbage_collect(struct net *ne
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
-static void xfrm_garbage_collect_deferred(struct net *net)
+void xfrm_garbage_collect_deferred(struct net *net)
 {
 	flow_cache_flush_deferred();
 	__xfrm_garbage_collect(net);
 }
+EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
@@ -2636,7 +2637,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
 		if (xdst->xfrm_genid != dst->xfrm->genid)
 			return 0;
 		if (xdst->num_pols > 0 &&
-		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
+		    xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
 			return 0;
 
 		mtu = dst_mtu(dst->child);
@@ -2724,8 +2725,6 @@ int xfrm_policy_register_afinfo(struct x
 			dst_ops->link_failure = xfrm_link_failure;
 		if (likely(dst_ops->neigh_lookup == NULL))
 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
-		if (likely(afinfo->garbage_collect == NULL))
-			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
 		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
 	}
 	spin_unlock(&xfrm_policy_afinfo_lock);
@@ -2779,7 +2778,6 @@ int xfrm_policy_unregister_afinfo(struct
 		dst_ops->check = NULL;
 		dst_ops->negative_advice = NULL;
 		dst_ops->link_failure = NULL;
-		afinfo->garbage_collect = NULL;
 	}
 	return err;
 }
@@ -3162,7 +3160,7 @@ static int xfrm_policy_migrate(struct xf
 			       sizeof(pol->xfrm_vec[i].saddr));
 			pol->xfrm_vec[i].encap_family = mp->new_family;
 			/* flush bundles */
-			atomic_inc(&pol->genid);
+			atomic_inc_unchecked(&pol->genid);
 		}
 	}
 
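
The atomic_inc/atomic_read changes above follow the PaX REFCOUNT convention: policy->genid and flow_cache_genid are generation cookies that are allowed to wrap, so they move to the _unchecked atomic variants that are exempt from overflow detection, leaving the checked atomic_t for real reference counts. A rough userspace analogy with C11 atomics (the wrap-tolerant counter below is illustrative, not the kernel API):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Generation cookie: readers only compare it for (in)equality, so
 * wrapping past UINT_MAX is harmless by design. */
static atomic_uint genid = UINT_MAX - 1u;

int main(void)
{
	unsigned int before = atomic_load(&genid);

	atomic_fetch_add(&genid, 1u);	/* reaches UINT_MAX */
	atomic_fetch_add(&genid, 1u);	/* wraps to 0, still a valid cookie */

	unsigned int after = atomic_load(&genid);
	printf("genid: %u -> %u (wrapped: %s)\n",
	       before, after, after < before ? "yes" : "no");

	/* A real reference count must never wrap; that is the case the
	 * checked atomic_t (with PaX overflow detection) is kept for. */
	return 0;
}
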
diff -ruNp linux-3.13.11/net/xfrm/xfrm_state.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_state.c
--- linux-3.13.11/net/xfrm/xfrm_state.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_state.c	2014-07-09 12:00:16.000000000 +0200
@@ -174,12 +174,14 @@ int xfrm_register_type(const struct xfrm
 
 	if (unlikely(afinfo == NULL))
 		return -EAFNOSUPPORT;
-	typemap = afinfo->type_map;
+	typemap = (const struct xfrm_type **)afinfo->type_map;
 	spin_lock_bh(&xfrm_type_lock);
 
-	if (likely(typemap[type->proto] == NULL))
+	if (likely(typemap[type->proto] == NULL)) {
+		pax_open_kernel();
 		typemap[type->proto] = type;
-	else
+		pax_close_kernel();
+	} else
 		err = -EEXIST;
 	spin_unlock_bh(&xfrm_type_lock);
 	xfrm_state_put_afinfo(afinfo);
@@ -195,13 +197,16 @@ int xfrm_unregister_type(const struct xf
 
 	if (unlikely(afinfo == NULL))
 		return -EAFNOSUPPORT;
-	typemap = afinfo->type_map;
+	typemap = (const struct xfrm_type **)afinfo->type_map;
 	spin_lock_bh(&xfrm_type_lock);
 
 	if (unlikely(typemap[type->proto] != type))
 		err = -ENOENT;
-	else
+	else {
+		pax_open_kernel();
 		typemap[type->proto] = NULL;
+		pax_close_kernel();
+	}
 	spin_unlock_bh(&xfrm_type_lock);
 	xfrm_state_put_afinfo(afinfo);
 	return err;
@@ -211,7 +216,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
 {
 	struct xfrm_state_afinfo *afinfo;
-	const struct xfrm_type **typemap;
 	const struct xfrm_type *type;
 	int modload_attempted = 0;
 
@@ -219,9 +223,8 @@ retry:
 	afinfo = xfrm_state_get_afinfo(family);
 	if (unlikely(afinfo == NULL))
 		return NULL;
-	typemap = afinfo->type_map;
 
-	type = typemap[proto];
+	type = afinfo->type_map[proto];
 	if (unlikely(type && !try_module_get(type->owner)))
 		type = NULL;
 	if (!type && !modload_attempted) {
@@ -255,7 +258,7 @@ int xfrm_register_mode(struct xfrm_mode
 		return -EAFNOSUPPORT;
 
 	err = -EEXIST;
-	modemap = afinfo->mode_map;
+	modemap = (struct xfrm_mode **)afinfo->mode_map;
 	spin_lock_bh(&xfrm_mode_lock);
 	if (modemap[mode->encap])
 		goto out;
@@ -264,8 +267,10 @@ int xfrm_register_mode(struct xfrm_mode
 	if (!try_module_get(afinfo->owner))
 		goto out;
 
-	mode->afinfo = afinfo;
+	pax_open_kernel();
+	*(const void **)&mode->afinfo = afinfo;
 	modemap[mode->encap] = mode;
+	pax_close_kernel();
 	err = 0;
 
 out:
@@ -289,10 +294,12 @@ int xfrm_unregister_mode(struct xfrm_mod
 		return -EAFNOSUPPORT;
 
 	err = -ENOENT;
-	modemap = afinfo->mode_map;
+	modemap = (struct xfrm_mode **)afinfo->mode_map;
 	spin_lock_bh(&xfrm_mode_lock);
 	if (likely(modemap[mode->encap] == mode)) {
+		pax_open_kernel();
 		modemap[mode->encap] = NULL;
+		pax_close_kernel();
 		module_put(mode->afinfo->owner);
 		err = 0;
 	}
@@ -1486,10 +1493,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
 u32 xfrm_get_acqseq(void)
 {
 	u32 res;
-	static atomic_t acqseq;
+	static atomic_unchecked_t acqseq;
 
 	do {
-		res = atomic_inc_return(&acqseq);
+		res = atomic_inc_return_unchecked(&acqseq);
 	} while (!res);
 
 	return res;
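
The pax_open_kernel()/pax_close_kernel() pairs added above bracket the rare legitimate writes to the xfrm type and mode maps, which constification/KERNEXEC keep read-only the rest of the time. As a loose userspace analogy only (not the kernel mechanism), the same open-write-close discipline can be sketched with mprotect():

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* A "registration table" kept read-only most of the time. */
	void *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;
	strcpy(table, "initial");
	mprotect(table, pagesz, PROT_READ);	/* normal state: read-only */

	/* Short, explicit update window, analogous to
	 * pax_open_kernel(); ...write...; pax_close_kernel(); */
	mprotect(table, pagesz, PROT_READ | PROT_WRITE);
	strcpy(table, "updated");
	mprotect(table, pagesz, PROT_READ);

	printf("table now holds: %s\n", (char *)table);
	munmap(table, pagesz);
	return 0;
}
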
diff -ruNp linux-3.13.11/net/xfrm/xfrm_sysctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_sysctl.c
--- linux-3.13.11/net/xfrm/xfrm_sysctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/net/xfrm/xfrm_sysctl.c	2014-07-09 12:00:16.000000000 +0200
@@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
 
 int __net_init xfrm_sysctl_init(struct net *net)
 {
-	struct ctl_table *table;
+	ctl_table_no_const *table;
 
 	__xfrm_sysctl_init(net);
 
diff -ruNp linux-3.13.11/scripts/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile
--- linux-3.13.11/scripts/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile	2014-07-09 12:00:16.000000000 +0200
@@ -40,3 +40,5 @@ subdir-$(CONFIG_DTC)         += dtc
 
 # Let clean descend into subdirs
 subdir-	+= basic kconfig package selinux
+
+clean-files := randstruct.seed
diff -ruNp linux-3.13.11/scripts/Makefile.build linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.build
--- linux-3.13.11/scripts/Makefile.build	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.build	2014-07-09 12:00:16.000000000 +0200
@@ -111,7 +111,7 @@ endif
 endif
 
 # Do not include host rules unless needed
-ifneq ($(hostprogs-y)$(hostprogs-m),)
+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
 include scripts/Makefile.host
 endif
 
diff -ruNp linux-3.13.11/scripts/Makefile.clean linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.clean
--- linux-3.13.11/scripts/Makefile.clean	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.clean	2014-07-09 12:00:16.000000000 +0200
@@ -43,7 +43,8 @@ subdir-ymn	:= $(addprefix $(obj)/,$(subd
 __clean-files	:= $(extra-y) $(always)                  \
 		   $(targets) $(clean-files)             \
 		   $(host-progs)                         \
-		   $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
+		   $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
+		   $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
diff -ruNp linux-3.13.11/scripts/Makefile.host linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.host
--- linux-3.13.11/scripts/Makefile.host	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/Makefile.host	2014-07-09 12:00:16.000000000 +0200
@@ -31,6 +31,8 @@
 # Note: Shared libraries consisting of C++ files are not supported
 
 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
+__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
+__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
 
 # C code
 # Executables compiled from a single .c file
@@ -54,11 +56,15 @@ host-cxxobjs	:= $(sort $(foreach m,$(hos
 # Shared libaries (only .c supported)
 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
 host-cshlib	:= $(sort $(filter %.so, $(host-cobjs)))
+host-cshlib	+= $(sort $(filter %.so, $(__hostlibs)))
+host-cxxshlib	:= $(sort $(filter %.so, $(__hostcxxlibs)))
 # Remove .so files from "xxx-objs"
 host-cobjs	:= $(filter-out %.so,$(host-cobjs))
+host-cxxobjs	:= $(filter-out %.so,$(host-cxxobjs))
 
-#Object (.o) files used by the shared libaries
+# Object (.o) files used by the shared libaries
 host-cshobjs	:= $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
+host-cxxshobjs	:= $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
 
 # output directory for programs/.o files
 # hostprogs-y := tools/build may have been specified. Retrieve directory
@@ -82,7 +88,9 @@ host-cobjs	:= $(addprefix $(obj)/,$(host
 host-cxxmulti	:= $(addprefix $(obj)/,$(host-cxxmulti))
 host-cxxobjs	:= $(addprefix $(obj)/,$(host-cxxobjs))
 host-cshlib	:= $(addprefix $(obj)/,$(host-cshlib))
+host-cxxshlib	:= $(addprefix $(obj)/,$(host-cxxshlib))
 host-cshobjs	:= $(addprefix $(obj)/,$(host-cshobjs))
+host-cxxshobjs	:= $(addprefix $(obj)/,$(host-cxxshobjs))
 host-objdirs    := $(addprefix $(obj)/,$(host-objdirs))
 
 obj-dirs += $(host-objdirs)
@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs	= HOSTCC  -fPIC $
 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
 	$(call if_changed_dep,host-cshobjs)
 
+# Compile .c file, create position independent .o file
+# host-cxxshobjs -> .o
+quiet_cmd_host-cxxshobjs	= HOSTCXX -fPIC $@
+      cmd_host-cxxshobjs	= $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
+$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
+	$(call if_changed_dep,host-cxxshobjs)
+
 # Link a shared library, based on position independent .o files
 # *.o -> .so shared library (host-cshlib)
 quiet_cmd_host-cshlib	= HOSTLLD -shared $@
@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib	= HOSTLLD -shared
 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
 	$(call if_changed,host-cshlib)
 
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cxxshlib)
+quiet_cmd_host-cxxshlib	= HOSTLLD -shared $@
+      cmd_host-cxxshlib	= $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
+			  $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+			  $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
+	$(call if_changed,host-cxxshlib)
+
 targets += $(host-csingle)  $(host-cmulti) $(host-cobjs)\
-	   $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) 
+	   $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
 
diff -ruNp linux-3.13.11/scripts/basic/fixdep.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/basic/fixdep.c
--- linux-3.13.11/scripts/basic/fixdep.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/basic/fixdep.c	2014-07-09 12:00:16.000000000 +0200
@@ -161,7 +161,7 @@ static unsigned int strhash(const char *
 /*
  * Lookup a value in the configuration string.
  */
-static int is_defined_config(const char *name, int len, unsigned int hash)
+static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
 {
 	struct item *aux;
 
@@ -211,10 +211,10 @@ static void clear_config(void)
 /*
  * Record the use of a CONFIG_* word.
  */
-static void use_config(const char *m, int slen)
+static void use_config(const char *m, unsigned int slen)
 {
 	unsigned int hash = strhash(m, slen);
-	int c, i;
+	unsigned int c, i;
 
 	if (is_defined_config(m, slen, hash))
 	    return;
@@ -235,9 +235,9 @@ static void use_config(const char *m, in
 
 static void parse_config_file(const char *map, size_t len)
 {
-	const int *end = (const int *) (map + len);
+	const unsigned int *end = (const unsigned int *) (map + len);
 	/* start at +1, so that p can never be < map */
-	const int *m   = (const int *) map + 1;
+	const unsigned int *m   = (const unsigned int *) map + 1;
 	const char *p, *q;
 
 	for (; m < end; m++) {
@@ -435,7 +435,7 @@ static void print_deps(void)
 static void traps(void)
 {
 	static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
-	int *p = (int *)test;
+	unsigned int *p = (unsigned int *)test;
 
 	if (*p != INT_CONF) {
 		fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
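
The fixdep changes switch lengths, hash values and the word pointers that scan the mmap'ed .cmd files from int to unsigned int, in line with the signedness clean-ups elsewhere in the patch: unsigned arithmetic wraps with defined behaviour, while signed overflow is undefined. A small standalone illustration of the hashing half (an FNV-style stand-in, not fixdep's exact code):

#include <stdio.h>
#include <string.h>

/* Unsigned hash: overflow simply wraps mod 2^32, which is well defined.
 * The same loop written on signed int could overflow (undefined
 * behaviour) on long inputs, which is what the patch avoids. */
static unsigned int strhash(const char *str, unsigned int sz)
{
	unsigned int hash = 2166136261u;	/* FNV offset basis */

	for (; sz; sz--, str++)
		hash = (hash ^ (unsigned char)*str) * 16777619u;
	return hash;
}

int main(void)
{
	const char *sym = "CONFIG_PAX_KERNEXEC";

	printf("hash(%s) = %#x\n", sym, strhash(sym, (unsigned int)strlen(sym)));
	return 0;
}
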
diff -ruNp linux-3.13.11/scripts/checksyscalls.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/checksyscalls.sh
--- linux-3.13.11/scripts/checksyscalls.sh	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/checksyscalls.sh	2014-07-09 12:00:16.000000000 +0200
@@ -193,7 +193,6 @@ cat << EOF
 #define __IGNORE_afs_syscall
 #define __IGNORE_getpmsg
 #define __IGNORE_putpmsg
-#define __IGNORE_vserver
 EOF
 }
 
diff -ruNp linux-3.13.11/scripts/gcc-plugin.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/gcc-plugin.sh
--- linux-3.13.11/scripts/gcc-plugin.sh	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/gcc-plugin.sh	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,16 @@
+#!/bin/bash
+srctree=$(dirname "$0")
+gccplugins_dir=$($3 -print-file-name=plugin)
+plugincc=$($1 -E -shared - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
+#include "gcc-common.h"
+#if BUILDING_GCC_VERSION >= 4008 || defined(ENABLE_BUILD_WITH_CXX)
+#warning $2
+#else
+#warning $1
+#endif
+EOF
+)
+if [ $? -eq 0 ]
+then
+	( [[ "$plugincc" =~ "$1" ]] && echo "$1" ) || ( [[ "$plugincc" =~ "$2" ]] && echo "$2" )
+fi
diff -ruNp linux-3.13.11/scripts/headers_install.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/headers_install.sh
--- linux-3.13.11/scripts/headers_install.sh	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/headers_install.sh	2014-07-09 12:00:16.000000000 +0200
@@ -32,6 +32,7 @@ do
 	FILE="$(basename "$i")"
 	sed -r \
 		-e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
+		-e 's/__intentional_overflow\([- \t,0-9]*\)//g' \
 		-e 's/__attribute_const__([ \t]|$)/\1/g' \
 		-e 's@^#include <linux/compiler.h>@@' \
 		-e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
diff -ruNp linux-3.13.11/scripts/link-vmlinux.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/link-vmlinux.sh
--- linux-3.13.11/scripts/link-vmlinux.sh	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/link-vmlinux.sh	2014-07-09 12:00:16.000000000 +0200
@@ -162,7 +162,7 @@ else
 fi;
 
 # final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
 
 kallsymso=""
 kallsyms_vmlinux=""
diff -ruNp linux-3.13.11/scripts/mod/file2alias.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/file2alias.c
--- linux-3.13.11/scripts/mod/file2alias.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/file2alias.c	2014-07-09 12:00:16.000000000 +0200
@@ -142,7 +142,7 @@ static void device_id_check(const char *
 			    unsigned long size, unsigned long id_size,
 			    void *symval)
 {
-	int i;
+	unsigned int i;
 
 	if (size % id_size || size < id_size) {
 		fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
@@ -170,7 +170,7 @@ static void device_id_check(const char *
 /* USB is special because the bcdDevice can be matched against a numeric range */
 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
 static void do_usb_entry(void *symval,
-			 unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
+			 unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
 			 unsigned char range_lo, unsigned char range_hi,
 			 unsigned char max, struct module *mod)
 {
@@ -280,7 +280,7 @@ static void do_usb_entry_multi(void *sym
 {
 	unsigned int devlo, devhi;
 	unsigned char chi, clo, max;
-	int ndigits;
+	unsigned int ndigits;
 
 	DEF_FIELD(symval, usb_device_id, match_flags);
 	DEF_FIELD(symval, usb_device_id, idVendor);
@@ -533,7 +533,7 @@ static void do_pnp_device_entry(void *sy
 	for (i = 0; i < count; i++) {
 		DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
 		char acpi_id[sizeof(*id)];
-		int j;
+		unsigned int j;
 
 		buf_printf(&mod->dev_table_buf,
 			   "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
@@ -562,7 +562,7 @@ static void do_pnp_card_entries(void *sy
 
 		for (j = 0; j < PNP_MAX_DEVICES; j++) {
 			const char *id = (char *)(*devs)[j].id;
-			int i2, j2;
+			unsigned int i2, j2;
 			int dup = 0;
 
 			if (!id[0])
@@ -588,7 +588,7 @@ static void do_pnp_card_entries(void *sy
 			/* add an individual alias for every device entry */
 			if (!dup) {
 				char acpi_id[PNP_ID_LEN];
-				int k;
+				unsigned int k;
 
 				buf_printf(&mod->dev_table_buf,
 					   "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
@@ -940,7 +940,7 @@ static void dmi_ascii_filter(char *d, co
 static int do_dmi_entry(const char *filename, void *symval,
 			char *alias)
 {
-	int i, j;
+	unsigned int i, j;
 	DEF_FIELD_ADDR(symval, dmi_system_id, matches);
 	sprintf(alias, "dmi*");
 
diff -ruNp linux-3.13.11/scripts/mod/modpost.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/modpost.c
--- linux-3.13.11/scripts/mod/modpost.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/modpost.c	2014-07-09 12:00:16.000000000 +0200
@@ -941,6 +941,7 @@ enum mismatch {
 	ANY_INIT_TO_ANY_EXIT,
 	ANY_EXIT_TO_ANY_INIT,
 	EXPORT_TO_INIT_EXIT,
+	DATA_TO_TEXT
 };
 
 struct sectioncheck {
@@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[]
 	.tosec   = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
 	.mismatch = EXPORT_TO_INIT_EXIT,
 	.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
+},
+/* Do not reference code from writable data */
+{
+	.fromsec = { DATA_SECTIONS, NULL },
+	.tosec   = { TEXT_SECTIONS, NULL },
+	.mismatch = DATA_TO_TEXT
 }
 };
 
@@ -1147,10 +1154,10 @@ static Elf_Sym *find_elf_symbol(struct e
 			continue;
 		if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
 			continue;
-		if (sym->st_value == addr)
-			return sym;
 		/* Find a symbol nearby - addr are maybe negative */
 		d = sym->st_value - addr;
+		if (d == 0)
+			return sym;
 		if (d < 0)
 			d = addr - sym->st_value;
 		if (d < distance) {
@@ -1428,6 +1435,14 @@ static void report_sec_mismatch(const ch
 		tosym, prl_to, prl_to, tosym);
 		free(prl_to);
 		break;
+	case DATA_TO_TEXT:
+#if 0
+		fprintf(stderr,
+		"The %s %s:%s references\n"
+		"the %s %s:%s%s\n",
+		from, fromsec, fromsym, to, tosec, tosym, to_p);
+#endif
+		break;
 	}
 	fprintf(stderr, "\n");
 }
@@ -1662,7 +1677,7 @@ static void section_rel(const char *modn
 static void check_sec_ref(struct module *mod, const char *modname,
                           struct elf_info *elf)
 {
-	int i;
+	unsigned int i;
 	Elf_Shdr *sechdrs = elf->sechdrs;
 
 	/* Walk through all sections */
@@ -1781,7 +1796,7 @@ void __attribute__((format(printf, 2, 3)
 	va_end(ap);
 }
 
-void buf_write(struct buffer *buf, const char *s, int len)
+void buf_write(struct buffer *buf, const char *s, unsigned int len)
 {
 	if (buf->size - buf->pos < len) {
 		buf->size += len + SZ;
@@ -2000,7 +2015,7 @@ static void write_if_changed(struct buff
 	if (fstat(fileno(file), &st) < 0)
 		goto close_write;
 
-	if (st.st_size != b->pos)
+	if (st.st_size != (off_t)b->pos)
 		goto close_write;
 
 	tmp = NOFAIL(malloc(b->pos));
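
The new DATA_TO_TEXT section check makes modpost notice references from writable data sections into text, i.e. function pointers kept in .data instead of .rodata (the warning itself is stubbed out with #if 0 here). What it looks for comes down to whether an ops object is declared const, as in this illustrative fragment (section placement per common ELF conventions):

#include <stdio.h>

struct ops {
	int (*handler)(int);
};

static int do_handle(int x) { return x + 1; }

/* Writable object referencing text: lands in .data, the case the new
 * DATA_TO_TEXT check targets, since whoever can write to it controls
 * a code pointer. */
static struct ops writable_ops = { .handler = do_handle };

/* Constified object: lands in .rodata (or .data..read_only with the
 * module-common.lds change later in this patch), so the code pointer
 * is not writable at runtime. */
static const struct ops readonly_ops = { .handler = do_handle };

int main(void)
{
	printf("%d %d\n", writable_ops.handler(1), readonly_ops.handler(2));
	return 0;
}
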
diff -ruNp linux-3.13.11/scripts/mod/modpost.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/modpost.h
--- linux-3.13.11/scripts/mod/modpost.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/modpost.h	2014-07-09 12:00:16.000000000 +0200
@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
 
 struct buffer {
 	char *p;
-	int pos;
-	int size;
+	unsigned int pos;
+	unsigned int size;
 };
 
 void __attribute__((format(printf, 2, 3)))
 buf_printf(struct buffer *buf, const char *fmt, ...);
 
 void
-buf_write(struct buffer *buf, const char *s, int len);
+buf_write(struct buffer *buf, const char *s, unsigned int len);
 
 struct module {
 	struct module *next;
diff -ruNp linux-3.13.11/scripts/mod/sumversion.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/sumversion.c
--- linux-3.13.11/scripts/mod/sumversion.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/mod/sumversion.c	2014-07-09 12:00:16.000000000 +0200
@@ -470,7 +470,7 @@ static void write_version(const char *fi
 		goto out;
 	}
 
-	if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
+	if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
 		warn("writing sum in %s failed: %s\n",
 			filename, strerror(errno));
 		goto out;
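
The sumversion.c tweak casts the expected length to ssize_t before comparing it with write()'s return value: strlen() yields size_t, write() returns ssize_t, and the mixed comparison, while it still catches a failed write, trips -Wsign-compare under the stricter build flags. A standalone sketch of the corrected pattern (writing to /dev/null only to have something runnable):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *sum = "0123456789abcdef";
	int fd = open("/dev/null", O_WRONLY);

	if (fd < 0)
		return 1;

	/* Compare like with like: cast the size_t length to ssize_t so the
	 * comparison with write()'s signed return value is warning-free. */
	if (write(fd, sum, strlen(sum) + 1) != (ssize_t)(strlen(sum) + 1)) {
		fprintf(stderr, "short or failed write\n");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
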
diff -ruNp linux-3.13.11/scripts/module-common.lds linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/module-common.lds
--- linux-3.13.11/scripts/module-common.lds	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/module-common.lds	2014-07-09 12:00:16.000000000 +0200
@@ -6,6 +6,10 @@
 SECTIONS {
 	/DISCARD/ : { *(.discard) }
 
+	.rodata : {
+		*(.rodata) *(.rodata.*)
+		*(.data..read_only)
+	}
 	__ksymtab		: { *(SORT(___ksymtab+*)) }
 	__ksymtab_gpl		: { *(SORT(___ksymtab_gpl+*)) }
 	__ksymtab_unused	: { *(SORT(___ksymtab_unused+*)) }
diff -ruNp linux-3.13.11/scripts/package/builddeb linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/package/builddeb
--- linux-3.13.11/scripts/package/builddeb	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/package/builddeb	2014-07-09 12:00:16.000000000 +0200
@@ -281,6 +281,7 @@ fi
 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff -ruNp linux-3.13.11/scripts/pnmtologo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/pnmtologo.c
--- linux-3.13.11/scripts/pnmtologo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/pnmtologo.c	2014-07-09 12:00:16.000000000 +0200
@@ -244,14 +244,14 @@ static void write_header(void)
     fprintf(out, " *  Linux logo %s\n", logoname);
     fputs(" */\n\n", out);
     fputs("#include <linux/linux_logo.h>\n\n", out);
-    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+    fprintf(out, "static unsigned char %s_data[] = {\n",
 	    logoname);
 }
 
 static void write_footer(void)
 {
     fputs("\n};\n\n", out);
-    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+    fprintf(out, "const struct linux_logo %s = {\n", logoname);
     fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
     fprintf(out, "\t.width\t\t= %d,\n", logo_width);
     fprintf(out, "\t.height\t\t= %d,\n", logo_height);
@@ -381,7 +381,7 @@ static void write_logo_clut224(void)
     fputs("\n};\n\n", out);
 
     /* write logo clut */
-    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+    fprintf(out, "static unsigned char %s_clut[] = {\n",
 	    logoname);
     write_hex_cnt = 0;
     for (i = 0; i < logo_clutsize; i++) {
diff -ruNp linux-3.13.11/scripts/sortextable.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/sortextable.h
--- linux-3.13.11/scripts/sortextable.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/scripts/sortextable.h	2014-07-09 12:00:16.000000000 +0200
@@ -108,9 +108,9 @@ do_func(Elf_Ehdr *ehdr, char const *cons
 	const char *secstrtab;
 	const char *strtab;
 	char *extab_image;
-	int extab_index = 0;
-	int i;
-	int idx;
+	unsigned int extab_index = 0;
+	unsigned int i;
+	unsigned int idx;
 	unsigned int num_sections;
 	unsigned int secindex_strings;
 
diff -ruNp linux-3.13.11/security/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/Kconfig
--- linux-3.13.11/security/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/Kconfig	2014-07-09 12:00:16.000000000 +0200
@@ -4,6 +4,960 @@
 
 menu "Security options"
 
+menu "Grsecurity"
+
+	config ARCH_TRACK_EXEC_LIMIT
+	bool
+
+	config PAX_KERNEXEC_PLUGIN
+	bool
+
+	config PAX_PER_CPU_PGD
+	bool
+
+	config TASK_SIZE_MAX_SHIFT
+	int
+	depends on X86_64
+	default 47 if !PAX_PER_CPU_PGD
+	default 42 if PAX_PER_CPU_PGD
+
+	config PAX_ENABLE_PAE
+	bool
+	default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
+	
+	config PAX_USERCOPY_SLABS
+	bool
+
+config GRKERNSEC
+	bool "Grsecurity"
+	select CRYPTO
+	select CRYPTO_SHA256
+	select PROC_FS
+	select STOP_MACHINE
+	select TTY
+	select DEBUG_KERNEL
+	select DEBUG_LIST
+	help
+	  If you say Y here, you will be able to configure many features
+	  that will enhance the security of your system.  It is highly
+	  recommended that you say Y here and read through the help
+	  for each option so that you fully understand the features and
+	  can evaluate their usefulness for your machine.
+
+choice
+	prompt "Configuration Method"
+	depends on GRKERNSEC
+	default GRKERNSEC_CONFIG_CUSTOM
+	help
+
+config GRKERNSEC_CONFIG_AUTO
+	bool "Automatic"
+	help
+	  If you choose this configuration method, you'll be able to answer a small
+	  number of simple questions about how you plan to use this kernel.
+	  The settings of grsecurity and PaX will be automatically configured for
+	  the highest commonly-used settings within the provided constraints.
+
+	  If you require additional configuration, custom changes can still be made
+	  from the "custom configuration" menu.
+
+config GRKERNSEC_CONFIG_CUSTOM
+	bool "Custom"
+	help
+	  If you choose this configuration method, you'll be able to configure all
+	  grsecurity and PaX settings manually.  Via this method, no options are
+	  automatically enabled.
+
+endchoice
+
+choice
+	prompt "Usage Type"
+	depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+	default GRKERNSEC_CONFIG_SERVER
+	help
+
+config GRKERNSEC_CONFIG_SERVER
+	bool "Server"
+	help
+	  Choose this option if you plan to use this kernel on a server.
+
+config GRKERNSEC_CONFIG_DESKTOP
+	bool "Desktop"
+	help
+	  Choose this option if you plan to use this kernel on a desktop.
+
+endchoice
+
+choice
+	prompt "Virtualization Type"
+	depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
+	default GRKERNSEC_CONFIG_VIRT_NONE
+	help
+
+config GRKERNSEC_CONFIG_VIRT_NONE
+	bool "None"
+	help
+	  Choose this option if this kernel will be run on bare metal.
+
+config GRKERNSEC_CONFIG_VIRT_GUEST
+	bool "Guest"
+	help
+	  Choose this option if this kernel will be run as a VM guest.
+
+config GRKERNSEC_CONFIG_VIRT_HOST
+	bool "Host"
+	help
+	  Choose this option if this kernel will be run as a VM host.
+
+endchoice
+
+choice
+	prompt "Virtualization Hardware"
+	depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
+	help
+
+config GRKERNSEC_CONFIG_VIRT_EPT
+	bool "EPT/RVI Processor Support"
+	depends on X86
+	help
+	  Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
+	  hardware virtualization.  This allows for additional kernel hardening protections
+	  to operate without additional performance impact.
+
+	  To see if your Intel processor supports EPT, see:
+	  http://ark.intel.com/Products/VirtualizationTechnology
+	  (Most Core i3/5/7 support EPT)
+
+	  To see if your AMD processor supports RVI, see:
+	  http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
+
+config GRKERNSEC_CONFIG_VIRT_SOFT
+	bool "First-gen/No Hardware Virtualization"
+	help
+	  Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
+	  support hardware virtualization or doesn't support the EPT/RVI extensions.
+
+endchoice
+
+choice
+	prompt "Virtualization Software"
+	depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
+	help
+
+config GRKERNSEC_CONFIG_VIRT_XEN
+	bool "Xen"
+	help
+	  Choose this option if this kernel is running as a Xen guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_VMWARE
+	bool "VMWare"
+	help
+	  Choose this option if this kernel is running as a VMWare guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_KVM
+	bool "KVM"
+	help
+	  Choose this option if this kernel is running as a KVM guest or host.
+
+config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
+	bool "VirtualBox"
+	help
+	  Choose this option if this kernel is running as a VirtualBox guest or host.
+
+endchoice
+
+choice
+	prompt "Required Priorities"
+	depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+	default GRKERNSEC_CONFIG_PRIORITY_PERF
+	help
+
+config GRKERNSEC_CONFIG_PRIORITY_PERF
+	bool "Performance"
+	help
+	  Choose this option if performance is of highest priority for this deployment
+	  of grsecurity.  Features like UDEREF on a 64bit kernel, kernel stack clearing,
+	  clearing of structures intended for userland, and freed memory sanitizing will
+	  be disabled.
+
+config GRKERNSEC_CONFIG_PRIORITY_SECURITY
+	bool "Security"
+	help
+	  Choose this option if security is of highest priority for this deployment of
+	  grsecurity.  UDEREF, kernel stack clearing, clearing of structures intended
+	  for userland, and freed memory sanitizing will be enabled for this kernel.
+	  In a worst-case scenario, these features can introduce a 20% performance hit
+	  (UDEREF on x64 contributing half of this hit).
+
+endchoice
+
+menu "Default Special Groups"
+depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
+
+config GRKERNSEC_PROC_GID
+	int "GID exempted from /proc restrictions"
+	default 1001
+	help
+	  Setting this GID determines which group will be exempted from
+	  grsecurity's /proc restrictions, allowing users of the specified
+	  group  to view network statistics and the existence of other users'
+	  processes on the system.  This GID may also be chosen at boot time
+	  via "grsec_proc_gid=" on the kernel commandline.
+
+config GRKERNSEC_TPE_UNTRUSTED_GID
+        int "GID for TPE-untrusted users"
+        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+        default 1005
+        help
+	  Setting this GID determines which group untrusted users should
+	  be added to.  These users will be placed under grsecurity's Trusted Path
+	  Execution mechanism, preventing them from executing their own binaries.
+	  The users will only be able to execute binaries in directories owned and
+	  writable only by the root user.  If the sysctl option is enabled, a sysctl
+	  option with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_TRUSTED_GID
+        int "GID for TPE-trusted users"
+        depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+        default 1005
+        help
+          Setting this GID determines what group TPE restrictions will be
+          *disabled* for.  If the sysctl option is enabled, a sysctl option
+          with name "tpe_gid" is created.
+
+config GRKERNSEC_SYMLINKOWN_GID
+        int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
+        depends on GRKERNSEC_CONFIG_SERVER
+        default 1006
+        help
+          Setting this GID determines what group kernel-enforced
+          SymlinksIfOwnerMatch will be enabled for.  If the sysctl option
+          is enabled, a sysctl option with name "symlinkown_gid" is created.
+
+
+endmenu
+
+menu "Customize Configuration"
+depends on GRKERNSEC
+
+menu "PaX"
+
+config PAX
+	bool "Enable various PaX features"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
+	help
+	  This allows you to enable various PaX features.  PaX adds
+	  intrusion prevention mechanisms to the kernel that reduce
+	  the risks posed by exploitable memory corruption bugs.
+
+menu "PaX Control"
+	depends on PAX
+
+config PAX_SOFTMODE
+	bool 'Support soft mode'
+	help
+	  Enabling this option will allow you to run PaX in soft mode, that
+	  is, PaX features will not be enforced by default, only on executables
+	  marked explicitly.  You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
+	  support as they are the only way to mark executables for soft mode use.
+
+	  Soft mode can be activated by using the "pax_softmode=1" kernel command
+	  line option on boot.  Furthermore you can control various PaX features
+	  at runtime via the entries in /proc/sys/kernel/pax.
+
+config PAX_EI_PAX
+	bool 'Use legacy ELF header marking'
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  Enabling this option will allow you to control PaX features on
+	  a per executable basis via the 'chpax' utility available at
+	  http://pax.grsecurity.net/.  The control flags will be read from
+	  an otherwise reserved part of the ELF header.  This marking has
+	  numerous drawbacks (no support for soft-mode, toolchain does not
+	  know about the non-standard use of the ELF header) therefore it
+	  has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
+	  support.
+
+	  Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
+	  support as well, they will override the legacy EI_PAX marks.
+
+	  If you enable none of the marking options then all applications
+	  will run with PaX enabled on them by default.
+
+config PAX_PT_PAX_FLAGS
+	bool 'Use ELF program header marking'
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  Enabling this option will allow you to control PaX features on
+	  a per executable basis via the 'paxctl' utility available at
+	  http://pax.grsecurity.net/.  The control flags will be read from
+	  a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
+	  has the benefits of supporting both soft mode and being fully
+	  integrated into the toolchain (the binutils patch is available
+	  from http://pax.grsecurity.net).
+
+	  Note that if you enable the legacy EI_PAX marking support as well,
+	  the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
+
+	  If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+	  must make sure that the marks are the same if a binary has both marks.
+
+	  If you enable none of the marking options then all applications
+	  will run with PaX enabled on them by default.
+
+config PAX_XATTR_PAX_FLAGS
+	bool 'Use filesystem extended attributes marking'
+	default y if GRKERNSEC_CONFIG_AUTO
+	select CIFS_XATTR if CIFS
+	select EXT2_FS_XATTR if EXT2_FS
+	select EXT3_FS_XATTR if EXT3_FS
+	select JFFS2_FS_XATTR if JFFS2_FS
+	select REISERFS_FS_XATTR if REISERFS_FS
+	select SQUASHFS_XATTR if SQUASHFS
+	select TMPFS_XATTR if TMPFS
+	select UBIFS_FS_XATTR if UBIFS_FS
+	help
+	  Enabling this option will allow you to control PaX features on
+	  a per executable basis via the 'setfattr' utility.  The control
+	  flags will be read from the user.pax.flags extended attribute of
+	  the file.  This marking has the benefit of supporting binary-only
+	  applications that self-check themselves (e.g., skype) and would
+	  not tolerate chpax/paxctl changes.  The main drawback is that
+	  extended attributes are not supported by some filesystems (e.g.,
+	  isofs, udf, vfat) so copying files through such filesystems will
+	  lose the extended attributes and these PaX markings.
+
+	  Note that if you enable the legacy EI_PAX marking support as well,
+	  the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
+
+	  If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
+	  must make sure that the marks are the same if a binary has both marks.
+
+	  If you enable none of the marking options then all applications
+	  will run with PaX enabled on them by default.
+
+choice
+	prompt 'MAC system integration'
+	default PAX_HAVE_ACL_FLAGS
+	help
+	  Mandatory Access Control systems have the option of controlling
+	  PaX flags on a per executable basis, choose the method supported
+	  by your particular system.
+
+	  - "none": if your MAC system does not interact with PaX,
+	  - "direct": if your MAC system defines pax_set_initial_flags() itself,
+	  - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
+
+	  NOTE: this option is for developers/integrators only.
+
+	config PAX_NO_ACL_FLAGS
+		bool 'none'
+
+	config PAX_HAVE_ACL_FLAGS
+		bool 'direct'
+
+	config PAX_HOOK_ACL_FLAGS
+		bool 'hook'
+endchoice
+
+endmenu
+
+menu "Non-executable pages"
+	depends on PAX
+
+config PAX_NOEXEC
+	bool "Enforce non-executable pages"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
+	help
+	  By design some architectures do not allow for protecting memory
+	  pages against execution or even if they do, Linux does not make
+	  use of this feature.  In practice this means that if a page is
+	  readable (such as the stack or heap) it is also executable.
+
+	  There is a well known exploit technique that makes use of this
+	  fact and a common programming mistake where an attacker can
+	  introduce code of his choice somewhere in the attacked program's
+	  memory (typically the stack or the heap) and then execute it.
+
+	  If the attacked program was running with different (typically
+	  higher) privileges than that of the attacker, then he can elevate
+	  his own privilege level (e.g. get a root shell, write to files for
+	  which he does not have write access to, etc).
+
+	  Enabling this option will let you choose from various features
+	  that prevent the injection and execution of 'foreign' code in
+	  a program.
+
+	  This will also break programs that rely on the old behaviour and
+	  expect that dynamically allocated memory via the malloc() family
+	  of functions is executable (which it is not).  Notable examples
+	  are the XFree86 4.x server, the java runtime and wine.
+
+config PAX_PAGEEXEC
+	bool "Paging based non-executable pages"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
+	select ARCH_TRACK_EXEC_LIMIT if X86_32
+	help
+	  This implementation is based on the paging feature of the CPU.
+	  On i386 without hardware non-executable bit support there is a
+	  variable but usually low performance impact, however on Intel's
+	  P4 core based CPUs it is very high so you should not enable this
+	  for kernels meant to be used on such CPUs.
+
+	  On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
+	  with hardware non-executable bit support there is no performance
+	  impact, on ppc the impact is negligible.
+
+	  Note that several architectures require various emulations due to
+	  badly designed userland ABIs, this will cause a performance impact
+	  but will disappear as soon as userland is fixed. For example, ppc
+	  userland MUST have been built with secure-plt by a recent toolchain.
+
+config PAX_SEGMEXEC
+	bool "Segmentation based non-executable pages"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_NOEXEC && X86_32
+	help
+	  This implementation is based on the segmentation feature of the
+	  CPU and has a very small performance impact, however applications
+	  will be limited to a 1.5 GB address space instead of the normal
+	  3 GB.
+
+config PAX_EMUTRAMP
+	bool "Emulate trampolines"
+	default y if PARISC || GRKERNSEC_CONFIG_AUTO
+	depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
+	help
+	  There are some programs and libraries that for one reason or
+	  another attempt to execute special small code snippets from
+	  non-executable memory pages.  Most notable examples are the
+	  signal handler return code generated by the kernel itself and
+	  the GCC trampolines.
+
+	  If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
+	  such programs will no longer work under your kernel.
+
+	  As a remedy you can say Y here and use the 'chpax' or 'paxctl'
+	  utilities to enable trampoline emulation for the affected programs
+	  yet still have the protection provided by the non-executable pages.
+
+	  On parisc you MUST enable this option and EMUSIGRT as well, otherwise
+	  your system will not even boot.
+
+	  Alternatively you can say N here and use the 'chpax' or 'paxctl'
+	  utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
+	  for the affected files.
+
+	  NOTE: enabling this feature *may* open up a loophole in the
+	  protection provided by non-executable pages that an attacker
+	  could abuse.  Therefore the best solution is to not have any
+	  files on your system that would require this option.  This can
+	  be achieved by not using libc5 (which relies on the kernel
+	  signal handler return code) and not using or rewriting programs
+	  that make use of the nested function implementation of GCC.
+	  Skilled users can just fix GCC itself so that it implements
+	  nested function calls in a way that does not interfere with PaX.
+
+config PAX_EMUSIGRT
+	bool "Automatically emulate sigreturn trampolines"
+	depends on PAX_EMUTRAMP && PARISC
+	default y
+	help
+	  Enabling this option will have the kernel automatically detect
+	  and emulate signal return trampolines executing on the stack
+	  that would otherwise lead to task termination.
+
+	  This solution is intended as a temporary one for users with
+	  legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
+	  Modula-3 runtime, etc) or executables linked to such, basically
+	  everything that does not specify its own SA_RESTORER function in
+	  normal executable memory like glibc 2.1+ does.
+
+	  On parisc you MUST enable this option, otherwise your system will
+	  not even boot.
+
+	  NOTE: this feature cannot be disabled on a per executable basis
+	  and since it *does* open up a loophole in the protection provided
+	  by non-executable pages, the best solution is to not have any
+	  files on your system that would require this option.
+
+config PAX_MPROTECT
+	bool "Restrict mprotect()"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
+	help
+	  Enabling this option will prevent programs from
+	   - changing the executable status of memory pages that were
+	     not originally created as executable,
+	   - making read-only executable pages writable again,
+	   - creating executable pages from anonymous memory,
+	   - making read-only-after-relocations (RELRO) data pages writable again.
+
+	  You should say Y here to complete the protection provided by
+	  the enforcement of non-executable pages.
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+	  this feature on a per file basis.
+
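
To make the PAX_MPROTECT description above concrete: a mapping that was not created executable can no longer be turned executable afterwards. A small userspace probe along these lines (illustrative and outside the patch proper; on a non-PaX kernel the mprotect() call simply succeeds):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* Anonymous RW mapping, i.e. not originally created executable. */
	void *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Under PAX_MPROTECT this is one of the forbidden transitions
	 * ("changing the executable status of memory pages that were not
	 * originally created as executable") and fails with EACCES. */
	if (mprotect(p, pagesz, PROT_READ | PROT_EXEC) == -1)
		printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));
	else
		printf("mprotect(PROT_EXEC) allowed\n");

	munmap(p, pagesz);
	return 0;
}
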
+config PAX_MPROTECT_COMPAT
+	bool "Use legacy/compat protection demoting (read help)"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
+	depends on PAX_MPROTECT
+	help
+	  The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
+	  by sending the proper error code to the application.  For some broken 
+	  userland, this can cause problems with Python or other applications.  The
+	  current implementation however allows for applications like clamav to
+	  detect if JIT compilation/execution is allowed and to fall back gracefully
+	  to an interpreter-based mode if it does not.  While we encourage everyone
+	  to use the current implementation as-is and push upstream to fix broken
+	  userland (note that the RWX logging option can assist with this), in some
+	  environments this may not be possible.  Having to disable MPROTECT
+	  completely on certain binaries reduces the security benefit of PaX,
+	  so this option is provided for those environments to revert to the old
+	  behavior.
+	  
+config PAX_ELFRELOCS
+	bool "Allow ELF text relocations (read help)"
+	depends on PAX_MPROTECT
+	default n
+	help
+	  Non-executable pages and mprotect() restrictions are effective
+	  in preventing the introduction of new executable code into an
+	  attacked task's address space.  There remain only two venues
+	  for this kind of attack: if the attacker can execute already
+	  existing code in the attacked task then he can either have it
+	  create and mmap() a file containing his code or have it mmap()
+	  an already existing ELF library that does not have position
+	  independent code in it and use mprotect() on it to make it
+	  writable and copy his code there.  While protecting against
+	  the former approach is beyond PaX, the latter can be prevented
+	  by having only PIC ELF libraries on one's system (which do not
+	  need to relocate their code).  If you are sure this is your case,
+	  as is the case with all modern Linux distributions, then leave
+	  this option disabled.  You should say 'n' here.
+
+config PAX_ETEXECRELOCS
+	bool "Allow ELF ET_EXEC text relocations"
+	depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
+	select PAX_ELFRELOCS
+	default y
+	help
+	  On some architectures there are incorrectly created applications
+	  that require text relocations and would not work without enabling
+	  this option.  If you are an alpha, ia64 or parisc user, you should
+	  enable this option and disable it once you have made sure that
+	  none of your applications need it.
+
+config PAX_EMUPLT
+	bool "Automatically emulate ELF PLT"
+	depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
+	default y
+	help
+	  Enabling this option will have the kernel automatically detect
+	  and emulate the Procedure Linkage Table entries in ELF files.
+	  On some architectures such entries are in writable memory, and
+	  become non-executable leading to task termination.  Therefore
+	  it is mandatory that you enable this option on alpha, parisc,
+	  sparc and sparc64, otherwise your system would not even boot.
+
+	  NOTE: this feature *does* open up a loophole in the protection
+	  provided by the non-executable pages, therefore the proper
+	  solution is to modify the toolchain to produce a PLT that does
+	  not need to be writable.
+
+config PAX_DLRESOLVE
+	bool 'Emulate old glibc resolver stub'
+	depends on PAX_EMUPLT && SPARC
+	default n
+	help
+	  This option is needed if userland has an old glibc (before 2.4)
+	  that puts a 'save' instruction into the runtime generated resolver
+	  stub that needs special emulation.
+
+config PAX_KERNEXEC
+	bool "Enforce non-executable kernel pages"
+	default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
+	depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
+	select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
+	select PAX_KERNEXEC_PLUGIN if X86_64
+	help
+	  This is the kernel land equivalent of PAGEEXEC and MPROTECT,
+	  that is, enabling this option will make it harder to inject
+	  and execute 'foreign' code in kernel memory itself.
+
+choice
+	prompt "Return Address Instrumentation Method"
+	default PAX_KERNEXEC_PLUGIN_METHOD_BTS
+	depends on PAX_KERNEXEC_PLUGIN
+	help
+	  Select the method used to instrument function pointer dereferences.
+	  Note that binary modules cannot be instrumented by this approach.
+
+	  Note that the implementation requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.
+
+	config PAX_KERNEXEC_PLUGIN_METHOD_BTS
+		bool "bts"
+		help
+		  This method is compatible with binary only modules but has
+		  a higher runtime overhead.
+
+	config PAX_KERNEXEC_PLUGIN_METHOD_OR
+		bool "or"
+		depends on !PARAVIRT
+		help
+		  This method is incompatible with binary only modules but has
+		  a lower runtime overhead.
+endchoice
+
+config PAX_KERNEXEC_PLUGIN_METHOD
+	string
+	default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
+	default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
+	default ""
+
+config PAX_KERNEXEC_MODULE_TEXT
+	int "Minimum amount of memory reserved for module code"
+	default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
+	default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
+	depends on PAX_KERNEXEC && X86_32
+	help
+	  Due to implementation details the kernel must reserve a fixed
+	  amount of memory for runtime allocated code (such as modules)
+	  at compile time that cannot be changed at runtime.  Here you
+	  can specify the minimum amount in MB that will be reserved.
+	  Due to the same implementation details this size will always
+	  be rounded up to the next 2/4 MB boundary (depends on PAE) so
+	  the actually available memory for runtime allocated code will
+	  usually be more than this minimum.
+
+	  The default 4 MB should be enough for most users but if you have
+	  an excessive number of modules (e.g., most distribution configs
+	  compile many drivers as modules) or use huge modules such as
+	  nvidia's kernel driver, you will need to adjust this amount.
+	  A good rule of thumb is to look at your currently loaded kernel
+	  modules and add up their sizes.
+
+endmenu
+
+menu "Address Space Layout Randomization"
+	depends on PAX
+
+config PAX_ASLR
+	bool "Address Space Layout Randomization"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  Many if not most exploit techniques rely on the knowledge of
+	  certain addresses in the attacked program.  The following options
+	  will allow the kernel to apply a certain amount of randomization
+	  to specific parts of the program thereby forcing an attacker to
+	  guess them in most cases.  Any failed guess will most likely crash
+	  the attacked program which allows the kernel to detect such attempts
+	  and react on them.  PaX itself provides no reaction mechanisms,
+	  instead it is strongly encouraged that you make use of Nergal's
+	  segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
+	  (http://www.grsecurity.net/) built-in crash detection features or
+	  develop one yourself.
+
+	  By saying Y here you can choose to randomize the following areas:
+	   - top of the task's kernel stack
+	   - top of the task's userland stack
+	   - base address for mmap() requests that do not specify one
+	     (this includes all libraries)
+	   - base address of the main executable
+
+	  It is strongly recommended to say Y here as address space layout
+	  randomization has negligible impact on performance yet it provides
+	  a very effective protection.
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
+	  this feature on a per file basis.
+
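
The effect of the PAX_ASLR option described above is easy to observe from userspace by printing a few addresses and comparing them across runs (an illustrative probe, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

static int some_function(void) { return 42; }

int main(void)
{
	int on_stack = some_function();
	void *on_heap = malloc(16);

	/* With ASLR/PAX_ASLR active these values change from run to run:
	 * the stack top, the mmap/heap base and (for PIE binaries) the
	 * executable base are all randomized.  Run the binary a few times
	 * and compare the output. */
	printf("stack : %p\n", (void *)&on_stack);
	printf("heap  : %p\n", on_heap);
	printf("text  : %p\n", (void *)&some_function);

	free(on_heap);
	return 0;
}
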
+config PAX_RANDKSTACK
+	bool "Randomize kernel stack base"
+	default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
+	depends on X86_TSC && X86
+	help
+	  By saying Y here the kernel will randomize every task's kernel
+	  stack on every system call.  This will not only force an attacker
+	  to guess it but also prevent him from making use of possible
+	  leaked information about it.
+
+	  Since the kernel stack is a rather scarce resource, randomization
+	  may cause unexpected stack overflows, therefore you should very
+	  carefully test your system.  Note that once enabled in the kernel
+	  configuration, this feature cannot be disabled on a per file basis.
+
+config PAX_RANDUSTACK
+	bool "Randomize user stack base"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_ASLR
+	help
+	  By saying Y here the kernel will randomize every task's userland
+	  stack.  The randomization is done in two steps where the second
+	  one may apply a big amount of shift to the top of the stack and
+	  cause problems for programs that want to use lots of memory (more
+	  than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
+	  For this reason the second step can be controlled by 'chpax' or
+	  'paxctl' on a per file basis.
+
+config PAX_RANDMMAP
+	bool "Randomize mmap() base"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on PAX_ASLR
+	help
+	  By saying Y here the kernel will use a randomized base address for
+	  mmap() requests that do not specify one themselves.  As a result
+	  all dynamically loaded libraries will appear at random addresses
+	  and therefore be harder to exploit by a technique where an attacker
+	  attempts to execute library code for his purposes (e.g. spawn a
+	  shell from an exploited program that is running at an elevated
+	  privilege level).
+
+	  Furthermore, if a program is relinked as a dynamic ELF file, its
+	  base address will be randomized as well, completing the full
+	  randomization of the address space layout.  Attacking such programs
+	  becomes a guess game.  You can find an example of doing this at
+	  http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
+	  http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
+
+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
+	  feature on a per file basis.
+
+endmenu
+
+menu "Miscellaneous hardening features"
+
+config PAX_MEMORY_SANITIZE
+	bool "Sanitize all freed memory"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+	help
+	  By saying Y here the kernel will erase memory pages and slab objects
+	  as soon as they are freed.  This in turn reduces the lifetime of data
+	  stored in them, making it less likely that sensitive information such
+	  as passwords, cryptographic secrets, etc stay in memory for too long.
+
+	  This is especially useful for programs whose runtime is short, long
+	  lived processes and the kernel itself benefit from this as long as
+	  they ensure timely freeing of memory that may hold sensitive
+	  information.
+
+	  A nice side effect of the sanitization of slab objects is the
+	  reduction of possible info leaks caused by padding bytes within the
+	  leaky structures.  Use-after-free bugs for structures containing
+	  pointers can also be detected as dereferencing the sanitized pointer
+	  will generate an access violation.
+
+	  The tradeoff is performance impact, on a single CPU system kernel
+	  compilation sees a 3% slowdown, other systems and workloads may vary
+	  and you are advised to test this feature on your expected workload
+	  before deploying it.
+
+	  To reduce the performance penalty by sanitizing pages only, albeit
+	  limiting the effectiveness of this feature at the same time, slab
+	  sanitization can be disabled with the kernel commandline parameter
+	  "pax_sanitize_slab=0".
+
+	  Note that this feature does not protect data stored in live pages,
+	  e.g., process memory swapped to disk may stay there for a long time.
+
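A rough user-space analogue of sanitize-on-free (the kernel feature operates on pages and slab objects; this sketch only mirrors the idea, and sanitize_free() is a made-up helper):

#include <stdlib.h>
#include <string.h>

/* Zero a buffer before handing it back to the allocator so stale
 * secrets do not linger in freed memory.  A hardened version would
 * use a memset the compiler cannot optimize away. */
static void sanitize_free(void *p, size_t len)
{
	if (!p)
		return;
	memset(p, 0, len);
	free(p);
}

int main(void)
{
	char *secret = malloc(64);

	if (!secret)
		return 1;
	strcpy(secret, "hunter2");
	sanitize_free(secret, 64);
	return 0;
}
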
+config PAX_MEMORY_STACKLEAK
+	bool "Sanitize kernel stack"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+	depends on X86
+	help
+	  By saying Y here the kernel will erase the kernel stack before it
+	  returns from a system call.  This in turn reduces the information
+	  that a kernel stack leak bug can reveal.
+
+	  Note that such a bug can still leak information that was put on
+	  the stack by the current system call (the one eventually triggering
+	  the bug) but traces of earlier system calls on the kernel stack
+	  cannot leak anymore.
+
+	  The tradeoff is performance impact: on a single CPU system kernel
+	  compilation sees a 1% slowdown; other systems and workloads may vary,
+	  and you are advised to test this feature on your expected workload
+	  before deploying it.
+
+	  Note that the full feature requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.  Using
+	  older gcc versions means that functions with large enough stack
+	  frames may leave uninitialized memory behind that may be exposed
+	  to a later syscall leaking the stack.
+
+config PAX_MEMORY_STRUCTLEAK
+	bool "Forcibly initialize local variables copied to userland"
+	default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
+	help
+	  By saying Y here the kernel will zero initialize some local
+	  variables that are going to be copied to userland.  This in
+	  turn prevents unintended information leakage from the kernel
+	  stack should later code forget to explicitly set all parts of
+	  the copied variable.
+
+	  The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
+	  at a much smaller coverage.
+
+	  Note that the implementation requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.
+
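The class of leak this targets is easy to reproduce in user space; the struct and function names below are invented for illustration only:

#include <stdio.h>
#include <string.h>

struct reply {
	int	status;
	char	note[12];	/* never written by the leaky handler */
};

static void fill_reply_leaky(struct reply *r)
{
	r->status = 0;		/* note[] keeps whatever was on the stack */
}

static void fill_reply_safe(struct reply *r)
{
	memset(r, 0, sizeof(*r));	/* what forced initialization amounts to */
	r->status = 0;
}

int main(void)
{
	struct reply r;

	fill_reply_leaky(&r);
	/* copying r out at this point would also copy stale stack bytes in r.note */
	fill_reply_safe(&r);
	printf("status=%d\n", r.status);
	return 0;
}
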
+config PAX_MEMORY_UDEREF
+	bool "Prevent invalid userland pointer dereference"
+	default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
+	depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
+	select PAX_PER_CPU_PGD if X86_64
+	help
+	  By saying Y here the kernel will be prevented from dereferencing
+	  userland pointers in contexts where the kernel expects only kernel
+	  pointers.  This is both a useful runtime debugging feature and a
+	  security measure that prevents exploiting a class of kernel bugs.
+
+	  The tradeoff is that some virtualization solutions may experience
+	  a huge slowdown and therefore you should not enable this feature
+	  for kernels meant to run in such environments.  Whether a given VM
+	  solution is affected or not is best determined by simply trying it
+	  out, the performance impact will be obvious right on boot as this
+	  mechanism engages from very early on.  A good rule of thumb is that
+	  VMs running on CPUs without hardware virtualization support (i.e.,
+	  the majority of IA-32 CPUs) will likely experience the slowdown.
+
+	  On X86_64 the kernel will make use of PCID support when available
+	  (Intel's Westmere, Sandy Bridge, etc) for better security (default)
+	  or performance impact.  Pass pax_weakuderef on the kernel command
+	  line to choose the latter.
+
+config PAX_REFCOUNT
+	bool "Prevent various kernel object reference counter overflows"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86)
+	help
+	  By saying Y here the kernel will detect and prevent overflowing
+	  various (but not all) kinds of object reference counters.  Such
+	  overflows can normally occur due to bugs only and are often, if
+	  not always, exploitable.
+
+	  The tradeoff is that data structures protected by an overflowed
+	  refcount will never be freed and therefore will leak memory.  Note
+	  that this leak also happens even without this protection but in
+	  that case the overflow can eventually trigger the freeing of the
+	  data structure while it is still being used elsewhere, resulting
+	  in the exploitable situation that this feature prevents.
+
+	  Since this has a negligible performance impact, you should enable
+	  this feature.
+
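The bug class is simple to demonstrate outside the kernel; an 8-bit counter is used here only so the wrap is quick to reach:

#include <stdio.h>

int main(void)
{
	unsigned char refcount = 1;	/* one legitimate reference exists */
	int i;

	for (i = 0; i < 255; i++)
		refcount++;		/* 255 leaked "gets" wrap it back to 0 */

	printf("refcount after wrap: %u\n", refcount);
	if (refcount == 0)
		printf("the object now looks unreferenced and would be freed while still in use\n");
	return 0;
}
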
+config PAX_CONSTIFY_PLUGIN
+	bool "Automatically constify eligible structures"
+	default y
+	depends on !UML && PAX_KERNEXEC
+	help
+	  By saying Y here the compiler will automatically constify a class
+	  of types that contain only function pointers.  This reduces the
+	  kernel's attack surface and also produces a better memory layout.
+
+	  Note that the implementation requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.
+ 
+	  Note that if some code really has to modify constified variables
+	  then the source code will have to be patched to allow it.  Examples
+	  can be found in PaX itself (the no_const attribute) and for some
+	  out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
+
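A stand-alone C illustration of what constification buys (the struct here is invented; in the patch it is the various security_operations and similar ops tables that become read-only):

#include <stdio.h>

struct file_ops {
	int (*open)(void);
	int (*close)(void);
};

static int do_open(void)  { return 0; }
static int do_close(void) { return 0; }

/* 'static const' typically places the table in a read-only section,
 * so its function pointers cannot be overwritten at runtime. */
static const struct file_ops fops = {
	.open  = do_open,
	.close = do_close,
};

int main(void)
{
	printf("open() -> %d\n", fops.open());
	/* ((struct file_ops *)&fops)->open = NULL;   would normally fault */
	return 0;
}
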
+config PAX_USERCOPY
+	bool "Harden heap object copies between kernel and userland"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on ARM || IA64 || PPC || SPARC || X86
+	depends on GRKERNSEC && (SLAB || SLUB || SLOB)
+	select PAX_USERCOPY_SLABS
+	help
+	  By saying Y here the kernel will enforce the size of heap objects
+	  when they are copied in either direction between the kernel and
+	  userland, even if only a part of the heap object is copied.
+
+	  Specifically, this checking prevents information leaking from the
+	  kernel heap during kernel to userland copies (if the kernel heap
+	  object is otherwise fully initialized) and prevents kernel heap
+	  overflows during userland to kernel copies.
+
+	  Note that the current implementation provides the strictest bounds
+	  checks for the SLUB allocator.
+
+	  Enabling this option also enables per-slab cache protection against
+	  data in a given cache being copied into/out of via userland
+	  accessors.  Though the whitelist of regions will be reduced over
+	  time, it notably protects important data structures like task structs.
+
+	  If frame pointers are enabled on x86, this option will also restrict
+	  copies into and out of the kernel stack to local variables within a
+	  single frame.
+
+	  Since this has a negligible performance impact, you should enable
+	  this feature.
+
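A minimal user-space sketch of the check: refuse a copy whose length exceeds the size of the heap object it starts in. Object sizes are tracked by hand here (the kernel derives them from slab metadata), and checked_copy() is a made-up name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int checked_copy(void *dst, const void *obj, size_t obj_size, size_t len)
{
	if (len > obj_size) {
		fprintf(stderr, "refusing %zu-byte copy from %zu-byte object\n",
			len, obj_size);
		return -1;
	}
	memcpy(dst, obj, len);
	return 0;
}

int main(void)
{
	char dst[256];
	char *obj = malloc(64);

	if (!obj)
		return 1;
	memset(obj, 'A', 64);
	checked_copy(dst, obj, 64, 32);		/* ok: stays within the object */
	checked_copy(dst, obj, 64, 128);	/* rejected: would over-read */
	free(obj);
	return 0;
}
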
+config PAX_USERCOPY_DEBUG
+	bool
+	depends on X86 && PAX_USERCOPY
+	default n
+
+config PAX_SIZE_OVERFLOW
+	bool "Prevent various integer overflows in function size parameters"
+	default y if GRKERNSEC_CONFIG_AUTO
+	depends on X86
+	help
+	  By saying Y here the kernel recomputes expressions of function
+	  arguments marked by a size_overflow attribute with double integer
+	  precision (DImode/TImode for 32/64 bit integer types).
+
+	  The recomputed argument is checked against TYPE_MAX and an event
+	  is logged on overflow and the triggering process is killed.
+
+	  Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
+
+	  Note that the implementation requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.
+
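The recomputation can be sketched in plain C: redo a 32-bit size expression in 64 bits and reject it if the wide result no longer fits, instead of letting a truncated value reach an allocator or copy. checked_mul_u32() is illustrative, not part of the plugin:

#include <stdio.h>
#include <stdint.h>

static int checked_mul_u32(uint32_t a, uint32_t b, uint32_t *res)
{
	uint64_t wide = (uint64_t)a * b;	/* double-precision recompute */

	if (wide > UINT32_MAX)
		return -1;			/* would have overflowed */
	*res = (uint32_t)wide;
	return 0;
}

int main(void)
{
	uint32_t n;

	if (checked_mul_u32(0x10000, 0x10000, &n))
		printf("overflow caught: 0x10000 * 0x10000 does not fit in 32 bits\n");
	if (!checked_mul_u32(1000, 1000, &n))
		printf("1000 * 1000 = %u\n", n);
	return 0;
}
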
+config PAX_LATENT_ENTROPY
+	bool "Generate some entropy during boot and runtime"
+	default y if GRKERNSEC_CONFIG_AUTO
+	help
+	  By saying Y here the kernel will instrument some kernel code to
+	  extract some entropy from both original and artificially created
+	  program state.  This will help especially embedded systems where
+	  there is little 'natural' source of entropy normally.  The cost
+	  is some slowdown of the boot process and fork and irq processing.
+
+	  When pax_extra_latent_entropy is passed on the kernel command line,
+	  entropy will be extracted from up to the first 4GB of RAM while the
+	  runtime memory allocator is being initialized.  This costs even more
+	  slowdown of the boot process.
+
+	  Note that the implementation requires a gcc with plugin support,
+	  i.e., gcc 4.5 or newer.  You may need to install the supporting
+	  headers explicitly in addition to the normal gcc package.
+
+	  Note that entropy extracted this way is not cryptographically
+	  secure!
+
+endmenu
+
+endmenu
+
+source grsecurity/Kconfig
+
+endmenu
+
+endmenu
+
 source security/keys/Kconfig
 
 config SECURITY_DMESG_RESTRICT
@@ -103,7 +1057,7 @@ config INTEL_TXT
 config LSM_MMAP_MIN_ADDR
 	int "Low address space for LSM to protect from user allocation"
 	depends on SECURITY && SECURITY_SELINUX
-	default 32768 if ARM
+	default 32768 if ALPHA || ARM || PARISC || SPARC32
 	default 65536
 	help
 	  This is the portion of low virtual memory which should be protected
diff -ruNp linux-3.13.11/security/apparmor/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/apparmor/file.c
--- linux-3.13.11/security/apparmor/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/apparmor/file.c	2014-07-09 12:00:16.000000000 +0200
@@ -348,8 +348,8 @@ static inline bool xindex_is_subset(u32
 int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
 		 struct path *new_dir, struct dentry *new_dentry)
 {
-	struct path link = { new_dir->mnt, new_dentry };
-	struct path target = { new_dir->mnt, old_dentry };
+	struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
+	struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
 	struct path_cond cond = {
 		old_dentry->d_inode->i_uid,
 		old_dentry->d_inode->i_mode
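The struct path conversions in this and the following apparmor/tomoyo files replace positional initializers with designated ones, presumably so the initializers stay correct even if a plugin reorders structure members; they are also clearer to read. A stand-alone illustration with an invented struct:

#include <stdio.h>

struct point {
	int x;
	int y;
};

int main(void)
{
	struct point a = { 1, 2 };		/* depends on member order */
	struct point b = { .y = 2, .x = 1 };	/* order-independent */

	printf("%d,%d %d,%d\n", a.x, a.y, b.x, b.y);
	return 0;
}
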
diff -ruNp linux-3.13.11/security/apparmor/lsm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/apparmor/lsm.c
--- linux-3.13.11/security/apparmor/lsm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/apparmor/lsm.c	2014-07-09 12:00:16.000000000 +0200
@@ -186,7 +186,7 @@ static int common_perm_dir_dentry(int op
 				  struct dentry *dentry, u32 mask,
 				  struct path_cond *cond)
 {
-	struct path path = { dir->mnt, dentry };
+	struct path path = { .mnt = dir->mnt, .dentry = dentry };
 
 	return common_perm(op, &path, mask, cond);
 }
@@ -203,7 +203,7 @@ static int common_perm_dir_dentry(int op
 static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
 				  struct dentry *dentry, u32 mask)
 {
-	struct path path = { mnt, dentry };
+	struct path path = { .mnt = mnt, .dentry = dentry };
 	struct path_cond cond = { dentry->d_inode->i_uid,
 				  dentry->d_inode->i_mode
 	};
@@ -325,8 +325,8 @@ static int apparmor_path_rename(struct p
 
 	profile = aa_current_profile();
 	if (!unconfined(profile)) {
-		struct path old_path = { old_dir->mnt, old_dentry };
-		struct path new_path = { new_dir->mnt, new_dentry };
+		struct path old_path = { .mnt = old_dir->mnt, .dentry = old_dentry };
+		struct path new_path = { .mnt = new_dir->mnt, .dentry = new_dentry };
 		struct path_cond cond = { old_dentry->d_inode->i_uid,
 					  old_dentry->d_inode->i_mode
 		};
@@ -615,7 +615,7 @@ static int apparmor_task_setrlimit(struc
 	return error;
 }
 
-static struct security_operations apparmor_ops = {
+static struct security_operations apparmor_ops __read_only = {
 	.name =				"apparmor",
 
 	.ptrace_access_check =		apparmor_ptrace_access_check,
diff -ruNp linux-3.13.11/security/commoncap.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/commoncap.c
--- linux-3.13.11/security/commoncap.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/commoncap.c	2014-07-09 12:00:16.000000000 +0200
@@ -76,6 +76,7 @@ int cap_netlink_send(struct sock *sk, st
 int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
 		int cap, int audit)
 {
+	struct vx_info *vxi = current_vx_info(); /* FIXME: get vxi from cred? */
 	struct user_namespace *ns = targ_ns;
 
 	/* See if cred has the capability in the target user namespace
@@ -84,8 +85,12 @@ int cap_capable(const struct cred *cred,
 	 */
 	for (;;) {
 		/* Do we have the necessary capabilities? */
-		if (ns == cred->user_ns)
-			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+		if (ns == cred->user_ns) {
+			if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
+			    cap_raised(cred->cap_effective, cap))
+				return 0;
+			return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
+		}
 
 		/* Have we tried all of the parent namespaces? */
 		if (ns == &init_user_ns)
@@ -424,6 +429,32 @@ int get_vfs_caps_from_disk(const struct
 	return 0;
 }
 
+/* returns:
+	1 for suid privilege
+	2 for sgid privilege
+	3 for fscap privilege
+*/
+int is_privileged_binary(const struct dentry *dentry)
+{
+	struct cpu_vfs_cap_data capdata;
+	struct inode *inode = dentry->d_inode;
+
+	if (!inode || S_ISDIR(inode->i_mode))
+		return 0;
+
+	if (inode->i_mode & S_ISUID)
+		return 1;
+	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
+		return 2;
+
+	if (!get_vfs_caps_from_disk(dentry, &capdata)) {
+		if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
+			return 3;
+	}
+
+	return 0;
+}
+
 /*
  * Attempt to get the on-exec apply capability sets for an executable file from
  * its xattrs and, if present, apply them to the proposed credentials being
@@ -592,6 +623,9 @@ int cap_bprm_secureexec(struct linux_bin
 	const struct cred *cred = current_cred();
 	kuid_t root_uid = make_kuid(cred->user_ns, 0);
 
+	if (gr_acl_enable_at_secure())
+		return 1;
+
 	if (!uid_eq(cred->uid, root_uid)) {
 		if (bprm->cap_effective)
 			return 1;
@@ -628,7 +662,7 @@ int cap_inode_setxattr(struct dentry *de
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
@@ -654,7 +688,7 @@ int cap_inode_removexattr(struct dentry
 
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
-	    !capable(CAP_SYS_ADMIN))
+		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
 		return -EPERM;
 	return 0;
 }
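The mode-bit part of the new is_privileged_binary() helper above has a direct user-space counterpart, shown here for reference only (the fscap case would additionally need the security.capability xattr and is omitted):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	if (S_ISDIR(st.st_mode))
		printf("directory: not considered\n");
	else if (st.st_mode & S_ISUID)
		printf("suid privilege\n");
	else if ((st.st_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		printf("sgid privilege\n");
	else
		printf("not privileged by mode bits\n");
	return 0;
}
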
diff -ruNp linux-3.13.11/security/integrity/ima/ima.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima.h
--- linux-3.13.11/security/integrity/ima/ima.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima.h	2014-07-09 12:00:16.000000000 +0200
@@ -118,8 +118,8 @@ int ima_init_template(void);
 extern spinlock_t ima_queue_lock;
 
 struct ima_h_table {
-	atomic_long_t len;	/* number of stored measurements in the list */
-	atomic_long_t violations;
+	atomic_long_unchecked_t len;	/* number of stored measurements in the list */
+	atomic_long_unchecked_t violations;
 	struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
 };
 extern struct ima_h_table ima_htable;
diff -ruNp linux-3.13.11/security/integrity/ima/ima_api.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_api.c
--- linux-3.13.11/security/integrity/ima/ima_api.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_api.c	2014-07-09 12:00:16.000000000 +0200
@@ -137,7 +137,7 @@ void ima_add_violation(struct file *file
 	int result;
 
 	/* can overflow, only indicator */
-	atomic_long_inc(&ima_htable.violations);
+	atomic_long_inc_unchecked(&ima_htable.violations);
 
 	result = ima_alloc_init_template(NULL, file, filename,
 					 NULL, 0, &entry);
diff -ruNp linux-3.13.11/security/integrity/ima/ima_fs.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_fs.c
--- linux-3.13.11/security/integrity/ima/ima_fs.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_fs.c	2014-07-09 12:00:16.000000000 +0200
@@ -28,12 +28,12 @@
 static int valid_policy = 1;
 #define TMPBUFLEN 12
 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
-				     loff_t *ppos, atomic_long_t *val)
+				     loff_t *ppos, atomic_long_unchecked_t *val)
 {
 	char tmpbuf[TMPBUFLEN];
 	ssize_t len;
 
-	len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+	len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
 	return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
 }
 
diff -ruNp linux-3.13.11/security/integrity/ima/ima_queue.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_queue.c
--- linux-3.13.11/security/integrity/ima/ima_queue.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/integrity/ima/ima_queue.c	2014-07-09 12:00:16.000000000 +0200
@@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct i
 	INIT_LIST_HEAD(&qe->later);
 	list_add_tail_rcu(&qe->later, &ima_measurements);
 
-	atomic_long_inc(&ima_htable.len);
+	atomic_long_inc_unchecked(&ima_htable.len);
 	key = ima_hash_key(entry->digest);
 	hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
 	return 0;
diff -ruNp linux-3.13.11/security/keys/compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/compat.c
--- linux-3.13.11/security/keys/compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/compat.c	2014-07-09 12:00:16.000000000 +0200
@@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_ke
 	if (ret == 0)
 		goto no_payload_free;
 
-	ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+	ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
 err:
 	if (iov != iovstack)
 		kfree(iov);
diff -ruNp linux-3.13.11/security/keys/internal.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/internal.h
--- linux-3.13.11/security/keys/internal.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/internal.h	2014-07-09 12:00:16.000000000 +0200
@@ -253,7 +253,7 @@ extern long keyctl_instantiate_key_iov(k
 extern long keyctl_invalidate_key(key_serial_t);
 
 extern long keyctl_instantiate_key_common(key_serial_t,
-					  const struct iovec *,
+					  const struct iovec __user *,
 					  unsigned, size_t, key_serial_t);
 #ifdef CONFIG_PERSISTENT_KEYRINGS
 extern long keyctl_get_persistent(uid_t, key_serial_t);
diff -ruNp linux-3.13.11/security/keys/key.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/key.c
--- linux-3.13.11/security/keys/key.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/key.c	2014-07-09 12:00:16.000000000 +0200
@@ -285,7 +285,7 @@ struct key *key_alloc(struct key_type *t
 
 	atomic_set(&key->usage, 1);
 	init_rwsem(&key->sem);
-	lockdep_set_class(&key->sem, &type->lock_class);
+	lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
 	key->index_key.type = type;
 	key->user = user;
 	key->quotalen = quotalen;
@@ -1036,7 +1036,9 @@ int register_key_type(struct key_type *k
 	struct key_type *p;
 	int ret;
 
-	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+	pax_open_kernel();
+	memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
+	pax_close_kernel();
 
 	ret = -EEXIST;
 	down_write(&key_types_sem);
@@ -1048,7 +1050,7 @@ int register_key_type(struct key_type *k
 	}
 
 	/* store the type */
-	list_add(&ktype->link, &key_types_list);
+	pax_list_add((struct list_head *)&ktype->link, &key_types_list);
 
 	pr_notice("Key type %s registered\n", ktype->name);
 	ret = 0;
@@ -1070,7 +1072,7 @@ EXPORT_SYMBOL(register_key_type);
 void unregister_key_type(struct key_type *ktype)
 {
 	down_write(&key_types_sem);
-	list_del_init(&ktype->link);
+	pax_list_del_init((struct list_head *)&ktype->link);
 	downgrade_write(&key_types_sem);
 	key_gc_keytype(ktype);
 	pr_notice("Key type %s unregistered\n", ktype->name);
@@ -1088,10 +1090,10 @@ void __init key_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	/* add the special key types */
-	list_add_tail(&key_type_keyring.link, &key_types_list);
-	list_add_tail(&key_type_dead.link, &key_types_list);
-	list_add_tail(&key_type_user.link, &key_types_list);
-	list_add_tail(&key_type_logon.link, &key_types_list);
+	pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
+	pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
+	pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
+	pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
 
 	/* record the root user tracking */
 	rb_link_node(&root_key_user.node,
diff -ruNp linux-3.13.11/security/keys/keyctl.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/keyctl.c
--- linux-3.13.11/security/keys/keyctl.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/keyctl.c	2014-07-09 12:00:16.000000000 +0200
@@ -987,7 +987,7 @@ static int keyctl_change_reqkey_auth(str
 /*
  * Copy the iovec data from userspace
  */
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
+static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
 				 unsigned ioc)
 {
 	for (; ioc > 0; ioc--) {
@@ -1009,7 +1009,7 @@ static long copy_from_user_iovec(void *b
  * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key_common(key_serial_t id,
-				   const struct iovec *payload_iov,
+				   const struct iovec __user *payload_iov,
 				   unsigned ioc,
 				   size_t plen,
 				   key_serial_t ringid)
@@ -1104,7 +1104,7 @@ long keyctl_instantiate_key(key_serial_t
 			[0].iov_len  = plen
 		};
 
-		return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+		return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
 	}
 
 	return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
@@ -1137,7 +1137,7 @@ long keyctl_instantiate_key_iov(key_seri
 	if (ret == 0)
 		goto no_payload_free;
 
-	ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+	ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
 err:
 	if (iov != iovstack)
 		kfree(iov);
diff -ruNp linux-3.13.11/security/keys/keyring.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/keyring.c
--- linux-3.13.11/security/keys/keyring.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/keys/keyring.c	2014-07-09 12:00:16.000000000 +0200
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator
 
 	kenter("{%d}", key->serial);
 
-	BUG_ON(key != ctx->match_data);
+	/* We might get a keyring with matching index-key that is nonetheless a
+	 * different keyring. */
+	if (key != ctx->match_data)
+		return 0;
+
 	ctx->result = ERR_PTR(-EDEADLK);
 	return 1;
 }
diff -ruNp linux-3.13.11/security/min_addr.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/min_addr.c
--- linux-3.13.11/security/min_addr.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/min_addr.c	2014-07-09 12:00:16.000000000 +0200
@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
  */
 static void update_mmap_min_addr(void)
 {
+#ifndef SPARC
 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
 	if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
 		mmap_min_addr = dac_mmap_min_addr;
@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
 #else
 	mmap_min_addr = dac_mmap_min_addr;
 #endif
+#endif
 }
 
 /*
diff -ruNp linux-3.13.11/security/security.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/security.c
--- linux-3.13.11/security/security.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/security.c	2014-07-09 12:00:16.000000000 +0200
@@ -33,8 +33,8 @@
 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
 	CONFIG_DEFAULT_SECURITY;
 
-static struct security_operations *security_ops;
-static struct security_operations default_security_ops = {
+struct security_operations *security_ops __read_only;
+struct security_operations default_security_ops __read_only = {
 	.name	= "default",
 };
 
@@ -73,11 +73,6 @@ int __init security_init(void)
 	return 0;
 }
 
-void reset_security_ops(void)
-{
-	security_ops = &default_security_ops;
-}
-
 /* Save user chosen LSM */
 static int __init choose_lsm(char *str)
 {
diff -ruNp linux-3.13.11/security/selinux/avc.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/avc.c
--- linux-3.13.11/security/selinux/avc.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/avc.c	2014-07-09 12:00:16.000000000 +0200
@@ -59,7 +59,7 @@ struct avc_node {
 struct avc_cache {
 	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
 	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
-	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
+	atomic_unchecked_t	lru_hint;	/* LRU hint for reclaim scan */
 	atomic_t		active_nodes;
 	u32			latest_notif;	/* latest revocation notification */
 };
@@ -167,7 +167,7 @@ void __init avc_init(void)
 		spin_lock_init(&avc_cache.slots_lock[i]);
 	}
 	atomic_set(&avc_cache.active_nodes, 0);
-	atomic_set(&avc_cache.lru_hint, 0);
+	atomic_set_unchecked(&avc_cache.lru_hint, 0);
 
 	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
 					     0, SLAB_PANIC, NULL);
@@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void)
 	spinlock_t *lock;
 
 	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
-		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+		hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
 		head = &avc_cache.slots[hvalue];
 		lock = &avc_cache.slots_lock[hvalue];
 
diff -ruNp linux-3.13.11/security/selinux/hooks.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/hooks.c
--- linux-3.13.11/security/selinux/hooks.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/hooks.c	2014-07-09 12:00:16.000000000 +0200
@@ -68,7 +68,6 @@
 #include <linux/dccp.h>
 #include <linux/quota.h>
 #include <linux/un.h>		/* for Unix socket types */
-#include <net/af_unix.h>	/* for Unix socket types */
 #include <linux/parser.h>
 #include <linux/nfs_mount.h>
 #include <net/ipv6.h>
@@ -96,8 +95,6 @@
 #include "audit.h"
 #include "avc_ss.h"
 
-extern struct security_operations *security_ops;
-
 /* SECMARK reference count */
 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
 
@@ -5763,7 +5760,7 @@ static int selinux_key_getsecurity(struc
 
 #endif
 
-static struct security_operations selinux_ops = {
+static struct security_operations selinux_ops __read_only = {
 	.name =				"selinux",
 
 	.ptrace_access_check =		selinux_ptrace_access_check,
@@ -6116,6 +6113,9 @@ static void selinux_nf_ip_exit(void)
 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
 static int selinux_disabled;
 
+extern struct security_operations *security_ops;
+extern struct security_operations default_security_ops;
+
 int selinux_disable(void)
 {
 	if (ss_initialized) {
@@ -6133,7 +6133,9 @@ int selinux_disable(void)
 	selinux_disabled = 1;
 	selinux_enabled = 0;
 
-	reset_security_ops();
+	pax_open_kernel();
+	security_ops = &default_security_ops;
+	pax_close_kernel();
 
 	/* Try to destroy the avc node cache */
 	avc_disable();
diff -ruNp linux-3.13.11/security/selinux/include/xfrm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/include/xfrm.h
--- linux-3.13.11/security/selinux/include/xfrm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/selinux/include/xfrm.h	2014-07-09 12:00:16.000000000 +0200
@@ -45,7 +45,7 @@ static inline void selinux_xfrm_notify_p
 {
 	struct net *net;
 
-	atomic_inc(&flow_cache_genid);
+	atomic_inc_unchecked(&flow_cache_genid);
 	rtnl_lock();
 	for_each_net(net)
 		rt_genid_bump_all(net);
diff -ruNp linux-3.13.11/security/smack/smack_lsm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/smack/smack_lsm.c
--- linux-3.13.11/security/smack/smack_lsm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/smack/smack_lsm.c	2014-07-09 12:00:16.000000000 +0200
@@ -3731,7 +3731,7 @@ static int smack_inode_getsecctx(struct
 	return 0;
 }
 
-struct security_operations smack_ops = {
+struct security_operations smack_ops __read_only = {
 	.name =				"smack",
 
 	.ptrace_access_check =		smack_ptrace_access_check,
diff -ruNp linux-3.13.11/security/tomoyo/file.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/file.c
--- linux-3.13.11/security/tomoyo/file.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/file.c	2014-07-09 12:00:16.000000000 +0200
@@ -692,7 +692,7 @@ int tomoyo_path_number_perm(const u8 typ
 {
 	struct tomoyo_request_info r;
 	struct tomoyo_obj_info obj = {
-		.path1 = *path,
+		.path1 = { .mnt = path->mnt, .dentry = path->dentry },
 	};
 	int error = -ENOMEM;
 	struct tomoyo_path_info buf;
@@ -740,7 +740,7 @@ int tomoyo_check_open_permission(struct
 	struct tomoyo_path_info buf;
 	struct tomoyo_request_info r;
 	struct tomoyo_obj_info obj = {
-		.path1 = *path,
+		.path1 = { .mnt = path->mnt, .dentry = path->dentry },
 	};
 	int idx;
 
@@ -786,7 +786,7 @@ int tomoyo_path_perm(const u8 operation,
 {
 	struct tomoyo_request_info r;
 	struct tomoyo_obj_info obj = {
-		.path1 = *path,
+		.path1 = { .mnt = path->mnt, .dentry = path->dentry },
 	};
 	int error;
 	struct tomoyo_path_info buf;
@@ -843,7 +843,7 @@ int tomoyo_mkdev_perm(const u8 operation
 {
 	struct tomoyo_request_info r;
 	struct tomoyo_obj_info obj = {
-		.path1 = *path,
+		.path1 = { .mnt = path->mnt, .dentry = path->dentry },
 	};
 	int error = -ENOMEM;
 	struct tomoyo_path_info buf;
@@ -890,8 +890,8 @@ int tomoyo_path2_perm(const u8 operation
 	struct tomoyo_path_info buf2;
 	struct tomoyo_request_info r;
 	struct tomoyo_obj_info obj = {
-		.path1 = *path1,
-		.path2 = *path2,
+		.path1 = { .mnt = path1->mnt, .dentry = path1->dentry },
+		.path2 = { .mnt = path2->mnt, .dentry = path2->dentry }
 	};
 	int idx;
 
diff -ruNp linux-3.13.11/security/tomoyo/mount.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/mount.c
--- linux-3.13.11/security/tomoyo/mount.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/mount.c	2014-07-09 12:00:16.000000000 +0200
@@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoy
 		   type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
 		need_dev = -1; /* dev_name is a directory */
 	} else {
+		if (!capable(CAP_SYS_ADMIN)) {
+			error = -EPERM;
+			goto out;
+		}
 		fstype = get_fs_type(type);
 		if (!fstype) {
 			error = -ENODEV;
diff -ruNp linux-3.13.11/security/tomoyo/tomoyo.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/tomoyo.c
--- linux-3.13.11/security/tomoyo/tomoyo.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/tomoyo/tomoyo.c	2014-07-09 12:00:16.000000000 +0200
@@ -146,7 +146,7 @@ static int tomoyo_bprm_check_security(st
  */
 static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-	struct path path = { mnt, dentry };
+	struct path path = { .mnt = mnt, .dentry = dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
 }
 
@@ -172,7 +172,7 @@ static int tomoyo_path_truncate(struct p
  */
 static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
 {
-	struct path path = { parent->mnt, dentry };
+	struct path path = { .mnt = parent->mnt, .dentry = dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
 }
 
@@ -188,7 +188,7 @@ static int tomoyo_path_unlink(struct pat
 static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
 			     umode_t mode)
 {
-	struct path path = { parent->mnt, dentry };
+	struct path path = { .mnt = parent->mnt, .dentry = dentry };
 	return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
 				       mode & S_IALLUGO);
 }
@@ -203,7 +203,7 @@ static int tomoyo_path_mkdir(struct path
  */
 static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
 {
-	struct path path = { parent->mnt, dentry };
+	struct path path = { .mnt = parent->mnt, .dentry = dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
 }
 
@@ -219,7 +219,7 @@ static int tomoyo_path_rmdir(struct path
 static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
 			       const char *old_name)
 {
-	struct path path = { parent->mnt, dentry };
+	struct path path = { .mnt = parent->mnt, .dentry = dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
 }
 
@@ -236,7 +236,7 @@ static int tomoyo_path_symlink(struct pa
 static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
 			     umode_t mode, unsigned int dev)
 {
-	struct path path = { parent->mnt, dentry };
+	struct path path = { .mnt = parent->mnt, .dentry = dentry };
 	int type = TOMOYO_TYPE_CREATE;
 	const unsigned int perm = mode & S_IALLUGO;
 
@@ -275,8 +275,8 @@ static int tomoyo_path_mknod(struct path
 static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
 			    struct dentry *new_dentry)
 {
-	struct path path1 = { new_dir->mnt, old_dentry };
-	struct path path2 = { new_dir->mnt, new_dentry };
+	struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
+	struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
 	return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
 }
 
@@ -295,8 +295,8 @@ static int tomoyo_path_rename(struct pat
 			      struct path *new_parent,
 			      struct dentry *new_dentry)
 {
-	struct path path1 = { old_parent->mnt, old_dentry };
-	struct path path2 = { new_parent->mnt, new_dentry };
+	struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
+	struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
 	return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
 }
 
@@ -424,7 +424,7 @@ static int tomoyo_sb_mount(const char *d
  */
 static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
 {
-	struct path path = { mnt, mnt->mnt_root };
+	struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
 	return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
 }
 
@@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct
  * tomoyo_security_ops is a "struct security_operations" which is used for
  * registering TOMOYO.
  */
-static struct security_operations tomoyo_security_ops = {
+static struct security_operations tomoyo_security_ops __read_only = {
 	.name                = "tomoyo",
 	.cred_alloc_blank    = tomoyo_cred_alloc_blank,
 	.cred_prepare        = tomoyo_cred_prepare,
diff -ruNp linux-3.13.11/security/yama/Kconfig linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/yama/Kconfig
--- linux-3.13.11/security/yama/Kconfig	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/yama/Kconfig	2014-07-09 12:00:16.000000000 +0200
@@ -1,6 +1,6 @@
 config SECURITY_YAMA
 	bool "Yama support"
-	depends on SECURITY
+	depends on SECURITY && !GRKERNSEC
 	select SECURITYFS
 	select SECURITY_PATH
 	default n
diff -ruNp linux-3.13.11/security/yama/yama_lsm.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/yama/yama_lsm.c
--- linux-3.13.11/security/yama/yama_lsm.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/security/yama/yama_lsm.c	2014-07-09 12:00:16.000000000 +0200
@@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_stru
 }
 
 #ifndef CONFIG_SECURITY_YAMA_STACKED
-static struct security_operations yama_ops = {
+static struct security_operations yama_ops __read_only = {
 	.name =			"yama",
 
 	.ptrace_access_check =	yama_ptrace_access_check,
@@ -376,28 +376,24 @@ static struct security_operations yama_o
 #endif
 
 #ifdef CONFIG_SYSCTL
+static int zero __read_only;
+static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
+
 static int yama_dointvec_minmax(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int rc;
+	ctl_table_no_const yama_table;
 
 	if (write && !capable(CAP_SYS_PTRACE))
 		return -EPERM;
 
-	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (rc)
-		return rc;
-
+	yama_table = *table;
 	/* Lock the max value if it ever gets set. */
-	if (write && *(int *)table->data == *(int *)table->extra2)
-		table->extra1 = table->extra2;
-
-	return rc;
+	if (ptrace_scope == max_scope)
+		yama_table.extra1 = &max_scope;
+	return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
 }
 
-static int zero;
-static int max_scope = YAMA_SCOPE_NO_ATTACH;
-
 struct ctl_path yama_sysctl_path[] = {
 	{ .procname = "kernel", },
 	{ .procname = "yama", },
diff -ruNp linux-3.13.11/sound/aoa/codecs/onyx.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/aoa/codecs/onyx.c
--- linux-3.13.11/sound/aoa/codecs/onyx.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/aoa/codecs/onyx.c	2014-07-09 12:00:16.000000000 +0200
@@ -54,7 +54,7 @@ struct onyx {
 				spdif_locked:1,
 				analog_locked:1,
 				original_mute:2;
-	int			open_count;
+	local_t			open_count;
 	struct codec_info	*codec_info;
 
 	/* mutex serializes concurrent access to the device
@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
 	struct onyx *onyx = cii->codec_data;
 
 	mutex_lock(&onyx->mutex);
-	onyx->open_count++;
+	local_inc(&onyx->open_count);
 	mutex_unlock(&onyx->mutex);
 
 	return 0;
@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
 	struct onyx *onyx = cii->codec_data;
 
 	mutex_lock(&onyx->mutex);
-	onyx->open_count--;
-	if (!onyx->open_count)
+	if (local_dec_and_test(&onyx->open_count))
 		onyx->spdif_locked = onyx->analog_locked = 0;
 	mutex_unlock(&onyx->mutex);
 
diff -ruNp linux-3.13.11/sound/aoa/codecs/onyx.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/aoa/codecs/onyx.h
--- linux-3.13.11/sound/aoa/codecs/onyx.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/aoa/codecs/onyx.h	2014-07-09 12:00:16.000000000 +0200
@@ -11,6 +11,7 @@
 #include <linux/i2c.h>
 #include <asm/pmac_low_i2c.h>
 #include <asm/prom.h>
+#include <asm/local.h>
 
 /* PCM3052 register definitions */
 
diff -ruNp linux-3.13.11/sound/core/oss/pcm_oss.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/oss/pcm_oss.c
--- linux-3.13.11/sound/core/oss/pcm_oss.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/oss/pcm_oss.c	2014-07-09 12:00:16.000000000 +0200
@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(str
 		if (in_kernel) {
 			mm_segment_t fs;
 			fs = snd_enter_user();
-			ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
+			ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
 			snd_leave_user(fs);
 		} else {
-			ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
+			ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
 		}
 		if (ret != -EPIPE && ret != -ESTRPIPE)
 			break;
@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(stru
 		if (in_kernel) {
 			mm_segment_t fs;
 			fs = snd_enter_user();
-			ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
+			ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
 			snd_leave_user(fs);
 		} else {
-			ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
+			ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
 		}
 		if (ret == -EPIPE) {
 			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct
 		struct snd_pcm_plugin_channel *channels;
 		size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
 		if (!in_kernel) {
-			if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
+			if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
 				return -EFAULT;
 			buf = runtime->oss.buffer;
 		}
@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct
 			}
 		} else {
 			tmp = snd_pcm_oss_write2(substream,
-						 (const char __force *)buf,
+						 (const char __force_kernel *)buf,
 						 runtime->oss.period_bytes, 0);
 			if (tmp <= 0)
 				goto err;
@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	snd_pcm_sframes_t frames, frames1;
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
-	char __user *final_dst = (char __force __user *)buf;
+	char __user *final_dst = (char __force_user *)buf;
 	if (runtime->oss.plugin_first) {
 		struct snd_pcm_plugin_channel *channels;
 		size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct
 			xfer += tmp;
 			runtime->oss.buffer_used -= tmp;
 		} else {
-			tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
+			tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
 						runtime->oss.period_bytes, 0);
 			if (tmp <= 0)
 				goto err;
@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_p
 								   size1);
 					size1 /= runtime->channels; /* frames */
 					fs = snd_enter_user();
-					snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
+					snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
 					snd_leave_user(fs);
 				}
 			} else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
diff -ruNp linux-3.13.11/sound/core/pcm_compat.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/pcm_compat.c
--- linux-3.13.11/sound/core/pcm_compat.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/pcm_compat.c	2014-07-09 12:00:16.000000000 +0200
@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(st
 	int err;
 
 	fs = snd_enter_user();
-	err = snd_pcm_delay(substream, &delay);
+	err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
 	snd_leave_user(fs);
 	if (err < 0)
 		return err;
diff -ruNp linux-3.13.11/sound/core/pcm_native.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/pcm_native.c
--- linux-3.13.11/sound/core/pcm_native.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/pcm_native.c	2014-07-09 12:00:16.000000000 +0200
@@ -2811,11 +2811,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_
 	switch (substream->stream) {
 	case SNDRV_PCM_STREAM_PLAYBACK:
 		result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
-						 (void __user *)arg);
+						 (void __force_user *)arg);
 		break;
 	case SNDRV_PCM_STREAM_CAPTURE:
 		result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
-						(void __user *)arg);
+						(void __force_user *)arg);
 		break;
 	default:
 		result = -EINVAL;
diff -ruNp linux-3.13.11/sound/core/seq/oss/seq_oss.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/oss/seq_oss.c
--- linux-3.13.11/sound/core/seq/oss/seq_oss.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/oss/seq_oss.c	2014-07-09 12:00:16.000000000 +0200
@@ -75,8 +75,8 @@ static int __init alsa_seq_oss_init(void
 {
 	int rc;
 	static struct snd_seq_dev_ops ops = {
-		snd_seq_oss_synth_register,
-		snd_seq_oss_synth_unregister,
+		.init_device = snd_seq_oss_synth_register,
+		.free_device = snd_seq_oss_synth_unregister,
 	};
 
 	snd_seq_autoload_lock();
diff -ruNp linux-3.13.11/sound/core/seq/seq_device.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/seq_device.c
--- linux-3.13.11/sound/core/seq/seq_device.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/seq_device.c	2014-07-09 12:00:16.000000000 +0200
@@ -64,7 +64,7 @@ struct ops_list {
 	int argsize;		/* argument size */
 
 	/* operators */
-	struct snd_seq_dev_ops ops;
+	struct snd_seq_dev_ops *ops;
 
 	/* registered devices */
 	struct list_head dev_list;	/* list of devices */
@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char
 
 	mutex_lock(&ops->reg_mutex);
 	/* copy driver operators */
-	ops->ops = *entry;
+	ops->ops = entry;
 	ops->driver |= DRIVER_LOADED;
 	ops->argsize = argsize;
 
@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_de
 			   dev->name, ops->id, ops->argsize, dev->argsize);
 		return -EINVAL;
 	}
-	if (ops->ops.init_device(dev) >= 0) {
+	if (ops->ops->init_device(dev) >= 0) {
 		dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
 		ops->num_init_devices++;
 	} else {
@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_de
 			   dev->name, ops->id, ops->argsize, dev->argsize);
 		return -EINVAL;
 	}
-	if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
+	if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
 		dev->status = SNDRV_SEQ_DEVICE_FREE;
 		dev->driver_data = NULL;
 		ops->num_init_devices--;
diff -ruNp linux-3.13.11/sound/core/seq/seq_midi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/seq_midi.c
--- linux-3.13.11/sound/core/seq/seq_midi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/seq/seq_midi.c	2014-07-09 12:00:16.000000000 +0200
@@ -462,8 +462,8 @@ snd_seq_midisynth_unregister_port(struct
 static int __init alsa_seq_midi_init(void)
 {
 	static struct snd_seq_dev_ops ops = {
-		snd_seq_midisynth_register_port,
-		snd_seq_midisynth_unregister_port,
+		.init_device = snd_seq_midisynth_register_port,
+		.free_device = snd_seq_midisynth_unregister_port,
 	};
 	memset(&synths, 0, sizeof(synths));
 	snd_seq_autoload_lock();
diff -ruNp linux-3.13.11/sound/core/sound.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/sound.c
--- linux-3.13.11/sound/core/sound.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/core/sound.c	2014-07-09 12:00:16.000000000 +0200
@@ -86,7 +86,7 @@ static void snd_request_other(int minor)
 	case SNDRV_MINOR_TIMER:		str = "snd-timer";	break;
 	default:			return;
 	}
-	request_module(str);
+	request_module("%s", str);
 }
 
 #endif	/* modular kernel */
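The request_module() change above is the usual format-string precaution: never pass data where a format is expected. The same issue in ordinary C, as a stand-alone illustration:

#include <stdio.h>

int main(void)
{
	const char *name = "module-%s-name";	/* data that happens to contain '%' */

	/* printf(name);  unsafe: the '%s' inside the data would be interpreted */
	printf("%s\n", name);			/* safe: the data is printed verbatim */
	return 0;
}
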
diff -ruNp linux-3.13.11/sound/drivers/mts64.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/mts64.c
--- linux-3.13.11/sound/drivers/mts64.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/mts64.c	2014-07-09 12:00:16.000000000 +0200
@@ -29,6 +29,7 @@
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <sound/control.h>
+#include <asm/local.h>
 
 #define CARD_NAME "Miditerminal 4140"
 #define DRIVER_NAME "MTS64"
@@ -67,7 +68,7 @@ struct mts64 {
 	struct pardevice *pardev;
 	int pardev_claimed;
 
-	int open_count;
+	local_t open_count;
 	int current_midi_output_port;
 	int current_midi_input_port;
 	u8 mode[MTS64_NUM_INPUT_PORTS];
@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct
 {
 	struct mts64 *mts = substream->rmidi->private_data;
 
-	if (mts->open_count == 0) {
+	if (local_read(&mts->open_count) == 0) {
 		/* We don't need a spinlock here, because this is just called 
 		   if the device has not been opened before. 
 		   So there aren't any IRQs from the device */
@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct
 
 		msleep(50);
 	}
-	++(mts->open_count);
+	local_inc(&mts->open_count);
 
 	return 0;
 }
@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struc
 	struct mts64 *mts = substream->rmidi->private_data;
 	unsigned long flags;
 
-	--(mts->open_count);
-	if (mts->open_count == 0) {
+	if (local_dec_return(&mts->open_count) == 0) {
 		/* We need the spinlock_irqsave here because we can still
 		   have IRQs at this point */
 		spin_lock_irqsave(&mts->lock, flags);
@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struc
 
 		msleep(500);
 
-	} else if (mts->open_count < 0)
-		mts->open_count = 0;
+	} else if (local_read(&mts->open_count) < 0)
+		local_set(&mts->open_count, 0);
 
 	return 0;
 }
diff -ruNp linux-3.13.11/sound/drivers/opl3/opl3_seq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl3/opl3_seq.c
--- linux-3.13.11/sound/drivers/opl3/opl3_seq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl3/opl3_seq.c	2014-07-09 12:00:16.000000000 +0200
@@ -281,8 +281,8 @@ static int __init alsa_opl3_seq_init(voi
 {
 	static struct snd_seq_dev_ops ops =
 	{
-		snd_opl3_seq_new_device,
-		snd_opl3_seq_delete_device
+		.init_device = snd_opl3_seq_new_device,
+		.free_device = snd_opl3_seq_delete_device
 	};
 
 	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL3, &ops,
diff -ruNp linux-3.13.11/sound/drivers/opl4/opl4_lib.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl4/opl4_lib.c
--- linux-3.13.11/sound/drivers/opl4/opl4_lib.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl4/opl4_lib.c	2014-07-09 12:00:16.000000000 +0200
@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
 MODULE_DESCRIPTION("OPL4 driver");
 MODULE_LICENSE("GPL");
 
-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
+static inline void snd_opl4_wait(struct snd_opl4 *opl4)
 {
 	int timeout = 10;
 	while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
diff -ruNp linux-3.13.11/sound/drivers/opl4/opl4_seq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl4/opl4_seq.c
--- linux-3.13.11/sound/drivers/opl4/opl4_seq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/opl4/opl4_seq.c	2014-07-09 12:00:16.000000000 +0200
@@ -198,8 +198,8 @@ static int snd_opl4_seq_delete_device(st
 static int __init alsa_opl4_synth_init(void)
 {
 	static struct snd_seq_dev_ops ops = {
-		snd_opl4_seq_new_device,
-		snd_opl4_seq_delete_device
+		.init_device = snd_opl4_seq_new_device,
+		.free_device = snd_opl4_seq_delete_device
 	};
 
 	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_OPL4, &ops,
diff -ruNp linux-3.13.11/sound/drivers/portman2x4.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/portman2x4.c
--- linux-3.13.11/sound/drivers/portman2x4.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/drivers/portman2x4.c	2014-07-09 12:00:16.000000000 +0200
@@ -48,6 +48,7 @@
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <sound/control.h>
+#include <asm/local.h>
 
 #define CARD_NAME "Portman 2x4"
 #define DRIVER_NAME "portman"
@@ -85,7 +86,7 @@ struct portman {
 	struct pardevice *pardev;
 	int pardev_claimed;
 
-	int open_count;
+	local_t open_count;
 	int mode[PORTMAN_NUM_INPUT_PORTS];
 	struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
 };
diff -ruNp linux-3.13.11/sound/firewire/amdtp.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/amdtp.c
--- linux-3.13.11/sound/firewire/amdtp.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/amdtp.c	2014-07-09 12:00:16.000000000 +0200
@@ -488,7 +488,7 @@ static void queue_out_packet(struct amdt
 		ptr = s->pcm_buffer_pointer + data_blocks;
 		if (ptr >= pcm->runtime->buffer_size)
 			ptr -= pcm->runtime->buffer_size;
-		ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
+		ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
 
 		s->pcm_period_pointer += data_blocks;
 		if (s->pcm_period_pointer >= pcm->runtime->period_size) {
@@ -655,7 +655,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_point
  */
 void amdtp_out_stream_update(struct amdtp_out_stream *s)
 {
-	ACCESS_ONCE(s->source_node_id_field) =
+	ACCESS_ONCE_RW(s->source_node_id_field) =
 		(fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
 }
 EXPORT_SYMBOL(amdtp_out_stream_update);
diff -ruNp linux-3.13.11/sound/firewire/amdtp.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/amdtp.h
--- linux-3.13.11/sound/firewire/amdtp.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/amdtp.h	2014-07-09 12:00:16.000000000 +0200
@@ -135,7 +135,7 @@ static inline bool amdtp_out_streaming_e
 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
 						struct snd_pcm_substream *pcm)
 {
-	ACCESS_ONCE(s->pcm) = pcm;
+	ACCESS_ONCE_RW(s->pcm) = pcm;
 }
 
 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
diff -ruNp linux-3.13.11/sound/firewire/isight.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/isight.c
--- linux-3.13.11/sound/firewire/isight.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/isight.c	2014-07-09 12:00:16.000000000 +0200
@@ -96,7 +96,7 @@ static void isight_update_pointers(struc
 	ptr += count;
 	if (ptr >= runtime->buffer_size)
 		ptr -= runtime->buffer_size;
-	ACCESS_ONCE(isight->buffer_pointer) = ptr;
+	ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
 
 	isight->period_counter += count;
 	if (isight->period_counter >= runtime->period_size) {
@@ -299,7 +299,7 @@ static int isight_hw_params(struct snd_p
 	if (err < 0)
 		return err;
 
-	ACCESS_ONCE(isight->pcm_active) = true;
+	ACCESS_ONCE_RW(isight->pcm_active) = true;
 
 	return 0;
 }
@@ -337,7 +337,7 @@ static int isight_hw_free(struct snd_pcm
 {
 	struct isight *isight = substream->private_data;
 
-	ACCESS_ONCE(isight->pcm_active) = false;
+	ACCESS_ONCE_RW(isight->pcm_active) = false;
 
 	mutex_lock(&isight->mutex);
 	isight_stop_streaming(isight);
@@ -430,10 +430,10 @@ static int isight_trigger(struct snd_pcm
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		ACCESS_ONCE(isight->pcm_running) = true;
+		ACCESS_ONCE_RW(isight->pcm_running) = true;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
-		ACCESS_ONCE(isight->pcm_running) = false;
+		ACCESS_ONCE_RW(isight->pcm_running) = false;
 		break;
 	default:
 		return -EINVAL;
diff -ruNp linux-3.13.11/sound/firewire/scs1x.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/scs1x.c
--- linux-3.13.11/sound/firewire/scs1x.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/firewire/scs1x.c	2014-07-09 12:00:16.000000000 +0200
@@ -74,7 +74,7 @@ static void scs_output_trigger(struct sn
 {
 	struct scs *scs = stream->rmidi->private_data;
 
-	ACCESS_ONCE(scs->output) = up ? stream : NULL;
+	ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
 	if (up) {
 		scs->output_idle = false;
 		tasklet_schedule(&scs->tasklet);
@@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd
 {
 	struct scs *scs = stream->rmidi->private_data;
 
-	ACCESS_ONCE(scs->input) = up ? stream : NULL;
+	ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
 }
 
 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
@@ -473,8 +473,8 @@ static void scs_remove(struct fw_unit *u
 
 	snd_card_disconnect(scs->card);
 
-	ACCESS_ONCE(scs->output) = NULL;
-	ACCESS_ONCE(scs->input) = NULL;
+	ACCESS_ONCE_RW(scs->output) = NULL;
+	ACCESS_ONCE_RW(scs->input) = NULL;
 
 	wait_event(scs->idle_wait, scs->output_idle);
 
diff -ruNp linux-3.13.11/sound/isa/sb/emu8000_synth.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/isa/sb/emu8000_synth.c
--- linux-3.13.11/sound/isa/sb/emu8000_synth.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/isa/sb/emu8000_synth.c	2014-07-09 12:00:16.000000000 +0200
@@ -120,8 +120,8 @@ static int __init alsa_emu8000_init(void
 {
 	
 	static struct snd_seq_dev_ops ops = {
-		snd_emu8000_new_device,
-		snd_emu8000_delete_device,
+		.init_device = snd_emu8000_new_device,
+		.free_device = snd_emu8000_delete_device,
 	};
 	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU8000, &ops,
 					      sizeof(struct snd_emu8000*));
diff -ruNp linux-3.13.11/sound/oss/sb_audio.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/oss/sb_audio.c
--- linux-3.13.11/sound/oss/sb_audio.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/oss/sb_audio.c	2014-07-09 12:00:16.000000000 +0200
@@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
 		buf16 = (signed short *)(localbuf + localoffs);
 		while (c)
 		{
-			locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
+			locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
 			if (copy_from_user(lbuf8,
 					   userbuf+useroffs + p,
 					   locallen))
diff -ruNp linux-3.13.11/sound/oss/swarm_cs4297a.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/oss/swarm_cs4297a.c
--- linux-3.13.11/sound/oss/swarm_cs4297a.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/oss/swarm_cs4297a.c	2014-07-09 12:00:16.000000000 +0200
@@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
 {
 	struct cs4297a_state *s;
 	u32 pwr, id;
-	mm_segment_t fs;
 	int rval;
 #ifndef CONFIG_BCM_CS4297A_CSWARM
 	u64 cfg;
@@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
         if (!rval) {
 		char *sb1250_duart_present;
 
+#if 0
+                mm_segment_t fs;
                 fs = get_fs();
                 set_fs(KERNEL_DS);
-#if 0
                 val = SOUND_MASK_LINE;
                 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
                 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
                         val = initvol[i].vol;
                         mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
                 }
+                set_fs(fs);
 //                cs4297a_write_ac97(s, 0x18, 0x0808);
 #else
                 //                cs4297a_write_ac97(s, 0x5e, 0x180);
                 cs4297a_write_ac97(s, 0x02, 0x0808);
                 cs4297a_write_ac97(s, 0x18, 0x0808);
 #endif
-                set_fs(fs);
 
                 list_add(&s->list, &cs4297a_devs);
 
diff -ruNp linux-3.13.11/sound/pci/emu10k1/emu10k1_synth.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/emu10k1/emu10k1_synth.c
--- linux-3.13.11/sound/pci/emu10k1/emu10k1_synth.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/emu10k1/emu10k1_synth.c	2014-07-09 12:00:16.000000000 +0200
@@ -108,8 +108,8 @@ static int __init alsa_emu10k1_synth_ini
 {
 	
 	static struct snd_seq_dev_ops ops = {
-		snd_emu10k1_synth_new_device,
-		snd_emu10k1_synth_delete_device,
+		.init_device = snd_emu10k1_synth_new_device,
+		.free_device = snd_emu10k1_synth_delete_device,
 	};
 	return snd_seq_device_register_driver(SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH, &ops,
 					      sizeof(struct snd_emu10k1_synth_arg));
diff -ruNp linux-3.13.11/sound/pci/hda/hda_codec.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/hda/hda_codec.c
--- linux-3.13.11/sound/pci/hda/hda_codec.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/hda/hda_codec.c	2014-07-09 12:00:16.000000000 +0200
@@ -976,14 +976,10 @@ find_codec_preset(struct hda_codec *code
 	mutex_unlock(&preset_mutex);
 
 	if (mod_requested < HDA_MODREQ_MAX_COUNT) {
-		char name[32];
 		if (!mod_requested)
-			snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
-				 codec->vendor_id);
+			request_module("snd-hda-codec-id:%08x", codec->vendor_id);
 		else
-			snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
-				 (codec->vendor_id >> 16) & 0xffff);
-		request_module(name);
+			request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
 		mod_requested++;
 		goto again;
 	}
@@ -2668,7 +2664,7 @@ static int get_kctl_0dB_offset(struct sn
 		/* FIXME: set_fs() hack for obtaining user-space TLV data */
 		mm_segment_t fs = get_fs();
 		set_fs(get_ds());
-		if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), _tlv))
+		if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), (unsigned int __force_user *)_tlv))
 			tlv = _tlv;
 		set_fs(fs);
 	} else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
diff -ruNp linux-3.13.11/sound/pci/ymfpci/ymfpci.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/ymfpci/ymfpci.h
--- linux-3.13.11/sound/pci/ymfpci/ymfpci.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/ymfpci/ymfpci.h	2014-07-09 12:00:16.000000000 +0200
@@ -358,7 +358,7 @@ struct snd_ymfpci {
 	spinlock_t reg_lock;
 	spinlock_t voice_lock;
 	wait_queue_head_t interrupt_sleep;
-	atomic_t interrupt_sleep_count;
+	atomic_unchecked_t interrupt_sleep_count;
 	struct snd_info_entry *proc_entry;
 	const struct firmware *dsp_microcode;
 	const struct firmware *controller_microcode;
diff -ruNp linux-3.13.11/sound/pci/ymfpci/ymfpci_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/ymfpci/ymfpci_main.c
--- linux-3.13.11/sound/pci/ymfpci/ymfpci_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/pci/ymfpci/ymfpci_main.c	2014-07-09 12:00:16.000000000 +0200
@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
 		if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
 			break;
 	}
-	if (atomic_read(&chip->interrupt_sleep_count)) {
-		atomic_set(&chip->interrupt_sleep_count, 0);
+	if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
+		atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
 		wake_up(&chip->interrupt_sleep);
 	}
       __end:
@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
 		 	continue;
 		init_waitqueue_entry(&wait, current);
 		add_wait_queue(&chip->interrupt_sleep, &wait);
-		atomic_inc(&chip->interrupt_sleep_count);
+		atomic_inc_unchecked(&chip->interrupt_sleep_count);
 		schedule_timeout_uninterruptible(msecs_to_jiffies(50));
 		remove_wait_queue(&chip->interrupt_sleep, &wait);
 	}
@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
 		snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
 		spin_unlock(&chip->reg_lock);
 
-		if (atomic_read(&chip->interrupt_sleep_count)) {
-			atomic_set(&chip->interrupt_sleep_count, 0);
+		if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
+			atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
 			wake_up(&chip->interrupt_sleep);
 		}
 	}
@@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *c
 	spin_lock_init(&chip->reg_lock);
 	spin_lock_init(&chip->voice_lock);
 	init_waitqueue_head(&chip->interrupt_sleep);
-	atomic_set(&chip->interrupt_sleep_count, 0);
+	atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
 	chip->card = card;
 	chip->pci = pci;
 	chip->irq = -1;
diff -ruNp linux-3.13.11/sound/soc/fsl/fsl_ssi.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/soc/fsl/fsl_ssi.c
--- linux-3.13.11/sound/soc/fsl/fsl_ssi.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/soc/fsl/fsl_ssi.c	2014-07-09 12:00:16.000000000 +0200
@@ -857,7 +857,7 @@ static int fsl_ssi_probe(struct platform
 {
 	struct fsl_ssi_private *ssi_private;
 	int ret = 0;
-	struct device_attribute *dev_attr = NULL;
+	device_attribute_no_const *dev_attr = NULL;
 	struct device_node *np = pdev->dev.of_node;
 	const char *p, *sprop;
 	const uint32_t *iprop;
diff -ruNp linux-3.13.11/sound/soc/soc-core.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/soc/soc-core.c
--- linux-3.13.11/sound/soc/soc-core.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/soc/soc-core.c	2014-07-09 12:00:16.000000000 +0200
@@ -2253,8 +2253,10 @@ int snd_soc_set_ac97_ops_of_reset(struct
 	if (ret)
 		return ret;
 
-	ops->warm_reset = snd_soc_ac97_warm_reset;
-	ops->reset = snd_soc_ac97_reset;
+	pax_open_kernel();
+	*(void **)&ops->warm_reset = snd_soc_ac97_warm_reset;
+	*(void **)&ops->reset = snd_soc_ac97_reset;
+	pax_close_kernel();
 
 	snd_ac97_rst_cfg = cfg;
 	return 0;
diff -ruNp linux-3.13.11/sound/synth/emux/emux_seq.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/synth/emux/emux_seq.c
--- linux-3.13.11/sound/synth/emux/emux_seq.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/sound/synth/emux/emux_seq.c	2014-07-09 12:00:16.000000000 +0200
@@ -33,13 +33,13 @@ static int snd_emux_unuse(void *private_
  * MIDI emulation operators
  */
 static struct snd_midi_op emux_ops = {
-	snd_emux_note_on,
-	snd_emux_note_off,
-	snd_emux_key_press,
-	snd_emux_terminate_note,
-	snd_emux_control,
-	snd_emux_nrpn,
-	snd_emux_sysex,
+	.note_on = snd_emux_note_on,
+	.note_off = snd_emux_note_off,
+	.key_press = snd_emux_key_press,
+	.note_terminate = snd_emux_terminate_note,
+	.control = snd_emux_control,
+	.nrpn = snd_emux_nrpn,
+	.sysex = snd_emux_sysex,
 };
 
 
diff -ruNp linux-3.13.11/tools/gcc/.gitignore linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/.gitignore
--- linux-3.13.11/tools/gcc/.gitignore	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/.gitignore	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,2 @@
+randomize_layout_seed.h
+size_overflow_hash.h
diff -ruNp linux-3.13.11/tools/gcc/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/Makefile
--- linux-3.13.11/tools/gcc/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/Makefile	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,60 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
+GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
+#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
+
+ifeq ($(PLUGINCC),$(HOSTCC))
+HOSTLIBS := hostlibs
+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
+else
+HOSTLIBS := hostcxxlibs
+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
+endif
+
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
+$(HOSTLIBS)-y += colorize_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so
+
+always := $($(HOSTLIBS)-y)
+
+constify_plugin-objs := constify_plugin.o
+stackleak_plugin-objs := stackleak_plugin.o
+kallocstat_plugin-objs := kallocstat_plugin.o
+kernexec_plugin-objs := kernexec_plugin.o
+checker_plugin-objs := checker_plugin.o
+colorize_plugin-objs := colorize_plugin.o
+size_overflow_plugin-objs := size_overflow_plugin.o
+latent_entropy_plugin-objs := latent_entropy_plugin.o
+structleak_plugin-objs := structleak_plugin.o
+randomize_layout_plugin-objs := randomize_layout_plugin.o
+
+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
+$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
+
+quiet_cmd_build_size_overflow_hash = GENHASH  $@
+      cmd_build_size_overflow_hash = \
+	$(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
+	$(call if_changed,build_size_overflow_hash)
+
+quiet_cmd_build_size_overflow_hash_aux = GENHASH  $@
+      cmd_build_size_overflow_hash_aux = \
+	$(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
+$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
+	$(call if_changed,build_size_overflow_hash_aux)
+
+quiet_cmd_create_randomize_layout_seed = GENSEED  $@
+      cmd_create_randomize_layout_seed = \
+	$(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
+$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
+	$(call if_changed,create_randomize_layout_seed)
+
+targets += size_overflow_hash.h size_overflow_hash_aux.h randomize_layout_seed.h randomize_layout_hash.h
diff -ruNp linux-3.13.11/tools/gcc/checker_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/checker_plugin.c
--- linux-3.13.11/tools/gcc/checker_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/checker_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to implement various sparse (source code checker) features
+ *
+ * TODO:
+ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
+ *
+ * BUGS:
+ * - none known
+ */
+
+#include "gcc-common.h"
+
+extern void c_register_addr_space (const char *str, addr_space_t as);
+extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
+extern enum machine_mode default_addr_space_address_mode (addr_space_t);
+extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
+extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
+extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info checker_plugin_info = {
+	.version	= "201304082245",
+	.help		= NULL,
+};
+
+#define ADDR_SPACE_KERNEL		0
+#define ADDR_SPACE_FORCE_KERNEL		1
+#define ADDR_SPACE_USER			2
+#define ADDR_SPACE_FORCE_USER		3
+#define ADDR_SPACE_IOMEM		0
+#define ADDR_SPACE_FORCE_IOMEM		0
+#define ADDR_SPACE_PERCPU		0
+#define ADDR_SPACE_FORCE_PERCPU		0
+#define ADDR_SPACE_RCU			0
+#define ADDR_SPACE_FORCE_RCU		0
+
+static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
+{
+	return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
+}
+
+static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
+{
+	return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
+}
+
+static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
+{
+	return default_addr_space_valid_pointer_mode(mode, as);
+}
+
+static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
+{
+	return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
+}
+
+static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
+{
+	return default_addr_space_legitimize_address(x, oldx, mode, as);
+}
+
+static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
+{
+	if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
+		return true;
+
+	if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
+		return true;
+
+	if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
+		return true;
+
+	if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
+		return true;
+
+	if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
+		return true;
+
+	if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
+		return true;
+
+	if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
+		return true;
+
+	return subset == superset;
+}
+
+static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
+{
+//	addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
+//	addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
+
+	return op;
+}
+
+static void register_checker_address_spaces(void *event_data, void *data)
+{
+	c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
+	c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
+	c_register_addr_space("__user", ADDR_SPACE_USER);
+	c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
+//	c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
+//	c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
+//	c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
+//	c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
+//	c_register_addr_space("__rcu", ADDR_SPACE_RCU);
+//	c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
+
+	targetm.addr_space.pointer_mode		= checker_addr_space_pointer_mode;
+	targetm.addr_space.address_mode		= checker_addr_space_address_mode;
+	targetm.addr_space.valid_pointer_mode	= checker_addr_space_valid_pointer_mode;
+	targetm.addr_space.legitimate_address_p	= checker_addr_space_legitimate_address_p;
+//	targetm.addr_space.legitimize_address	= checker_addr_space_legitimize_address;
+	targetm.addr_space.subset_p		= checker_addr_space_subset_p;
+	targetm.addr_space.convert		= checker_addr_space_convert;
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
+
+	for (i = 0; i < argc; ++i)
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+
+	if (TARGET_64BIT == 0)
+		return 0;
+
+	register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
+
+	return 0;
+}
diff -ruNp linux-3.13.11/tools/gcc/colorize_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/colorize_plugin.c
--- linux-3.13.11/tools/gcc/colorize_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/colorize_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2012-2014 by PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to colorize diagnostic output
+ *
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info colorize_plugin_info = {
+	.version	= "201401260140",
+	.help		= NULL,
+};
+
+#define GREEN		"\033[32m\033[2m"
+#define LIGHTGREEN	"\033[32m\033[1m"
+#define YELLOW		"\033[33m\033[2m"
+#define LIGHTYELLOW	"\033[33m\033[1m"
+#define RED		"\033[31m\033[2m"
+#define LIGHTRED	"\033[31m\033[1m"
+#define BLUE		"\033[34m\033[2m"
+#define LIGHTBLUE	"\033[34m\033[1m"
+#define BRIGHT		"\033[m\033[1m"
+#define NORMAL		"\033[m"
+
+static diagnostic_starter_fn old_starter;
+static diagnostic_finalizer_fn old_finalizer;
+
+static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
+{
+	const char *color;
+	char *newprefix;
+
+	switch (diagnostic->kind) {
+	case DK_NOTE:
+		color = LIGHTBLUE;
+		break;
+
+	case DK_PEDWARN:
+	case DK_WARNING:
+		color = LIGHTYELLOW;
+		break;
+
+	case DK_ERROR:
+	case DK_FATAL:
+	case DK_ICE:
+	case DK_PERMERROR:
+	case DK_SORRY:
+		color = LIGHTRED;
+		break;
+
+	default:
+		color = NORMAL;
+	}
+
+	old_starter(context, diagnostic);
+	if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
+		return;
+	pp_destroy_prefix(context->printer);
+	pp_set_prefix(context->printer, newprefix);
+}
+
+static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
+{
+	old_finalizer(context, diagnostic);
+}
+
+static void colorize_arm(void)
+{
+	old_starter = diagnostic_starter(global_dc);
+	old_finalizer = diagnostic_finalizer(global_dc);
+
+	diagnostic_starter(global_dc) = start_colorize;
+	diagnostic_finalizer(global_dc) = finalize_colorize;
+}
+
+static unsigned int execute_colorize_rearm(void)
+{
+	if (diagnostic_starter(global_dc) == start_colorize)
+		return 0;
+
+	colorize_arm();
+	return 0;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data colorize_rearm_pass_data = {
+#else
+struct simple_ipa_opt_pass colorize_rearm_pass = {
+	.pass = {
+#endif
+		.type			= SIMPLE_IPA_PASS,
+		.name			= "colorize_rearm",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= execute_colorize_rearm,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= 0
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class colorize_rearm_pass : public simple_ipa_opt_pass {
+public:
+	colorize_rearm_pass() : simple_ipa_opt_pass(colorize_rearm_pass_data, g) {}
+	unsigned int execute() { return execute_colorize_rearm(); }
+};
+}
+#endif
+
+static struct opt_pass *make_colorize_rearm_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new colorize_rearm_pass();
+#else
+	return &colorize_rearm_pass.pass;
+#endif
+}
+
+static void colorize_start_unit(void *gcc_data, void *user_data)
+{
+	colorize_arm();
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	struct register_pass_info colorize_rearm_pass_info;
+
+	colorize_rearm_pass_info.pass				= make_colorize_rearm_pass();
+	colorize_rearm_pass_info.reference_pass_name		= "*free_lang_data";
+	colorize_rearm_pass_info.ref_pass_instance_number	= 1;
+	colorize_rearm_pass_info.pos_op 			= PASS_POS_INSERT_AFTER;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
+	register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
+	return 0;
+}
diff -ruNp linux-3.13.11/tools/gcc/constify_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/constify_plugin.c
--- linux-3.13.11/tools/gcc/constify_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/constify_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,552 @@
+/*
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
+ * Copyright 2011-2014 by PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/const_plugin/
+ *
+ * Usage:
+ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
+ * $ gcc -fplugin=constify_plugin.so test.c -O2
+ */
+
+#include "gcc-common.h"
+
+// unused C type flag in all versions 4.5-4.9
+#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info const_plugin_info = {
+	.version	= "201401270210",
+	.help		= "no-constify\tturn off constification\n",
+};
+
+typedef struct {
+	bool has_fptr_field;
+	bool has_writable_field;
+	bool has_do_const_field;
+	bool has_no_const_field;
+} constify_info;
+
+static const_tree get_field_type(const_tree field)
+{
+	return strip_array_types(TREE_TYPE(field));
+}
+
+static bool is_fptr(const_tree field)
+{
+	const_tree ptr = get_field_type(field);
+
+	if (TREE_CODE(ptr) != POINTER_TYPE)
+		return false;
+
+	return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
+}
+
+/*
+ * determine whether the given structure type meets the requirements for automatic constification,
+ * including the constification attributes on nested structure types
+ */
+static void constifiable(const_tree node, constify_info *cinfo)
+{
+	const_tree field;
+
+	gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
+
+	// e.g., pointer to structure fields while still constructing the structure type
+	if (TYPE_FIELDS(node) == NULL_TREE)
+		return;
+
+	for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
+		const_tree type = get_field_type(field);
+		enum tree_code code = TREE_CODE(type);
+
+		if (node == type)
+			continue;
+
+		if (is_fptr(field))
+			cinfo->has_fptr_field = true;
+		else if (!TREE_READONLY(field))
+			cinfo->has_writable_field = true;
+
+		if (code == RECORD_TYPE || code == UNION_TYPE) {
+			if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
+				cinfo->has_do_const_field = true;
+			else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
+				cinfo->has_no_const_field = true;
+			else
+				constifiable(type, cinfo);
+		}
+	}
+}
+
+static bool constified(const_tree node)
+{
+	constify_info cinfo = {
+		.has_fptr_field = false,
+		.has_writable_field = false,
+		.has_do_const_field = false,
+		.has_no_const_field = false
+	};
+
+	gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
+
+	if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
+//		gcc_assert(!TYPE_READONLY(node));
+		return false;
+	}
+
+	if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
+		gcc_assert(TYPE_READONLY(node));
+		return true;
+	}
+
+	constifiable(node, &cinfo);
+	if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
+		return false;
+
+	return TYPE_READONLY(node);
+}
+
+static void deconstify_tree(tree node);
+
+static void deconstify_type(tree type)
+{
+	tree field;
+
+	gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
+
+	for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
+		const_tree fieldtype = get_field_type(field);
+
+		// special case handling of simple ptr-to-same-array-type members
+		if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
+			tree ptrtype = TREE_TYPE(TREE_TYPE(field));
+
+			if (TREE_TYPE(TREE_TYPE(field)) == type)
+				continue;
+			if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
+				continue;
+			if (!constified(ptrtype))
+				continue;
+			if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
+				TREE_TYPE(field) = copy_node(TREE_TYPE(field));
+				TREE_TYPE(TREE_TYPE(field)) = build_qualified_type(type, TYPE_QUALS(ptrtype) & ~TYPE_QUAL_CONST);
+			}
+			continue;
+		}
+		if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
+			continue;
+		if (!constified(fieldtype))
+			continue;
+
+		deconstify_tree(field);
+		TREE_READONLY(field) = 0;
+	}
+	TYPE_READONLY(type) = 0;
+	C_TYPE_FIELDS_READONLY(type) = 0;
+	if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
+		TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
+		TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
+	}
+}
+
+static void deconstify_tree(tree node)
+{
+	tree old_type, new_type, field;
+
+	old_type = TREE_TYPE(node);
+	while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
+		node = TREE_TYPE(node) = copy_node(old_type);
+		old_type = TREE_TYPE(old_type);
+	}
+
+	gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
+	gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
+
+	new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
+	TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
+	for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
+		DECL_FIELD_CONTEXT(field) = new_type;
+
+	deconstify_type(new_type);
+
+	TREE_TYPE(node) = new_type;
+}
+
+static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	tree type;
+	constify_info cinfo = {
+		.has_fptr_field = false,
+		.has_writable_field = false,
+		.has_do_const_field = false,
+		.has_no_const_field = false
+	};
+
+	*no_add_attrs = true;
+	if (TREE_CODE(*node) == FUNCTION_DECL) {
+		error("%qE attribute does not apply to functions (%qF)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TREE_CODE(*node) == PARM_DECL) {
+		error("%qE attribute does not apply to function parameters (%qD)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TREE_CODE(*node) == VAR_DECL) {
+		error("%qE attribute does not apply to variables (%qD)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TYPE_P(*node)) {
+		type = *node;
+	} else {
+		gcc_assert(TREE_CODE(*node) == TYPE_DECL);
+		type = TREE_TYPE(*node);
+	}
+
+	if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
+		error("%qE attribute used on %qT applies to struct and union types only", name, type);
+		return NULL_TREE;
+	}
+
+	if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
+		error("%qE attribute is already applied to the type %qT", name, type);
+		return NULL_TREE;
+	}
+
+	if (TYPE_P(*node)) {
+		if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
+			error("%qE attribute used on type %qT is incompatible with 'do_const'", name, type);
+		else
+			*no_add_attrs = false;
+		return NULL_TREE;
+	}
+
+	constifiable(type, &cinfo);
+	if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
+		deconstify_tree(*node);
+		TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
+		return NULL_TREE;
+	}
+
+	error("%qE attribute used on type %qT that is not constified", name, type);
+	return NULL_TREE;
+}
+
+static void constify_type(tree type)
+{
+	TYPE_READONLY(type) = 1;
+	C_TYPE_FIELDS_READONLY(type) = 1;
+	TYPE_CONSTIFY_VISITED(type) = 1;
+//	TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
+//	TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
+}
+
+static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	*no_add_attrs = true;
+	if (!TYPE_P(*node)) {
+		error("%qE attribute applies to types only (%qD)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
+		error("%qE attribute used on %qT applies to struct and union types only", name, *node);
+		return NULL_TREE;
+	}
+
+	if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
+		error("%qE attribute used on %qT is already applied to the type", name, *node);
+		return NULL_TREE;
+	}
+
+	if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
+		error("%qE attribute used on %qT is incompatible with 'no_const'", name, *node);
+		return NULL_TREE;
+	}
+
+	*no_add_attrs = false;
+	return NULL_TREE;
+}
+
+static struct attribute_spec no_const_attr = {
+	.name			= "no_const",
+	.min_length		= 0,
+	.max_length		= 0,
+	.decl_required		= false,
+	.type_required		= false,
+	.function_type_required	= false,
+	.handler		= handle_no_const_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity	= true
+#endif
+};
+
+static struct attribute_spec do_const_attr = {
+	.name			= "do_const",
+	.min_length		= 0,
+	.max_length		= 0,
+	.decl_required		= false,
+	.type_required		= false,
+	.function_type_required	= false,
+	.handler		= handle_do_const_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity	= true
+#endif
+};
+
+static void register_attributes(void *event_data, void *data)
+{
+	register_attribute(&no_const_attr);
+	register_attribute(&do_const_attr);
+}
+
+static void finish_type(void *event_data, void *data)
+{
+	tree type = (tree)event_data;
+	constify_info cinfo = {
+		.has_fptr_field = false,
+		.has_writable_field = false,
+		.has_do_const_field = false,
+		.has_no_const_field = false
+	};
+
+	if (type == NULL_TREE || type == error_mark_node)
+		return;
+
+	if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
+		return;
+
+	constifiable(type, &cinfo);
+
+	if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
+		if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
+			deconstify_type(type);
+			TYPE_CONSTIFY_VISITED(type) = 1;
+		} else
+			error("'no_const' attribute used on type %qT that is not constified", type);
+		return;
+	}
+
+	if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
+		if (!cinfo.has_writable_field) {
+			error("'do_const' attribute used on type %qT that is%sconstified", type, cinfo.has_fptr_field ? " " : " not ");
+			return;
+		}
+		constify_type(type);
+		return;
+	}
+
+	if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
+		if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
+			error("'do_const' attribute used on type %qT that is constified", type);
+			return;
+		}
+		constify_type(type);
+		return;
+	}
+
+	deconstify_type(type);
+	TYPE_CONSTIFY_VISITED(type) = 1;
+}
+
+static void check_global_variables(void *event_data, void *data)
+{
+	struct varpool_node *node;
+
+	FOR_EACH_VARIABLE(node) {
+		tree var = NODE_DECL(node);
+		tree type = TREE_TYPE(var);
+
+		if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
+			continue;
+
+		if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
+			continue;
+
+		if (!TYPE_CONSTIFY_VISITED(type))
+			continue;
+
+		if (DECL_EXTERNAL(var))
+			continue;
+
+		if (DECL_INITIAL(var))
+			continue;
+
+		// this works around a gcc bug/feature where uninitialized globals
+		// are moved into the .bss section regardless of any constification
+		DECL_INITIAL(var) = build_constructor(type, NULL);
+//		inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
+	}
+}
+
+static unsigned int check_local_variables(void)
+{
+	unsigned int ret = 0;
+	tree var;
+
+	unsigned int i;
+
+	FOR_EACH_LOCAL_DECL(cfun, i, var) {
+		tree type = TREE_TYPE(var);
+
+		gcc_assert(DECL_P(var));
+		if (is_global_var(var))
+			continue;
+
+		if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
+			continue;
+
+		if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
+			continue;
+
+		if (!TYPE_CONSTIFY_VISITED(type))
+			continue;
+
+		error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
+		ret = 1;
+	}
+	return ret;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data check_local_variables_pass_data = {
+#else
+static struct gimple_opt_pass check_local_variables_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "check_local_variables",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= check_local_variables,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= 0
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class check_local_variables_pass : public gimple_opt_pass {
+public:
+	check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {}
+	unsigned int execute() { return check_local_variables(); }
+};
+}
+#endif
+
+static struct opt_pass *make_check_local_variables_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new check_local_variables_pass();
+#else
+	return &check_local_variables_pass.pass;
+#endif
+}
+
+static struct {
+	const char *name;
+	const char *asm_op;
+} sections[] = {
+	{".init.rodata",     "\t.section\t.init.rodata,\"a\""},
+	{".ref.rodata",      "\t.section\t.ref.rodata,\"a\""},
+	{".devinit.rodata",  "\t.section\t.devinit.rodata,\"a\""},
+	{".devexit.rodata",  "\t.section\t.devexit.rodata,\"a\""},
+	{".cpuinit.rodata",  "\t.section\t.cpuinit.rodata,\"a\""},
+	{".cpuexit.rodata",  "\t.section\t.cpuexit.rodata,\"a\""},
+	{".meminit.rodata",  "\t.section\t.meminit.rodata,\"a\""},
+	{".memexit.rodata",  "\t.section\t.memexit.rodata,\"a\""},
+	{".data..read_only", "\t.section\t.data..read_only,\"a\""},
+};
+
+static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
+
+static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(sections); i++)
+		if (!strcmp(sections[i].name, name))
+			return 0;
+	return old_section_type_flags(decl, name, reloc);
+}
+
+static void constify_start_unit(void *gcc_data, void *user_data)
+{
+//	size_t i;
+
+//	for (i = 0; i < ARRAY_SIZE(sections); i++)
+//		sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
+//		sections[i].section = get_section(sections[i].name, 0, NULL);
+
+	old_section_type_flags = targetm.section_type_flags;
+	targetm.section_type_flags = constify_section_type_flags;
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+	bool constify = true;
+
+	struct register_pass_info check_local_variables_pass_info;
+
+	check_local_variables_pass_info.pass				= make_check_local_variables_pass();
+	check_local_variables_pass_info.reference_pass_name		= "ssa";
+	check_local_variables_pass_info.ref_pass_instance_number	= 1;
+	check_local_variables_pass_info.pos_op				= PASS_POS_INSERT_BEFORE;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	for (i = 0; i < argc; ++i) {
+		if (!(strcmp(argv[i].key, "no-constify"))) {
+			constify = false;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	if (strcmp(lang_hooks.name, "GNU C")) {
+		inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
+		constify = false;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
+	if (constify) {
+		register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
+		register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &check_local_variables_pass_info);
+		register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
+	}
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+	return 0;
+}
diff -ruNp linux-3.13.11/tools/gcc/gcc-common.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/gcc-common.h
--- linux-3.13.11/tools/gcc/gcc-common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/gcc-common.h	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,287 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
+#include "plugin.h"
+#include "bversion.h"
+#include "plugin-version.h"
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "line-map.h"
+#include "input.h"
+#include "tree.h"
+
+#include "tree-inline.h"
+#include "version.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "flags.h"
+//#include "insn-attr.h"
+//#include "insn-config.h"
+//#include "insn-flags.h"
+#include "hard-reg-set.h"
+//#include "recog.h"
+#include "output.h"
+#include "except.h"
+#include "function.h"
+#include "toplev.h"
+//#include "expr.h"
+#include "basic-block.h"
+#include "intl.h"
+#include "ggc.h"
+//#include "regs.h"
+#include "timevar.h"
+
+#include "params.h"
+#include "pointer-set.h"
+#include "emit-rtl.h"
+//#include "reload.h"
+//#include "ira.h"
+//#include "dwarf2asm.h"
+#include "debug.h"
+#include "target.h"
+#include "langhooks.h"
+#include "cfgloop.h"
+//#include "hosthooks.h"
+#include "cgraph.h"
+#include "opts.h"
+//#include "coverage.h"
+//#include "value-prof.h"
+
+#if BUILDING_GCC_VERSION >= 4007
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
+#include "c-tree.h"
+//#include "alloc-pool.h"
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
+#include "tree-flow.h"
+#else
+#include "tree-cfgcleanup.h"
+#endif
+
+#include "diagnostic.h"
+//#include "tree-diagnostic.h"
+#include "tree-dump.h"
+#include "tree-pass.h"
+//#include "df.h"
+#include "predict.h"
+//#include "lto-streamer.h"
+#include "ipa-utils.h"
+
+#if BUILDING_GCC_VERSION >= 4009
+#include "varasm.h"
+#include "stor-layout.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+//#include "diagnostic-color.h"
+#include "context.h"
+#include "tree-ssa-alias.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "print-tree.h"
+#include "tree-eh.h"
+#endif
+
+#include "gimple.h"
+
+#if BUILDING_GCC_VERSION >= 4009
+#include "tree-ssa-operands.h"
+#include "tree-phinodes.h"
+#include "tree-cfg.h"
+#include "gimple-iterator.h"
+#include "gimple-ssa.h"
+#include "ssa-iterators.h"
+#endif
+
+//#include "expr.h" where are you...
+extern rtx emit_move_insn(rtx x, rtx y);
+
+// missing from basic_block.h...
+extern void debug_dominance_info(enum cdi_direction dir);
+extern void debug_dominance_tree(enum cdi_direction dir, basic_block root);
+
+#define __unused __attribute__((__unused__))
+
+#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
+#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
+
+#if BUILDING_GCC_VERSION == 4005
+#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls; vars && (D = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), I)
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
+#define FOR_EACH_VEC_ELT(T, V, I, P) for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I))
+#define TODO_rebuild_cgraph_edges 0
+
+static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
+{
+	tree fndecl;
+
+	if (!is_gimple_call(stmt))
+		return false;
+	fndecl = gimple_call_fndecl(stmt);
+	if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
+		return false;
+//	print_node(stderr, "pax", fndecl, 4);
+	return DECL_FUNCTION_CODE(fndecl) == code;
+}
+
+static inline bool is_simple_builtin(tree decl)
+{
+	if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
+		return false;
+
+	switch (DECL_FUNCTION_CODE(decl)) {
+	/* Builtins that expand to constants. */
+	case BUILT_IN_CONSTANT_P:
+	case BUILT_IN_EXPECT:
+	case BUILT_IN_OBJECT_SIZE:
+	case BUILT_IN_UNREACHABLE:
+	/* Simple register moves or loads from stack. */
+	case BUILT_IN_RETURN_ADDRESS:
+	case BUILT_IN_EXTRACT_RETURN_ADDR:
+	case BUILT_IN_FROB_RETURN_ADDR:
+	case BUILT_IN_RETURN:
+	case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
+	case BUILT_IN_FRAME_ADDRESS:
+	case BUILT_IN_VA_END:
+	case BUILT_IN_STACK_SAVE:
+	case BUILT_IN_STACK_RESTORE:
+	/* Exception state returns or moves registers around. */
+	case BUILT_IN_EH_FILTER:
+	case BUILT_IN_EH_POINTER:
+	case BUILT_IN_EH_COPY_VALUES:
+	return true;
+
+	default:
+	return false;
+	}
+}
+#endif
+
+#if BUILDING_GCC_VERSION <= 4006
+#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
+#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP)
+
+// should come from c-tree.h if only it were installed for gcc 4.5...
+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
+
+#define get_random_seed(noinit) ({						\
+	unsigned HOST_WIDE_INT seed;						\
+	sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed);	\
+	seed * seed; })
+
+static inline bool gimple_clobber_p(gimple s)
+{
+	return false;
+}
+
+static inline tree builtin_decl_implicit(enum built_in_function fncode)
+{
+	return implicit_built_in_decls[fncode];
+}
+
+static inline struct cgraph_node *cgraph_get_create_node(tree decl)
+{
+	struct cgraph_node *node = cgraph_get_node(decl);
+
+	return node ? node : cgraph_node(decl);
+}
+
+static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
+{
+	return node->analyzed && !node->thunk.thunk_p && !node->alias;
+}
+
+static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void)
+{
+	struct cgraph_node *node;
+
+	for (node = cgraph_nodes; node; node = node->next)
+		if (cgraph_function_with_gimple_body_p(node))
+			return node;
+	return NULL;
+}
+
+static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
+{
+	for (node = node->next; node; node = node->next)
+		if (cgraph_function_with_gimple_body_p(node))
+			return node;
+	return NULL;
+}
+
+#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
+	for ((node) = cgraph_first_function_with_gimple_body(); (node); \
+		(node) = cgraph_next_function_with_gimple_body(node))
+#endif
+
+#if BUILDING_GCC_VERSION == 4006
+extern void debug_gimple_stmt(gimple);
+extern void debug_gimple_seq(gimple_seq);
+extern void print_gimple_seq(FILE *, gimple_seq, int, int);
+extern void print_gimple_stmt(FILE *, gimple, int, int);
+extern void print_gimple_expr(FILE *, gimple, int, int);
+extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
+#endif
+
+#if BUILDING_GCC_VERSION <= 4007
+#define FOR_EACH_VARIABLE(node) for (node = varpool_nodes; node; node = node->next)
+#define PROP_loops 0
+
+static inline int bb_loop_depth(const_basic_block bb)
+{
+	return bb->loop_father ? loop_depth(bb->loop_father) : 0;
+}
+
+static inline bool gimple_store_p(gimple gs)
+{
+	tree lhs = gimple_get_lhs(gs);
+	return lhs && !is_gimple_reg(lhs);
+}
+#endif
+
+#if BUILDING_GCC_VERSION >= 4007
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+	cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
+#define ENTRY_BLOCK_PTR_FOR_FN(FN)	ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)
+#define EXIT_BLOCK_PTR_FOR_FN(FN)	EXIT_BLOCK_PTR_FOR_FUNCTION(FN)
+#define basic_block_info_for_fn(FN)	((FN)->cfg->x_basic_block_info)
+#define n_basic_blocks_for_fn(FN)	((FN)->cfg->x_n_basic_blocks)
+#define n_edges_for_fn(FN)		((FN)->cfg->x_n_edges)
+#define last_basic_block_for_fn(FN)	((FN)->cfg->x_last_basic_block)
+#define label_to_block_map_for_fn(FN)	((FN)->cfg->x_label_to_block_map)
+#define profile_status_for_fn(FN)	((FN)->cfg->x_profile_status)
+
+static inline const char *get_tree_code_name(enum tree_code code)
+{
+	gcc_assert(code < MAX_TREE_CODES);
+	return tree_code_name[code];
+}
+
+#define ipa_remove_stmt_references(cnode, stmt)
+#endif
+
+#if BUILDING_GCC_VERSION == 4008
+#define NODE_DECL(node) node->symbol.decl
+#else
+#define NODE_DECL(node) node->decl
+#endif
+
+#if BUILDING_GCC_VERSION >= 4008
+#define add_referenced_var(var)
+#define mark_sym_for_renaming(var)
+#define varpool_mark_needed_node(node)
+#define TODO_dump_func 0
+#define TODO_dump_cgraph 0
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+#define TODO_ggc_collect 0
+#endif
+
+#endif
diff -ruNp linux-3.13.11/tools/gcc/gen-random-seed.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/gen-random-seed.sh
--- linux-3.13.11/tools/gcc/gen-random-seed.sh	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/gen-random-seed.sh	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+if [ ! -f "$1" ]; then
+	SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'`
+	echo "const char *randstruct_seed = \"$SEED\";" > "$1"
+	HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
+	echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
+fi
diff -ruNp linux-3.13.11/tools/gcc/generate_size_overflow_hash.sh linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/generate_size_overflow_hash.sh
--- linux-3.13.11/tools/gcc/generate_size_overflow_hash.sh	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/generate_size_overflow_hash.sh	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
+
+header1="size_overflow_hash.h"
+database="size_overflow_hash.data"
+n=65536
+hashtable_name="size_overflow_hash"
+
+usage() {
+cat <<EOF
+usage: $0 options
+OPTIONS:
+        -h|--help               help
+	-o			header file
+	-d			database file
+	-n			hash array size
+	-s			name of the hash table
+EOF
+    return 0
+}
+
+while true
+do
+    case "$1" in
+    -h|--help)	usage && exit 0;;
+    -n)		n=$2; shift 2;;
+    -o)		header1="$2"; shift 2;;
+    -d)		database="$2"; shift 2;;
+    -s)		hashtable_name="$2"; shift 2;;
+    --)		shift 1; break ;;
+     *)		break ;;
+    esac
+done
+
+create_defines() {
+	for i in `seq 0 31`
+	do
+		echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
+	done
+	echo >> "$header1"
+}
+
+create_structs() {
+	rm -f "$header1"
+
+	create_defines
+
+	cat "$database" | while read data
+	do
+		data_array=($data)
+		struct_hash_name="${data_array[0]}"
+		funcn="${data_array[1]}"
+		params="${data_array[2]}"
+		next="${data_array[4]}"
+
+		echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
+
+		echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
+		echo -en "\t.param\t= " >> "$header1"
+		line=
+		for param_num in ${params//-/ };
+		do
+			line="${line}PARAM"$param_num"|"
+		done
+
+		echo -e "${line%?},\n};\n" >> "$header1"
+	done
+}
+
+create_headers() {
+	echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
+}
+
+create_array_elements() {
+	index=0
+	grep -v "nohasharray" $database | sort -n -k 4 | while read data
+	do
+		data_array=($data)
+		i="${data_array[3]}"
+		hash="${data_array[0]}"
+		while [[ $index -lt $i ]]
+		do
+			echo -e "\t["$index"]\t= NULL," >> "$header1"
+			index=$(($index + 1))
+		done
+		index=$(($index + 1))
+		echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
+	done
+	echo '};' >> $header1
+}
+
+create_structs
+create_headers
+create_array_elements
+
+exit 0
diff -ruNp linux-3.13.11/tools/gcc/kallocstat_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/kallocstat_plugin.c
--- linux-3.13.11/tools/gcc/kallocstat_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/kallocstat_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to find the distribution of k*alloc sizes
+ *
+ * TODO:
+ *
+ * BUGS:
+ * - none known
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info kallocstat_plugin_info = {
+	.version	= "201401260140",
+	.help		= NULL
+};
+
+static const char * const kalloc_functions[] = {
+	"__kmalloc",
+	"kmalloc",
+	"kmalloc_large",
+	"kmalloc_node",
+	"kmalloc_order",
+	"kmalloc_order_trace",
+	"kmalloc_slab",
+	"kzalloc",
+	"kzalloc_node",
+};
+
+static bool is_kalloc(const char *fnname)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
+		if (!strcmp(fnname, kalloc_functions[i]))
+			return true;
+	return false;
+}
+
+static unsigned int execute_kallocstat(void)
+{
+	basic_block bb;
+
+	// 1. loop through BBs and GIMPLE statements
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			// gimple match: 
+			tree fndecl, size;
+			gimple stmt;
+			const char *fnname;
+
+			// is it a call
+			stmt = gsi_stmt(gsi);
+			if (!is_gimple_call(stmt))
+				continue;
+			fndecl = gimple_call_fndecl(stmt);
+			if (fndecl == NULL_TREE)
+				continue;
+			if (TREE_CODE(fndecl) != FUNCTION_DECL)
+				continue;
+
+			// is it a call to k*alloc
+			fnname = DECL_NAME_POINTER(fndecl);
+			if (!is_kalloc(fnname))
+				continue;
+
+			// is the size arg const or the result of a simple const assignment
+			size = gimple_call_arg(stmt, 0);
+			while (true) {
+				expanded_location xloc;
+				size_t size_val;
+
+				if (TREE_CONSTANT(size)) {
+					xloc = expand_location(gimple_location(stmt));
+					if (!xloc.file)
+						xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
+					size_val = TREE_INT_CST_LOW(size);
+					fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
+					break;
+				}
+
+				if (TREE_CODE(size) != SSA_NAME)
+					break;
+				stmt = SSA_NAME_DEF_STMT(size);
+//debug_gimple_stmt(stmt);
+//debug_tree(size);
+				if (!stmt || !is_gimple_assign(stmt))
+					break;
+				if (gimple_num_ops(stmt) != 2)
+					break;
+				size = gimple_assign_rhs1(stmt);
+			}
+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
+//debug_tree(gimple_call_fn(call_stmt));
+//print_node(stderr, "pax", fndecl, 4);
+		}
+	}
+
+	return 0;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data kallocstat_pass_data = {
+#else
+static struct gimple_opt_pass kallocstat_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "kallocstat",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= execute_kallocstat,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= 0
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class kallocstat_pass : public gimple_opt_pass {
+public:
+	kallocstat_pass() : gimple_opt_pass(kallocstat_pass_data, g) {}
+	unsigned int execute() { return execute_kallocstat(); }
+};
+}
+#endif
+
+static struct opt_pass *make_kallocstat_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new kallocstat_pass();
+#else
+	return &kallocstat_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	struct register_pass_info kallocstat_pass_info;
+
+	kallocstat_pass_info.pass			= make_kallocstat_pass();
+	kallocstat_pass_info.reference_pass_name	= "ssa";
+	kallocstat_pass_info.ref_pass_instance_number	= 1;
+	kallocstat_pass_info.pos_op 			= PASS_POS_INSERT_AFTER;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
+
+	return 0;
+}
diff -ruNp linux-3.13.11/tools/gcc/kernexec_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/kernexec_plugin.c
--- linux-3.13.11/tools/gcc/kernexec_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/kernexec_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
+ *
+ * TODO:
+ *
+ * BUGS:
+ * - none known
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info kernexec_plugin_info = {
+	.version	= "201401260140",
+	.help		= "method=[bts|or]\tinstrumentation method\n"
+};
+
+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
+static void (*kernexec_instrument_retaddr)(rtx);
+
+/*
+ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
+ */
+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
+{
+	gimple asm_movabs_stmt;
+
+	// build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
+	asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
+	gimple_asm_set_volatile(asm_movabs_stmt, true);
+	gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
+	update_stmt(asm_movabs_stmt);
+}
+
+/*
+ * find all asm() stmts that clobber r12 and add a reload of r12
+ */
+static unsigned int execute_kernexec_reload(void)
+{
+	basic_block bb;
+
+	// 1. loop through BBs and GIMPLE statements
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			// gimple match: __asm__ ("" :  :  : "r12");
+			gimple asm_stmt;
+			size_t nclobbers;
+
+			// is it an asm ...
+			asm_stmt = gsi_stmt(gsi);
+			if (gimple_code(asm_stmt) != GIMPLE_ASM)
+				continue;
+
+			// ... clobbering r12
+			nclobbers = gimple_asm_nclobbers(asm_stmt);
+			while (nclobbers--) {
+				tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
+				if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
+					continue;
+				kernexec_reload_fptr_mask(&gsi);
+//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
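
The reload pass above only matters for the "or" method: any inline asm that lists r12 as a clobber destroys the mask held there, so the plugin appends the reloading movabs right after it. A hypothetical example of the kind of kernel code it targets:

/* hypothetical example of an asm statement the reload pass rewrites; after
 * instrumentation the plugin emits
 *   asm volatile("movabs $0x8000000000000000, %r12");
 * immediately after it */
static inline void clobbers_r12_example(void)
{
	asm volatile("xorq %%r12, %%r12" : : : "r12", "cc");
}
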
+
+/*
+ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
+ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
+ */
+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
+{
+	gimple assign_intptr, assign_new_fptr, call_stmt;
+	tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
+
+	call_stmt = gsi_stmt(*gsi);
+	old_fptr = gimple_call_fn(call_stmt);
+
+	// create temporary unsigned long variable used for bitops and cast fptr to it
+	intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
+	add_referenced_var(intptr);
+	intptr = make_ssa_name(intptr, NULL);
+	assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
+	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+	update_stmt(assign_intptr);
+
+	// apply logical or to temporary unsigned long and bitmask
+	kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
+//	kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
+	orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
+	intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
+	assign_intptr = gimple_build_assign(intptr, orptr);
+	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+	update_stmt(assign_intptr);
+
+	// cast temporary unsigned long back to a temporary fptr variable
+	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
+	add_referenced_var(new_fptr);
+	new_fptr = make_ssa_name(new_fptr, NULL);
+	assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
+	SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
+	gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
+	update_stmt(assign_new_fptr);
+
+	// replace call stmt fn with the new fptr
+	gimple_call_set_fn(call_stmt, new_fptr);
+	update_stmt(call_stmt);
+}
+
+static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
+{
+	gimple asm_or_stmt, call_stmt;
+	tree old_fptr, new_fptr, input, output;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, gc) *inputs = NULL;
+	VEC(tree, gc) *outputs = NULL;
+#else
+	vec<tree, va_gc> *inputs = NULL;
+	vec<tree, va_gc> *outputs = NULL;
+#endif
+
+	call_stmt = gsi_stmt(*gsi);
+	old_fptr = gimple_call_fn(call_stmt);
+
+	// create temporary fptr variable
+	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
+	add_referenced_var(new_fptr);
+	new_fptr = make_ssa_name(new_fptr, NULL);
+
+	// build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
+	input = build_tree_list(NULL_TREE, build_string(1, "0"));
+	input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
+	output = build_tree_list(NULL_TREE, build_string(2, "=r"));
+	output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
+#if BUILDING_GCC_VERSION <= 4007
+	VEC_safe_push(tree, gc, inputs, input);
+	VEC_safe_push(tree, gc, outputs, output);
+#else
+	vec_safe_push(inputs, input);
+	vec_safe_push(outputs, output);
+#endif
+	asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
+	SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
+	gimple_asm_set_volatile(asm_or_stmt, true);
+	gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
+	update_stmt(asm_or_stmt);
+
+	// replace call stmt fn with the new fptr
+	gimple_call_set_fn(call_stmt, new_fptr);
+	update_stmt(call_stmt);
+}
+
+/*
+ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
+ */
+static unsigned int execute_kernexec_fptr(void)
+{
+	basic_block bb;
+
+	// 1. loop through BBs and GIMPLE statements
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			// gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
+			tree fn;
+			gimple call_stmt;
+
+			// is it a call ...
+			call_stmt = gsi_stmt(gsi);
+			if (!is_gimple_call(call_stmt))
+				continue;
+			fn = gimple_call_fn(call_stmt);
+			if (TREE_CODE(fn) == ADDR_EXPR)
+				continue;
+			if (TREE_CODE(fn) != SSA_NAME)
+				gcc_unreachable();
+
+			// ... through a function pointer
+			if (SSA_NAME_VAR(fn) != NULL_TREE) {
+				fn = SSA_NAME_VAR(fn);
+				if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
+					debug_tree(fn);
+					gcc_unreachable();
+				}
+			}
+			fn = TREE_TYPE(fn);
+			if (TREE_CODE(fn) != POINTER_TYPE)
+				continue;
+			fn = TREE_TYPE(fn);
+			if (TREE_CODE(fn) != FUNCTION_TYPE)
+				continue;
+
+			kernexec_instrument_fptr(&gsi);
+
+//debug_tree(gimple_call_fn(call_stmt));
+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
+		}
+	}
+
+	return 0;
+}
+
+// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
+static void kernexec_instrument_retaddr_bts(rtx insn)
+{
+	rtx btsq;
+	rtvec argvec, constraintvec, labelvec;
+	int line;
+
+	// create asm volatile("btsq $63,(%%rsp)":::)
+	argvec = rtvec_alloc(0);
+	constraintvec = rtvec_alloc(0);
+	labelvec = rtvec_alloc(0);
+	line = expand_location(RTL_LOCATION(insn)).line;
+	btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
+	MEM_VOLATILE_P(btsq) = 1;
+//	RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
+	emit_insn_before(btsq, insn);
+}
+
+// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
+static void kernexec_instrument_retaddr_or(rtx insn)
+{
+	rtx orq;
+	rtvec argvec, constraintvec, labelvec;
+	int line;
+
+	// create asm volatile("orq %%r12,(%%rsp)":::)
+	argvec = rtvec_alloc(0);
+	constraintvec = rtvec_alloc(0);
+	labelvec = rtvec_alloc(0);
+	line = expand_location(RTL_LOCATION(insn)).line;
+	orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
+	MEM_VOLATILE_P(orq) = 1;
+//	RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
+	emit_insn_before(orq, insn);
+}
+
+/*
+ * find all asm level function returns and forcibly set the highest bit of the return address
+ */
+static unsigned int execute_kernexec_retaddr(void)
+{
+	rtx insn;
+
+//	if (stack_realign_drap)
+//		inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
+
+	// 1. find function returns
+	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+		// rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
+		//            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
+		//            (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
+		rtx body;
+
+		// is it a retn
+		if (!JUMP_P(insn))
+			continue;
+		body = PATTERN(insn);
+		if (GET_CODE(body) == PARALLEL)
+			body = XVECEXP(body, 0, 0);
+		if (!ANY_RETURN_P(body))
+			continue;
+		kernexec_instrument_retaddr(insn);
+	}
+
+//	print_simple_rtl(stderr, get_insns());
+//	print_rtl(stderr, get_insns());
+
+	return 0;
+}
+
+static bool kernexec_cmodel_check(void)
+{
+	tree section;
+
+	if (ix86_cmodel != CM_KERNEL)
+		return false;
+
+	section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
+	if (!section || !TREE_VALUE(section))
+		return true;
+
+	section = TREE_VALUE(TREE_VALUE(section));
+	if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
+		return true;
+
+	return false;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data kernexec_reload_pass_data = {
+#else
+static struct gimple_opt_pass kernexec_reload_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "kernexec_reload",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= kernexec_cmodel_check,
+		.execute		= execute_kernexec_reload,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data kernexec_fptr_pass_data = {
+#else
+static struct gimple_opt_pass kernexec_fptr_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "kernexec_fptr",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= kernexec_cmodel_check,
+		.execute		= execute_kernexec_fptr,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data kernexec_retaddr_pass_data = {
+#else
+static struct rtl_opt_pass kernexec_retaddr_pass = {
+	.pass = {
+#endif
+		.type			= RTL_PASS,
+		.name			= "kernexec_retaddr",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= kernexec_cmodel_check,
+		.execute		= execute_kernexec_retaddr,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_dump_func | TODO_ggc_collect
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class kernexec_reload_pass : public gimple_opt_pass {
+public:
+	kernexec_reload_pass() : gimple_opt_pass(kernexec_reload_pass_data, g) {}
+	bool gate() { return kernexec_cmodel_check(); }
+	unsigned int execute() { return execute_kernexec_reload(); }
+};
+
+class kernexec_fptr_pass : public gimple_opt_pass {
+public:
+	kernexec_fptr_pass() : gimple_opt_pass(kernexec_fptr_pass_data, g) {}
+	bool gate() { return kernexec_cmodel_check(); }
+	unsigned int execute() { return execute_kernexec_fptr(); }
+};
+
+class kernexec_retaddr_pass : public rtl_opt_pass {
+public:
+	kernexec_retaddr_pass() : rtl_opt_pass(kernexec_retaddr_pass_data, g) {}
+	bool gate() { return kernexec_cmodel_check(); }
+	unsigned int execute() { return execute_kernexec_retaddr(); }
+};
+}
+#endif
+
+static struct opt_pass *make_kernexec_reload_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new kernexec_reload_pass();
+#else
+	return &kernexec_reload_pass.pass;
+#endif
+}
+
+static struct opt_pass *make_kernexec_fptr_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new kernexec_fptr_pass();
+#else
+	return &kernexec_fptr_pass.pass;
+#endif
+}
+
+static struct opt_pass *make_kernexec_retaddr_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new kernexec_retaddr_pass();
+#else
+	return &kernexec_retaddr_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+	struct register_pass_info kernexec_reload_pass_info;
+	struct register_pass_info kernexec_fptr_pass_info;
+	struct register_pass_info kernexec_retaddr_pass_info;
+
+	kernexec_reload_pass_info.pass				= make_kernexec_reload_pass();
+	kernexec_reload_pass_info.reference_pass_name		= "ssa";
+	kernexec_reload_pass_info.ref_pass_instance_number	= 1;
+	kernexec_reload_pass_info.pos_op 			= PASS_POS_INSERT_AFTER;
+
+	kernexec_fptr_pass_info.pass				= make_kernexec_fptr_pass();
+	kernexec_fptr_pass_info.reference_pass_name		= "ssa";
+	kernexec_fptr_pass_info.ref_pass_instance_number	= 1;
+	kernexec_fptr_pass_info.pos_op 				= PASS_POS_INSERT_AFTER;
+
+	kernexec_retaddr_pass_info.pass				= make_kernexec_retaddr_pass();
+	kernexec_retaddr_pass_info.reference_pass_name		= "pro_and_epilogue";
+	kernexec_retaddr_pass_info.ref_pass_instance_number	= 1;
+	kernexec_retaddr_pass_info.pos_op 			= PASS_POS_INSERT_AFTER;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
+
+	if (TARGET_64BIT == 0)
+		return 0;
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "method")) {
+			if (!argv[i].value) {
+				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+				continue;
+			}
+			if (!strcmp(argv[i].value, "bts")) {
+				kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
+				kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
+			} else if (!strcmp(argv[i].value, "or")) {
+				kernexec_instrument_fptr = kernexec_instrument_fptr_or;
+				kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
+				fix_register("r12", 1, 1);
+			} else
+				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+	if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
+		error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
+
+	if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
+
+	return 0;
+}
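
Both instrumentation methods boil down to forcing bit 63 of every function pointer (and of the saved return address) before it is used: under the kernel code model legitimate kernel addresses already have that bit set, while an attacker-supplied userland pointer becomes non-canonical and the indirect call or ret faults instead of executing userland code. A standalone userspace illustration of just the arithmetic (illustrative only; the actual fault happens when the instrumented kernel uses such a pointer):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t user_ptr   = 0x00007f1234560000ULL;	/* typical userland address */
	uint64_t kernel_ptr = 0xffffffff81000000ULL;	/* typical kernel text address */
	uint64_t mask       = 1ULL << 63;		/* what btsq/orq set */

	printf("user:   %016llx -> %016llx (non-canonical, faults on use)\n",
	       (unsigned long long)user_ptr, (unsigned long long)(user_ptr | mask));
	printf("kernel: %016llx -> %016llx (unchanged)\n",
	       (unsigned long long)kernel_ptr, (unsigned long long)(kernel_ptr | mask));
	return 0;
}
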
diff -ruNp linux-3.13.11/tools/gcc/latent_entropy_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/latent_entropy_plugin.c
--- linux-3.13.11/tools/gcc/latent_entropy_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/latent_entropy_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,457 @@
+/*
+ * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to help generate a little bit of entropy from program state,
+ * used throughout the uptime of the kernel
+ *
+ * TODO:
+ * - add ipa pass to identify not explicitly marked candidate functions
+ * - mix in more program state (function arguments/return values, loop variables, etc)
+ * - more instrumentation control via attribute parameters
+ *
+ * BUGS:
+ * - LTO needs -flto-partition=none for now
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
+	.version	= "201403280150",
+	.help		= NULL
+};
+
+static unsigned HOST_WIDE_INT seed;
+static unsigned HOST_WIDE_INT get_random_const(void)
+{
+	unsigned int i;
+	unsigned HOST_WIDE_INT ret = 0;
+
+	for (i = 0; i < 8 * sizeof ret; i++) {
+		ret = (ret << 1) | (seed & 1);
+		seed >>= 1;
+		if (ret & 1)
+			seed ^= 0xD800000000000000ULL;
+	}
+
+	return ret;
+}
+
+static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	tree type;
+	unsigned long long mask;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(constructor_elt, gc) *vals;
+#else
+	vec<constructor_elt, va_gc> *vals;
+#endif
+
+	switch (TREE_CODE(*node)) {
+	default:
+		*no_add_attrs = true;
+		error("%qE attribute only applies to functions and variables", name);
+		break;
+
+	case VAR_DECL:
+		if (DECL_INITIAL(*node)) {
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must not be initialized", *node, name);
+			break;
+		}
+
+		if (!TREE_STATIC(*node)) {
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must not be local", *node, name);
+			break;
+		}
+
+		type = TREE_TYPE(*node);
+		switch (TREE_CODE(type)) {
+		default:
+			*no_add_attrs = true;
+			error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields", *node, name);
+			break;
+
+		case RECORD_TYPE: {
+			tree field;
+			unsigned int nelt = 0;
+
+			for (field = TYPE_FIELDS(type); field; nelt++, field = TREE_CHAIN(field)) {
+				tree fieldtype;
+
+				fieldtype = TREE_TYPE(field);
+				if (TREE_CODE(fieldtype) != INTEGER_TYPE) {
+					*no_add_attrs = true;
+					error("structure variable %qD with %qE attribute has a non-integer field %qE", *node, name, field);
+					break;
+				}
+			}
+
+			if (field)
+				break;
+
+#if BUILDING_GCC_VERSION <= 4007
+			vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+			vec_alloc(vals, nelt);
+#endif
+
+			for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
+				tree fieldtype;
+
+				fieldtype = TREE_TYPE(field);
+				mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(fieldtype)) - 1);
+				mask = 2 * (mask - 1) + 1;
+
+				if (TYPE_UNSIGNED(fieldtype))
+					CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cstu(fieldtype, mask & get_random_const()));
+				else
+					CONSTRUCTOR_APPEND_ELT(vals, field, build_int_cst(fieldtype, mask & get_random_const()));
+			}
+
+			DECL_INITIAL(*node) = build_constructor(type, vals);
+//debug_tree(DECL_INITIAL(*node));
+			break;
+		}
+
+		case INTEGER_TYPE:
+			mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
+			mask = 2 * (mask - 1) + 1;
+
+			if (TYPE_UNSIGNED(type))
+				DECL_INITIAL(*node) = build_int_cstu(type, mask & get_random_const());
+			else
+				DECL_INITIAL(*node) = build_int_cst(type, mask & get_random_const());
+			break;
+
+		case ARRAY_TYPE: {
+			tree elt_type, array_size, elt_size;
+			unsigned int i, nelt;
+
+			elt_type = TREE_TYPE(type);
+			elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
+			array_size = TYPE_SIZE_UNIT(type);
+
+			if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size || TREE_CODE(array_size) != INTEGER_CST) {
+				*no_add_attrs = true;
+				error("array variable %qD with %qE attribute must be a fixed length integer array type", *node, name);
+				break;
+			}
+
+			nelt = TREE_INT_CST_LOW(array_size) / TREE_INT_CST_LOW(elt_size);
+#if BUILDING_GCC_VERSION <= 4007
+			vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+			vec_alloc(vals, nelt);
+#endif
+
+			mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(elt_type)) - 1);
+			mask = 2 * (mask - 1) + 1;
+
+			for (i = 0; i < nelt; i++)
+				if (TYPE_UNSIGNED(elt_type))
+					CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cstu(elt_type, mask & get_random_const()));
+				else
+					CONSTRUCTOR_APPEND_ELT(vals, size_int(i), build_int_cst(elt_type, mask & get_random_const()));
+
+			DECL_INITIAL(*node) = build_constructor(type, vals);
+//debug_tree(DECL_INITIAL(*node));
+			break;
+		}
+		}
+		break;
+
+	case FUNCTION_DECL:
+		break;
+	}
+
+	return NULL_TREE;
+}
+
+static struct attribute_spec latent_entropy_attr = {
+	.name				= "latent_entropy",
+	.min_length			= 0,
+	.max_length			= 0,
+	.decl_required			= true,
+	.type_required			= false,
+	.function_type_required		= false,
+	.handler			= handle_latent_entropy_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity		= false
+#endif
+};
+
+static void register_attributes(void *event_data, void *data)
+{
+	register_attribute(&latent_entropy_attr);
+}
+
+static bool gate_latent_entropy(void)
+{
+	// don't bother with noreturn functions for now
+	if (TREE_THIS_VOLATILE(current_function_decl))
+		return false;
+
+	return lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)) != NULL_TREE;
+}
+
+static enum tree_code get_op(tree *rhs)
+{
+	static enum tree_code op;
+	unsigned HOST_WIDE_INT random_const;
+
+	random_const = get_random_const();
+
+	switch (op) {
+	case BIT_XOR_EXPR:
+		op = PLUS_EXPR;
+		break;
+
+	case PLUS_EXPR:
+		if (rhs) {
+			op = LROTATE_EXPR;
+			random_const &= HOST_BITS_PER_WIDE_INT - 1;
+			break;
+		}
+
+	case LROTATE_EXPR:
+	default:
+		op = BIT_XOR_EXPR;
+		break;
+	}
+	if (rhs)
+		*rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+	return op;
+}
+
+static void perturb_local_entropy(basic_block bb, tree local_entropy)
+{
+	gimple_stmt_iterator gsi;
+	gimple assign;
+	tree addxorrol, rhs;
+	enum tree_code op;
+
+	op = get_op(&rhs);
+	addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
+	assign = gimple_build_assign(local_entropy, addxorrol);
+	gsi = gsi_after_labels(bb);
+	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+//debug_bb(bb);
+}
+
+static void perturb_latent_entropy(basic_block bb, tree rhs)
+{
+	gimple_stmt_iterator gsi;
+	gimple assign;
+	tree addxorrol, temp;
+
+	// 1. create temporary copy of latent_entropy
+	temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
+	add_referenced_var(temp);
+
+	// 2. read...
+	temp = make_ssa_name(temp, NULL);
+	assign = gimple_build_assign(temp, latent_entropy_decl);
+	SSA_NAME_DEF_STMT(temp) = assign;
+	add_referenced_var(latent_entropy_decl);
+	gsi = gsi_after_labels(bb);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	// 3. ...modify...
+	addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
+	temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
+	assign = gimple_build_assign(temp, addxorrol);
+	SSA_NAME_DEF_STMT(temp) = assign;
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+
+	// 4. ...write latent_entropy
+	assign = gimple_build_assign(latent_entropy_decl, temp);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+}
+
+static unsigned int execute_latent_entropy(void)
+{
+	basic_block bb;
+	gimple assign;
+	gimple_stmt_iterator gsi;
+	tree local_entropy;
+
+	if (!latent_entropy_decl) {
+		struct varpool_node *node;
+
+		FOR_EACH_VARIABLE(node) {
+			tree var = NODE_DECL(node);
+
+			if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
+				continue;
+			latent_entropy_decl = var;
+//			debug_tree(var);
+			break;
+		}
+		if (!latent_entropy_decl) {
+//			debug_tree(current_function_decl);
+			return 0;
+		}
+	}
+
+//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
+
+	// 1. create local entropy variable
+	local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
+	add_referenced_var(local_entropy);
+	mark_sym_for_renaming(local_entropy);
+
+	// 2. initialize local entropy variable
+	bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
+	if (dom_info_available_p(CDI_DOMINATORS))
+		set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
+	gsi = gsi_start_bb(bb);
+
+	assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
+//	gimple_set_location(assign, loc);
+	gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+//debug_bb(bb);
+	gcc_assert(single_succ_p(bb));
+	bb = single_succ(bb);
+
+	// 3. instrument each BB with an operation on the local entropy variable
+	while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
+		perturb_local_entropy(bb, local_entropy);
+//debug_bb(bb);
+		bb = bb->next_bb;
+	};
+
+	// 4. mix local entropy into the global entropy variable
+	gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
+	perturb_latent_entropy(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)), local_entropy);
+//debug_bb(single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun)));
+	return 0;
+}
+
+static void latent_entropy_start_unit(void *gcc_data, void *user_data)
+{
+	tree latent_entropy_type;
+
+	seed = get_random_seed(false);
+
+	if (in_lto_p)
+		return;
+
+	// extern volatile u64 latent_entropy
+	gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
+	latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
+	latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
+
+	TREE_STATIC(latent_entropy_decl) = 1;
+	TREE_PUBLIC(latent_entropy_decl) = 1;
+	TREE_USED(latent_entropy_decl) = 1;
+	TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
+	DECL_EXTERNAL(latent_entropy_decl) = 1;
+	DECL_ARTIFICIAL(latent_entropy_decl) = 1;
+	lang_hooks.decls.pushdecl(latent_entropy_decl);
+//	DECL_ASSEMBLER_NAME(latent_entropy_decl);
+//	varpool_finalize_decl(latent_entropy_decl);
+//	varpool_mark_needed_node(latent_entropy_decl);
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data latent_entropy_pass_data = {
+#else
+static struct gimple_opt_pass latent_entropy_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "latent_entropy",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= gate_latent_entropy,
+		.execute		= execute_latent_entropy,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= PROP_gimple_leh | PROP_cfg,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class latent_entropy_pass : public gimple_opt_pass {
+public:
+	latent_entropy_pass() : gimple_opt_pass(latent_entropy_pass_data, g) {}
+	bool gate() { return gate_latent_entropy(); }
+	unsigned int execute() { return execute_latent_entropy(); }
+};
+}
+#endif
+
+static struct opt_pass *make_latent_entropy_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new latent_entropy_pass();
+#else
+	return &latent_entropy_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	struct register_pass_info latent_entropy_pass_info;
+
+	latent_entropy_pass_info.pass				= make_latent_entropy_pass();
+	latent_entropy_pass_info.reference_pass_name		= "optimized";
+	latent_entropy_pass_info.ref_pass_instance_number	= 1;
+	latent_entropy_pass_info.pos_op 			= PASS_POS_INSERT_BEFORE;
+	static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
+		{
+			.base = &latent_entropy_decl,
+			.nelt = 1,
+			.stride = sizeof(latent_entropy_decl),
+			.cb = &gt_ggc_mx_tree_node,
+			.pchw = &gt_pch_nx_tree_node
+		},
+		LAST_GGC_ROOT_TAB
+	};
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
+	register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL);
+	if (!in_lto_p)
+		register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+	return 0;
+}
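
In use, the plugin declares an extern volatile 64-bit latent_entropy variable at start_unit time, instruments every function carrying the latent_entropy attribute with xor/add/rotate updates of a local entropy word, and folds that word back into the global just before the function returns; variables carrying the attribute simply receive a compile-time random initializer. A hedged sketch of the consumer side (the *_example identifiers are illustrative, not taken from this patch):

typedef unsigned long long u64;

volatile u64 latent_entropy;		/* definition matching the extern decl the plugin emits */

/* static, uninitialized integer array: gets a random initializer at compile time */
static u64 entropy_pool_example[4] __attribute__((latent_entropy));

static void __attribute__((latent_entropy)) early_init_example(void)
{
	/* the pass inserts per-basic-block updates of a local entropy
	 * variable here and mixes the result into latent_entropy on exit */
}
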
diff -ruNp linux-3.13.11/tools/gcc/randomize_layout_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/randomize_layout_plugin.c
--- linux-3.13.11/tools/gcc/randomize_layout_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/randomize_layout_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,910 @@
+/*
+ * Copyright 2014 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ *                   and PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Usage:
+ * $ # for 4.5/4.6/C based 4.7
+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
+ * $ # for C++ based 4.7/4.8+
+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o randomize_layout_plugin.so randomize_layout_plugin.c
+ * $ gcc -fplugin=./randomize_layout_plugin.so test.c -O2
+ */
+
+#include "gcc-common.h"
+#include "randomize_layout_seed.h"
+
+#if BUILDING_GCC_MAJOR < 4 || BUILDING_GCC_MINOR < 6 || (BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4)
+#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer."
+#endif
+
+#define ORIG_TYPE_NAME(node) \
+	(TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
+
+int plugin_is_GPL_compatible;
+
+static int performance_mode;
+
+static struct plugin_info randomize_layout_plugin_info = {
+	.version	= "201402201816",
+	.help		= "disable\t\t\tdo not activate plugin\n"
+			  "performance-mode\tenable cacheline-aware layout randomization\n"
+};
+
+/* from old Linux dcache.h */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+	return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+static inline unsigned int
+name_hash(const unsigned char *name)
+{
+	unsigned long hash = 0;
+	unsigned int len = strlen((const char *)name);
+	while (len--)
+		hash = partial_name_hash(*name++, hash);
+	return (unsigned int)hash;
+}
+
+static tree handle_randomize_layout_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	tree type;
+
+	*no_add_attrs = true;
+	if (TREE_CODE(*node) == FUNCTION_DECL) {
+		error("%qE attribute does not apply to functions (%qF)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TREE_CODE(*node) == PARM_DECL) {
+		error("%qE attribute does not apply to function parameters (%qD)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TREE_CODE(*node) == VAR_DECL) {
+		error("%qE attribute does not apply to variables (%qD)", name, *node);
+		return NULL_TREE;
+	}
+
+	if (TYPE_P(*node)) {
+		type = *node;
+	} else {
+		gcc_assert(TREE_CODE(*node) == TYPE_DECL);
+		type = TREE_TYPE(*node);
+	}
+
+	if (TREE_CODE(type) != RECORD_TYPE) {
+		error("%qE attribute used on %qT applies to struct types only", name, type);
+		return NULL_TREE;
+	}
+
+	if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
+		error("%qE attribute is already applied to the type %qT", name, type);
+		return NULL_TREE;
+	}
+
+	*no_add_attrs = false;
+
+	return NULL_TREE;
+}
+
+/* set on complete types that we don't need to inspect further at all */
+static tree handle_randomize_considered_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	*no_add_attrs = false;
+	return NULL_TREE;
+}
+
+/*
+ * set on types that we've performed a shuffle on, to prevent re-shuffling
+ * this does not preclude us from inspecting its fields for potential shuffles
+ */
+static tree handle_randomize_performed_attr(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	*no_add_attrs = false;
+	return NULL_TREE;
+}
+
+/*
+ * 64bit variant of Bob Jenkins' public domain PRNG
+ * 256 bits of internal state
+ */
+
+typedef unsigned long long u64;
+
+typedef struct ranctx { u64 a; u64 b; u64 c; u64 d; } ranctx;
+
+#define rot(x,k) (((x)<<(k))|((x)>>(64-(k))))
+static u64 ranval(ranctx *x) {
+	u64 e = x->a - rot(x->b, 7);
+	x->a = x->b ^ rot(x->c, 13);
+	x->b = x->c + rot(x->d, 37);
+	x->c = x->d + e;
+	x->d = e + x->a;
+	return x->d;
+}
+
+static void raninit(ranctx *x, u64 *seed) {
+	int i;
+
+	x->a = seed[0];
+	x->b = seed[1];
+	x->c = seed[2];
+	x->d = seed[3];
+
+	for (i=0; i < 30; ++i)
+		(void)ranval(x);
+}
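
ranval()/raninit() are the 64-bit variant of Bob Jenkins' small noncryptographic PRNG; everything about the shuffle is deterministic in the seed, which is what keeps a given struct's randomized layout identical across translation units. A quick determinism check, assuming the ranctx/raninit/ranval definitions above are in scope (illustration only, not part of the plugin):

#include <stdio.h>

int main(void)
{
	u64 seed[4] = { 1, 2, 3, 4 };
	ranctx a, b;
	int i;

	raninit(&a, seed);
	raninit(&b, seed);
	for (i = 0; i < 4; i++)
		printf("%016llx %016llx\n", ranval(&a), ranval(&b));	/* identical columns */
	return 0;
}
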
+
+static u64 shuffle_seed[4];
+
+struct partition_group {
+	tree tree_start;
+	unsigned long start;
+	unsigned long length;
+};
+
+static void partition_struct(tree *fields, unsigned long length, struct partition_group *size_groups, unsigned long *num_groups)
+{
+	unsigned long i;
+	unsigned long accum_size = 0;
+	unsigned long accum_length = 0;
+	unsigned long group_idx = 0;
+
+	gcc_assert(length < INT_MAX);
+
+	memset(size_groups, 0, sizeof(struct partition_group) * length);
+
+	for (i = 0; i < length; i++) {
+		if (size_groups[group_idx].tree_start == NULL_TREE) {
+			size_groups[group_idx].tree_start = fields[i];
+			size_groups[group_idx].start = i;
+			accum_length = 0;
+			accum_size = 0;
+		}
+		accum_size += (unsigned long)int_size_in_bytes(TREE_TYPE(fields[i]));
+		accum_length++;
+		if (accum_size >= 64) {
+			size_groups[group_idx].length = accum_length;
+			accum_length = 0;
+			group_idx++;
+		}
+	}
+
+	if (size_groups[group_idx].tree_start != NULL_TREE &&
+	    !size_groups[group_idx].length) {
+		size_groups[group_idx].length = accum_length;
+		group_idx++;
+	}
+
+	*num_groups = group_idx;
+}
+
+static void performance_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+{
+	unsigned long i, x;
+	struct partition_group size_group[length];
+	unsigned long num_groups = 0;
+	unsigned long randnum;
+
+	partition_struct(newtree, length, (struct partition_group *)&size_group, &num_groups);
+	for (i = num_groups - 1; i > 0; i--) {
+		struct partition_group tmp;
+		randnum = ranval(prng_state) % (i + 1);
+		tmp = size_group[i];
+		size_group[i] = size_group[randnum];
+		size_group[randnum] = tmp;
+	}
+
+	for (x = 0; x < num_groups; x++) {
+		for (i = size_group[x].start + size_group[x].length - 1; i > size_group[x].start; i--) {
+			tree tmp;
+			if (DECL_BIT_FIELD_TYPE(newtree[i]))
+				continue;
+			randnum = ranval(prng_state) % (i + 1);
+			// we could handle this case differently if desired
+			if (DECL_BIT_FIELD_TYPE(newtree[randnum]))
+				continue;
+			tmp = newtree[i];
+			newtree[i] = newtree[randnum];
+			newtree[randnum] = tmp;
+		}
+	}
+}
+
+static void full_shuffle(tree *newtree, unsigned long length, ranctx *prng_state)
+{
+	unsigned long i, randnum;
+
+	for (i = length - 1; i > 0; i--) {
+		tree tmp;
+		randnum = ranval(prng_state) % (i + 1);
+		tmp = newtree[i];
+		newtree[i] = newtree[randnum];
+		newtree[randnum] = tmp;
+	}
+}
+
+/* modern in-place Fisher-Yates shuffle */
+static void shuffle(const_tree type, tree *newtree, unsigned long length)
+{
+	unsigned long i;
+	u64 seed[4];
+	ranctx prng_state;
+	const unsigned char *structname;
+
+	if (length == 0)
+		return;
+
+	gcc_assert(TREE_CODE(type) == RECORD_TYPE);
+
+	structname = ORIG_TYPE_NAME(type);
+
+#ifdef __DEBUG_PLUGIN
+	fprintf(stderr, "Shuffling struct %s %p\n", (const char *)structname, type);
+#ifdef __DEBUG_VERBOSE
+	debug_tree((tree)type);
+#endif
+#endif
+
+	for (i = 0; i < 4; i++) {
+		seed[i] = shuffle_seed[i];
+		seed[i] ^= name_hash(structname);
+	}
+
+	raninit(&prng_state, (u64 *)&seed);
+
+	if (performance_mode)
+		performance_shuffle(newtree, length, &prng_state);
+	else
+		full_shuffle(newtree, length, &prng_state);
+}
+
+static bool is_flexible_array(const_tree field)
+{
+	const_tree fieldtype;
+	const_tree typesize;
+	const_tree elemtype;
+	const_tree elemsize;
+
+	fieldtype = TREE_TYPE(field);
+	typesize = TYPE_SIZE(fieldtype);
+
+	if (TREE_CODE(fieldtype) != ARRAY_TYPE)
+		return false;
+
+	elemtype = TREE_TYPE(fieldtype);
+	elemsize = TYPE_SIZE(elemtype);
+
+	/* size of type is represented in bits */
+
+	if (typesize == NULL_TREE && TYPE_DOMAIN(fieldtype) != NULL_TREE &&
+	    TYPE_MAX_VALUE(TYPE_DOMAIN(fieldtype)) == NULL_TREE)
+		return true;
+
+	if (typesize != NULL_TREE && 
+	    (TREE_CONSTANT(typesize) && (!TREE_INT_CST_LOW(typesize) ||
+	     TREE_INT_CST_LOW(typesize) == TREE_INT_CST_LOW(elemsize))))
+		return true;
+
+	return false;
+}
+
+static int relayout_struct(tree type)
+{
+	unsigned long num_fields = (unsigned long)list_length(TYPE_FIELDS(type));
+	unsigned long shuffle_length = num_fields;
+	tree field;
+	tree newtree[num_fields];
+	unsigned long i;
+	tree list;
+	tree variant;
+	expanded_location xloc;
+
+	if (TYPE_FIELDS(type) == NULL_TREE)
+		return 0;
+
+	if (num_fields < 2)
+		return 0;
+
+	gcc_assert(TREE_CODE(type) == RECORD_TYPE);
+
+	gcc_assert(num_fields < INT_MAX);
+
+	if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)) ||
+	    lookup_attribute("no_randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))))
+		return 0;
+
+	/* throw out any structs in uapi */
+	xloc = expand_location(DECL_SOURCE_LOCATION(TYPE_FIELDS(type)));
+
+	if (strstr(xloc.file, "/uapi/"))
+		error(G_("attempted to randomize userland API struct %s"), ORIG_TYPE_NAME(type));
+
+	for (field = TYPE_FIELDS(type), i = 0; field; field = TREE_CHAIN(field), i++) {
+		gcc_assert(TREE_CODE(field) == FIELD_DECL);
+		newtree[i] = field;
+	}
+
+	/*
+	 * enforce that we don't randomize the layout of the last
+	 * element of a struct if it's a 0 or 1-length array
+	 * or a proper flexible array
+	 */
+	if (is_flexible_array(newtree[num_fields - 1]))
+		shuffle_length--;
+
+	shuffle(type, (tree *)newtree, shuffle_length);
+
+	/*
+	 * set up a bogus anonymous struct field designed to error out on unnamed struct initializers
+	 * as gcc provides no other way to detect such code
+	 */
+	list = make_node(FIELD_DECL);
+	TREE_CHAIN(list) = newtree[0];
+	TREE_TYPE(list) = void_type_node;
+	DECL_SIZE(list) = bitsize_zero_node;
+	DECL_NONADDRESSABLE_P(list) = 1;
+	DECL_FIELD_BIT_OFFSET(list) = bitsize_zero_node;
+	DECL_SIZE_UNIT(list) = size_zero_node;
+	DECL_FIELD_OFFSET(list) = size_zero_node;
+	// to satisfy the constify plugin
+	TREE_READONLY(list) = 1;
+
+	for (i = 0; i < num_fields - 1; i++)
+		TREE_CHAIN(newtree[i]) = newtree[i+1];
+	TREE_CHAIN(newtree[num_fields - 1]) = NULL_TREE;
+
+	for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
+		TYPE_FIELDS(variant) = list;
+		TYPE_ATTRIBUTES(variant) = copy_list(TYPE_ATTRIBUTES(variant));
+		TYPE_ATTRIBUTES(variant) = tree_cons(get_identifier("randomize_performed"), NULL_TREE, TYPE_ATTRIBUTES(variant));
+		// force a re-layout
+		TYPE_SIZE(variant) = NULL_TREE;
+		layout_type(variant);
+	}
+
+	return 1;
+}
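
One practical consequence of the bogus zero-sized void field chained in above: positional brace initializers of a randomized struct no longer compile, while designated initializers keep working because field names are resolved after the shuffle. An illustrative example (struct and identifiers are hypothetical, not from the patch):

struct ops_example {
	int (*open)(void);
	int (*release)(void);
	unsigned long flags;
} __attribute__((randomize_layout));

static struct ops_example good_example = {
	.open  = 0,		/* fine: names pin the members regardless of the final layout */
	.flags = 0,
};

/* static struct ops_example bad_example = { 0, 0, 0 };
 *    ^ rejected once the plugin has shuffled the struct: the injected
 *      zero-sized void member makes gcc error out on positional initializers */
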
+
+/* from constify plugin */
+static const_tree get_field_type(const_tree field)
+{
+	return strip_array_types(TREE_TYPE(field));
+}
+
+/* from constify plugin */
+static bool is_fptr(const_tree fieldtype)
+{
+	if (TREE_CODE(fieldtype) != POINTER_TYPE)
+		return false;
+
+	return TREE_CODE(TREE_TYPE(fieldtype)) == FUNCTION_TYPE;
+}
+
+/* derived from constify plugin */
+static int is_pure_ops_struct(const_tree node)
+{
+	const_tree field;
+
+	gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
+
+	for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
+		const_tree fieldtype = get_field_type(field);
+		enum tree_code code = TREE_CODE(fieldtype);
+
+		if (node == fieldtype)
+			continue;
+
+		if (!is_fptr(fieldtype))
+			return 0;
+
+		if (code != RECORD_TYPE && code != UNION_TYPE)
+			continue;
+
+		if (!is_pure_ops_struct(fieldtype))
+			return 0;
+	}
+
+	return 1;
+}
+
+static void randomize_type(tree type)
+{
+	tree variant;
+
+	gcc_assert(TREE_CODE(type) == RECORD_TYPE);
+
+	if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
+		return;
+
+	if (lookup_attribute("randomize_layout", TYPE_ATTRIBUTES(TYPE_MAIN_VARIANT(type))) || is_pure_ops_struct(type))
+		relayout_struct(type);
+
+	for (variant = TYPE_MAIN_VARIANT(type); variant; variant = TYPE_NEXT_VARIANT(variant)) {
+		TYPE_ATTRIBUTES(type) = copy_list(TYPE_ATTRIBUTES(type));
+		TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("randomize_considered"), NULL_TREE, TYPE_ATTRIBUTES(type));
+	}
+#ifdef __DEBUG_PLUGIN
+	fprintf(stderr, "Marking randomize_considered on struct %s\n", ORIG_TYPE_NAME(type));
+#ifdef __DEBUG_VERBOSE
+	debug_tree(type);
+#endif
+#endif
+}
+
+static void randomize_layout_finish_decl(void *event_data, void *data)
+{
+	tree decl = (tree)event_data;
+	tree type;
+
+	if (decl == NULL_TREE || decl == error_mark_node)
+		return;
+
+	type = TREE_TYPE(decl);
+
+	if (TREE_CODE(decl) != VAR_DECL)
+		return;
+
+	if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
+		return;
+
+	if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(type)))
+		return;
+
+	relayout_decl(decl);
+}
+
+static void finish_type(void *event_data, void *data)
+{
+	tree type = (tree)event_data;
+
+	if (type == NULL_TREE || type == error_mark_node)
+		return;
+
+	if (TREE_CODE(type) != RECORD_TYPE)
+		return;
+
+	if (TYPE_FIELDS(type) == NULL_TREE)
+		return;
+
+	if (lookup_attribute("randomize_considered", TYPE_ATTRIBUTES(type)))
+		return;
+
+#ifdef __DEBUG_PLUGIN
+	fprintf(stderr, "Calling randomize_type on %s\n", ORIG_TYPE_NAME(type));
+#endif
+#ifdef __DEBUG_VERBOSE
+	debug_tree(type);
+#endif
+	randomize_type(type);
+
+	return;
+}
+
+static struct attribute_spec randomize_layout_attr = {
+	.name		= "randomize_layout",
+	// related to args
+	.min_length	= 0,
+	.max_length	= 0,
+	.decl_required	= false,
+	// need type declaration
+	.type_required	= true,
+	.function_type_required = false,
+	.handler		= handle_randomize_layout_attr,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity  = true
+#endif
+};
+
+static struct attribute_spec no_randomize_layout_attr = {
+	.name		= "no_randomize_layout",
+	// related to args
+	.min_length	= 0,
+	.max_length	= 0,
+	.decl_required	= false,
+	// need type declaration
+	.type_required	= true,
+	.function_type_required = false,
+	.handler		= handle_randomize_layout_attr,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity  = true
+#endif
+};
+
+static struct attribute_spec randomize_considered_attr = {
+	.name		= "randomize_considered",
+	// related to args
+	.min_length	= 0,
+	.max_length	= 0,
+	.decl_required	= false,
+	// need type declaration
+	.type_required	= true,
+	.function_type_required = false,
+	.handler		= handle_randomize_considered_attr,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity  = false
+#endif
+};
+
+static struct attribute_spec randomize_performed_attr = {
+	.name		= "randomize_performed",
+	// related to args
+	.min_length	= 0,
+	.max_length	= 0,
+	.decl_required	= false,
+	// need type declaration
+	.type_required	= true,
+	.function_type_required = false,
+	.handler		= handle_randomize_performed_attr,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity  = false
+#endif
+};
+
+static void register_attributes(void *event_data, void *data)
+{
+	register_attribute(&randomize_layout_attr);
+	register_attribute(&no_randomize_layout_attr);
+	register_attribute(&randomize_considered_attr);
+	register_attribute(&randomize_performed_attr);
+}
+
+static void check_bad_casts_in_constructor(tree var, tree init)
+{
+	unsigned HOST_WIDE_INT idx;
+	tree field, val;
+	tree field_type, val_type;
+
+	FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(init), idx, field, val) {
+		if (TREE_CODE(val) == CONSTRUCTOR) {
+			check_bad_casts_in_constructor(var, val);
+			continue;
+		}
+
+		/* pipacs' plugin creates franken-arrays that differ from those produced by
+		   normal code which all have valid 'field' trees. work around this */
+		if (field == NULL_TREE)
+			continue;
+		field_type = TREE_TYPE(field);
+		val_type = TREE_TYPE(val);
+
+		if (TREE_CODE(field_type) != POINTER_TYPE || TREE_CODE(val_type) != POINTER_TYPE)
+			continue;
+
+		if (field_type == val_type)
+			continue;
+
+		field_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(field_type))));
+		val_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(val_type))));
+
+		if (field_type == void_type_node)
+			continue;
+		if (field_type == val_type)
+			continue;
+		if (TREE_CODE(val_type) != RECORD_TYPE)
+			continue;
+
+		if (!lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(val_type)))
+			continue;
+		inform(DECL_SOURCE_LOCATION(var), "found mismatched struct pointer types: %qT and %qT\n", TYPE_MAIN_VARIANT(field_type), TYPE_MAIN_VARIANT(val_type));
+	}
+}
+
+/* derived from the constify plugin */
+static void check_global_variables(void *event_data, void *data)
+{
+	struct varpool_node *node;
+	tree init;
+
+	FOR_EACH_VARIABLE(node) {
+		tree var = NODE_DECL(node);
+		init = DECL_INITIAL(var);
+		if (init == NULL_TREE)
+			continue;
+
+		if (TREE_CODE(init) != CONSTRUCTOR)
+			continue;
+
+		check_bad_casts_in_constructor(var, init);
+	}
+}
+
+static bool dominated_by_is_err(const_tree rhs, basic_block bb)
+{
+	basic_block dom;
+	gimple dom_stmt;
+	gimple call_stmt;
+	const_tree dom_lhs;
+	const_tree poss_is_err_cond;
+	const_tree poss_is_err_func;
+	const_tree is_err_arg;
+
+	dom = get_immediate_dominator(CDI_DOMINATORS, bb);
+	if (!dom)
+		return false;
+
+	dom_stmt = last_stmt(dom);
+	if (!dom_stmt)
+		return false;
+
+	if (gimple_code(dom_stmt) != GIMPLE_COND)
+		return false;
+
+	if (gimple_cond_code(dom_stmt) != NE_EXPR)
+		return false;
+
+	if (!integer_zerop(gimple_cond_rhs(dom_stmt)))
+		return false;
+
+	poss_is_err_cond = gimple_cond_lhs(dom_stmt);
+
+	if (TREE_CODE(poss_is_err_cond) != SSA_NAME)
+		return false;
+
+	call_stmt = SSA_NAME_DEF_STMT(poss_is_err_cond);
+
+	if (gimple_code(call_stmt) != GIMPLE_CALL)
+		return false;
+
+	dom_lhs = gimple_get_lhs(call_stmt);
+	poss_is_err_func = gimple_call_fndecl(call_stmt);
+	if (!poss_is_err_func)
+		return false;
+	if (dom_lhs != poss_is_err_cond)
+		return false;
+	if (strcmp(DECL_NAME_POINTER(poss_is_err_func), "IS_ERR"))
+		return false;
+
+	is_err_arg = gimple_call_arg(call_stmt, 0);
+	if (!is_err_arg)
+		return false;
+
+	if (is_err_arg != rhs)
+		return false;
+
+	return true;
+}
+
+static void handle_local_var_initializers(void)
+{
+	tree var;
+	unsigned int i;
+
+	FOR_EACH_LOCAL_DECL(cfun, i, var) {
+		tree init = DECL_INITIAL(var);
+		if (!init)
+			continue;
+		if (TREE_CODE(init) != CONSTRUCTOR)
+			continue;
+		check_bad_casts_in_constructor(var, init);
+	}
+}
+
+/*
+ * iterate over all statements to find "bad" casts:
+ * those where the address of the start of a structure is cast
+ * to a pointer of a structure of a different type, or a
+ * structure pointer type is cast to a different structure pointer type
+ */
+static unsigned int find_bad_casts(void)
+{
+	basic_block bb;
+
+	handle_local_var_initializers();
+
+	FOR_ALL_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			gimple stmt;
+			const_tree lhs;
+			const_tree lhs_type;
+			const_tree rhs1;
+			const_tree rhs_type;
+			const_tree ptr_lhs_type;
+			const_tree ptr_rhs_type;
+			const_tree op0;
+			const_tree op0_type;
+			enum tree_code rhs_code;
+
+			stmt = gsi_stmt(gsi);
+
+#ifdef __DEBUG_PLUGIN
+#ifdef __DEBUG_VERBOSE
+			debug_gimple_stmt(stmt);
+			debug_tree(gimple_get_lhs(stmt));
+#endif
+#endif
+
+			if (gimple_code(stmt) != GIMPLE_ASSIGN)
+				continue;
+
+#ifdef __DEBUG_PLUGIN
+#ifdef __DEBUG_VERBOSE
+			debug_tree(gimple_assign_rhs1(stmt));
+#endif
+#endif
+
+			rhs_code = gimple_assign_rhs_code(stmt);
+
+			if (rhs_code != ADDR_EXPR && rhs_code != SSA_NAME)
+				continue;
+
+			lhs = gimple_get_lhs(stmt);
+			lhs_type = TREE_TYPE(lhs);
+			rhs1 = gimple_assign_rhs1(stmt);
+			rhs_type = TREE_TYPE(rhs1);
+
+			if (TREE_CODE(rhs_type) != POINTER_TYPE ||
+			    TREE_CODE(lhs_type) != POINTER_TYPE)
+				continue;
+
+			ptr_lhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(lhs_type))));
+			ptr_rhs_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(rhs_type))));
+
+			if (ptr_rhs_type == void_type_node)
+				continue;
+
+			if (ptr_lhs_type == void_type_node)
+				continue;
+
+			if (dominated_by_is_err(rhs1, bb))
+				continue;
+
+			if (TREE_CODE(ptr_rhs_type) != RECORD_TYPE) {
+#ifndef __DEBUG_PLUGIN
+				if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_lhs_type)))
+#endif
+				inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
+				continue;
+			}
+
+			if (rhs_code == SSA_NAME && ptr_lhs_type == ptr_rhs_type)
+				continue;
+
+			if (rhs_code == ADDR_EXPR) {
+				op0 = TREE_OPERAND(rhs1, 0);
+
+				if (op0 == NULL_TREE)
+					continue;
+
+				if (TREE_CODE(op0) != VAR_DECL)
+					continue;
+
+				op0_type = TYPE_MAIN_VARIANT(strip_array_types(TYPE_MAIN_VARIANT(TREE_TYPE(op0))));
+				if (op0_type == ptr_lhs_type)
+					continue;
+
+#ifndef __DEBUG_PLUGIN
+				if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(op0_type)))
+#endif
+				inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, op0_type);
+			} else {
+				const_tree ssa_name_var = SSA_NAME_VAR(rhs1);
+				/* skip bogus type casts introduced by container_of */
+				if (ssa_name_var != NULL_TREE && DECL_NAME(ssa_name_var) && 
+				    !strcmp((const char *)DECL_NAME_POINTER(ssa_name_var), "__mptr"))
+					continue;
+#ifndef __DEBUG_PLUGIN
+				if (lookup_attribute("randomize_performed", TYPE_ATTRIBUTES(ptr_rhs_type)))
+#endif
+				inform(gimple_location(stmt), "found mismatched struct pointer types: %qT and %qT\n", ptr_lhs_type, ptr_rhs_type);
+			}
+
+		}
+	}
+	return 0;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data randomize_layout_bad_cast_data = {
+#else
+static struct gimple_opt_pass randomize_layout_bad_cast = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "randomize_layout_bad_cast",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= find_bad_casts,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= PROP_cfg,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_dump_func
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class randomize_layout_bad_cast : public gimple_opt_pass {
+public:
+	randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {}
+	unsigned int execute() { return find_bad_casts(); }
+};
+}
+#endif
+
+static struct opt_pass *make_randomize_layout_bad_cast(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new randomize_layout_bad_cast();
+#else
+	return &randomize_layout_bad_cast.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	int i;
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	bool enable = true;
+	int obtained_seed = 0;
+	struct register_pass_info randomize_layout_bad_cast_info;
+
+	randomize_layout_bad_cast_info.pass			= make_randomize_layout_bad_cast();
+	randomize_layout_bad_cast_info.reference_pass_name	= "ssa";
+	randomize_layout_bad_cast_info.ref_pass_instance_number	= 1;
+	randomize_layout_bad_cast_info.pos_op			= PASS_POS_INSERT_AFTER;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	if (strcmp(lang_hooks.name, "GNU C")) {
+		inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
+		enable = false;
+	}
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "disable")) {
+			enable = false;
+			continue;
+		}
+		if (!strcmp(argv[i].key, "performance-mode")) {
+			performance_mode = 1;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	if (strlen(randstruct_seed) != 64) {
+		error(G_("invalid seed value supplied for %s plugin"), plugin_name);
+		return 1;
+	}
+	obtained_seed = sscanf(randstruct_seed, "%016llx%016llx%016llx%016llx",
+		&shuffle_seed[0], &shuffle_seed[1], &shuffle_seed[2], &shuffle_seed[3]);
+	if (obtained_seed != 4) {
+		error(G_("Invalid seed supplied for %s plugin"), plugin_name);
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &randomize_layout_plugin_info);
+	if (enable) {
+		register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
+		register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
+		register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
+	}
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+	return 0;
+}
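
The only external input to the whole scheme is randomize_layout_seed.h: it must provide randstruct_seed as a 64-hex-character string, which plugin_init() above splits into the four 64-bit words of shuffle_seed (and which shuffle() then perturbs per struct via name_hash()). A hypothetical example of such a generated header (the declaration shape and seed value are made up for illustration):

/* randomize_layout_seed.h -- hypothetical generated seed header */
static const char *randstruct_seed =
	"89f0debe3a4612fe0c8d0fbb1f9c2e44a0d8f5e6b7c90123456789abcdef0123";
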
diff -ruNp linux-3.13.11/tools/gcc/size_overflow_hash.data linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_hash.data
--- linux-3.13.11/tools/gcc/size_overflow_hash.data	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_hash.data	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,5709 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
+carl9170_alloc_27 carl9170_alloc 1 27 NULL
+sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
+padzero_55 padzero 1 55 &sel_read_policyvers_55
+cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
+__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
+snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
+load_msg_95 load_msg 2 95 NULL
+ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL
+init_q_132 init_q 4 132 NULL
+memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
+hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL
+ping_v6_sendmsg_152 ping_v6_sendmsg 4 152 NULL
+ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray
+tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153
+pci_request_selected_regions_169 pci_request_selected_regions 0 169 NULL
+xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
+iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
+br_port_info_size_268 br_port_info_size 0 268 NULL
+generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
+read_file_war_stats_292 read_file_war_stats 3 292 NULL
+SYSC_connect_304 SYSC_connect 3 304 NULL
+syslog_print_307 syslog_print 2 307 NULL
+dn_setsockopt_314 dn_setsockopt 5 314 NULL
+mlx5_core_access_reg_361 mlx5_core_access_reg 3-5 361 NULL
+hw_device_state_409 hw_device_state 0 409 NULL
+aio_read_events_ring_410 aio_read_events_ring 3-0 410 NULL
+lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
+snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
+cfs_trace_set_debug_mb_usrstr_486 cfs_trace_set_debug_mb_usrstr 2 486 NULL
+nvme_trans_modesel_data_488 nvme_trans_modesel_data 4 488 NULL
+iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL
+rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
+ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
+zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0-1-2 537 NULL
+iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
+sco_sock_setsockopt_552 sco_sock_setsockopt 5 552 NULL
+lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL
+snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
+start_isoc_chain_565 start_isoc_chain 2 565 NULL nohasharray
+dev_hard_header_565 dev_hard_header 0 565 &start_isoc_chain_565
+ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL
+osl_pktget_590 osl_pktget 2 590 NULL
+smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
+btrfs_stack_file_extent_offset_607 btrfs_stack_file_extent_offset 0 607 NULL
+ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
+fuse_request_alloc_nofs_617 fuse_request_alloc_nofs 1 617 NULL
+ptlrpc_lprocfs_nrs_seq_write_621 ptlrpc_lprocfs_nrs_seq_write 3 621 NULL
+viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL
+clone_split_bio_633 clone_split_bio 6 633 NULL
+ceph_osdc_new_request_635 ceph_osdc_new_request 6 635 NULL
+cfs_hash_bkt_size_643 cfs_hash_bkt_size 0 643 NULL
+unlink_queued_645 unlink_queued 4 645 NULL
+dtim_interval_read_654 dtim_interval_read 3 654 NULL
+mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
+persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL
+sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
+dvb_video_write_754 dvb_video_write 3 754 NULL
+cfs_trace_allocate_string_buffer_781 cfs_trace_allocate_string_buffer 2 781 NULL
+ath6kl_disconnect_timeout_write_794 ath6kl_disconnect_timeout_write 3 794 NULL
+if_writecmd_815 if_writecmd 2 815 NULL
+aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
+error_state_read_859 error_state_read 6 859 NULL
+o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
+iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
+snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
+carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
+__nodes_weight_956 __nodes_weight 2-0 956 NULL
+bnx2x_fill_fw_str_968 bnx2x_fill_fw_str 3 968 NULL
+memcmp_990 memcmp 0 990 NULL
+readreg_1017 readreg 0-1 1017 NULL
+smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
+gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
+mce_request_packet_1073 mce_request_packet 3 1073 NULL
+agp_create_memory_1075 agp_create_memory 1 1075 NULL
+_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
+llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
+llc_mac_hdr_init_1094 llc_mac_hdr_init 0 1094 NULL
+nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL
+utf8s_to_utf16s_1115 utf8s_to_utf16s 0 1115 NULL
+cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL
+i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
+ipc_alloc_1192 ipc_alloc 1 1192 NULL
+ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
+pstore_ftrace_knob_write_1198 pstore_ftrace_knob_write 3 1198 NULL
+i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
+dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
+i2cdev_read_1206 i2cdev_read 3 1206 NULL
+lov_ost_pool_init_1215 lov_ost_pool_init 2 1215 NULL
+ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
+qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
+ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
+SyS_flistxattr_1287 SyS_flistxattr 3 1287 NULL
+tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
+ffs_1322 ffs 0 1322 NULL
+qlcnic_pci_sriov_configure_1327 qlcnic_pci_sriov_configure 2 1327 NULL
+btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
+snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
+ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
+fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
+ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
+sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
+do_msgsnd_1387 do_msgsnd 4 1387 NULL
+SYSC_io_getevents_1392 SYSC_io_getevents 3 1392 NULL
+file_read_actor_1401 file_read_actor 4-0 1401 NULL
+cfs_trace_copyout_string_1416 cfs_trace_copyout_string 2 1416 NULL
+init_rs_internal_1436 init_rs_internal 1 1436 NULL
+stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
+tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
+xprt_alloc_1475 xprt_alloc 2 1475 NULL
+SYSC_syslog_1477 SYSC_syslog 3 1477 NULL
+sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
+fpregs_set_1497 fpregs_set 4 1497 NULL
+tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
+alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
+ath6kl_init_get_fwcaps_1557 ath6kl_init_get_fwcaps 3 1557 NULL
+ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
+ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 NULL
+packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
+btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
+v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
+ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
+mei_cl_recv_1665 mei_cl_recv 3 1665 NULL
+rmap_add_1677 rmap_add 3 1677 NULL
+configfs_read_file_1683 configfs_read_file 3 1683 NULL
+pdu_write_u_1710 pdu_write_u 3 1710 NULL
+coda_psdev_write_1711 coda_psdev_write 3 1711 NULL
+internal_create_group_1733 internal_create_group 0 1733 NULL
+dev_irnet_read_1741 dev_irnet_read 3 1741 NULL
+tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
+cosa_write_1774 cosa_write 3 1774 NULL
+fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
+__nodelist_scnprintf_1815 __nodelist_scnprintf 2-0 1815 NULL
+sb_issue_zeroout_1884 sb_issue_zeroout 3 1884 NULL
+rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
+nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
+SyS_add_key_1900 SyS_add_key 4 1900 NULL
+uhid_char_read_1920 uhid_char_read 3 1920 NULL
+tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
+bdev_erase_1933 bdev_erase 3 1933 NULL
+ext3_fiemap_1936 ext3_fiemap 4 1936 NULL
+cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
+ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
+ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
+sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
+gpio_power_write_1991 gpio_power_write 3 1991 NULL
+__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
+rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
+ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
+write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
+BcmCopySection_2035 BcmCopySection 5 2035 NULL
+ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
+ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
+__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL
+diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL
+iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
+idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
+audit_expand_2098 audit_expand 0 2098 NULL
+iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
+ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
+enable_read_2117 enable_read 3 2117 NULL
+pcf50633_write_block_2124 pcf50633_write_block 2-3 2124 NULL
+check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
+iov_iter_count_2152 iov_iter_count 0 2152 NULL
+__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL
+_ore_get_io_state_2166 _ore_get_io_state 3-4-5 2166 NULL
+bio_integrity_alloc_2194 bio_integrity_alloc 3 2194 NULL
+picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL
+u32_array_read_2219 u32_array_read 3 2219 NULL nohasharray
+mei_dbgfs_read_meclients_2219 mei_dbgfs_read_meclients 3 2219 &u32_array_read_2219
+vhci_write_2224 vhci_write 3 2224 NULL
+__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL
+ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
+netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
+sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
+do_update_counters_2259 do_update_counters 4 2259 NULL
+ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
+debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
+kvm_clear_guest_page_2308 kvm_clear_guest_page 4 2308 NULL
+intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
+hfsplus_find_init_2318 hfsplus_find_init 0 2318 NULL nohasharray
+picolcd_fb_write_2318 picolcd_fb_write 3 2318 &hfsplus_find_init_2318
+dice_hwdep_read_2326 dice_hwdep_read 3 2326 NULL
+__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
+zr364xx_read_2354 zr364xx_read 3 2354 NULL
+sysfs_add_file_mode_ns_2362 sysfs_add_file_mode_ns 0 2362 NULL
+viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
+xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL nohasharray
+rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368
+il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
+rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
+isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
+raid1_size_2419 raid1_size 0-2 2419 NULL
+b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
+wiphy_new_2482 wiphy_new 2 2482 NULL
+bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
+hfsplus_user_setxattr_2485 hfsplus_user_setxattr 4 2485 NULL
+squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
+v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
+ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
+batadv_tvlv_container_list_size_2524 batadv_tvlv_container_list_size 0 2524 NULL
+gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
+pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
+mdc_max_rpcs_in_flight_seq_write_2594 mdc_max_rpcs_in_flight_seq_write 3 2594 NULL
+slot_bytes_2609 slot_bytes 0 2609 NULL
+smk_write_logging_2618 smk_write_logging 3 2618 NULL
+switch_status_2629 switch_status 5 2629 NULL
+tcp_xmit_size_goal_2661 tcp_xmit_size_goal 2 2661 NULL
+osc_build_ppga_2670 osc_build_ppga 2 2670 NULL
+ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL
+oti6858_write_2692 oti6858_write 4 2692 NULL
+nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL
+memcpy_fromiovecend_2707 memcpy_fromiovecend 3-4 2707 NULL
+lprocfs_stats_counter_size_2708 lprocfs_stats_counter_size 0 2708 NULL
+xfs_readdir_2767 xfs_readdir 3 2767 NULL
+mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL
+device_add_attrs_2789 device_add_attrs 0 2789 NULL
+iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL
+sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
+vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL
+wait_for_avail_2847 wait_for_avail 0 2847 NULL
+sfq_alloc_2861 sfq_alloc 1 2861 NULL
+irnet_ctrl_read_2863 irnet_ctrl_read 4 2863 NULL
+move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
+nla_padlen_2883 nla_padlen 1 2883 NULL
+cmm_write_2896 cmm_write 3 2896 NULL
+osc_import_seq_write_2923 osc_import_seq_write 3 2923 NULL
+xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
+nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
+tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
+i40e_dbg_prep_dump_buf_2951 i40e_dbg_prep_dump_buf 2 2951 NULL
+set_fast_connectable_2952 set_fast_connectable 4 2952 NULL
+free_area_init_core_2962 free_area_init_core 2-3 2962 NULL
+do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL
+p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
+lov_stripetype_seq_write_3013 lov_stripetype_seq_write 3 3013 NULL
+do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
+depth_write_3021 depth_write 3 3021 NULL
+snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
+kvm_unmap_hva_3028 kvm_unmap_hva 2 3028 NULL
+xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
+lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 NULL nohasharray
+iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 &lpfc_idiag_mbxacc_write_3038
+nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
+il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
+qp_alloc_ppn_set_3068 qp_alloc_ppn_set 2-4 3068 NULL
+__blk_end_bidi_request_3070 __blk_end_bidi_request 3-4 3070 NULL
+dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
+read_file_antenna_diversity_3077 read_file_antenna_diversity 3 3077 NULL
+clone_bio_3100 clone_bio 6 3100 NULL nohasharray
+ttusb2_msg_3100 ttusb2_msg 4 3100 &clone_bio_3100
+rb_alloc_3102 rb_alloc 1 3102 NULL
+simple_write_to_buffer_3122 simple_write_to_buffer 5-2 3122 NULL
+print_time_3132 print_time 0 3132 NULL
+fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
+CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
+compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
+uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
+uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL
+compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
+mempool_create_node_3191 mempool_create_node 1 3191 NULL
+alloc_context_3194 alloc_context 1 3194 NULL
+shmem_pread_slow_3198 shmem_pread_slow 3-2 3198 NULL
+codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL
+SyS_sendto_3219 SyS_sendto 6 3219 NULL
+btrfs_prealloc_file_range_3227 btrfs_prealloc_file_range 3 3227 NULL
+kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
+write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
+do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
+ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
+key_key_read_3241 key_key_read 3 3241 NULL
+__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
+__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 0-4-3 3314 NULL
+dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
+compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
+ocfs2_extend_xattr_bucket_3328 ocfs2_extend_xattr_bucket 4 3328 NULL
+read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
+sysfs_create_group_3339 sysfs_create_group 0 3339 NULL
+tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
+il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
+gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
+scnprintf_3360 scnprintf 0-2 3360 NULL
+ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL
+sr_read_3366 sr_read 3 3366 NULL
+mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
+send_stream_3397 send_stream 4 3397 NULL
+isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
+mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL
+pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
+crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
+SyS_msgsnd_3436 SyS_msgsnd 3 3436 NULL
+pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
+s3fb_ddc_read_3451 s3fb_ddc_read 0 3451 NULL
+softsynth_write_3455 softsynth_write 3 3455 NULL
+snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 NULL nohasharray
+jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 &snd_pcm_lib_readv_transfer_3464
+security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
+xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL
+mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL
+SyS_semtimedop_3532 SyS_semtimedop 3 3532 NULL
+SyS_readv_3539 SyS_readv 3 3539 NULL
+btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
+alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
+evtchn_read_3569 evtchn_read 3 3569 NULL
+ll_track_ppid_seq_write_3582 ll_track_ppid_seq_write 3 3582 NULL
+vc_resize_3585 vc_resize 3-2 3585 NULL
+kvm_mmu_notifier_change_pte_3596 kvm_mmu_notifier_change_pte 3 3596 NULL
+sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
+edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
+tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
+aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
+ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
+i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
+snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 NULL
+ci_ll_write_3740 ci_ll_write 4 3740 NULL nohasharray
+ath6kl_mgmt_tx_3740 ath6kl_mgmt_tx 7 3740 &ci_ll_write_3740
+sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
+ncp_file_write_3813 ncp_file_write 3 3813 NULL
+llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
+hfsplus_direct_IO_3835 hfsplus_direct_IO 4 3835 NULL
+create_one_cdev_3852 create_one_cdev 2 3852 NULL
+smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
+get_fd_set_3866 get_fd_set 1 3866 NULL
+apei_res_sub_3873 apei_res_sub 0 3873 NULL
+garp_attr_create_3883 garp_attr_create 3 3883 NULL
+efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL
+nvram_write_3894 nvram_write 3 3894 NULL
+pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
+comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
+vcs_write_3910 vcs_write 3 3910 NULL
+SyS_move_pages_3920 SyS_move_pages 2 3920 NULL
+hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
+brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL
+ll_get_max_mdsize_3962 ll_get_max_mdsize 0 3962 NULL
+mite_bytes_written_to_memory_lb_3987 mite_bytes_written_to_memory_lb 0 3987 NULL
+do_add_counters_3992 do_add_counters 3 3992 NULL
+obd_alloc_memmd_4002 obd_alloc_memmd 0 4002 NULL
+userspace_status_4004 userspace_status 4 4004 NULL
+xfs_check_block_4005 xfs_check_block 4 4005 NULL nohasharray
+mei_write_4005 mei_write 3 4005 &xfs_check_block_4005
+snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
+blk_end_request_4024 blk_end_request 3 4024 NULL
+ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
+mtip_hw_read_registers_4037 mtip_hw_read_registers 3 4037 NULL
+read_file_queues_4078 read_file_queues 3 4078 NULL
+fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
+C_SYSC_rt_sigpending_4114 C_SYSC_rt_sigpending 2 4114 NULL
+tm6000_read_4151 tm6000_read 3 4151 NULL
+mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
+msg_bits_4158 msg_bits 0-3-4 4158 NULL
+get_alua_req_4166 get_alua_req 3 4166 NULL
+blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
+read_file_bool_4180 read_file_bool 3 4180 NULL
+ocfs2_find_cpos_for_right_leaf_4194 ocfs2_find_cpos_for_right_leaf 0 4194 NULL
+vring_new_virtqueue_4199 vring_new_virtqueue 2 4199 NULL
+f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
+_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
+__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
+ath6kl_force_roam_write_4282 ath6kl_force_roam_write 3 4282 NULL
+goldfish_audio_write_4284 goldfish_audio_write 3 4284 NULL
+__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL
+dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 3-2-5 4303 NULL
+count_strings_4315 count_strings 0 4315 NULL
+__sysfs_add_one_4326 __sysfs_add_one 0 4326 NULL
+nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
+snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
+__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 NULL nohasharray
+lookup_string_4365 lookup_string 0 4365 &__copy_from_user_inatomic_4365
+irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
+access_process_vm_4412 access_process_vm 0 4412 NULL nohasharray
+cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
+libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
+rtw_android_get_rssi_4421 rtw_android_get_rssi 0 4421 NULL
+do_pages_stat_4437 do_pages_stat 2 4437 NULL
+at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
+snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL
+vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
+set_link_security_4502 set_link_security 4 4502 NULL
+ll_max_readahead_per_file_mb_seq_write_4531 ll_max_readahead_per_file_mb_seq_write 3 4531 NULL
+tty_register_device_4544 tty_register_device 2 4544 NULL
+btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
+xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
+bch_alloc_4593 bch_alloc 1 4593 NULL
+ocfs2_refcount_lock_4595 ocfs2_refcount_lock 0 4595 NULL
+ll_rw_extents_stats_seq_write_4633 ll_rw_extents_stats_seq_write 3 4633 NULL
+iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
+skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
+cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
+short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
+kone_receive_4690 kone_receive 4 4690 NULL
+hash_netportnet6_expire_4702 hash_netportnet6_expire 4 4702 NULL
+cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
+ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
+show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
+bitmap_startwrite_4736 bitmap_startwrite 2 4736 NULL nohasharray
+ll_rw_offset_stats_seq_write_4736 ll_rw_offset_stats_seq_write 3 4736 &bitmap_startwrite_4736
+lu_buf_alloc_4753 lu_buf_alloc 2 4753 NULL
+pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 NULL
+create_subvol_4791 create_subvol 4 4791 NULL
+ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
+repair_io_failure_4815 repair_io_failure 4-3 4815 NULL
+comedi_buf_write_free_4847 comedi_buf_write_free 2 4847 NULL
+gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
+key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
+ocfs2_defrag_extent_4873 ocfs2_defrag_extent 2 4873 NULL
+hid_register_field_4874 hid_register_field 2-3 4874 NULL
+vga_arb_read_4886 vga_arb_read 3 4886 NULL
+ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
+ocfs2_should_refresh_lock_res_4958 ocfs2_should_refresh_lock_res 0 4958 NULL
+compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
+ath10k_read_chip_id_4969 ath10k_read_chip_id 3 4969 NULL
+skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
+ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL
+compat_SyS_ipc_5000 compat_SyS_ipc 3 5000 NULL
+do_mincore_5018 do_mincore 0-2-1 5018 NULL
+btrfs_punch_hole_5041 btrfs_punch_hole 2 5041 NULL
+cfg80211_rx_mgmt_5056 cfg80211_rx_mgmt 5 5056 NULL
+ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 3-2 5066 NULL
+snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
+snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
+kfifo_copy_from_user_5091 kfifo_copy_from_user 3-4-0 5091 NULL nohasharray
+get_random_bytes_5091 get_random_bytes 2 5091 &kfifo_copy_from_user_5091 nohasharray
+blk_rq_sectors_5091 blk_rq_sectors 0 5091 &get_random_bytes_5091
+sound_write_5102 sound_write 3 5102 NULL
+i40e_dbg_netdev_ops_write_5117 i40e_dbg_netdev_ops_write 3 5117 NULL
+qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL
+__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
+iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
+acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
+ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL
+dwc2_hcd_urb_alloc_5217 dwc2_hcd_urb_alloc 2 5217 NULL
+ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
+usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL
+r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL
+sbc_get_write_same_sectors_5317 sbc_get_write_same_sectors 0 5317 NULL
+pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
+cq_free_res_5355 cq_free_res 5 5355 NULL
+ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
+cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 3-2 5368 NULL
+__split_bvec_across_targets_5454 __split_bvec_across_targets 3 5454 NULL
+xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
+xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
+cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
+tty_write_5494 tty_write 3 5494 NULL
+tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
+ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
+__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
+ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
+get_entry_msg_len_5552 get_entry_msg_len 0 5552 NULL
+le_readq_5557 le_readq 0 5557 NULL
+inw_5558 inw 0 5558 NULL
+bioset_create_5580 bioset_create 1 5580 NULL
+oz_ep_alloc_5587 oz_ep_alloc 1 5587 NULL
+SYSC_fsetxattr_5639 SYSC_fsetxattr 4 5639 NULL
+ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
+posix_clock_register_5662 posix_clock_register 2 5662 NULL
+get_arg_5694 get_arg 3 5694 NULL
+subbuf_read_actor_5708 subbuf_read_actor 3 5708 NULL
+vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
+rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
+sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
+__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
+nvme_trans_bdev_char_page_5797 nvme_trans_bdev_char_page 3 5797 NULL
+skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
+nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL
+ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
+ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
+setup_req_5848 setup_req 3-0 5848 NULL
+ria_page_count_5849 ria_page_count 0 5849 NULL
+rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
+config_buf_5862 config_buf 0 5862 NULL
+iwl_dbgfs_scan_ant_rxchain_write_5877 iwl_dbgfs_scan_ant_rxchain_write 3 5877 NULL
+lprocfs_fid_width_seq_write_5889 lprocfs_fid_width_seq_write 3 5889 NULL
+port_show_regs_5904 port_show_regs 3 5904 NULL
+rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL
+uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
+lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
+ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
+edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
+ll_statahead_one_5962 ll_statahead_one 3 5962 NULL
+__apu_get_register_5967 __apu_get_register 0 5967 NULL
+ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
+SyS_semop_5980 SyS_semop 3 5980 NULL
+alloc_msg_6072 alloc_msg 1 6072 NULL
+sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
+rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
+ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
+dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
+nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
+ieee80211_if_fmt_beacon_timeout_6153 ieee80211_if_fmt_beacon_timeout 3 6153 NULL
+ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
+wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
+SyS_setgroups_6182 SyS_setgroups 1 6182 NULL
+mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL
+v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
+mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
+f_hidg_read_6238 f_hidg_read 3 6238 NULL
+fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
+tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
+snd_hda_override_conn_list_6282 snd_hda_override_conn_list 3-0 6282 NULL nohasharray
+xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
+posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL
+serial_port_in_6291 serial_port_in 0 6291 NULL
+qlcnic_sriov_alloc_bc_msg_6309 qlcnic_sriov_alloc_bc_msg 2 6309 NULL
+hfa384x_inw_6329 hfa384x_inw 0 6329 NULL nohasharray
+SyS_mincore_6329 SyS_mincore 2-1 6329 &hfa384x_inw_6329
+fuse_get_req_for_background_6337 fuse_get_req_for_background 2 6337 NULL
+ucs2_strnlen_6342 ucs2_strnlen 0 6342 NULL
+regcache_sync_block_raw_6350 regcache_sync_block_raw 5-4 6350 NULL
+mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL
+_proc_do_string_6376 _proc_do_string 2 6376 NULL
+osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
+posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
+add_transaction_credits_6422 add_transaction_credits 2-3 6422 NULL
+ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
+__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
+ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
+qp_memcpy_from_queue_6479 qp_memcpy_from_queue 5-4 6479 NULL
+cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
+dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
+mei_read_6507 mei_read 3 6507 NULL
+rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
+wdm_read_6549 wdm_read 3 6549 NULL
+dm_stats_create_6551 dm_stats_create 4-2-3 6551 NULL
+fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
+SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
+ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
+xfs_do_div_6649 xfs_do_div 0-2 6649 NULL
+process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
+btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
+ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
+bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
+mpeg_read_6708 mpeg_read 3 6708 NULL
+ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL
+video_proc_write_6724 video_proc_write 3 6724 NULL
+posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
+kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
+iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
+ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
+zone_spanned_pages_in_node_6787 zone_spanned_pages_in_node 0-3-4 6787 NULL
+hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
+tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
+lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL
+calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
+mon_bin_read_6841 mon_bin_read 3 6841 NULL
+snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
+perf_output_sample_ustack_6868 perf_output_sample_ustack 2 6868 NULL
+dio_complete_6879 dio_complete 0-2-3 6879 NULL
+raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 NULL nohasharray
+ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 &raw_seticmpfilter_6888
+dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
+proc_sessionid_read_6911 proc_sessionid_read 3 6911 NULL nohasharray
+spi_show_regs_6911 spi_show_regs 3 6911 &proc_sessionid_read_6911 nohasharray
+acm_alloc_minor_6911 acm_alloc_minor 0 6911 &spi_show_regs_6911
+__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
+do_msgrcv_6921 do_msgrcv 3 6921 NULL
+cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
+ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
+qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
+tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 NULL
+pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
+rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 NULL nohasharray
+i40e_dbg_dump_write_6973 i40e_dbg_dump_write 3 6973 &rsa_extract_mpi_6973
+request_key_async_6990 request_key_async 4 6990 NULL
+tpl_write_6998 tpl_write 3 6998 NULL
+r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
+cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
+tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
+ld_usb_write_7022 ld_usb_write 3 7022 NULL
+wimax_msg_7030 wimax_msg 4 7030 NULL
+ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
+snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
+hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
+event_enable_read_7074 event_enable_read 3 7074 NULL
+beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
+pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
+osc_resend_count_seq_write_7120 osc_resend_count_seq_write 3 7120 NULL
+qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL
+kvm_mmu_notifier_test_young_7139 kvm_mmu_notifier_test_young 3 7139 NULL
+__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
+hdlc_loop_7255 hdlc_loop 0 7255 NULL
+f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL
+rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
+get_string_7302 get_string 0 7302 NULL
+mgmt_control_7349 mgmt_control 3 7349 NULL
+at_est2timeout_7365 at_est2timeout 0-1 7365 NULL
+ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
+ath10k_read_fw_stats_7387 ath10k_read_fw_stats 3 7387 NULL
+hweight_long_7388 hweight_long 1-0 7388 NULL
+sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
+_ore_add_stripe_unit_7399 _ore_add_stripe_unit 6-3 7399 NULL
+readb_7401 readb 0 7401 NULL
+drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
+__copy_to_user_nocheck_7443 __copy_to_user_nocheck 0-3 7443 NULL
+ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
+SYSC_setgroups_7454 SYSC_setgroups 1 7454 NULL
+rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL
+l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL
+garp_request_join_7471 garp_request_join 4 7471 NULL nohasharray
+ReadHSCX_7471 ReadHSCX 0 7471 &garp_request_join_7471
+snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
+iwl_mvm_power_dbgfs_read_7502 iwl_mvm_power_dbgfs_read 0 7502 NULL
+ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 NULL nohasharray
+sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 &ahash_instance_headroom_7509
+array_zalloc_7519 array_zalloc 1-2 7519 NULL
+ath10k_read_htt_stats_mask_7557 ath10k_read_htt_stats_mask 3 7557 NULL
+smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
+cfs_cpt_num_estimate_7571 cfs_cpt_num_estimate 0 7571 NULL
+ocfs2_lock_create_7612 ocfs2_lock_create 0 7612 NULL
+groups_alloc_7614 groups_alloc 1 7614 NULL nohasharray
+create_dir_7614 create_dir 0 7614 &groups_alloc_7614
+_rtw_zmalloc_7636 _rtw_zmalloc 1 7636 NULL
+fault_inject_write_7662 fault_inject_write 3 7662 NULL
+acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 2-1 7685 NULL
+acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
+dev_write_7708 dev_write 3 7708 NULL
+pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL
+vxge_device_register_7752 vxge_device_register 4 7752 NULL
+iwl_dbgfs_bt_cmd_read_7770 iwl_dbgfs_bt_cmd_read 3 7770 NULL
+alloc_candev_7776 alloc_candev 1-2 7776 NULL
+dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
+bnx2_nvram_write_7790 bnx2_nvram_write 4-2 7790 NULL
+diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL nohasharray
+lustre_packed_msg_size_7792 lustre_packed_msg_size 0 7792 &diva_os_copy_from_user_7792
+cfs_trace_dump_debug_buffer_usrstr_7861 cfs_trace_dump_debug_buffer_usrstr 2 7861 NULL
+tipc_alloc_entry_7875 tipc_alloc_entry 2 7875 NULL
+config_desc_7878 config_desc 0 7878 NULL
+dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
+xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
+libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
+f_hidg_write_7932 f_hidg_write 3 7932 NULL
+integrity_digsig_verify_7956 integrity_digsig_verify 3-0 7956 NULL
+smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
+tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
+vcs_read_8017 vcs_read 3 8017 NULL
+vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
+ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
+dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
+spi_write_then_read_8073 spi_write_then_read 5-3 8073 NULL
+qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 NULL
+venus_lookup_8121 venus_lookup 4 8121 NULL
+ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
+xfs_file_fallocate_8150 xfs_file_fallocate 3-4 8150 NULL
+__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
+ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
+recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
+__ocfs2_lock_refcount_tree_8207 __ocfs2_lock_refcount_tree 0 8207 NULL
+rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
+ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
+play_iframe_8219 play_iframe 3 8219 NULL
+kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 NULL
+sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
+ceph_sync_write_8233 ceph_sync_write 4 8233 NULL
+check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
+t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
+init_cdev_8274 init_cdev 1 8274 NULL
+rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
+qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
+ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
+tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
+ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL
+generic_write_sync_8358 generic_write_sync 0 8358 NULL
+ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
+ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL
+xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
+zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
+smk_write_change_rule_8411 smk_write_change_rule 3 8411 NULL nohasharray
+uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 &smk_write_change_rule_8411
+roccat_common2_sysfs_read_8431 roccat_common2_sysfs_read 6 8431 NULL
+afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
+fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL
+batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL
+dev_config_8506 dev_config 3 8506 NULL
+ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
+opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL
+user_on_off_8552 user_on_off 2 8552 NULL
+profile_remove_8556 profile_remove 3 8556 NULL
+cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
+isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
+tower_write_8580 tower_write 3 8580 NULL
+cfs_cpt_number_8618 cfs_cpt_number 0 8618 NULL
+shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
+it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
+scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
+fuse_send_write_pages_8636 fuse_send_write_pages 0-5 8636 NULL
+generic_acl_set_8658 generic_acl_set 4 8658 NULL
+mlx5_vzalloc_8663 mlx5_vzalloc 1 8663 NULL
+dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
+lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
+rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
+skb_frag_size_8695 skb_frag_size 0 8695 NULL
+arcfb_write_8702 arcfb_write 3 8702 NULL
+i_size_read_8703 i_size_read 0 8703 NULL nohasharray
+init_header_8703 init_header 0 8703 &i_size_read_8703
+HDLC_irq_8709 HDLC_irq 2 8709 NULL
+ctrl_out_8712 ctrl_out 3-5 8712 NULL
+tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL
+jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
+__create_irqs_8733 __create_irqs 2 8733 NULL
+tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
+compound_order_8750 compound_order 0 8750 NULL
+ocfs2_find_path_8754 ocfs2_find_path 0 8754 NULL
+yurex_write_8761 yurex_write 3 8761 NULL
+joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
+kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL
+paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL
+ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL
+__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
+cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
+metronomefb_write_8823 metronomefb_write 3 8823 NULL
+SyS_llistxattr_8824 SyS_llistxattr 3 8824 NULL
+get_queue_depth_8833 get_queue_depth 0 8833 NULL
+dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
+usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
+debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
+wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
+radeon_drm_ioctl_8875 radeon_drm_ioctl 2 8875 NULL
+compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
+ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray
+tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890
+sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
+sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL
+write_file_ani_8918 write_file_ani 3 8918 NULL
+layout_commit_8926 layout_commit 3 8926 NULL
+adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
+driver_stats_read_8944 driver_stats_read 3 8944 NULL
+read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
+usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
+qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
+venus_mkdir_8967 venus_mkdir 4 8967 NULL
+seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray
+vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968
+bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
+jbd2_journal_blocks_per_page_9004 jbd2_journal_blocks_per_page 0 9004 NULL
+il_dbgfs_clear_ucode_stats_write_9016 il_dbgfs_clear_ucode_stats_write 3 9016 NULL
+snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
+fd_ioctl_9028 fd_ioctl 3 9028 NULL
+nla_put_9042 nla_put 3 9042 NULL
+snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 3-5 9061 NULL
+snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
+fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
+create_queues_9088 create_queues 2-3 9088 NULL
+ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
+adxl34x_spi_read_block_9108 adxl34x_spi_read_block 3 9108 NULL
+caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray
+gfn_to_rmap_9110 gfn_to_rmap 3-2 9110 &caif_stream_sendmsg_9110
+udf_direct_IO_9111 udf_direct_IO 4 9111 NULL
+pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
+apei_resources_merge_9149 apei_resources_merge 0 9149 NULL
+vb2_dma_sg_alloc_9157 vb2_dma_sg_alloc 2 9157 NULL
+dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
+isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
+count_leading_zeros_9183 count_leading_zeros 0 9183 NULL
+altera_swap_ir_9194 altera_swap_ir 2 9194 NULL
+snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
+virtqueue_add_9217 virtqueue_add 4-5 9217 NULL
+tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
+sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
+hfsplus_bnode_read_u16_9262 hfsplus_bnode_read_u16 0 9262 NULL
+hdpvr_read_9273 hdpvr_read 3 9273 NULL
+flakey_status_9274 flakey_status 5 9274 NULL
+iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
+ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
+ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL
+nvme_trans_fmt_get_parm_header_9340 nvme_trans_fmt_get_parm_header 2 9340 NULL
+ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
+ll_direct_rw_pages_9361 ll_direct_rw_pages 0 9361 NULL
+sta_beacon_loss_count_read_9370 sta_beacon_loss_count_read 3 9370 NULL
+get_request_type_9393 get_request_type 0 9393 NULL nohasharray
+mlx4_bitmap_init_9393 mlx4_bitmap_init 5-2 9393 &get_request_type_9393
+virtqueue_add_outbuf_9395 virtqueue_add_outbuf 3 9395 NULL
+read_9397 read 3 9397 NULL
+hash_ipportip4_expire_9415 hash_ipportip4_expire 4 9415 NULL
+btrfs_drop_extents_9423 btrfs_drop_extents 4 9423 NULL
+bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
+ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
+ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
+ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
+agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL nohasharray
+get_registers_9470 get_registers 4 9470 &agp_generic_alloc_user_9470
+crypt_status_9492 crypt_status 5 9492 NULL
+lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL
+lp_write_9511 lp_write 3 9511 NULL
+mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL
+scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
+ll_max_read_ahead_whole_mb_seq_write_9528 ll_max_read_ahead_whole_mb_seq_write 3 9528 NULL
+read_file_dma_9530 read_file_dma 3 9530 NULL
+iwl_dbgfs_bf_params_read_9542 iwl_dbgfs_bf_params_read 3 9542 NULL
+il_dbgfs_missed_beacon_write_9546 il_dbgfs_missed_beacon_write 3 9546 NULL
+compat_SyS_pwritev64_9548 compat_SyS_pwritev64 3 9548 NULL
+fw_node_create_9559 fw_node_create 2 9559 NULL
+kobj_map_9566 kobj_map 2-3 9566 NULL
+f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL
+snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
+lov_ost_pool_add_9626 lov_ost_pool_add 3 9626 NULL
+saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
+ceph_copy_user_to_page_vector_9635 ceph_copy_user_to_page_vector 4-3 9635 NULL
+acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL
+compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
+ll_checksum_seq_write_9648 ll_checksum_seq_write 3 9648 NULL
+ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
+queue_received_packet_9657 queue_received_packet 5 9657 NULL
+snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
+dns_query_9676 dns_query 3 9676 NULL
+qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
+__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
+vx_transfer_end_9701 vx_transfer_end 0 9701 NULL
+fuse_iter_npages_9705 fuse_iter_npages 0 9705 NULL nohasharray
+ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 &fuse_iter_npages_9705
+cfg80211_tx_mlme_mgmt_9715 cfg80211_tx_mlme_mgmt 3 9715 NULL
+btrfs_stack_file_extent_num_bytes_9720 btrfs_stack_file_extent_num_bytes 0 9720 NULL
+SYSC_ppoll_9721 SYSC_ppoll 2 9721 NULL
+nla_get_u8_9736 nla_get_u8 0 9736 NULL
+ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL
+ddb_input_read_9743 ddb_input_read 3-0 9743 NULL
+sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL
+btrfs_super_root_9763 btrfs_super_root 0 9763 NULL
+__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
+snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
+kvm_age_hva_9795 kvm_age_hva 2 9795 NULL
+parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL
+tpm_data_in_9802 tpm_data_in 0 9802 NULL
+udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
+ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
+pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
+btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
+f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL
+wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL
+snd_midi_event_new_9893 snd_midi_event_new 1 9893 NULL nohasharray
+bm_register_write_9893 bm_register_write 3 9893 &snd_midi_event_new_9893
+snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 5-3 9895 NULL
+nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL
+root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL
+pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
+read_file_misc_9948 read_file_misc 3 9948 NULL
+csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 3-4 9957 NULL
+SyS_gethostname_9964 SyS_gethostname 2 9964 NULL
+get_free_serial_index_9969 get_free_serial_index 0 9969 NULL
+btrfs_add_link_9973 btrfs_add_link 5 9973 NULL
+gameport_read_9983 gameport_read 0 9983 NULL
+SYSC_move_pages_9986 SYSC_move_pages 2 9986 NULL
+aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
+ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
+get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray
+dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110
+gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL
+SyS_migrate_pages_10134 SyS_migrate_pages 2 10134 NULL
+aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
+rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
+hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL
+asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
+kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL
+proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
+jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
+hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL
+cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
+__qlcnic_pci_sriov_enable_10281 __qlcnic_pci_sriov_enable 2 10281 NULL
+snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
+read_emulate_10310 read_emulate 2-4 10310 NULL
+read_file_spectral_count_10320 read_file_spectral_count 3 10320 NULL
+compat_SyS_writev_10327 compat_SyS_writev 3 10327 NULL
+tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
+ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
+whci_add_cap_10350 whci_add_cap 0 10350 NULL
+dbAllocAny_10354 dbAllocAny 0 10354 NULL
+ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
+ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
+sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
+ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
+do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
+fwtty_rx_10434 fwtty_rx 3 10434 NULL
+event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
+hash_ipportip6_expire_10478 hash_ipportip6_expire 4 10478 NULL
+nouveau_pwr_create__10483 nouveau_pwr_create_ 4 10483 NULL
+ext4_itable_unused_count_10501 ext4_itable_unused_count 0 10501 NULL
+qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
+sel_write_disable_10511 sel_write_disable 3 10511 NULL
+osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
+rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
+qlcnic_pci_sriov_enable_10519 qlcnic_pci_sriov_enable 2 10519 NULL
+kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 NULL nohasharray
+snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 &kstrtouint_from_user_10536
+ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL
+scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL
+otp_read_10594 otp_read 2-4-5 10594 NULL
+supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
+ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
+nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 NULL
+efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL
+parport_write_10669 parport_write 0 10669 NULL
+edge_write_10692 edge_write 4 10692 NULL
+selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 NULL nohasharray
+inl_10708 inl 0 10708 &selinux_inode_setxattr_10708
+shash_async_setkey_10720 shash_async_setkey 3 10720 NULL nohasharray
+pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 &shash_async_setkey_10720
+spi_sync_10731 spi_sync 0 10731 NULL
+apu_get_register_10737 apu_get_register 0 10737 NULL nohasharray
+sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 &apu_get_register_10737
+SyS_io_getevents_10756 SyS_io_getevents 3 10756 NULL
+vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
+kvm_read_guest_atomic_10765 kvm_read_guest_atomic 4 10765 NULL
+__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 2-4 10779 NULL
+diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
+lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
+ida_get_new_above_10853 ida_get_new_above 0 10853 NULL
+fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
+snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
+wiidebug_drm_write_10879 wiidebug_drm_write 3 10879 NULL
+get_scq_10897 get_scq 2 10897 NULL
+cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
+tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
+lprocfs_wr_atomic_10912 lprocfs_wr_atomic 3 10912 NULL
+__copy_from_user_10918 __copy_from_user 0-3 10918 NULL
+kobject_add_10919 kobject_add 0 10919 NULL
+ar9003_dump_modal_eeprom_10959 ar9003_dump_modal_eeprom 3-2-0 10959 NULL
+ci_port_test_write_10962 ci_port_test_write 3 10962 NULL
+bm_entry_read_10976 bm_entry_read 3 10976 NULL
+sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
+xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
+rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
+SetLineNumber_11023 SetLineNumber 0 11023 NULL
+tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
+insert_inline_extent_backref_11063 insert_inline_extent_backref 8 11063 NULL
+tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
+count_argc_11083 count_argc 0 11083 NULL
+ocfs2_blocks_per_xattr_bucket_11099 ocfs2_blocks_per_xattr_bucket 0 11099 NULL
+kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
+tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
+page_offset_11120 page_offset 0 11120 NULL
+cea_db_payload_len_11124 cea_db_payload_len 0 11124 NULL nohasharray
+tracing_buffers_read_11124 tracing_buffers_read 3 11124 &cea_db_payload_len_11124
+alloc_alien_cache_11127 alloc_alien_cache 2 11127 NULL
+snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL
+il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
+comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
+hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
+ath6kl_power_params_write_11274 ath6kl_power_params_write 3 11274 NULL
+__proc_daemon_file_11305 __proc_daemon_file 5 11305 NULL
+ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
+bcache_dev_sectors_dirty_add_11315 bcache_dev_sectors_dirty_add 3-4 11315 NULL
+sk_filter_size_11316 sk_filter_size 0 11316 NULL nohasharray
+tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 &sk_filter_size_11316
+construct_key_11329 construct_key 3 11329 NULL nohasharray
+__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
+next_segment_11330 next_segment 0-2-1 11330 NULL
+persistent_ram_buffer_map_11332 persistent_ram_buffer_map 2-1 11332 NULL
+ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
+sel_write_create_11353 sel_write_create 3 11353 NULL nohasharray
+nl80211_send_mgmt_11353 nl80211_send_mgmt 7 11353 &sel_write_create_11353
+qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
+nft_value_dump_11381 nft_value_dump 3 11381 NULL
+isku_sysfs_read_keys_capslock_11392 isku_sysfs_read_keys_capslock 6 11392 NULL
+dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
+lprocfs_wr_evict_client_11402 lprocfs_wr_evict_client 3 11402 NULL
+___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
+str_to_user_11411 str_to_user 2 11411 NULL
+mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
+ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
+adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 NULL nohasharray
+import_sec_validate_get_11417 import_sec_validate_get 0 11417 &adis16480_show_firmware_revision_11417
+trace_options_read_11419 trace_options_read 3 11419 NULL
+i40e_dbg_command_write_11421 i40e_dbg_command_write 3 11421 NULL
+xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 NULL
+bttv_read_11432 bttv_read 3 11432 NULL
+create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
+do_blockdev_direct_IO_11455 do_blockdev_direct_IO 0-6 11455 NULL
+pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
+sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
+xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
+sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
+kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
+ll_direct_IO_26_seg_11518 ll_direct_IO_26_seg 0 11518 NULL
+twl_direction_in_11527 twl_direction_in 2 11527 NULL
+skb_cow_data_11565 skb_cow_data 0 11565 NULL
+lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL
+oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
+batadv_iv_ogm_orig_add_if_11586 batadv_iv_ogm_orig_add_if 2 11586 NULL
+snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
+fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
+batadv_iv_ogm_orig_del_if_11604 batadv_iv_ogm_orig_del_if 2 11604 NULL
+SYSC_mq_timedsend_11607 SYSC_mq_timedsend 3 11607 NULL
+sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
+nla_total_size_11658 nla_total_size 1-0 11658 NULL
+slab_ksize_11664 slab_ksize 0 11664 NULL
+ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
+compat_SyS_msgsnd_11675 compat_SyS_msgsnd 3 11675 NULL
+btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
+sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
+split_11691 split 2 11691 NULL
+snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
+blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
+dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
+iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
+ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
+btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
+pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
+umc_device_register_11824 umc_device_register 0 11824 NULL
+zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
+sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
+rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
+unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
+ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
+ieee80211_rx_bss_info_11887 ieee80211_rx_bss_info 3 11887 NULL
+mdc_rename_11899 mdc_rename 4-6 11899 NULL
+xstateregs_get_11906 xstateregs_get 4 11906 NULL
+ti_write_11916 ti_write 4 11916 NULL
+fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
+bitmap_remap_11929 bitmap_remap 5 11929 NULL
+atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
+r1_sync_page_io_11963 r1_sync_page_io 3 11963 NULL
+f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
+read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
+i40e_pci_sriov_configure_12011 i40e_pci_sriov_configure 2 12011 NULL
+ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
+ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
+il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
+ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
+batadv_tt_global_size_mod_12085 batadv_tt_global_size_mod 3 12085 NULL
+rtw_malloc2d_12102 rtw_malloc2d 1-2-3 12102 NULL
+alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
+set_powered_12129 set_powered 4 12129 NULL
+ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
+xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
+rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
+rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
+btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
+vmbus_open_12154 vmbus_open 2-3 12154 NULL
+fnic_reset_stats_write_12177 fnic_reset_stats_write 3 12177 NULL
+LNetEQAlloc_12178 LNetEQAlloc 1 12178 NULL
+ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
+compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
+ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
+snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
+fuse_get_req_12221 fuse_get_req 2 12221 NULL nohasharray
+aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 &fuse_get_req_12221
+__alloc_bootmem_low_nopanic_12235 __alloc_bootmem_low_nopanic 1 12235 NULL
+ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
+shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
+add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
+note_last_dentry_12285 note_last_dentry 3 12285 NULL
+roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 NULL nohasharray
+il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 &roundup_to_multiple_of_64_12288
+bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
+pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
+mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
+__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
+xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
+btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL nohasharray
+populate_dir_12391 populate_dir 0 12391 &btrfs_file_extent_ram_bytes_12391 nohasharray
+write_file_dump_12391 write_file_dump 3 12391 &populate_dir_12391
+hbucket_elem_add_12416 hbucket_elem_add 3 12416 NULL
+ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
+ptlrpc_set_wait_12426 ptlrpc_set_wait 0 12426 NULL
+cfs_array_alloc_12441 cfs_array_alloc 2 12441 NULL
+skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
+x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
+fnic_trace_ctrl_read_12497 fnic_trace_ctrl_read 3 12497 NULL
+qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
+xfs_get_extsz_hint_12531 xfs_get_extsz_hint 0 12531 NULL
+hvc_alloc_12579 hvc_alloc 4 12579 NULL
+pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
+tlbflush_write_file_12598 tlbflush_write_file 3 12598 NULL
+vhci_put_user_12604 vhci_put_user 4 12604 NULL
+sdhci_pltfm_init_12627 sdhci_pltfm_init 3 12627 NULL
+pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
+pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
+dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
+nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
+rtw_android_get_link_speed_12655 rtw_android_get_link_speed 0 12655 NULL
+ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
+lustre_pack_request_v2_12665 lustre_pack_request_v2 0 12665 NULL
+sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
+sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
+ext4_writepage_trans_blocks_12674 ext4_writepage_trans_blocks 0 12674 NULL
+iwl_dbgfs_calib_disabled_write_12707 iwl_dbgfs_calib_disabled_write 3 12707 NULL
+ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
+ivtv_write_12721 ivtv_write 3 12721 NULL
+key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
+__mei_cl_async_send_12737 __mei_cl_async_send 3 12737 NULL
+ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
+listxattr_12769 listxattr 3 12769 NULL
+sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
+scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
+xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
+readq_12825 readq 0 12825 NULL
+SyS_add_key_12834 SyS_add_key 4 12834 NULL
+TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
+spidev_sync_12842 spidev_sync 0 12842 NULL
+spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
+ath9k_dump_4k_modal_eeprom_12883 ath9k_dump_4k_modal_eeprom 3-2 12883 NULL
+get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
+get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
+rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
+do_inode_permission_12946 do_inode_permission 0 12946 NULL
+bm_status_write_12964 bm_status_write 3 12964 NULL
+raid56_parity_recover_12987 raid56_parity_recover 5 12987 NULL
+TransmitTcb_12989 TransmitTcb 4 12989 NULL
+sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
+subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
+generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
+ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
+__dn_setsockopt_13060 __dn_setsockopt 5 13060 NULL nohasharray
+ptlrpc_lprocfs_threads_min_seq_write_13060 ptlrpc_lprocfs_threads_min_seq_write 3 13060 &__dn_setsockopt_13060
+biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
+xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
+ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
+SyS_msgrcv_13109 SyS_msgrcv 3 13109 NULL
+snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
+bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
+blk_update_request_13146 blk_update_request 3 13146 NULL
+ocfs2_quota_trans_credits_13150 ocfs2_quota_trans_credits 0 13150 NULL
+caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
+pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
+ucs2_strlen_13178 ucs2_strlen 0 13178 NULL
+dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
+create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
+comedi_read_13199 comedi_read 3 13199 NULL
+hash_ipport4_expire_13201 hash_ipport4_expire 4 13201 NULL
+mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
+svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
+fnic_trace_ctrl_write_13229 fnic_trace_ctrl_write 3 13229 NULL
+asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
+init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
+bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
+simple_attr_write_13260 simple_attr_write 3 13260 NULL
+pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
+il4965_stats_flag_13281 il4965_stats_flag 3-0 13281 NULL
+lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
+sd_major_13294 sd_major 0-1 13294 NULL
+module_param_sysfs_setup_13296 module_param_sysfs_setup 0 13296 NULL
+__clone_and_map_data_bio_13334 __clone_and_map_data_bio 4-8 13334 NULL
+kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5 13337 NULL
+hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
+iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
+wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
+ext4_meta_trans_blocks_13380 ext4_meta_trans_blocks 0-3-2 13380 NULL
+lov_mds_md_size_13388 lov_mds_md_size 0-1 13388 NULL nohasharray
+dis_bypass_write_13388 dis_bypass_write 3 13388 &lov_mds_md_size_13388
+netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
+sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
+ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 8-9-7 13443 NULL
+sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
+data_read_13494 data_read 3 13494 NULL
+ioat_chansts_32_13506 ioat_chansts_32 0 13506 NULL
+ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 0-2 13512 NULL
+core_status_13515 core_status 4 13515 NULL
+smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
+bm_init_13529 bm_init 2 13529 NULL
+llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
+ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
+hash_net4_expire_13559 hash_net4_expire 4 13559 NULL
+read_file_antenna_13574 read_file_antenna 3 13574 NULL
+cache_write_13589 cache_write 3 13589 NULL
+Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
+wm8994_bulk_write_13615 wm8994_bulk_write 2-3 13615 NULL
+pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
+packet_snd_13634 packet_snd 3 13634 NULL
+blk_msg_write_13655 blk_msg_write 3 13655 NULL
+cache_downcall_13666 cache_downcall 3 13666 NULL
+ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
+nv94_aux_13689 nv94_aux 2-5 13689 NULL
+usb_get_string_13693 usb_get_string 0 13693 NULL
+fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
+audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
+ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
+fb_sys_read_13778 fb_sys_read 3 13778 NULL
+ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
+random_read_13815 random_read 3 13815 NULL
+mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
+hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
+___mei_cl_send_13821 ___mei_cl_send 3 13821 NULL
+enc_pools_insert_13849 enc_pools_insert 3 13849 NULL
+evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
+compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
+qp_memcpy_to_queue_13886 qp_memcpy_to_queue 5-2 13886 NULL
+snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
+cfg80211_inform_bss_width_13933 cfg80211_inform_bss_width 9 13933 NULL
+ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
+ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
+ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
+iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
+ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
+lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
+osc_grant_shrink_interval_seq_write_13952 osc_grant_shrink_interval_seq_write 3 13952 NULL
+ocfs2_refresh_slot_info_13957 ocfs2_refresh_slot_info 0 13957 NULL
+snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
+qcam_read_13977 qcam_read 3 13977 NULL
+dsp_read_13980 dsp_read 2 13980 NULL
+dvb_demux_read_13981 dvb_demux_read 3 13981 NULL
+create_files_14003 create_files 0 14003 NULL
+sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
+btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
+dmi_format_ids_14018 dmi_format_ids 2 14018 NULL
+iscsi_create_flashnode_conn_14022 iscsi_create_flashnode_conn 4 14022 NULL
+dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
+read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
+ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
+ovs_nla_alloc_flow_actions_14056 ovs_nla_alloc_flow_actions 1 14056 NULL
+sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
+lov_stripeoffset_seq_write_14078 lov_stripeoffset_seq_write 3 14078 NULL
+do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
+compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
+ext4_journal_blocks_per_page_14127 ext4_journal_blocks_per_page 0 14127 NULL
+isku_sysfs_read_light_14140 isku_sysfs_read_light 6 14140 NULL
+em_canid_change_14150 em_canid_change 3 14150 NULL
+gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
+print_input_mask_14168 print_input_mask 3-0 14168 NULL
+ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
+datafab_read_data_14186 datafab_read_data 4 14186 NULL
+hfsplus_brec_find_14200 hfsplus_brec_find 0 14200 NULL
+alloc_async_14208 alloc_async 1 14208 NULL
+ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
+ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
+dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
+ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
+rr_status_14293 rr_status 5 14293 NULL
+read_default_ldt_14302 read_default_ldt 2 14302 NULL
+oo_objects_14319 oo_objects 0 14319 NULL
+ll_get_user_pages_14328 ll_get_user_pages 3-2-0 14328 NULL
+p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
+alloc_tx_struct_14349 alloc_tx_struct 1 14349 NULL
+hash_ipportnet4_expire_14354 hash_ipportnet4_expire 4 14354 NULL
+snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
+ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
+smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
+mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
+get_kcore_size_14425 get_kcore_size 0 14425 NULL
+block_size_14443 block_size 0 14443 NULL
+lmv_user_md_size_14456 lmv_user_md_size 0-1 14456 NULL
+snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
+ath10k_write_htt_stats_mask_14458 ath10k_write_htt_stats_mask 3 14458 NULL
+lustre_msg_size_v2_14470 lustre_msg_size_v2 0 14470 NULL
+dma_transfer_size_14473 dma_transfer_size 0 14473 NULL
+udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
+ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
+ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
+ep0_write_14536 ep0_write 3 14536 NULL nohasharray
+dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
+picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
+qp_host_alloc_queue_14566 qp_host_alloc_queue 1 14566 NULL
+SyS_setdomainname_14569 SyS_setdomainname 2 14569 NULL
+idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
+ceph_osdc_alloc_request_14597 ceph_osdc_alloc_request 3 14597 NULL
+dbJoin_14644 dbJoin 0 14644 NULL
+profile_replace_14652 profile_replace 3 14652 NULL
+min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
+nvme_trans_log_info_exceptions_14677 nvme_trans_log_info_exceptions 3 14677 NULL
+pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
+ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
+SyS_fsetxattr_14702 SyS_fsetxattr 4 14702 NULL
+persistent_ram_ecc_string_14704 persistent_ram_ecc_string 0 14704 NULL
+u_audio_playback_14709 u_audio_playback 3 14709 NULL
+rtw_cbuf_alloc_14710 rtw_cbuf_alloc 1 14710 NULL
+cgroup_path_14713 cgroup_path 3 14713 NULL
+vfd_write_14717 vfd_write 3 14717 NULL
+__blk_end_request_14729 __blk_end_request 3 14729 NULL
+raid1_resize_14740 raid1_resize 2 14740 NULL
+i915_error_state_buf_init_14742 i915_error_state_buf_init 2 14742 NULL
+btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
+rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
+regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
+sta_dev_read_14782 sta_dev_read 3 14782 NULL
+keys_proc_write_14792 keys_proc_write 3 14792 NULL
+ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
+__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
+hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
+do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
+mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
+lcd_write_14857 lcd_write 3 14857 NULL
+get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
+gmux_index_read8_14890 gmux_index_read8 0 14890 NULL
+acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
+SYSC_readv_14901 SYSC_readv 3 14901 NULL
+__arch_hweight64_14923 __arch_hweight64 0 14923 NULL nohasharray
+qp_memcpy_to_queue_iov_14923 qp_memcpy_to_queue_iov 5-2 14923 &__arch_hweight64_14923
+ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
+queue_cnt_14951 queue_cnt 0 14951 NULL
+unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
+videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
+mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
+setkey_14987 setkey 3 14987 NULL nohasharray
+gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
+xfs_dinode_size_14996 xfs_dinode_size 0 14996 NULL
+blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
+cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
+ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
+nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
+ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
+pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
+ceph_calc_ceph_pg_15075 ceph_calc_ceph_pg 0 15075 NULL
+smscore_load_firmware_family2_15086 smscore_load_firmware_family2 3 15086 NULL
+compat_SyS_pwritev_15118 compat_SyS_pwritev 3 15118 NULL
+hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
+start_port_15124 start_port 0 15124 NULL
+ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
+iwl_dbgfs_sta_drain_write_15167 iwl_dbgfs_sta_drain_write 3 15167 NULL
+SYSC_setdomainname_15180 SYSC_setdomainname 2 15180 NULL
+iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
+mtt_alloc_res_15211 mtt_alloc_res 5 15211 NULL
+bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
+iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
+il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
+simple_strtol_15273 simple_strtol 0 15273 NULL
+fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
+ocfs2_read_refcount_block_15305 ocfs2_read_refcount_block 0 15305 NULL
+xlog_ticket_alloc_15335 xlog_ticket_alloc 2 15335 NULL
+kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
+ioread16_15342 ioread16 0 15342 NULL
+ept_prefetch_gpte_15348 ept_prefetch_gpte 4 15348 NULL
+acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
+ext4_direct_IO_15369 ext4_direct_IO 4 15369 NULL
+graph_depth_read_15371 graph_depth_read 3 15371 NULL
+compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
+fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
+alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
+pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
+get_modalias_15406 get_modalias 2 15406 NULL
+blockdev_direct_IO_15408 blockdev_direct_IO 5 15408 NULL
+__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
+tcp_mtu_to_mss_15438 tcp_mtu_to_mss 2-0 15438 NULL
+hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
+memweight_15450 memweight 2 15450 NULL
+zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
+ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
+p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
+xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
+persistent_status_15574 persistent_status 4 15574 NULL
+bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
+vme_user_write_15587 vme_user_write 3 15587 NULL
+compat_fillonedir_15620 compat_fillonedir 3 15620 NULL
+proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
+tomoyo_scan_bprm_15642 tomoyo_scan_bprm 2-4 15642 NULL nohasharray
+sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
+pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
+joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
+fs_path_add_15648 fs_path_add 3 15648 NULL
+xsd_read_15653 xsd_read 3 15653 NULL
+unix_bind_15668 unix_bind 3 15668 NULL
+dm_read_15674 dm_read 3 15674 NULL nohasharray
+SyS_connect_15674 SyS_connect 3 15674 &dm_read_15674
+tracing_snapshot_write_15719 tracing_snapshot_write 3 15719 NULL
+HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
+bio_map_15794 bio_map 3-0 15794 NULL
+smk_read_direct_15803 smk_read_direct 3 15803 NULL
+nameseq_list_15817 nameseq_list 3-0 15817 NULL nohasharray
+gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
+afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
+brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
+table_size_15851 table_size 0-1-2 15851 NULL
+write_file_tx99_15856 write_file_tx99 3 15856 NULL
+media_entity_init_15870 media_entity_init 2-4 15870 NULL
+__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
+nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
+native_read_msr_15905 native_read_msr 0 15905 NULL
+parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
+power_read_15939 power_read 3 15939 NULL
+lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
+snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
+viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
+dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
+read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
+si5351_msynth_params_address_16062 si5351_msynth_params_address 0-1 16062 NULL
+isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
+isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 NULL nohasharray
+dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 &isr_hw_pm_mode_changes_read_16110
+snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
+compat_sys_select_16131 compat_sys_select 1 16131 NULL
+fsm_init_16134 fsm_init 2 16134 NULL
+ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
+optimal_reclaimed_pages_16172 optimal_reclaimed_pages 0 16172 NULL
+mapping_level_16188 mapping_level 2-0 16188 NULL
+i40e_allocate_virt_mem_d_16191 i40e_allocate_virt_mem_d 3 16191 NULL
+ath10k_htt_rx_ring_size_16201 ath10k_htt_rx_ring_size 0 16201 NULL
+cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
+SyS_pselect6_16210 SyS_pselect6 1 16210 NULL
+create_table_16213 create_table 2 16213 NULL
+ath9k_hw_ar9287_dump_eeprom_16224 ath9k_hw_ar9287_dump_eeprom 5-4 16224 NULL
+atomic_read_file_16227 atomic_read_file 3 16227 NULL
+BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
+lov_prep_brw_set_16246 lov_prep_brw_set 3 16246 NULL
+btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 NULL nohasharray
+i40e_dbg_dump_read_16247 i40e_dbg_dump_read 3 16247 &btrfs_dev_extent_chunk_offset_16247
+il_dbgfs_disable_ht40_write_16249 il_dbgfs_disable_ht40_write 3 16249 NULL
+SyS_fgetxattr_16254 SyS_fgetxattr 4 16254 NULL
+reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
+ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
+nand_bch_init_16280 nand_bch_init 3-2 16280 NULL nohasharray
+drbd_setsockopt_16280 drbd_setsockopt 5 16280 &nand_bch_init_16280
+account_16283 account 0-4-2 16283 NULL nohasharray
+mirror_status_16283 mirror_status 5 16283 &account_16283
+jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
+mo_xattr_get_16288 mo_xattr_get 0 16288 NULL
+stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
+rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
+rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
+kvm_handle_hva_range_16312 kvm_handle_hva_range 3-2 16312 NULL
+sysfs_create_groups_16360 sysfs_create_groups 0 16360 NULL
+total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
+iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
+ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
+rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
+__bio_add_page_16435 __bio_add_page 0-4 16435 NULL
+cmdline_store_16442 cmdline_store 4 16442 NULL
+btrfs_truncate_inode_items_16452 btrfs_truncate_inode_items 4 16452 NULL
+netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
+req_capsule_get_size_16467 req_capsule_get_size 0 16467 NULL
+tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
+KEY_OFFSET_16504 KEY_OFFSET 0 16504 NULL
+snd_interval_max_16529 snd_interval_max 0 16529 NULL
+raid10_resize_16537 raid10_resize 2 16537 NULL
+lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
+agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
+lustre_msg_hdr_size_v2_16589 lustre_msg_hdr_size_v2 0 16589 NULL
+gmux_index_read32_16604 gmux_index_read32 0 16604 NULL
+rtw_set_wpa_ie_16633 rtw_set_wpa_ie 3 16633 NULL
+btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
+__wa_populate_dto_urb_16699 __wa_populate_dto_urb 3-4 16699 NULL
+__proc_lnet_buffers_16717 __proc_lnet_buffers 5 16717 NULL
+__copy_to_user_swizzled_16748 __copy_to_user_swizzled 3-4 16748 NULL
+arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
+blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
+i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
+get_server_iovec_16804 get_server_iovec 2 16804 NULL
+drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
+scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
+hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
+alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
+carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
+st_write_16874 st_write 3 16874 NULL
+__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
+transport_init_session_tags_16878 transport_init_session_tags 1-2 16878 NULL
+snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 NULL nohasharray
+psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 &snd_gf1_mem_proc_dump_16926
+_sp2d_alloc_16944 _sp2d_alloc 1-2-3 16944 NULL
+squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
+keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
+ocfs2_read_quota_phys_block_16990 ocfs2_read_quota_phys_block 0 16990 NULL
+ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
+copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
+jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
+__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
+sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
+dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
+simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
+carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
+entry_length_17093 entry_length 0 17093 NULL
+ocfs2_get_refcount_cpos_end_17113 ocfs2_get_refcount_cpos_end 0 17113 NULL
+write_mem_17114 write_mem 3 17114 NULL
+pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
+nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
+jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
+sep_read_17161 sep_read 3 17161 NULL
+befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
+tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
+UniStrnlen_17169 UniStrnlen 0 17169 NULL
+access_remote_vm_17189 access_remote_vm 0 17189 NULL nohasharray
+iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189 nohasharray
+ocfs2_flock_handle_signal_17189 ocfs2_flock_handle_signal 0 17189 &iwl_dbgfs_txfifo_flush_write_17189
+iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 NULL nohasharray
+driver_state_read_17194 driver_state_read 3 17194 &iscsit_find_cmd_from_itt_or_dump_17194
+sync_request_17208 sync_request 2 17208 NULL
+dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
+lprocfs_read_frac_helper_17261 lprocfs_read_frac_helper 0 17261 NULL
+error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
+alloc_ep_17269 alloc_ep 1 17269 NULL
+pg_read_17276 pg_read 3 17276 NULL
+raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
+hmac_sha256_17278 hmac_sha256 2 17278 NULL
+neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
+minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
+__ptlrpc_request_bufs_pack_17298 __ptlrpc_request_bufs_pack 0 17298 NULL
+ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
+mb_cache_create_17307 mb_cache_create 2 17307 NULL
+gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
+ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
+ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
+mdc_import_seq_write_17409 mdc_import_seq_write 3 17409 NULL
+lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
+compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
+sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
+libcfs_ipif_enumerate_17445 libcfs_ipif_enumerate 0 17445 NULL
+nla_get_u32_17455 nla_get_u32 0 17455 NULL
+__ref_totlen_17461 __ref_totlen 0 17461 NULL
+probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
+TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
+lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
+qp_free_res_17541 qp_free_res 5 17541 NULL
+__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
+copy_from_user_17559 copy_from_user 0-3 17559 NULL
+hash_netport4_expire_17573 hash_netport4_expire 4 17573 NULL
+acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
+neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
+osst_execute_17607 osst_execute 7-6 17607 NULL
+ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
+dma_map_page_17628 dma_map_page 0 17628 NULL
+ocfs2_rotate_subtree_left_17634 ocfs2_rotate_subtree_left 5 17634 NULL
+twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
+SYSC_migrate_pages_17657 SYSC_migrate_pages 2 17657 NULL
+packet_setsockopt_17662 packet_setsockopt 5 17662 NULL
+pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
+venus_rename_17707 venus_rename 4-5 17707 NULL nohasharray
+__einj_error_trigger_17707 __einj_error_trigger 0 17707 &venus_rename_17707
+exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
+sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
+dgap_do_fep_load_17765 dgap_do_fep_load 3 17765 NULL
+brcmf_sdio_chip_verifynvram_17776 brcmf_sdio_chip_verifynvram 4 17776 NULL
+shrink_slab_node_17794 shrink_slab_node 3 17794 NULL
+gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
+cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
+dm_stats_message_17863 dm_stats_message 5 17863 NULL
+sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
+alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
+virtio_cread32_17873 virtio_cread32 0 17873 NULL
+ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
+orinoco_set_key_17878 orinoco_set_key 5-7 17878 NULL nohasharray
+i40e_align_l2obj_base_17878 i40e_align_l2obj_base 0-1 17878 &orinoco_set_key_17878
+init_per_cpu_17880 init_per_cpu 1 17880 NULL
+ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
+ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
+xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
+scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
+__mutex_lock_check_stamp_17947 __mutex_lock_check_stamp 0 17947 NULL
+beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
+calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
+ext4_ext_calc_credits_for_single_extent_17983 ext4_ext_calc_credits_for_single_extent 0-2 17983 NULL
+smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
+gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
+pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
+alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
+cpufreq_add_dev_symlink_18028 cpufreq_add_dev_symlink 0 18028 NULL
+o2hb_highest_node_18034 o2hb_highest_node 0 18034 NULL
+cryptd_alloc_instance_18048 cryptd_alloc_instance 2-3 18048 NULL
+ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
+lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
+fpregs_get_18066 fpregs_get 4 18066 NULL
+kvm_read_guest_page_18074 kvm_read_guest_page 5 18074 NULL
+SYSC_pselect6_18076 SYSC_pselect6 1 18076 NULL
+SYSC_semtimedop_18091 SYSC_semtimedop 3 18091 NULL
+mpi_alloc_18094 mpi_alloc 1 18094 NULL
+hfs_direct_IO_18104 hfs_direct_IO 4 18104 NULL
+dfs_file_read_18116 dfs_file_read 3 18116 NULL
+svc_getnl_18120 svc_getnl 0 18120 NULL
+paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-2-1 18131 NULL
+selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
+pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
+orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
+gsm_control_message_18209 gsm_control_message 4 18209 NULL
+do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
+gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
+alloc_trace_uprobe_18247 alloc_trace_uprobe 3 18247 NULL
+rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
+qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
+gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
+alloc_ring_18278 alloc_ring 2-4 18278 NULL
+bio_phys_segments_18281 bio_phys_segments 0 18281 NULL nohasharray
+nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 &bio_phys_segments_18281
+ext4_readpages_18283 ext4_readpages 4 18283 NULL
+mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
+um_idi_write_18293 um_idi_write 3 18293 NULL
+nouveau_disp_create__18305 nouveau_disp_create_ 4-7 18305 NULL
+vga_r_18310 vga_r 0 18310 NULL
+class_add_profile_18315 class_add_profile 1-3-5 18315 NULL
+csio_mem_read_18319 csio_mem_read 3 18319 NULL
+alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
+ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
+bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
+lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
+pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
+SyS_process_vm_readv_18366 SyS_process_vm_readv 3-5 18366 NULL
+ep_io_18367 ep_io 0 18367 NULL
+qib_user_sdma_num_pages_18371 qib_user_sdma_num_pages 0 18371 NULL
+ci_role_write_18388 ci_role_write 3 18388 NULL
+hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
+adis16136_show_serial_18402 adis16136_show_serial 3 18402 NULL
+crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
+iscsi_create_flashnode_sess_18433 iscsi_create_flashnode_sess 4 18433 NULL
+snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
+flash_dev_cache_miss_18454 flash_dev_cache_miss 4 18454 NULL
+fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
+regset_tls_set_18459 regset_tls_set 4 18459 NULL
+pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL nohasharray
+mite_bytes_in_transit_18479 mite_bytes_in_transit 0 18479 &pci_vpd_lrdt_size_18479
+udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
+btrfs_fiemap_18501 btrfs_fiemap 3 18501 NULL
+__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
+snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
+snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
+nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 9 18530 NULL
+seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
+sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
+smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
+debug_output_18575 debug_output 3 18575 NULL
+filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
+slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
+iowarrior_write_18604 iowarrior_write 3 18604 NULL
+nvc0_ram_create__18624 nvc0_ram_create_ 4 18624 NULL nohasharray
+audio_get_endpoint_req_18624 audio_get_endpoint_req 0 18624 &nvc0_ram_create__18624
+from_buffer_18625 from_buffer 3 18625 NULL
+snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
+ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
+xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL
+fnic_stats_debugfs_read_18688 fnic_stats_debugfs_read 3 18688 NULL
+echo_client_prep_commit_18693 echo_client_prep_commit 8 18693 NULL
+iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
+ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
+blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
+snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
+o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
+__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
+wep_packets_read_18751 wep_packets_read 3 18751 NULL
+read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
+ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
+SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
+alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
+prealloc_18800 prealloc 0 18800 NULL
+dm_stats_print_18815 dm_stats_print 7 18815 NULL
+sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
+mtf_test_write_18844 mtf_test_write 3 18844 NULL
+sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
+ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL
+xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
+ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
+ieee80211_rx_mgmt_disassoc_18927 ieee80211_rx_mgmt_disassoc 3 18927 NULL
+snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
+__nla_reserve_18974 __nla_reserve 3 18974 NULL
+__blockdev_direct_IO_18977 __blockdev_direct_IO 0-6 18977 NULL
+layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
+huge_page_size_19008 huge_page_size 0 19008 NULL
+hash_netport6_expire_19013 hash_netport6_expire 4 19013 NULL
+sysfs_create_dir_ns_19033 sysfs_create_dir_ns 0 19033 NULL
+revalidate_19043 revalidate 2 19043 NULL
+afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
+osc_pinger_recov_seq_write_19056 osc_pinger_recov_seq_write 3 19056 NULL
+create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
+ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
+ceph_create_snap_context_19082 ceph_create_snap_context 1 19082 NULL
+sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
+cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
+ATOMIC_SUB_RETURN_19115 ATOMIC_SUB_RETURN 2 19115 NULL
+snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
+alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
+sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
+smk_write_access2_19170 smk_write_access2 3 19170 NULL
+iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
+vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
+__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
+dev_counters_read_19216 dev_counters_read 3 19216 NULL
+wbcir_tx_19219 wbcir_tx 3 19219 NULL
+snd_mask_max_19224 snd_mask_max 0 19224 NULL
+bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
+ucma_query_19260 ucma_query 4 19260 NULL
+il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
+batadv_tt_save_orig_buffer_19288 batadv_tt_save_orig_buffer 4 19288 NULL nohasharray
+cfg80211_rx_unprot_mlme_mgmt_19288 cfg80211_rx_unprot_mlme_mgmt 3 19288 &batadv_tt_save_orig_buffer_19288
+qc_capture_19298 qc_capture 3 19298 NULL
+ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 4-3 19303 NULL
+event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
+debug_read_19322 debug_read 3 19322 NULL
+lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 NULL nohasharray
+cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 &lbs_host_sleep_write_19332
+closure_sub_19359 closure_sub 2 19359 NULL
+firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
+read_zero_19366 read_zero 3 19366 NULL
+interpret_user_input_19393 interpret_user_input 2 19393 NULL
+sync_fill_pt_info_19397 sync_fill_pt_info 0 19397 NULL
+pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
+dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
+SyS_sched_getaffinity_19444 SyS_sched_getaffinity 2 19444 NULL
+xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
+gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
+gp2ap020a00f_get_thresh_reg_19468 gp2ap020a00f_get_thresh_reg 0 19468 NULL
+sky2_read16_19475 sky2_read16 0 19475 NULL
+__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
+kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
+ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
+batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
+apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL nohasharray
+cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 &apei_exec_pre_map_gars_19529
+nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
+howmany_64_19548 howmany_64 2 19548 NULL
+gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
+ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
+ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
+nfsd_read_19568 nfsd_read 5 19568 NULL
+cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
+bm_status_read_19583 bm_status_read 3 19583 NULL
+load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
+__mei_cl_recv_19636 __mei_cl_recv 3 19636 NULL
+LoadBitmap_19658 LoadBitmap 2 19658 NULL
+iwl_dbgfs_pm_params_write_19660 iwl_dbgfs_pm_params_write 3 19660 NULL
+read_reg_19723 read_reg 0 19723 NULL
+wm8350_block_write_19727 wm8350_block_write 2-3 19727 NULL
+memcpy_toiovecend_19736 memcpy_toiovecend 4-3 19736 NULL
+snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
+p9_client_read_19750 p9_client_read 5-0 19750 NULL
+pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
+ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
+jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
+readhscx_19769 readhscx 0 19769 NULL
+__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
+iwl_dbgfs_disable_power_off_write_19823 iwl_dbgfs_disable_power_off_write 3 19823 NULL
+irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
+vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
+security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
+crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
+cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
+__nla_put_19857 __nla_put 3 19857 NULL
+mrp_request_join_19882 mrp_request_join 4 19882 NULL
+aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
+ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
+mangle_name_19923 mangle_name 0 19923 NULL
+cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
+guest_read_tsc_19931 guest_read_tsc 0 19931 NULL
+iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
+cfg80211_rx_assoc_resp_19944 cfg80211_rx_assoc_resp 4 19944 NULL
+get_jack_mode_name_19976 get_jack_mode_name 4 19976 NULL
+attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
+rtw_set_wps_probe_resp_19989 rtw_set_wps_probe_resp 3 19989 NULL
+lustre_pack_request_19992 lustre_pack_request 0 19992 NULL
+diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
+lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 NULL
+tree_mod_log_eb_move_20011 tree_mod_log_eb_move 5 20011 NULL
+SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
+split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
+alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
+iwl_mvm_power_mac_dbgfs_read_20067 iwl_mvm_power_mac_dbgfs_read 4 20067 NULL
+target_message_20072 target_message 2 20072 NULL
+rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
+fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
+aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
+team_options_register_20091 team_options_register 3 20091 NULL
+qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
+root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
+hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
+tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
+read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
+wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
+create_trace_probe_20175 create_trace_probe 1 20175 NULL
+crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
+pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
+rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
+tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
+btrfs_header_nritems_20296 btrfs_header_nritems 0 20296 NULL
+r10_sync_page_io_20307 r10_sync_page_io 3 20307 NULL
+dm_get_reserved_bio_based_ios_20315 dm_get_reserved_bio_based_ios 0 20315 NULL
+tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
+vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
+snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
+gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
+handle_arr_calc_size_20355 handle_arr_calc_size 0-1 20355 NULL
+smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
+snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL nohasharray
+read_7220_creg32_20394 read_7220_creg32 0 20394 &snd_nm256_readl_20394
+__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL nohasharray
+SyS_get_mempolicy_20399 SyS_get_mempolicy 3 20399 &__kfifo_from_user_20399
+nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
+compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
+read_buf_20469 read_buf 2 20469 NULL
+bio_trim_20472 bio_trim 2 20472 NULL
+btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
+xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
+drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
+amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
+scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
+venus_create_20555 venus_create 4 20555 NULL
+btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
+crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
+ocfs2_cluster_lock_20588 ocfs2_cluster_lock 0 20588 NULL
+kvm_test_age_hva_20593 kvm_test_age_hva 2 20593 NULL
+sync_timeline_create_20601 sync_timeline_create 2 20601 NULL
+lirc_write_20604 lirc_write 3 20604 NULL
+qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
+snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
+get_extent_skip_holes_20642 get_extent_skip_holes 2 20642 NULL
+kfifo_copy_to_user_20646 kfifo_copy_to_user 3-4 20646 NULL
+cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
+oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
+oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
+snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL
+dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 NULL
+cpumask_size_20683 cpumask_size 0 20683 NULL
+btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
+read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
+__maestro_read_20700 __maestro_read 0 20700 NULL
+cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
+pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
+ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
+security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
+fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
+vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
+ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
+brcmf_p2p_escan_20763 brcmf_p2p_escan 2 20763 NULL
+fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
+iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
+strndup_user_20819 strndup_user 2 20819 NULL
+tipc_msg_build_20825 tipc_msg_build 3 20825 NULL
+wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
+uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
+p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
+nvme_trans_supported_vpd_pages_20847 nvme_trans_supported_vpd_pages 4 20847 NULL
+get_name_20855 get_name 4 20855 NULL
+iwl_dbgfs_pm_params_read_20866 iwl_dbgfs_pm_params_read 3 20866 NULL
+snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
+srq_free_res_20868 srq_free_res 5 20868 NULL
+cfs_cpt_table_create_20884 cfs_cpt_table_create 1 20884 NULL
+rb_simple_write_20890 rb_simple_write 3 20890 NULL
+sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
+key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
+vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
+lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
+htable_bits_20933 htable_bits 0 20933 NULL
+altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
+rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
+rsxx_cram_read_20957 rsxx_cram_read 3 20957 NULL
+nfs_map_name_to_uid_20962 nfs_map_name_to_uid 3 20962 NULL
+snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
+alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
+qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
+btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
+rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
+srp_change_queue_depth_21038 srp_change_queue_depth 2 21038 NULL
+lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
+reiserfs_direct_IO_21051 reiserfs_direct_IO 4 21051 NULL
+proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
+qdisc_get_default_21072 qdisc_get_default 2 21072 NULL
+event_calibration_read_21083 event_calibration_read 3 21083 NULL
+bl_add_page_to_bio_21094 bl_add_page_to_bio 2 21094 NULL nohasharray
+multipath_status_21094 multipath_status 5 21094 &bl_add_page_to_bio_21094
+rate_control_pid_events_read_21099 rate_control_pid_events_read 3 21099 NULL
+ocfs2_extend_meta_needed_21104 ocfs2_extend_meta_needed 0 21104 NULL
+ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
+i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
+cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
+ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
+scsi_execute_req_flags_21215 scsi_execute_req_flags 5 21215 NULL
+get_numpages_21227 get_numpages 0-1-2 21227 NULL
+input_ff_create_21240 input_ff_create 2 21240 NULL
+cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
+use_debug_keys_read_21251 use_debug_keys_read 3 21251 NULL
+fru_length_21257 fru_length 0 21257 NULL
+rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
+ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
+do_msg_fill_21307 do_msg_fill 3 21307 NULL
+add_res_range_21310 add_res_range 4 21310 NULL
+get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
+ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
+read_file_bool_bmps_21344 read_file_bool_bmps 3 21344 NULL
+ocfs2_find_subtree_root_21351 ocfs2_find_subtree_root 0 21351 NULL
+gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
+alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
+SYSC_rt_sigpending_21379 SYSC_rt_sigpending 2 21379 NULL
+video_ioctl2_21380 video_ioctl2 2 21380 NULL
+insert_ptr_21386 insert_ptr 6 21386 NULL
+diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
+snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
+snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
+tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
+tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
+aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
+__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
+ReadISAR_21453 ReadISAR 0 21453 NULL
+mei_nfc_send_21477 mei_nfc_send 3 21477 NULL
+read_file_xmit_21487 read_file_xmit 3 21487 NULL
+mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
+btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
+il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
+cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
+rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
+rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
+xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
+snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
+ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
+filemap_get_page_21606 filemap_get_page 2 21606 NULL
+ocfs2_refcount_cow_hunk_21630 ocfs2_refcount_cow_hunk 3-4 21630 NULL
+__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
+atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
+ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
+rtllib_alloc_txb_21687 rtllib_alloc_txb 1 21687 NULL
+evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
+unix_skb_len_21722 unix_skb_len 0 21722 NULL
+lprocfs_wr_import_21728 lprocfs_wr_import 3 21728 NULL
+mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
+usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
+gen_pool_add_21776 gen_pool_add 3 21776 NULL
+xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
+dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
+__ocfs2_cluster_lock_21812 __ocfs2_cluster_lock 0 21812 NULL
+oom_adj_read_21847 oom_adj_read 3 21847 NULL
+lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
+brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
+sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
+ldlm_lock_create_21888 ldlm_lock_create 7 21888 NULL
+dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
+qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
+SYSC_prctl_21980 SYSC_prctl 4 21980 NULL
+compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 0-3 22001 NULL nohasharray
+rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 &compat_rw_copy_check_uvector_22001
+regcache_sync_block_raw_flush_22021 regcache_sync_block_raw_flush 3-4 22021 NULL
+btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
+_sp2d_min_pg_22032 _sp2d_min_pg 0 22032 NULL
+zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
+ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
+btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
+mem_rw_22085 mem_rw 3 22085 NULL
+kstrtos32_from_user_22087 kstrtos32_from_user 2 22087 NULL
+rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
+snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
+SyS_sched_setaffinity_22148 SyS_sched_setaffinity 2 22148 NULL
+do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
+__kfifo_alloc_22173 __kfifo_alloc 3 22173 NULL
+rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL
+mem_write_22232 mem_write 3 22232 NULL
+p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
+prepare_to_wait_event_22247 prepare_to_wait_event 0 22247 NULL
+compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
+ping_common_sendmsg_22261 ping_common_sendmsg 5 22261 NULL
+add_res_tree_22263 add_res_tree 7 22263 NULL
+__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
+queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
+__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 NULL nohasharray
+pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 &__tun_chr_ioctl_22300
+mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
+lov_setstripe_22307 lov_setstripe 2 22307 NULL
+udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
+C_SYSC_msgrcv_22320 C_SYSC_msgrcv 3 22320 NULL
+atomic_read_22342 atomic_read 0 22342 NULL
+ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
+memcg_size_22360 memcg_size 0 22360 NULL
+snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
+evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
+alloc_large_system_hash_22391 alloc_large_system_hash 2 22391 NULL
+zoran_write_22404 zoran_write 3 22404 NULL
+ATOMIC_ADD_RETURN_22413 ATOMIC_ADD_RETURN 2 22413 NULL
+queue_reply_22416 queue_reply 3 22416 NULL
+__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
+queue_max_segments_22441 queue_max_segments 0 22441 NULL
+handle_received_packet_22457 handle_received_packet 3 22457 NULL
+mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
+source_sink_start_ep_22472 source_sink_start_ep 0 22472 NULL
+ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
+qib_user_sdma_alloc_header_22490 qib_user_sdma_alloc_header 2 22490 NULL
+cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
+mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
+trim_no_bitmap_22524 trim_no_bitmap 4-3 22524 NULL
+ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
+agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
+dbFindCtl_22587 dbFindCtl 0 22587 NULL
+snapshot_read_22601 snapshot_read 3 22601 NULL
+sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
+ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
+wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
+pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
+iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
+compat_SyS_msgrcv_22661 compat_SyS_msgrcv 3 22661 NULL
+ext4_ext_direct_IO_22679 ext4_ext_direct_IO 4 22679 NULL
+l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
+bch_dump_read_22685 bch_dump_read 3 22685 NULL
+reg_umr_22686 reg_umr 5 22686 NULL
+alloc_libipw_22708 alloc_libipw 1 22708 NULL
+cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
+ceph_decode_32_22738 ceph_decode_32 0 22738 NULL nohasharray
+__mei_cl_send_22738 __mei_cl_send 3 22738 &ceph_decode_32_22738
+iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
+qlcnic_sriov_init_22762 qlcnic_sriov_init 2 22762 NULL
+print_frame_22769 print_frame 0 22769 NULL
+ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
+pla_ocp_write_22802 pla_ocp_write 4 22802 NULL
+__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
+clone_bio_integrity_22842 clone_bio_integrity 4 22842 NULL
+read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
+create_attr_set_22861 create_attr_set 1 22861 NULL
+hash_ip6_expire_22867 hash_ip6_expire 4 22867 NULL
+vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
+usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
+mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
+policy_emit_config_values_22900 policy_emit_config_values 3 22900 NULL
+xstateregs_set_22932 xstateregs_set 4 22932 NULL
+pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
+alloc_sglist_22960 alloc_sglist 2-3 22960 NULL
+caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
+vme_get_size_22964 vme_get_size 0 22964 NULL
+tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
+usb_get_langid_22983 usb_get_langid 0 22983 NULL
+remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
+brcmf_sdio_chip_exit_download_23001 brcmf_sdio_chip_exit_download 4 23001 NULL
+viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
+cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
+ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0-6-7 23029 NULL
+st_status_23032 st_status 5 23032 NULL
+nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
+comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
+reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
+unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
+mei_cl_send_23068 mei_cl_send 3 23068 NULL
+kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
+raw_sendmsg_23078 raw_sendmsg 4 23078 NULL
+get_user_hdr_len_23079 get_user_hdr_len 0 23079 NULL
+isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
+rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
+ntfs_ucstonls_23097 ntfs_ucstonls 3-5 23097 NULL
+pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
+dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
+mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
+nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
+__clear_user_23118 __clear_user 0-2 23118 NULL
+drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
+ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
+read_file_ani_23161 read_file_ani 3 23161 NULL
+usblp_write_23178 usblp_write 3 23178 NULL
+gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
+mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
+tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
+xlog_get_bp_23229 xlog_get_bp 2 23229 NULL nohasharray
+__read_status_pci_23229 __read_status_pci 0 23229 &xlog_get_bp_23229
+ft1000_read_dpram_mag_32_23232 ft1000_read_dpram_mag_32 0 23232 NULL
+rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
+__gfn_to_rmap_23240 __gfn_to_rmap 2-1 23240 NULL
+nv50_ram_create__23241 nv50_ram_create_ 4 23241 NULL
+sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
+uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
+diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
+i2cdev_write_23310 i2cdev_write 3 23310 NULL
+__aa_kvmalloc_23320 __aa_kvmalloc 1 23320 NULL
+page_readlink_23346 page_readlink 3 23346 NULL
+kmem_zalloc_large_23351 kmem_zalloc_large 1 23351 NULL
+get_dst_timing_23358 get_dst_timing 0 23358 NULL
+fd_setup_write_same_buf_23369 fd_setup_write_same_buf 3 23369 NULL
+iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
+vga_mm_r_23419 vga_mm_r 0 23419 NULL
+ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 NULL
+hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
+__ata_change_queue_depth_23484 __ata_change_queue_depth 3 23484 NULL
+linear_conf_23485 linear_conf 2 23485 NULL
+event_filter_read_23494 event_filter_read 3 23494 NULL
+lustre_acl_xattr_merge2ext_23502 lustre_acl_xattr_merge2ext 2 23502 NULL
+devm_iio_device_alloc_23511 devm_iio_device_alloc 2 23511 NULL
+__proc_cpt_table_23516 __proc_cpt_table 5 23516 NULL
+ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
+tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
+btrfs_super_bytenr_23561 btrfs_super_bytenr 0 23561 NULL
+venus_symlink_23570 venus_symlink 6-4 23570 NULL
+iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
+xfpregs_get_23586 xfpregs_get 4 23586 NULL
+snd_interval_min_23590 snd_interval_min 0 23590 NULL
+islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
+ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
+__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
+sInW_23663 sInW 0 23663 NULL
+SyS_connect_23669 SyS_connect 3 23669 NULL
+cx18_read_23699 cx18_read 3 23699 NULL
+at_get_23708 at_get 0 23708 NULL
+rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
+__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
+__build_packet_message_23778 __build_packet_message 4-10 23778 NULL
+security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
+cfg80211_inform_bss_width_frame_23782 cfg80211_inform_bss_width_frame 5 23782 NULL
+mpt_free_res_23793 mpt_free_res 5 23793 NULL
+map_write_23795 map_write 3 23795 NULL
+rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
+ocfs2_replace_cow_23803 ocfs2_replace_cow 0 23803 NULL
+__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
+lustre_msg_buflen_23827 lustre_msg_buflen 0 23827 NULL
+ceph_copy_page_vector_to_user_23829 ceph_copy_page_vector_to_user 3-4 23829 NULL
+pgdat_end_pfn_23842 pgdat_end_pfn 0 23842 NULL
+iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
+p54_init_common_23850 p54_init_common 1 23850 NULL
+bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
+ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
+ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
+nouveau_clock_create__23881 nouveau_clock_create_ 5 23881 NULL
+tipc_snprintf_23893 tipc_snprintf 2-0 23893 NULL
+usbg_prepare_w_request_23895 usbg_prepare_w_request 0 23895 NULL
+add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
+ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911
+f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL
+mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 NULL nohasharray
+ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 &mpihelp_mul_karatsuba_case_23918
+kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL
+uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL
+cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
+size_roundup_power2_23958 size_roundup_power2 0-1 23958 NULL
+sddr55_write_data_23983 sddr55_write_data 4 23983 NULL
+zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
+cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
+give_pages_24021 give_pages 3 24021 NULL
+adis16400_show_serial_number_24037 adis16400_show_serial_number 3 24037 NULL
+hmac_setkey_24043 hmac_setkey 3 24043 NULL
+afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
+blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
+vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
+pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
+request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
+lov_brw_24122 lov_brw 4 24122 NULL
+mpu401_read_24126 mpu401_read 3-0 24126 NULL
+_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL
+irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
+SyS_sethostname_24150 SyS_sethostname 2 24150 NULL
+trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
+adu_read_24177 adu_read 3 24177 NULL
+safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
+irq_remapping_setup_msi_irqs_24194 irq_remapping_setup_msi_irqs 2 24194 NULL
+ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
+tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
+pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL nohasharray
+mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224
+pci_num_vf_24235 pci_num_vf 0 24235 NULL
+sel_read_bool_24236 sel_read_bool 3 24236 NULL
+em28xx_alloc_urbs_24260 em28xx_alloc_urbs 4-6 24260 NULL
+calculate_sizes_24273 calculate_sizes 2 24273 NULL
+thin_status_24278 thin_status 5 24278 NULL
+msg_size_24288 msg_size 0 24288 NULL
+gserial_connect_24302 gserial_connect 0 24302 NULL
+btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
+ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
+si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL
+C_SYSC_pwritev_24345 C_SYSC_pwritev 3 24345 NULL
+kzalloc_node_24352 kzalloc_node 1 24352 NULL
+qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
+cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
+btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
+igetword_24373 igetword 0 24373 NULL
+max_io_len_24384 max_io_len 0-1 24384 NULL
+mpt_alloc_res_24387 mpt_alloc_res 5 24387 NULL
+osc_cur_grant_bytes_seq_write_24396 osc_cur_grant_bytes_seq_write 3 24396 NULL
+getxattr_24398 getxattr 4 24398 NULL nohasharray
+pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 &getxattr_24398
+blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
+nvme_trans_log_supp_pages_24418 nvme_trans_log_supp_pages 3 24418 NULL
+b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
+xenbus_file_read_24427 xenbus_file_read 3 24427 NULL
+ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
+copy_and_ioctl_24434 copy_and_ioctl 4 24434 NULL
+ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 4-6 24439 NULL
+smk_user_access_24440 smk_user_access 3 24440 NULL nohasharray
+rtw_set_wps_assoc_resp_24440 rtw_set_wps_assoc_resp 3 24440 &smk_user_access_24440
+evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
+lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL
+skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL
+dut_mode_read_24489 dut_mode_read 3 24489 NULL
+read_file_spec_scan_ctl_24491 read_file_spec_scan_ctl 3 24491 NULL
+pd_video_read_24510 pd_video_read 3 24510 NULL
+request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
+xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
+do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
+write_cache_pages_24562 write_cache_pages 0 24562 NULL
+SyS_pselect6_24582 SyS_pselect6 1 24582 NULL
+udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
+sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL nohasharray
+lov_alloc_memmd_24605 lov_alloc_memmd 2 24605 &sensor_hub_get_physical_device_count_24605
+SyS_poll_24620 SyS_poll 2 24620 NULL
+context_alloc_24645 context_alloc 3 24645 NULL
+blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
+datafab_write_data_24696 datafab_write_data 4 24696 NULL
+intelfbhw_get_p1p2_24703 intelfbhw_get_p1p2 2 24703 NULL
+simple_attr_read_24738 simple_attr_read 3 24738 NULL
+qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
+get_dma_residue_24749 get_dma_residue 0 24749 NULL
+ocfs2_cow_file_pos_24751 ocfs2_cow_file_pos 3 24751 NULL
+kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
+ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
+datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
+cache_read_24790 cache_read 3 24790 NULL
+user_regset_copyout_24796 user_regset_copyout 7 24796 NULL
+kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 NULL
+ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL
+net2272_read_24825 net2272_read 0 24825 NULL
+snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
+snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
+pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
+l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
+queues_read_24877 queues_read 3 24877 NULL
+__vxge_hw_vp_initialize_24885 __vxge_hw_vp_initialize 2 24885 NULL
+codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
+v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 NULL nohasharray
+__btrfs_free_extent_24927 __btrfs_free_extent 7 24927 &v4l2_ctrl_new_24927
+ocfs2_fiemap_24949 ocfs2_fiemap 4-3 24949 NULL
+packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
+ll_layout_fetch_24961 ll_layout_fetch 0 24961 NULL
+twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL
+llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
+key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
+il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
+ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
+nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
+load_unaligned_zeropad_25050 load_unaligned_zeropad 0 25050 NULL
+btrfs_stack_key_blockptr_25058 btrfs_stack_key_blockptr 0 25058 NULL
+gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
+ll_track_pid_seq_write_25068 ll_track_pid_seq_write 3 25068 NULL
+SYSC_listxattr_25072 SYSC_listxattr 3 25072 NULL
+iwl_dbgfs_tx_flush_write_25091 iwl_dbgfs_tx_flush_write 3 25091 NULL
+ima_appraise_measurement_25093 ima_appraise_measurement 6 25093 NULL
+blkg_path_25099 blkg_path 3 25099 NULL
+snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
+ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL
+kvm_mmu_notifier_change_pte_25169 kvm_mmu_notifier_change_pte 3 25169 NULL
+sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
+mon_stat_read_25238 mon_stat_read 3 25238 NULL
+stripe_status_25259 stripe_status 5 25259 NULL
+snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
+crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
+vfs_writev_25278 vfs_writev 3 25278 NULL
+l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
+ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL
+rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
+SYSC_kexec_load_25361 SYSC_kexec_load 2 25361 NULL
+unix_mkname_25368 unix_mkname 0-2 25368 NULL
+sel_read_mls_25369 sel_read_mls 3 25369 NULL
+vsp1_entity_init_25407 vsp1_entity_init 3 25407 NULL
+dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
+generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
+ipath_decode_err_25468 ipath_decode_err 3 25468 NULL
+crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
+ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL
+snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
+sb_permission_25523 sb_permission 0 25523 NULL
+ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
+ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
+wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
+ht_print_chan_25556 ht_print_chan 3-4-0 25556 NULL
+skb_tailroom_25567 skb_tailroom 0 25567 NULL
+ping_recvmsg_25597 ping_recvmsg 4 25597 NULL
+copy_user_generic_25611 copy_user_generic 0 25611 NULL
+proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
+befs_utf2nls_25628 befs_utf2nls 3 25628 NULL nohasharray
+__get_user_pages_25628 __get_user_pages 0 25628 &befs_utf2nls_25628
+__direct_map_25647 __direct_map 6-5 25647 NULL
+aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
+lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL
+sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
+rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 NULL
+ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
+__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 NULL nohasharray
+sel_write_context_25726 sel_write_context 3 25726 &__alloc_bootmem_low_node_25726
+cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
+event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
+sg_read_25799 sg_read 3 25799 NULL
+system_enable_read_25815 system_enable_read 3 25815 NULL
+realloc_buffer_25816 realloc_buffer 2 25816 NULL
+pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
+parport_read_25855 parport_read 0 25855 NULL
+xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
+key_attr_size_25865 key_attr_size 0 25865 NULL
+ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
+run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL
+sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
+lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
+nvme_trans_mode_page_create_25908 nvme_trans_mode_page_create 7-4 25908 NULL
+do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
+rcname_read_25919 rcname_read 3 25919 NULL
+snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
+key_flags_read_25931 key_flags_read 3 25931 NULL
+copy_play_buf_25932 copy_play_buf 3 25932 NULL
+flush_25957 flush 2 25957 NULL
+udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
+lustre_msg_buflen_v2_25997 lustre_msg_buflen_v2 0 25997 NULL
+SyS_process_vm_readv_26019 SyS_process_vm_readv 3-5 26019 NULL
+xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
+mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
+selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
+tun_do_read_26047 tun_do_read 5 26047 NULL
+keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
+rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
+read_sb_page_26119 read_sb_page 5 26119 NULL
+ath9k_hw_name_26146 ath9k_hw_name 3 26146 NULL
+copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
+gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
+ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
+disk_devt_26180 disk_devt 0 26180 NULL
+cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
+ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
+xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
+mce_write_26201 mce_write 3 26201 NULL
+mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL
+_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
+rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL
+bio_split_26235 bio_split 2 26235 NULL
+crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
+apei_resources_request_26279 apei_resources_request 0 26279 NULL
+wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
+snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
+pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
+pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
+check_can_nocow_26336 check_can_nocow 2 26336 NULL
+snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL
+ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 6-3 26357 NULL
+cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
+invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
+ntty_write_26404 ntty_write 3 26404 NULL
+firmware_store_26408 firmware_store 4 26408 NULL
+pagemap_read_26441 pagemap_read 3 26441 NULL
+tower_read_26461 tower_read 3 26461 NULL nohasharray
+enc_pools_add_pages_26461 enc_pools_add_pages 1 26461 &tower_read_26461
+ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
+ulong_write_file_26485 ulong_write_file 3 26485 NULL
+dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
+read_vmcore_26501 read_vmcore 3 26501 NULL
+uhid_char_write_26502 uhid_char_write 3 26502 NULL
+vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 4-3 26507 NULL
+iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
+SyS_rt_sigpending_26538 SyS_rt_sigpending 2 26538 NULL
+__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
+dio_new_bio_26562 dio_new_bio 0 26562 NULL
+rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL
+pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
+irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
+inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
+nouveau_volt_create__26654 nouveau_volt_create_ 4 26654 NULL
+cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
+nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
+pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
+bos_desc_26752 bos_desc 0 26752 NULL
+snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
+dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL
+qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
+cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
+iwl_trans_read_mem32_26825 iwl_trans_read_mem32 0 26825 NULL
+smk_write_load_26829 smk_write_load 3 26829 NULL
+scnprint_id_26842 scnprint_id 3-0 26842 NULL
+ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
+tipc_conn_sendmsg_26867 tipc_conn_sendmsg 5 26867 NULL
+ath6kl_create_qos_write_26879 ath6kl_create_qos_write 3 26879 NULL
+svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
+cfg80211_process_auth_26916 cfg80211_process_auth 3 26916 NULL
+x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
+scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
+sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 NULL nohasharray
+pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 &sctp_setsockopt_adaptation_layer_26935
+hecubafb_write_26942 hecubafb_write 3 26942 NULL
+do_trimming_26952 do_trimming 3 26952 NULL nohasharray
+extract_entropy_user_26952 extract_entropy_user 3 26952 &do_trimming_26952
+do_direct_IO_26979 do_direct_IO 0 26979 NULL
+__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
+ext4_convert_unwritten_extents_27064 ext4_convert_unwritten_extents 4-3-0 27064 NULL
+snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
+paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
+alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
+btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL nohasharray
+ath9k_hw_4k_dump_eeprom_27089 ath9k_hw_4k_dump_eeprom 5-4 27089 &btmrvl_hscmd_write_27089
+__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
+get_kernel_page_27133 get_kernel_page 0 27133 NULL
+drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
+pms_capture_27142 pms_capture 4 27142 NULL
+btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
+snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL
+ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
+__sg_alloc_table_27198 __sg_alloc_table 0 27198 NULL
+write_kmem_27225 write_kmem 3 27225 NULL
+dbAllocAG_27228 dbAllocAG 0 27228 NULL
+rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
+ll_track_gid_seq_write_27267 ll_track_gid_seq_write 3 27267 NULL
+comedi_alloc_devpriv_27272 comedi_alloc_devpriv 2 27272 NULL
+copy_from_buf_27308 copy_from_buf 4-2 27308 NULL
+virtqueue_add_inbuf_27312 virtqueue_add_inbuf 3 27312 NULL
+snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
+afs_cell_create_27346 afs_cell_create 2 27346 NULL
+iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL
+pcbit_stat_27364 pcbit_stat 2 27364 NULL
+seq_read_27411 seq_read 3 27411 NULL
+ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
+ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
+ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0-3-4 27422 NULL
+cypress_write_27423 cypress_write 4 27423 NULL
+sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
+v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
+hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
+ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
+btrfs_get_64_27499 btrfs_get_64 0 27499 NULL
+garmin_read_process_27509 garmin_read_process 3 27509 NULL
+oti_alloc_cookies_27510 oti_alloc_cookies 2 27510 NULL
+ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
+snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
+SyS_fgetxattr_27571 SyS_fgetxattr 4 27571 NULL
+sco_sock_recvmsg_27572 sco_sock_recvmsg 4 27572 NULL
+libipw_alloc_txb_27579 libipw_alloc_txb 1 27579 NULL
+ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 NULL nohasharray
+nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &ocfs2_xattr_ibody_get_27642 nohasharray
+read_flush_procfs_27642 read_flush_procfs 3 27642 &nl80211_send_connect_result_27642 nohasharray
+ocfs2_direct_IO_27642 ocfs2_direct_IO 4 27642 &read_flush_procfs_27642
+add_new_gdb_27643 add_new_gdb 3 27643 NULL
+btrfs_fallocate_27647 btrfs_fallocate 3-4 27647 NULL
+qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
+cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
+ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
+fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4 27702 NULL
+evm_write_key_27715 evm_write_key 3 27715 NULL
+ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
+xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
+SyS_setsockopt_27759 SyS_setsockopt 5 27759 NULL
+__lov_setstripe_27782 __lov_setstripe 2 27782 NULL
+twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL
+SyS_readv_27804 SyS_readv 3 27804 NULL
+mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
+fwtty_buffer_rx_27821 fwtty_buffer_rx 3 27821 NULL
+hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL
+init_header_complete_27833 init_header_complete 0 27833 NULL
+read_profile_27859 read_profile 3 27859 NULL
+sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
+ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
+unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
+check_mapped_name_27943 check_mapped_name 3 27943 NULL
+tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
+tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL
+device_register_27972 device_register 0 27972 NULL nohasharray
+mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972
+pci_enable_device_flags_27977 pci_enable_device_flags 0 27977 NULL
+f2fs_bio_alloc_27983 f2fs_bio_alloc 2 27983 NULL
+edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
+snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
+powercap_register_zone_28028 powercap_register_zone 6 28028 NULL
+sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
+rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL
+cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL
+pool_status_28055 pool_status 5 28055 NULL
+init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL
+lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
+tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
+mmc_test_alloc_mem_28102 mmc_test_alloc_mem 3-2 28102 NULL
+rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
+vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
+video_read_28148 video_read 3 28148 NULL
+snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
+stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
+vread_28173 vread 0-3 28173 NULL
+macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
+counter_free_res_28187 counter_free_res 5 28187 NULL
+read_disk_sb_28188 read_disk_sb 2 28188 NULL
+nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
+__qp_memcpy_from_queue_28220 __qp_memcpy_from_queue 3-4 28220 NULL
+line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL
+amd_nb_num_28228 amd_nb_num 0 28228 NULL
+fuse_direct_IO_28275 fuse_direct_IO 4 28275 NULL
+usemap_size_28281 usemap_size 0 28281 NULL
+inline_xattr_size_28285 inline_xattr_size 0 28285 NULL
+dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
+SyS_ppoll_28290 SyS_ppoll 2 28290 NULL
+kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL
+nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 NULL
+snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
+bm_entry_write_28338 bm_entry_write 3 28338 NULL
+tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
+snapshot_write_28351 snapshot_write 3 28351 NULL
+xfs_iomap_write_unwritten_28365 xfs_iomap_write_unwritten 3-2 28365 NULL
+batadv_handle_tt_response_28370 batadv_handle_tt_response 4 28370 NULL
+dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
+tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
+bypass_pwup_write_28416 bypass_pwup_write 3 28416 NULL
+subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
+ksocknal_alloc_tx_28426 ksocknal_alloc_tx 2 28426 NULL
+mpage_readpages_28436 mpage_readpages 3 28436 NULL
+snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
+key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
+alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
+ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
+i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
+sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
+mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 NULL nohasharray
+run_delalloc_range_28545 run_delalloc_range 3-4 28545 &mptctl_getiocinfo_28545 nohasharray
+aio_read_events_28545 aio_read_events 3 28545 &run_delalloc_range_28545
+sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL
+b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
+asymmetric_verify_28567 asymmetric_verify 3 28567 NULL
+oxygen_read32_28582 oxygen_read32 0 28582 NULL
+extract_entropy_28604 extract_entropy 5-3 28604 NULL
+kfifo_unused_28612 kfifo_unused 0 28612 NULL
+snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
+setup_usemap_28636 setup_usemap 3-4 28636 NULL
+qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL
+p9_fcall_alloc_28652 p9_fcall_alloc 1 28652 NULL
+read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL
+blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
+SyS_setgroups16_28686 SyS_setgroups16 1 28686 NULL
+kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 NULL
+drm_plane_init_28731 drm_plane_init 6 28731 NULL
+spi_execute_28736 spi_execute 5 28736 NULL
+snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
+read_file_btcoex_28743 read_file_btcoex 3 28743 NULL
+max_hw_blocks_28748 max_hw_blocks 0 28748 NULL
+rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL
+ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
+sel_write_member_28800 sel_write_member 3 28800 NULL
+cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
+iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
+vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
+ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
+max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL
+packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
+da9055_group_write_28904 da9055_group_write 2-3 28904 NULL
+ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
+iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL
+push_rx_28939 push_rx 3 28939 NULL
+btrfs_trim_block_group_28963 btrfs_trim_block_group 3-4 28963 NULL
+alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
+hash_net6_expire_28979 hash_net6_expire 4 28979 NULL
+hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
+bin_uuid_28999 bin_uuid 3 28999 NULL
+fd_execute_rw_29004 fd_execute_rw 3 29004 NULL
+ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
+rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 NULL
+btrfs_root_bytenr_29058 btrfs_root_bytenr 0 29058 NULL
+iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
+roundup_64_29066 roundup_64 2-0-1 29066 NULL
+lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
+sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL
+iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL
+i915_error_object_create_sized_29091 i915_error_object_create_sized 3 29091 NULL
+isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
+snprintf_29125 snprintf 0 29125 NULL
+iov_shorten_29130 iov_shorten 0 29130 NULL
+proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
+kvm_mmu_notifier_clear_flush_young_29154 kvm_mmu_notifier_clear_flush_young 3 29154 NULL
+drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
+wusb_prf_256_29203 wusb_prf_256 7 29203 NULL
+iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
+nvme_trans_copy_from_user_29227 nvme_trans_copy_from_user 3 29227 NULL
+irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
+evdev_handle_get_val_29242 evdev_handle_get_val 5-6 29242 NULL
+security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
+prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
+ext4_fiemap_29296 ext4_fiemap 4 29296 NULL
+sn9c102_read_29305 sn9c102_read 3 29305 NULL
+__fuse_get_req_29315 __fuse_get_req 2 29315 NULL
+lprocfs_write_helper_29323 lprocfs_write_helper 2 29323 NULL
+kvm_handle_hva_29326 kvm_handle_hva 2 29326 NULL
+tun_put_user_29337 tun_put_user 5 29337 NULL
+__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
+l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
+mwifiex_cfg80211_mgmt_tx_29387 mwifiex_cfg80211_mgmt_tx 7 29387 NULL
+read_file_tx99_power_29405 read_file_tx99_power 3 29405 NULL
+mempool_create_29437 mempool_create 1 29437 NULL
+crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
+p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL
+validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
+SyS_flistxattr_29474 SyS_flistxattr 3 29474 NULL
+do_register_entry_29478 do_register_entry 4 29478 NULL
+simple_strtoul_29480 simple_strtoul 0 29480 NULL
+btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
+btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
+write_file_regidx_29517 write_file_regidx 3 29517 NULL
+atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
+ftrace_write_29551 ftrace_write 3 29551 NULL
+idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
+leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
+kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
+lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
+iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL
+pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL
+dio_set_defer_completion_29599 dio_set_defer_completion 0 29599 NULL
+slots_per_page_29601 slots_per_page 0 29601 NULL
+osc_cached_mb_seq_write_29610 osc_cached_mb_seq_write 3 29610 NULL
+nla_get_u16_29624 nla_get_u16 0 29624 NULL
+tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
+sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
+sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
+lustre_posix_acl_xattr_2ext_29693 lustre_posix_acl_xattr_2ext 2 29693 NULL
+posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 NULL
+probes_write_29711 probes_write 3 29711 NULL
+read_cis_cache_29735 read_cis_cache 4 29735 NULL
+xfs_new_eof_29737 xfs_new_eof 2 29737 NULL
+std_nic_write_29752 std_nic_write 3 29752 NULL
+dbAlloc_29794 dbAlloc 0 29794 NULL
+tcp_sendpage_29829 tcp_sendpage 4 29829 NULL
+__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
+kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
+count_partial_29850 count_partial 0 29850 NULL
+write_file_bool_bmps_29870 write_file_bool_bmps 3 29870 NULL
+ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
+scsi_end_request_29876 scsi_end_request 3 29876 NULL
+crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
+lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL
+write_file_queue_29922 write_file_queue 3 29922 NULL
+ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
+__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
+ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
+dev_mem_write_30028 dev_mem_write 3 30028 NULL
+alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
+scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
+drp_wmove_30043 drp_wmove 4 30043 NULL
+__pci_request_selected_regions_30058 __pci_request_selected_regions 0 30058 NULL
+cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
+snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
+rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
+defragment_dma_buffer_30113 defragment_dma_buffer 0 30113 NULL
+spi_async_locked_30117 spi_async_locked 0 30117 NULL
+recv_stream_30138 recv_stream 4 30138 NULL
+u_memcpya_30139 u_memcpya 3-2 30139 NULL
+elfcorehdr_read_30159 elfcorehdr_read 2 30159 NULL
+alloc_switch_ctx_30165 alloc_switch_ctx 2 30165 NULL
+expand_inode_data_30169 expand_inode_data 2-3 30169 NULL
+mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
+drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
+usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
+read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
+SyS_semop_30227 SyS_semop 3 30227 NULL
+bitmap_file_set_bit_30228 bitmap_file_set_bit 2 30228 NULL
+ocfs2_calc_bg_discontig_credits_30230 ocfs2_calc_bg_discontig_credits 0 30230 NULL
+rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
+hfsplus_trusted_setxattr_30270 hfsplus_trusted_setxattr 4 30270 NULL
+isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
+compat_readv_30273 compat_readv 3 30273 NULL
+skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
+pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
+tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
+osc_contention_seconds_seq_write_30305 osc_contention_seconds_seq_write 3 30305 NULL
+ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
+i8254_read_30330 i8254_read 0 30330 NULL
+resource_from_user_30341 resource_from_user 3 30341 NULL
+o2nm_this_node_30342 o2nm_this_node 0 30342 NULL
+kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
+C_SYSC_readv_30369 C_SYSC_readv 3 30369 NULL
+blkdev_issue_zeroout_30392 blkdev_issue_zeroout 3 30392 NULL
+c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
+get_kernel_pages_30397 get_kernel_pages 0 30397 NULL
+vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
+tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
+lstcon_session_info_30425 lstcon_session_info 6 30425 NULL
+enable_write_30456 enable_write 3 30456 NULL
+tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
+urandom_read_30462 urandom_read 3 30462 NULL
+zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
+i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL
+adu_write_30487 adu_write 3 30487 NULL
+dtim_interval_write_30489 dtim_interval_write 3 30489 NULL
+batadv_send_tt_request_30493 batadv_send_tt_request 5 30493 NULL
+dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
+set_config_30526 set_config 0 30526 NULL nohasharray
+debug_debug2_read_30526 debug_debug2_read 3 30526 &set_config_30526
+xfs_sb_version_hasftype_30559 xfs_sb_version_hasftype 0 30559 NULL
+disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
+set_le_30581 set_le 4 30581 NULL
+blk_init_tags_30592 blk_init_tags 1 30592 NULL
+i2c_hid_get_report_length_30598 i2c_hid_get_report_length 0 30598 NULL
+sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL nohasharray
+cpufreq_get_global_kobject_30610 cpufreq_get_global_kobject 0 30610 &sgl_map_user_pages_30610
+SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
+macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
+ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
+compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
+mlx5_ib_alloc_fast_reg_page_list_30638 mlx5_ib_alloc_fast_reg_page_list 2 30638 NULL
+SyS_listxattr_30647 SyS_listxattr 3 30647 NULL
+jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
+ni_ai_fifo_read_30681 ni_ai_fifo_read 3 30681 NULL
+dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
+lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL
+lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
+snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
+snapshot_status_30744 snapshot_status 5 30744 NULL
+fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL
+smk_read_doi_30813 smk_read_doi 3 30813 NULL
+get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
+sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
+wd_autoreset_write_30862 wd_autoreset_write 3 30862 NULL
+ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
+pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
+sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
+tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
+huge_page_mask_30981 huge_page_mask 0 30981 NULL
+read_file_bt_ant_diversity_30983 read_file_bt_ant_diversity 3 30983 NULL
+lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
+ima_eventsig_init_31022 ima_eventsig_init 5 31022 NULL
+template_fmt_size_31033 template_fmt_size 0 31033 NULL
+do_setup_msi_irqs_31043 do_setup_msi_irqs 2 31043 NULL
+stride_pg_count_31053 stride_pg_count 0-2-1-4-3-5 31053 NULL
+lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
+sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
+proc_gid_map_write_31093 proc_gid_map_write 3 31093 NULL
+compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
+depth_read_31112 depth_read 3 31112 NULL
+hash_ipportnet6_expire_31118 hash_ipportnet6_expire 4 31118 NULL
+kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
+size_inside_page_31141 size_inside_page 0 31141 NULL
+w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
+ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
+r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
+mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
+__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
+cpumask_weight_31215 cpumask_weight 0 31215 NULL
+__read_reg_31216 __read_reg 0 31216 NULL
+atm_get_addr_31221 atm_get_addr 3 31221 NULL
+tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL
+cyy_readb_31240 cyy_readb 0 31240 NULL
+_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
+ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
+hash_netportnet4_expire_31290 hash_netportnet4_expire 4 31290 NULL
+uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
+sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
+command_file_write_31318 command_file_write 3 31318 NULL
+hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL
+em28xx_init_usb_xfer_31337 em28xx_init_usb_xfer 4-6 31337 NULL
+outlen_write_31358 outlen_write 3 31358 NULL
+ieee80211_rx_mgmt_auth_31366 ieee80211_rx_mgmt_auth 3 31366 NULL
+xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
+vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 3-2 31374 NULL
+trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
+inb_31388 inb 0 31388 NULL
+key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
+_sp2d_max_pg_31422 _sp2d_max_pg 0 31422 NULL
+TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
+snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
+transport_alloc_session_tags_31449 transport_alloc_session_tags 2-3 31449 NULL
+opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
+xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
+alg_setkey_31485 alg_setkey 3 31485 NULL
+rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
+qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
+__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
+hidraw_write_31536 hidraw_write 3 31536 NULL
+usbvision_read_31555 usbvision_read 3 31555 NULL
+tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
+get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL
+osst_write_31581 osst_write 3 31581 NULL
+snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL
+iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
+mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL
+arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
+videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
+pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
+xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
+__lgread_31668 __lgread 4 31668 NULL
+copy_from_user_nmi_31672 copy_from_user_nmi 3-0 31672 NULL
+forced_ps_read_31685 forced_ps_read 3 31685 NULL
+fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL
+utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL nohasharray
+lu_buf_check_and_grow_31735 lu_buf_check_and_grow 2 31735 &utf16s_to_utf8s_31735
+shmem_pwrite_slow_31741 shmem_pwrite_slow 3-2 31741 NULL
+input_abs_get_max_31742 input_abs_get_max 0 31742 NULL nohasharray
+NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 &input_abs_get_max_31742
+bcm_char_read_31750 bcm_char_read 3 31750 NULL
+snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
+SyS_lsetxattr_31766 SyS_lsetxattr 4 31766 NULL
+usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
+ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
+isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
+data_write_31805 data_write 3 31805 NULL
+SyS_msgsnd_31814 SyS_msgsnd 3 31814 NULL
+strnlen_user_31815 strnlen_user 0-2 31815 NULL
+sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
+SyS_ppoll_31855 SyS_ppoll 2 31855 NULL
+iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL
+drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
+ddb_output_write_31902 ddb_output_write 3-0 31902 NULL
+xattr_permission_31907 xattr_permission 0 31907 NULL
+lu_buf_realloc_31915 lu_buf_realloc 2 31915 NULL
+new_dir_31919 new_dir 3 31919 NULL
+kmem_alloc_31920 kmem_alloc 1 31920 NULL
+SYSC_sethostname_31940 SYSC_sethostname 2 31940 NULL
+read_mem_31942 read_mem 3 31942 NULL nohasharray
+iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 &read_mem_31942
+vb2_write_31948 vb2_write 3 31948 NULL
+pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
+regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
+copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
+iblock_execute_rw_31982 iblock_execute_rw 3 31982 NULL nohasharray
+vx_read_status_31982 vx_read_status 0 31982 &iblock_execute_rw_31982
+find_next_zero_bit_31990 find_next_zero_bit 0 31990 NULL
+lustre_acl_xattr_merge2posix_31992 lustre_acl_xattr_merge2posix 2 31992 NULL
+sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
+calc_hmac_32010 calc_hmac 3 32010 NULL
+aead_len_32021 aead_len 0 32021 NULL
+posix_acl_set_32037 posix_acl_set 4 32037 NULL
+stk_read_32038 stk_read 3 32038 NULL
+ocfs2_update_edge_lengths_32046 ocfs2_update_edge_lengths 3 32046 NULL
+SYSC_llistxattr_32061 SYSC_llistxattr 3 32061 NULL
+proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
+cow_file_range_inline_32091 cow_file_range_inline 3 32091 NULL
+bio_alloc_32095 bio_alloc 2 32095 NULL
+ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
+disk_status_32120 disk_status 4 32120 NULL
+kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
+venus_link_32165 venus_link 5 32165 NULL
+do_writepages_32173 do_writepages 0 32173 NULL
+del_ptr_32197 del_ptr 4 32197 NULL
+wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
+riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
+caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
+lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
+ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
+kvm_set_spte_hva_32312 kvm_set_spte_hva 2 32312 NULL
+cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
+SyS_select_32319 SyS_select 1 32319 NULL
+nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
+nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
+t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
+dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
+rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
+ReadHDLCPCI_32362 ReadHDLCPCI 0 32362 NULL nohasharray
+sel_read_initcon_32362 sel_read_initcon 3 32362 &ReadHDLCPCI_32362
+ocfs2_cancel_convert_32392 ocfs2_cancel_convert 0 32392 NULL
+ll_setxattr_common_32398 ll_setxattr_common 4 32398 NULL
+xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
+vmci_qp_alloc_32405 vmci_qp_alloc 5-3 32405 NULL
+cache_status_32462 cache_status 5 32462 NULL
+fill_readbuf_32464 fill_readbuf 3 32464 NULL
+dgap_usertoboard_32490 dgap_usertoboard 4 32490 NULL
+ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
+bypass_pwoff_write_32499 bypass_pwoff_write 3 32499 NULL
+mdc_pinger_recov_seq_write_32510 mdc_pinger_recov_seq_write 3 32510 NULL
+ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
+disconnect_32521 disconnect 4 32521 NULL
+qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
+ocfs2_refresh_qinfo_32524 ocfs2_refresh_qinfo 0 32524 NULL nohasharray
+audio_get_intf_req_32524 audio_get_intf_req 0 32524 &ocfs2_refresh_qinfo_32524
+ilo_read_32531 ilo_read 3 32531 NULL
+ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
+format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
+aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
+osc_iocontrol_32565 osc_iocontrol 3 32565 NULL
+mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
+pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
+read_file_beacon_32595 read_file_beacon 3 32595 NULL
+ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
+irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
+cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
+kvmalloc_32646 kvmalloc 1 32646 NULL
+ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
+generic_readlink_32654 generic_readlink 3 32654 NULL
+move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
+apei_res_add_32674 apei_res_add 0 32674 NULL
+compat_SyS_preadv_32679 compat_SyS_preadv 3 32679 NULL
+jfs_readpages_32702 jfs_readpages 4 32702 NULL
+rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
+i40e_pci_sriov_enable_32742 i40e_pci_sriov_enable 2 32742 NULL
+megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
+stats_read_ul_32751 stats_read_ul 3 32751 NULL
+vmci_transport_dgram_dequeue_32775 vmci_transport_dgram_dequeue 4 32775 NULL
+sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
+rproc_name_read_32805 rproc_name_read 3 32805 NULL
+new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
+cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 NULL nohasharray
+ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 &cifs_writedata_alloc_32880
+vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 6-4 32884 NULL
+il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
+zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
+rmap_recycle_32938 rmap_recycle 3 32938 NULL
+xfs_log_reserve_32959 xfs_log_reserve 2 32959 NULL
+ocfs2_check_dir_trailer_32968 ocfs2_check_dir_trailer 0 32968 NULL
+compat_filldir_32999 compat_filldir 3 32999 NULL
+SyS_syslog_33007 SyS_syslog 3 33007 NULL
+br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
+write_file_bt_ant_diversity_33019 write_file_bt_ant_diversity 3 33019 NULL
+mic_virtio_copy_to_user_33048 mic_virtio_copy_to_user 3 33048 NULL
+SYSC_lgetxattr_33049 SYSC_lgetxattr 4 33049 NULL
+pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
+ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
+bitmap_resize_33054 bitmap_resize 2 33054 NULL
+stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
+sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
+alloc_tio_33077 alloc_tio 3 33077 NULL
+acl_permission_check_33083 acl_permission_check 0 33083 NULL
+fb_sys_write_33130 fb_sys_write 3 33130 NULL
+__len_within_target_33132 __len_within_target 0 33132 NULL
+SyS_poll_33152 SyS_poll 2 33152 NULL
+debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
+dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
+pp_read_33210 pp_read 3 33210 NULL
+xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL
+snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
+cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
+sync_pt_create_33282 sync_pt_create 2 33282 NULL
+mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
+isku_sysfs_read_keys_easyzone_33318 isku_sysfs_read_keys_easyzone 6 33318 NULL
+vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
+joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
+lov_stripesize_seq_write_33353 lov_stripesize_seq_write 3 33353 NULL
+create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
+irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
+read_file_regidx_33370 read_file_regidx 3 33370 NULL
+ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
+scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
+ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 3-2 33394 NULL
+cfs_trace_copyin_string_33396 cfs_trace_copyin_string 4 33396 NULL
+snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 NULL
+hash_netiface6_expire_33421 hash_netiface6_expire 4 33421 NULL
+dis_tap_write_33426 dis_tap_write 3 33426 NULL
+message_stats_list_33440 message_stats_list 5 33440 NULL
+ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
+create_entry_33479 create_entry 2 33479 NULL
+ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
+elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
+res_counter_read_33499 res_counter_read 4 33499 NULL
+hash_netnet4_expire_33500 hash_netnet4_expire 4 33500 NULL
+fb_read_33506 fb_read 3 33506 NULL
+musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
+ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
+nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
+aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
+tomoyo_read_self_33539 tomoyo_read_self 3 33539 NULL nohasharray
+osc_max_rpcs_in_flight_seq_write_33539 osc_max_rpcs_in_flight_seq_write 3 33539 &tomoyo_read_self_33539
+count_subheaders_33591 count_subheaders 0 33591 NULL
+scsi_execute_33596 scsi_execute 5 33596 NULL
+comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
+xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL
+usb_gstrings_attach_33615 usb_gstrings_attach 3 33615 NULL nohasharray
+il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 &usb_gstrings_attach_33615
+stride_page_count_33641 stride_page_count 2 33641 NULL
+irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
+inw_p_33668 inw_p 0 33668 NULL
+arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
+i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
+nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
+netlink_sendmsg_33708 netlink_sendmsg 4 33708 NULL
+tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
+ext4_wb_update_i_disksize_33717 ext4_wb_update_i_disksize 2 33717 NULL
+pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
+write_file_spectral_count_33723 write_file_spectral_count 3 33723 NULL
+__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
+vifs_state_read_33762 vifs_state_read 3 33762 NULL
+hashtab_create_33769 hashtab_create 3 33769 NULL
+if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
+filter_write_33819 filter_write 3 33819 NULL
+sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
+scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
+ext4_journal_extend_33835 ext4_journal_extend 2 33835 NULL
+oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
+get_user_pages_33908 get_user_pages 0 33908 NULL
+ath6kl_roam_mode_write_33912 ath6kl_roam_mode_write 3 33912 NULL
+queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
+sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
+lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
+read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL
+hfsplus_osx_setxattr_33952 hfsplus_osx_setxattr 4 33952 NULL
+__proc_dump_kernel_33954 __proc_dump_kernel 5 33954 NULL
+vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
+lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
+ppp_write_34034 ppp_write 3 34034 NULL
+tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
+memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
+pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
+__irq_domain_add_34101 __irq_domain_add 2 34101 NULL
+proc_scsi_host_write_34107 proc_scsi_host_write 3 34107 NULL
+islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
+ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2-0 34135 NULL
+ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
+shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
+ocfs2_xattr_list_entry_34165 ocfs2_xattr_list_entry 0 34165 NULL
+skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
+ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
+sysfs_bin_read_34228 sysfs_bin_read 3 34228 NULL
+bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
+ocfs2_dlm_lock_34265 ocfs2_dlm_lock 0 34265 NULL
+device_private_init_34279 device_private_init 0 34279 NULL
+ext4_get_groups_count_34324 ext4_get_groups_count 0 34324 NULL
+pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 NULL nohasharray
+iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 &pcpu_need_to_extend_34326
+crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 NULL nohasharray
+sync_page_io_34363 sync_page_io 3 34363 &crypto_ablkcipher_ivsize_34363
+rngapi_reset_34366 rngapi_reset 3 34366 NULL
+ea_read_34378 ea_read 0 34378 NULL
+fuse_send_read_34379 fuse_send_read 4 34379 NULL
+av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
+usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
+read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
+iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
+ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
+wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
+nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
+usbtest_alloc_urb_34446 usbtest_alloc_urb 3-5 34446 NULL
+mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
+skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
+i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
+ocfs2_mv_xattr_buckets_34484 ocfs2_mv_xattr_buckets 6 34484 NULL
+security_inode_permission_34488 security_inode_permission 0 34488 NULL
+SyS_pwritev_34494 SyS_pwritev 3 34494 NULL
+qp_alloc_res_34496 qp_alloc_res 5 34496 NULL
+lu_buf_check_and_alloc_34505 lu_buf_check_and_alloc 2 34505 NULL
+ext4_fallocate_34537 ext4_fallocate 4-3 34537 NULL nohasharray
+tracing_stats_read_34537 tracing_stats_read 3 34537 &ext4_fallocate_34537
+hugetlbfs_read_actor_34547 hugetlbfs_read_actor 2-5-4-0 34547 NULL
+dbBackSplit_34561 dbBackSplit 0 34561 NULL
+alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL
+lov_stripecount_seq_write_34582 lov_stripecount_seq_write 3 34582 NULL
+init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
+inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
+ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL nohasharray
+cw1200_queue_init_34599 cw1200_queue_init 4 34599 &ceph_msgpool_init_34599
+brcmf_cfg80211_mgmt_tx_34608 brcmf_cfg80211_mgmt_tx 7 34608 NULL
+__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL
+apei_get_nvs_resources_34616 apei_get_nvs_resources 0 34616 NULL
+__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
+cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
+kvm_set_spte_hva_34671 kvm_set_spte_hva 2 34671 NULL
+sleep_auth_write_34676 sleep_auth_write 3 34676 NULL
+isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
+batadv_tvlv_realloc_packet_buff_34688 batadv_tvlv_realloc_packet_buff 3-4 34688 NULL
+port_print_34704 port_print 3 34704 NULL
+ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
+platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
+reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
+lsm_alloc_plain_34755 lsm_alloc_plain 1 34755 NULL
+bootmode_store_34762 bootmode_store 4 34762 NULL
+device_add_34766 device_add 0 34766 NULL
+qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
+SYSC_keyctl_34800 SYSC_keyctl 4 34800 NULL
+can_nocow_extent_34801 can_nocow_extent 2 34801 NULL
+drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
+ll_setxattr_34806 ll_setxattr 4 34806 NULL
+file_page_index_34820 file_page_index 0-2 34820 NULL
+b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
+nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
+acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
+usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
+ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
+msg_print_text_34889 msg_print_text 0 34889 NULL
+ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
+si476x_radio_read_rsq_primary_blob_34916 si476x_radio_read_rsq_primary_blob 3 34916 NULL
+__inode_permission_34925 __inode_permission 0 34925 NULL nohasharray
+btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 &__inode_permission_34925
+ceph_aio_write_34930 ceph_aio_write 4 34930 NULL
+sec_flags2str_34933 sec_flags2str 3 34933 NULL
+snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
+i2c_transfer_34958 i2c_transfer 0 34958 NULL
+do_add_page_to_bio_34974 do_add_page_to_bio 2-10 34974 NULL
+rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
+l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
+sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
+coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
+brcmf_sdio_chip_writenvram_35042 brcmf_sdio_chip_writenvram 4 35042 NULL
+pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
+__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
+capi_write_35104 capi_write 3 35104 NULL nohasharray
+tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
+ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
+ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
+message_stats_print_35158 message_stats_print 6 35158 NULL
+iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
+ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
+unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
+security_key_getsecurity_35218 security_key_getsecurity 0 35218 NULL nohasharray
+striped_read_35218 striped_read 0-2 35218 &security_key_getsecurity_35218
+rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 NULL
+set_fd_set_35249 set_fd_set 1 35249 NULL
+ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
+jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
+dis_disc_write_35265 dis_disc_write 3 35265 NULL
+dma_show_regs_35266 dma_show_regs 3 35266 NULL
+irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
+i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
+isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
+__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 NULL nohasharray
+brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 &__btrfs_buffered_write_35311
+tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
+ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
+nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
+ieee80211_rx_mgmt_deauth_35351 ieee80211_rx_mgmt_deauth 3 35351 NULL
+compat_filldir64_35354 compat_filldir64 3 35354 NULL
+read_kmem_35372 read_kmem 3 35372 NULL
+SyS_getxattr_35408 SyS_getxattr 4 35408 NULL
+rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
+buffer_to_user_35439 buffer_to_user 3 35439 NULL
+fiemap_prepare_and_copy_exts_35494 fiemap_prepare_and_copy_exts 5 35494 NULL
+btrfs_prealloc_file_range_trans_35500 btrfs_prealloc_file_range_trans 4 35500 NULL
+async_setkey_35521 async_setkey 3 35521 NULL
+__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
+iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
+pstore_mkfile_35536 pstore_mkfile 7 35536 NULL
+rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
+ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
+ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
+sysfs_create_subdir_35567 sysfs_create_subdir 0 35567 NULL
+ext4_blocks_for_truncate_35579 ext4_blocks_for_truncate 0 35579 NULL
+ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
+spk_msg_set_35586 spk_msg_set 3 35586 NULL
+kernel_readv_35617 kernel_readv 3 35617 NULL
+reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
+pci_request_regions_35635 pci_request_regions 0 35635 NULL
+ptlrpcd_steal_rqset_35637 ptlrpcd_steal_rqset 0 35637 NULL
+spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
+rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
+compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
+SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
+rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
+md_super_write_35703 md_super_write 4 35703 NULL
+ocfs2_extent_recs_per_gd_35710 ocfs2_extent_recs_per_gd 0 35710 NULL
+iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
+udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
+pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
+tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
+vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
+mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
+fls64_35862 fls64 0 35862 NULL
+kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
+ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
+uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
+SyS_set_mempolicy_35909 SyS_set_mempolicy 3 35909 NULL
+kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
+rbio_nr_pages_35916 rbio_nr_pages 0-1-2 35916 NULL
+sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
+rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
+put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
+ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
+ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL nohasharray
+generic_ocp_read_35974 generic_ocp_read 3 35974 &ceph_buffer_new_35974
+acl_alloc_35979 acl_alloc 1 35979 NULL
+device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
+generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
+write_file_antenna_35998 write_file_antenna 3 35998 NULL nohasharray
+kuc_alloc_35998 kuc_alloc 1 35998 &write_file_antenna_35998
+il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
+__videobuf_alloc_36031 __videobuf_alloc 1 36031 NULL
+account_shadowed_36048 account_shadowed 2 36048 NULL
+gpio_power_read_36059 gpio_power_read 3 36059 NULL
+write_emulate_36065 write_emulate 2-4 36065 NULL
+stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
+radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
+ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
+ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
+snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
+mtip_hw_read_device_status_36082 mtip_hw_read_device_status 3 36082 NULL
+vga_arb_write_36112 vga_arb_write 3 36112 NULL
+simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
+ext3_readpages_36144 ext3_readpages 4 36144 NULL
+twl_set_36154 twl_set 2 36154 NULL
+b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
+btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
+snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
+SyS_kexec_load_36176 SyS_kexec_load 2 36176 NULL
+ramoops_init_przs_36199 ramoops_init_przs 4 36199 NULL
+SYSC_sched_getaffinity_36208 SYSC_sched_getaffinity 2 36208 NULL
+SYSC_process_vm_readv_36216 SYSC_process_vm_readv 3-5 36216 NULL
+atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
+viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
+SYSC_getxattr_36242 SYSC_getxattr 4 36242 NULL
+rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
+scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
+compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
+usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL nohasharray
+cfs_hash_buckets_realloc_36276 cfs_hash_buckets_realloc 4 36276 &usb_buffer_alloc_36276
+codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
+crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
+nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
+lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
+cfg80211_rx_mlme_mgmt_36306 cfg80211_rx_mlme_mgmt 3 36306 NULL
+ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
+fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
+lc_create_36332 lc_create 4 36332 NULL
+jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
+isku_sysfs_read_key_mask_36343 isku_sysfs_read_key_mask 6 36343 NULL
+ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
+v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL
+to_sector_36361 to_sector 0-1 36361 NULL
+tunables_read_36385 tunables_read 3 36385 NULL
+afs_alloc_flat_call_36399 afs_alloc_flat_call 2-3 36399 NULL
+sierra_write_36402 sierra_write 4 36402 NULL
+iwl_dbgfs_d3_sram_write_36403 iwl_dbgfs_d3_sram_write 3 36403 NULL
+SyS_sethostname_36417 SyS_sethostname 2 36417 NULL
+ReadW6692B_36445 ReadW6692B 0 36445 NULL
+sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
+alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
+SyS_process_vm_writev_36476 SyS_process_vm_writev 3-5 36476 NULL
+b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
+tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 NULL nohasharray
+ip6_append_data_36490 ip6_append_data 4 36490 &tx_tx_checksum_result_read_36490
+cmd_loop_36491 cmd_loop 0 36491 NULL
+__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
+mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
+get_param_l_36518 get_param_l 0 36518 NULL
+ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
+crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
+cpu_type_read_36540 cpu_type_read 3 36540 NULL
+__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL nohasharray
+macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
+btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
+__erst_read_36579 __erst_read 0 36579 NULL
+put_cmsg_36589 put_cmsg 4 36589 NULL
+fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
+vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
+convert_extent_item_v0_36645 convert_extent_item_v0 4 36645 NULL
+ced_ioctl_36647 ced_ioctl 2 36647 NULL
+lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
+osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
+iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
+xillybus_read_36678 xillybus_read 3 36678 NULL
+gsmtty_write_36702 gsmtty_write 3 36702 NULL
+snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
+cxgbi_device_register_36746 cxgbi_device_register 1-2 36746 NULL
+ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
+ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
+i40e_init_lan_hmc_36796 i40e_init_lan_hmc 2-3-4-5 36796 NULL
+proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
+hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
+int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
+fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
+keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
+cm_write_36858 cm_write 3 36858 NULL
+tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
+svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
+raid56_parity_write_36877 raid56_parity_write 5 36877 NULL
+__btrfs_map_block_36883 __btrfs_map_block 3 36883 NULL
+ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
+selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
+OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
+audio_set_endpoint_req_36918 audio_set_endpoint_req 0 36918 NULL
+crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
+il4965_rs_sta_dbgfs_scale_table_write_36979 il4965_rs_sta_dbgfs_scale_table_write 3 36979 NULL
+drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
+auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
+setxattr_37006 setxattr 4 37006 NULL
+ocfs2_dlm_unlock_37037 ocfs2_dlm_unlock 0 37037 NULL
+command_file_read_37038 command_file_read 3 37038 NULL
+figure_loop_size_37051 figure_loop_size 2-3 37051 NULL
+ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 NULL nohasharray
+qp_broker_create_37053 qp_broker_create 6-5 37053 &ieee80211_if_read_drop_unencrypted_37053
+SYSC_setxattr_37078 SYSC_setxattr 4 37078 NULL
+parse_command_37079 parse_command 2 37079 NULL
+pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
+tun_get_user_37094 tun_get_user 5 37094 NULL
+has_wrprotected_page_37123 has_wrprotected_page 3-2 37123 NULL
+snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
+mtt_free_res_37144 mtt_free_res 5 37144 NULL
+msg_word_37164 msg_word 0 37164 NULL
+f2fs_direct_IO_37167 f2fs_direct_IO 4 37167 NULL
+can_set_xattr_37182 can_set_xattr 4 37182 NULL
+vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
+sysfs_add_file_37200 sysfs_add_file 0 37200 NULL
+forced_ps_write_37209 forced_ps_write 3 37209 NULL
+crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL nohasharray
+ext4_ind_direct_IO_37212 ext4_ind_direct_IO 0-4 37212 &crypto_shash_descsize_37212
+bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2-0 37213 NULL
+regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
+__do_replace_37227 __do_replace 5 37227 NULL
+iwl_dbgfs_d3_sram_read_37237 iwl_dbgfs_d3_sram_read 3 37237 NULL
+rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
+exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
+ieee80211_if_read_power_mode_37305 ieee80211_if_read_power_mode 3 37305 NULL
+ext3_direct_IO_37308 ext3_direct_IO 4 37308 NULL
+ocfs2_calc_extend_credits_37310 ocfs2_calc_extend_credits 0 37310 NULL
+jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
+send_msg_37323 send_msg 4 37323 NULL
+l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL nohasharray
+bnx2x_vf_fill_fw_str_37327 bnx2x_vf_fill_fw_str 3 37327 &l2cap_create_connless_pdu_37327
+scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
+rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
+security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
+hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
+acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL
+tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
+iwl_print_last_event_logs_37433 iwl_print_last_event_logs 7-9-0 37433 NULL
+fru_alloc_37442 fru_alloc 1 37442 NULL
+tcp_established_options_37450 tcp_established_options 0 37450 NULL nohasharray
+tipc_send2port_37450 tipc_send2port 4 37450 &tcp_established_options_37450
+brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
+get_est_timing_37484 get_est_timing 0 37484 NULL
+kmem_realloc_37489 kmem_realloc 2 37489 NULL
+__hfsplus_setxattr_37499 __hfsplus_setxattr 4 37499 NULL
+bitmap_dirty_bits_37503 bitmap_dirty_bits 2 37503 NULL
+osc_active_seq_write_37514 osc_active_seq_write 3 37514 NULL
+bdev_writeseg_37519 bdev_writeseg 2-3 37519 NULL
+xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
+fault_inject_read_37534 fault_inject_read 3 37534 NULL
+hdr_size_37536 hdr_size 0 37536 NULL
+extent_map_end_37550 extent_map_end 0 37550 NULL
+sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL
+ioat_chansts_37558 ioat_chansts 0 37558 NULL
+xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
+qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
+kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6 37611 NULL
+SYSC_mbind_37622 SYSC_mbind 5 37622 NULL
+SyS_mbind_37638 SyS_mbind 5 37638 NULL
+bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
+rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
+vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
+SYSC_get_mempolicy_37664 SYSC_get_mempolicy 3 37664 NULL
+__wa_seg_calculate_isoc_frame_count_37672 __wa_seg_calculate_isoc_frame_count 0 37672 NULL
+ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
+regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
+page_chain_free_37697 page_chain_free 0 37697 NULL
+nametbl_header_37698 nametbl_header 2-0 37698 NULL
+__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
+dynamic_ps_timeout_write_37713 dynamic_ps_timeout_write 3 37713 NULL
+read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
+ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
+ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
+dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
+il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
+smk_read_logging_37804 smk_read_logging 3 37804 NULL
+ocrdma_alloc_frmr_page_list_37815 ocrdma_alloc_frmr_page_list 2 37815 NULL
+rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
+android_get_p2p_addr_37832 android_get_p2p_addr 0 37832 NULL
+jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
+o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
+xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
+set_registers_37883 set_registers 4 37883 NULL
+btrfs_stack_file_extent_disk_bytenr_37888 btrfs_stack_file_extent_disk_bytenr 0 37888 NULL
+pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 NULL nohasharray
+_rtw_malloc_37928 _rtw_malloc 1 37928 &pkt_alloc_packet_data_37928
+read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
+write_file_bool_37957 write_file_bool 3 37957 NULL
+fifo_alloc_37961 fifo_alloc 1 37961 NULL
+rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
+persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
+vfs_readv_38011 vfs_readv 3 38011 NULL
+aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
+il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 NULL nohasharray
+klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 &il_dbgfs_chain_noise_read_38044
+SyS_llistxattr_38048 SyS_llistxattr 3 38048 NULL
+sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
+_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL
+nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
+alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
+xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL nohasharray
+wcn36xx_smd_rsp_process_38081 wcn36xx_smd_rsp_process 3 38081 &xfs_buf_readahead_map_38081
+uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
+request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
+proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
+ep0_read_38095 ep0_read 3 38095 NULL
+sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL nohasharray
+osc_checksum_seq_write_38096 osc_checksum_seq_write 3 38096 &sk_wmem_schedule_38096
+o2hb_read_slots_38105 o2hb_read_slots 2 38105 NULL
+snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
+vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
+__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
+btrfs_extent_same_38163 btrfs_extent_same 3-2 38163 NULL
+kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
+cdev_add_38176 cdev_add 2-3 38176 NULL
+rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
+get_ucode_user_38202 get_ucode_user 3 38202 NULL
+osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
+ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
+_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
+mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
+ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268 nohasharray
+SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268
+xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
+xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
+ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
+ucma_query_path_38305 ucma_query_path 3 38305 NULL
+isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
+ida_simple_get_38326 ida_simple_get 0 38326 NULL
+__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
+btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
+xfs_free_file_space_38383 xfs_free_file_space 2-3 38383 NULL
+dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
+ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
+pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
+kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
+blk_end_bidi_request_38482 blk_end_bidi_request 3-4 38482 NULL
+dev_names_read_38509 dev_names_read 3 38509 NULL
+iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
+event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
+ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
+btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
+kuc_len_38557 kuc_len 0-1 38557 NULL
+irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
+il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
+_ipw_read32_38565 _ipw_read32 0 38565 NULL
+snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
+copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
+icn_writecmd_38629 icn_writecmd 2 38629 NULL
+write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
+ext2_readpages_38640 ext2_readpages 4 38640 NULL
+audit_init_entry_38644 audit_init_entry 1 38644 NULL
+qp_broker_alloc_38646 qp_broker_alloc 6-5 38646 NULL
+mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
+nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
+snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
+iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
+rbio_add_io_page_38700 rbio_add_io_page 6 38700 NULL
+alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
+w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
+udf_readpages_38761 udf_readpages 4 38761 NULL
+iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
+bcache_device_init_38781 bcache_device_init 3 38781 NULL
+snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
+slab_order_38794 slab_order 0 38794 NULL
+do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
+err_decode_38804 err_decode 2 38804 NULL
+ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
+direct_entry_38836 direct_entry 3 38836 NULL
+compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
+read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
+interfaces_38859 interfaces 2 38859 NULL
+pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
+dbgfs_state_38894 dbgfs_state 3 38894 NULL
+f2fs_xattr_set_acl_38895 f2fs_xattr_set_acl 4 38895 NULL
+il_dbgfs_sram_write_38942 il_dbgfs_sram_write 3 38942 NULL
+__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
+usb_maxpacket_38977 usb_maxpacket 0 38977 NULL nohasharray
+C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 &usb_maxpacket_38977
+OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
+lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
+get_nodes_39012 get_nodes 3 39012 NULL
+twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
+__blkdev_issue_zeroout_39020 __blkdev_issue_zeroout 3 39020 NULL
+_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
+do_write_kmem_39051 do_write_kmem 0-1-3 39051 NULL
+ReadHFC_39104 ReadHFC 0 39104 NULL
+tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
+__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
+ea_foreach_39133 ea_foreach 0 39133 NULL
+generic_permission_39150 generic_permission 0 39150 NULL
+proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
+ath9k_hw_ar9003_dump_eeprom_39156 ath9k_hw_ar9003_dump_eeprom 5-4 39156 NULL
+echo_client_kbrw_39170 echo_client_kbrw 6 39170 NULL
+ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
+ubi_more_update_data_39189 ubi_more_update_data 4 39189 NULL
+qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
+ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
+posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
+snd_pcm_capture_forward_39248 snd_pcm_capture_forward 2 39248 NULL
+r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
+pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
+i915_error_state_read_39254 i915_error_state_read 3 39254 NULL
+rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
+__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
+insert_reserved_file_extent_39327 insert_reserved_file_extent 3 39327 NULL
+wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
+ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
+do_write_log_from_user_39362 do_write_log_from_user 3-0 39362 NULL
+vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
+regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
+fnic_trace_debugfs_read_39380 fnic_trace_debugfs_read 3 39380 NULL
+ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
+__send_to_port_39386 __send_to_port 3 39386 NULL
+user_power_read_39414 user_power_read 3 39414 NULL
+alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
+mic_desc_size_39464 mic_desc_size 0 39464 NULL
+apei_resources_add_39470 apei_resources_add 0 39470 NULL
+setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
+ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
+cl_req_alloc_39523 cl_req_alloc 4 39523 NULL
+int_proc_write_39542 int_proc_write 3 39542 NULL
+mdc_unpack_capa_39553 mdc_unpack_capa 0 39553 NULL
+pp_write_39554 pp_write 3 39554 NULL
+datablob_format_39571 datablob_format 2 39571 NULL nohasharray
+ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
+ext_depth_39607 ext_depth 0 39607 NULL
+batadv_tt_tvlv_generate_39615 batadv_tt_tvlv_generate 4 39615 NULL
+nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
+sdio_readb_39618 sdio_readb 0 39618 NULL
+set_dev_class_39645 set_dev_class 4 39645 NULL
+snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
+tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL
+kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
+v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
+hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
+do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
+sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
+ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
+adt7316_spi_multi_read_39765 adt7316_spi_multi_read 3 39765 NULL
+security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
+snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
+get_priv_size_39828 get_priv_size 0-1 39828 NULL
+pkt_add_39897 pkt_add 3 39897 NULL
+read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
+gen_pool_add_virt_39913 gen_pool_add_virt 4 39913 NULL
+dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
+aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
+exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
+oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
+__spi_async_39932 __spi_async 0 39932 NULL
+__get_order_39935 __get_order 0 39935 NULL
+error_error_frame_read_39947 error_error_frame_read 3 39947 NULL
+tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
+lstcon_group_list_39958 lstcon_group_list 2 39958 NULL
+dma_push_rx_39973 dma_push_rx 2 39973 NULL
+broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
+mthca_array_init_39987 mthca_array_init 2 39987 NULL
+fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
+server_name2svname_39998 server_name2svname 4 39998 NULL
+xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
+ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
+disc_pwup_write_40027 disc_pwup_write 3 40027 NULL
+ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
+datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
+l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 NULL nohasharray
+add_tty_40055 add_tty 1 40055 &l2cap_create_iframe_pdu_40055
+atomic_xchg_40070 atomic_xchg 0 40070 NULL
+sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
+dwc2_max_desc_num_40132 dwc2_max_desc_num 0 40132 NULL
+rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
+ath10k_write_simulate_fw_crash_40143 ath10k_write_simulate_fw_crash 3 40143 NULL
+iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
+pt_write_40159 pt_write 3 40159 NULL
+scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
+ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL
+allocate_probes_40204 allocate_probes 1 40204 NULL
+au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
+compress_file_range_40225 compress_file_range 3-4 40225 NULL
+osst_read_40237 osst_read 3 40237 NULL
+lpage_info_slot_40243 lpage_info_slot 3-1 40243 NULL
+ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4-3 40248 NULL
+ptlrpc_queue_wait_40252 ptlrpc_queue_wait 0 40252 NULL
+rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
+ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
+usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
+rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
+SyS_bind_40303 SyS_bind 3 40303 NULL
+ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
+mmio_read_40348 mmio_read 4 40348 NULL
+event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
+ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 3-2 40365 NULL
+get_chars_40373 get_chars 3 40373 NULL
+fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
+tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
+zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
+gp2ap020a00f_write_event_threshold_40461 gp2ap020a00f_write_event_threshold 2 40461 NULL
+SyS_writev_40467 SyS_writev 3 40467 NULL
+SyS_select_40473 SyS_select 1 40473 NULL
+afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
+batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
+devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
+__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
+TSS_checkhmac2_40520 TSS_checkhmac2 5-7 40520 NULL
+ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
+ima_write_policy_40548 ima_write_policy 3 40548 NULL
+esp_alloc_tmp_40558 esp_alloc_tmp 3-2 40558 NULL
+b1_get_byte_40597 b1_get_byte 0 40597 NULL
+get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
+twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
+__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
+pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
+fops_read_40672 fops_read 3 40672 NULL
+idr_get_empty_slot_40674 idr_get_empty_slot 0 40674 NULL
+alloc_rbio_40676 alloc_rbio 4 40676 NULL
+videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 4-3 40678 NULL
+pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
+nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
+__seq_open_private_40715 __seq_open_private 3 40715 NULL
+fuse_readpages_40737 fuse_readpages 4 40737 NULL
+xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL
+security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
+card_send_command_40757 card_send_command 3 40757 NULL
+ad1889_readl_40765 ad1889_readl 0 40765 NULL
+pg_write_40766 pg_write 3 40766 NULL
+show_list_40775 show_list 3-0 40775 NULL
+kfifo_out_copy_r_40784 kfifo_out_copy_r 3-0 40784 NULL
+bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
+pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
+add_action_40811 add_action 4 40811 NULL
+nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
+SyS_mbind_40828 SyS_mbind 5 40828 NULL
+nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
+v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
+read_file_queue_40895 read_file_queue 3 40895 NULL
+waiters_read_40902 waiters_read 3 40902 NULL
+isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
+gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
+vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
+snd_vx_create_40948 snd_vx_create 4 40948 NULL nohasharray
+sg_alloc_table_40948 sg_alloc_table 0 40948 &snd_vx_create_40948
+rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
+il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
+iwl_dbgfs_scan_ant_rxchain_read_40999 iwl_dbgfs_scan_ant_rxchain_read 3 40999 NULL
+mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
+__proc_dobitmasks_41029 __proc_dobitmasks 5 41029 NULL
+_req_append_segment_41031 _req_append_segment 2 41031 NULL
+mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
+ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
+lprocfs_write_frac_helper_41050 lprocfs_write_frac_helper 2 41050 NULL
+calculate_order_41061 calculate_order 0 41061 NULL
+vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
+beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
+cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
+nvme_map_user_pages_41093 nvme_map_user_pages 4-3 41093 NULL nohasharray
+roccat_read_41093 roccat_read 3 41093 &nvme_map_user_pages_41093
+dma_attach_41094 dma_attach 5-6 41094 NULL
+provide_user_output_41105 provide_user_output 3 41105 NULL
+f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
+ath10k_read_wmi_services_41112 ath10k_read_wmi_services 3 41112 NULL
+ocfs2_extend_trans_41116 ocfs2_extend_trans 2 41116 NULL
+v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
+tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
+dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
+dgap_driver_kzmalloc_41189 dgap_driver_kzmalloc 1 41189 NULL
+compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
+dfs_file_write_41196 dfs_file_write 3 41196 NULL
+nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
+cfg80211_process_disassoc_41231 cfg80211_process_disassoc 3 41231 NULL
+hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
+erst_read_41260 erst_read 0 41260 NULL
+alloc_context_41283 alloc_context 1 41283 NULL
+o2hb_setup_one_bio_41341 o2hb_setup_one_bio 4 41341 NULL
+twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
+rtw_android_set_block_41347 rtw_android_set_block 0 41347 NULL
+cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
+kmp_init_41373 kmp_init 2 41373 NULL
+isr_commands_read_41398 isr_commands_read 3 41398 NULL
+rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
+xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
+isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
+lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
+iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
+ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4 41442 NULL
+pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
+se_io_cb_41461 se_io_cb 3 41461 NULL
+layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
+rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
+bl_alloc_init_bio_41478 bl_alloc_init_bio 1 41478 NULL
+kvm_unmap_hva_range_41484 kvm_unmap_hva_range 3-2 41484 NULL
+wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
+SyS_get_mempolicy_41495 SyS_get_mempolicy 3 41495 NULL
+hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
+xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
+SyS_preadv_41523 SyS_preadv 3 41523 NULL
+dm_get_reserved_rq_based_ios_41529 dm_get_reserved_rq_based_ios 0 41529 NULL
+tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
+ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
+nr_status_frames_41559 nr_status_frames 0-1 41559 NULL nohasharray
+si476x_radio_fops_read_41559 si476x_radio_fops_read 3 41559 &nr_status_frames_41559
+rng_dev_read_41581 rng_dev_read 3 41581 NULL
+batadv_tvlv_container_ogm_append_41588 batadv_tvlv_container_ogm_append 4 41588 NULL
+vga_io_r_41609 vga_io_r 0 41609 NULL
+tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
+lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 NULL nohasharray
+usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 &lbs_bcnmiss_write_41613
+a2mp_send_41615 a2mp_send 4 41615 NULL
+lstcon_batch_list_41627 lstcon_batch_list 2 41627 NULL
+mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
+rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
+get_std_timing_41654 get_std_timing 0 41654 NULL
+ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
+fill_pcm_stream_name_41685 fill_pcm_stream_name 2 41685 NULL
+lov_unpackmd_41701 lov_unpackmd 4 41701 NULL
+apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
+fillonedir_41746 fillonedir 3 41746 NULL
+iwl_dbgfs_bt_notif_read_41794 iwl_dbgfs_bt_notif_read 3 41794 NULL
+hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
+rtw_android_get_macaddr_41812 rtw_android_get_macaddr 0 41812 NULL
+sco_send_frame_41815 sco_send_frame 3 41815 NULL
+ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
+do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
+keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
+pci_map_single_41869 pci_map_single 0 41869 NULL
+usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
+v_APCI3120_InterruptDmaMoveBlock16bit_41914 v_APCI3120_InterruptDmaMoveBlock16bit 4 41914 NULL
+get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
+nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
+ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
+sci_rxfill_41945 sci_rxfill 0 41945 NULL
+read_gssp_41947 read_gssp 3 41947 NULL
+ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
+portnames_read_41958 portnames_read 3 41958 NULL
+dst_mtu_41969 dst_mtu 0 41969 NULL
+cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
+pool_allocate_42012 pool_allocate 3 42012 NULL
+spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
+rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
+create_dir_42025 create_dir 0 42025 NULL
+acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
+__btrfs_drop_extents_42032 __btrfs_drop_extents 5 42032 NULL
+__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
+irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
+jffs2_do_link_42048 jffs2_do_link 6 42048 NULL
+ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
+InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
+scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
+sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
+submit_inquiry_42108 submit_inquiry 3 42108 NULL
+dw_dma_cyclic_prep_42113 dw_dma_cyclic_prep 3-4 42113 NULL
+obd_get_info_42156 obd_get_info 0 42156 NULL
+blk_ioctl_zeroout_42160 blk_ioctl_zeroout 3 42160 NULL
+mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
+read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
+oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
+write_file_beacon_42185 write_file_beacon 3 42185 NULL
+get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
+pla_ocp_read_42235 pla_ocp_read 3 42235 NULL
+rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
+find_last_bit_42260 find_last_bit 0 42260 NULL
+__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
+snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
+__cpus_weight_42299 __cpus_weight 2-0 42299 NULL
+sel_read_perm_42302 sel_read_perm 3 42302 NULL
+sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
+ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
+xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 NULL
+hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
+tcp_sync_mss_42330 tcp_sync_mss 2-0 42330 NULL
+ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
+tipc_send_42374 tipc_send 3 42374 NULL
+drbd_md_last_sector_42378 drbd_md_last_sector 0 42378 NULL
+il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
+msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
+krng_get_random_42420 krng_get_random 3 42420 NULL
+gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
+key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
+alloc_request_42448 alloc_request 0 42448 NULL
+snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
+tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
+kuc_free_42455 kuc_free 2 42455 NULL
+__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
+omfs_readpages_42490 omfs_readpages 4 42490 NULL
+bypass_write_42498 bypass_write 3 42498 NULL
+SyS_mincore_42511 SyS_mincore 1-2 42511 NULL
+kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
+dio_bio_complete_42524 dio_bio_complete 0 42524 NULL
+smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
+dbAllocNear_42546 dbAllocNear 0 42546 NULL
+ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 NULL
+udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
+iwl_print_event_log_42566 iwl_print_event_log 7-5-0 42566 NULL
+xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
+oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
+ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
+scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
+br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
+parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
+_regmap_raw_write_42652 _regmap_raw_write 4-2 42652 NULL
+request_key_and_link_42693 request_key_and_link 4 42693 NULL
+vb2_read_42703 vb2_read 3 42703 NULL
+read_status_42722 read_status 0 42722 NULL
+dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
+set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
+ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
+dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
+x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
+snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
+cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL nohasharray
+isku_sysfs_read_info_42781 isku_sysfs_read_info 6 42781 &cryptd_hash_setkey_42781
+elfcorehdr_read_notes_42786 elfcorehdr_read_notes 2 42786 NULL
+koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
+ptlrpc_request_bufs_pack_42793 ptlrpc_request_bufs_pack 0 42793 NULL
+ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0 42796 NULL
+fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
+drm_ioctl_42813 drm_ioctl 2 42813 NULL
+iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
+set_arg_42824 set_arg 3 42824 NULL
+si476x_radio_read_rsq_blob_42827 si476x_radio_read_rsq_blob 3 42827 NULL
+ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
+nvme_trans_unit_serial_page_42879 nvme_trans_unit_serial_page 4 42879 NULL
+xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
+hd_end_request_42904 hd_end_request 2 42904 NULL
+sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
+sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL nohasharray
+mdc_unpack_acl_42941 mdc_unpack_acl 0 42941 &sctp_getsockopt_maxburst_42941
+vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
+blkdev_direct_IO_42962 blkdev_direct_IO 4 42962 NULL
+read_file_node_stat_42964 read_file_node_stat 3 42964 NULL
+compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
+nfs_idmap_get_desc_42990 nfs_idmap_get_desc 4-2 42990 NULL nohasharray
+rtw_os_xmit_resource_alloc_42990 rtw_os_xmit_resource_alloc 3 42990 &nfs_idmap_get_desc_42990
+isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
+wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
+nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
+nfs_map_group_to_gid_43082 nfs_map_group_to_gid 3 43082 NULL
+_xfer_secondary_pool_43089 _xfer_secondary_pool 2 43089 NULL
+sysfs_create_file_ns_43103 sysfs_create_file_ns 0 43103 NULL
+ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
+calculate_node_totalpages_43118 calculate_node_totalpages 2-3 43118 NULL
+read_file_dfs_43145 read_file_dfs 3 43145 NULL
+cfs_cpt_table_alloc_43159 cfs_cpt_table_alloc 1 43159 NULL
+usb_string_sub_43164 usb_string_sub 0 43164 NULL
+il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
+ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
+ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
+uio_write_43202 uio_write 3 43202 NULL
+iso_callback_43208 iso_callback 3 43208 NULL
+ath10k_p2p_calc_noa_ie_len_43209 ath10k_p2p_calc_noa_ie_len 0 43209 NULL
+f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
+atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
+batadv_tt_tvlv_unicast_handler_v1_43239 batadv_tt_tvlv_unicast_handler_v1 5 43239 NULL
+vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
+ide_end_rq_43269 ide_end_rq 4 43269 NULL
+nilfs_direct_IO_43271 nilfs_direct_IO 4 43271 NULL
+parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
+evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
+filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
+mpage_alloc_43299 mpage_alloc 3 43299 NULL
+mmu_set_spte_43327 mmu_set_spte 7-6 43327 NULL
+__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
+xenfb_write_43412 xenfb_write 3 43412 NULL
+__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
+usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
+ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
+usb_string_43443 usb_string 0 43443 NULL nohasharray
+usemap_size_43443 usemap_size 0-2-1 43443 &usb_string_43443
+get_vm_area_size_43444 get_vm_area_size 0 43444 NULL
+nvme_trans_device_id_page_43466 nvme_trans_device_id_page 4 43466 NULL
+tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
+ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
+do_readlink_43518 do_readlink 2 43518 NULL
+dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
+read_events_43534 read_events 3 43534 NULL
+cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
+tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL
+request_resource_43548 request_resource 0 43548 NULL
+rpc_malloc_43573 rpc_malloc 2 43573 NULL
+handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
+lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
+proc_read_43614 proc_read 3 43614 NULL
+disable_dma_on_even_43618 disable_dma_on_even 0 43618 NULL
+alloc_thread_groups_43625 alloc_thread_groups 2 43625 NULL
+random_write_43656 random_write 3 43656 NULL
+bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
+ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
+write_file_tx99_power_43670 write_file_tx99_power 3 43670 NULL
+dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
+max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
+drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
+snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
+fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
+gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
+sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
+ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
+byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
+btrfs_copy_from_user_43806 btrfs_copy_from_user 0-3-1 43806 NULL
+ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
+ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
+read_flush_43851 read_flush 3 43851 NULL
+pm860x_bulk_write_43875 pm860x_bulk_write 2-3 43875 NULL
+SendString_43928 SendString 3 43928 NULL
+stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
+__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
+nla_reserve_43984 nla_reserve 3 43984 NULL
+__clkdev_alloc_43990 __clkdev_alloc 1 43990 NULL
+scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
+kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &scsi_command_size_43992 nohasharray
+bcm_recvmsg_43992 bcm_recvmsg 4 43992 &kvm_read_guest_virt_43992
+emit_flags_44006 emit_flags 4-3 44006 NULL
+write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
+fru_strlen_44046 fru_strlen 0 44046 NULL
+ath9k_def_dump_modal_eeprom_44078 ath9k_def_dump_modal_eeprom 3-2-0 44078 NULL
+SYSC_add_key_44079 SYSC_add_key 4 44079 NULL
+__vxge_hw_vpath_tim_configure_44093 __vxge_hw_vpath_tim_configure 2 44093 NULL
+xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
+skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
+tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
+SyS_process_vm_writev_44129 SyS_process_vm_writev 3-5 44129 NULL
+ttm_get_pages_44142 ttm_get_pages 2 44142 NULL
+scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
+ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
+SYSC_set_mempolicy_44176 SYSC_set_mempolicy 3 44176 NULL
+readreg_ipac_44186 readreg_ipac 0 44186 NULL
+handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
+srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
+scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
+sigma_action_write_regmap_44240 sigma_action_write_regmap 3 44240 NULL
+apei_resources_sub_44252 apei_resources_sub 0 44252 NULL
+device_create_file_44285 device_create_file 0 44285 NULL
+ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
+bitmap_scnprintf_44318 bitmap_scnprintf 0-2 44318 NULL
+dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
+rs_init_44327 rs_init 1 44327 NULL
+blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL nohasharray
+nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 &blk_queue_init_tags_44355
+alloc_requests_44372 alloc_requests 0 44372 NULL
+rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
+mtip_hw_read_flags_44396 mtip_hw_read_flags 3 44396 NULL
+aoedev_flush_44398 aoedev_flush 2 44398 NULL
+strlcpy_44400 strlcpy 3 44400 NULL
+drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
+osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
+ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
+iwl_dbgfs_bf_params_write_44450 iwl_dbgfs_bf_params_write 3 44450 NULL
+write_file_debug_44476 write_file_debug 3 44476 NULL
+btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
+sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
+bio_advance_44496 bio_advance 2 44496 NULL
+ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
+ac_register_board_44504 ac_register_board 3 44504 NULL
+security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
+iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
+spidev_write_44510 spidev_write 3 44510 NULL
+SyS_io_getevents_44519 SyS_io_getevents 3 44519 NULL
+ieee80211_rx_mgmt_assoc_resp_44525 ieee80211_rx_mgmt_assoc_resp 3 44525 NULL
+comm_write_44537 comm_write 3 44537 NULL
+xfs_log_calc_unit_res_44540 xfs_log_calc_unit_res 0-2 44540 NULL
+dgrp_config_proc_write_44571 dgrp_config_proc_write 3 44571 NULL
+nouveau_perfmon_create__44602 nouveau_perfmon_create_ 4 44602 NULL
+alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
+mpi_resize_44674 mpi_resize 2 44674 NULL
+sysfs_create_link_44685 sysfs_create_link 0 44685 NULL
+ts_read_44687 ts_read 3 44687 NULL
+lov_emerg_alloc_44698 lov_emerg_alloc 1 44698 NULL
+__ocfs2_rotate_tree_left_44705 __ocfs2_rotate_tree_left 3 44705 NULL
+xfer_to_user_44713 xfer_to_user 3 44713 NULL nohasharray
+__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 &xfer_to_user_44713
+_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
+clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
+fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
+key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
+tnode_new_44757 tnode_new 3 44757 NULL nohasharray
+pty_write_44757 pty_write 3 44757 &tnode_new_44757
+__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL
+handsfree_ramp_44777 handsfree_ramp 2 44777 NULL
+irq_domain_add_legacy_44781 irq_domain_add_legacy 4-2 44781 NULL
+sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
+rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
+qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
+mei_cl_read_start_44824 mei_cl_read_start 2 44824 NULL
+rmap_write_protect_44833 rmap_write_protect 2 44833 NULL
+sisusb_write_44834 sisusb_write 3 44834 NULL
+kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
+qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL
+copydesc_user_44855 copydesc_user 3 44855 NULL
+set_advertising_44870 set_advertising 4 44870 NULL
+init_rs_44873 init_rs 1 44873 NULL
+skb_availroom_44883 skb_availroom 0 44883 NULL
+ocfs2_wait_for_mask_44893 ocfs2_wait_for_mask 0 44893 NULL
+do_tty_write_44896 do_tty_write 5 44896 NULL
+regmap_spi_read_44921 regmap_spi_read 3-5 44921 NULL
+tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
+bytepos_delta_45017 bytepos_delta 0-2 45017 NULL
+ptrace_writedata_45021 ptrace_writedata 4 45021 NULL
+dm_kvzalloc_45025 dm_kvzalloc 1 45025 NULL
+vhci_get_user_45039 vhci_get_user 3 45039 NULL
+sysfs_do_create_link_sd_45057 sysfs_do_create_link_sd 0 45057 NULL
+sel_write_user_45060 sel_write_user 3 45060 NULL
+snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
+kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL
+pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
+usbdev_read_45114 usbdev_read 3 45114 NULL
+send_to_tty_45141 send_to_tty 3 45141 NULL
+cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL
+gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
+device_write_45156 device_write 3 45156 NULL nohasharray
+ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 &device_write_45156
+tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
+sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
+snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray
+sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 &snd_sb_csp_load_user_45190
+iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
+spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
+ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
+event_enable_write_45238 event_enable_write 3 45238 NULL
+prism2_pda_proc_read_45246 prism2_pda_proc_read 3 45246 NULL
+input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
+gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL
+snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
+e1000_tx_map_45309 e1000_tx_map 5 45309 NULL
+copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
+null_alloc_repbuf_45375 null_alloc_repbuf 3 45375 NULL
+sock_recv_errqueue_45412 sock_recv_errqueue 3 45412 NULL
+ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 NULL
+ll_iocontrol_register_45430 ll_iocontrol_register 2 45430 NULL
+tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
+__node_remap_45458 __node_remap 4 45458 NULL
+rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
+tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
+rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
+i40e_alloc_vfs_45511 i40e_alloc_vfs 2 45511 NULL
+cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
+copy_macs_45534 copy_macs 4 45534 NULL
+nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
+v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
+cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
+stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
+_regmap_bus_raw_write_45559 _regmap_bus_raw_write 2 45559 NULL
+posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
+venus_rmdir_45564 venus_rmdir 4 45564 NULL
+ath6kl_keepalive_write_45600 ath6kl_keepalive_write 3 45600 NULL
+hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
+compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
+dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
+smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
+unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
+bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699
+sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
+snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL nohasharray
+task_cgroup_path_45734 task_cgroup_path 3 45734 &snd_cs46xx_io_read_45734
+rw_copy_check_uvector_45748 rw_copy_check_uvector 3-0 45748 NULL nohasharray
+v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
+lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
+alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
+osc_checksum_type_seq_write_45785 osc_checksum_type_seq_write 3 45785 NULL
+raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
+rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL
+lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
+pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
+ll_max_readahead_mb_seq_write_45815 ll_max_readahead_mb_seq_write 3 45815 NULL
+memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
+ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
+x509_process_extension_45854 x509_process_extension 5 45854 NULL
+efx_tx_queue_insert_45859 efx_tx_queue_insert 2 45859 NULL
+isdn_write_45863 isdn_write 3 45863 NULL
+tpm_config_in_45880 tpm_config_in 0 45880 NULL
+get_rdac_req_45882 get_rdac_req 3 45882 NULL
+ima_eventdigest_init_common_45889 ima_eventdigest_init_common 2 45889 NULL
+ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
+cfs_cpt_weight_45903 cfs_cpt_weight 0 45903 NULL
+wm_adsp_region_to_reg_45915 wm_adsp_region_to_reg 0-2 45915 NULL
+dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
+alloc_mr_45935 alloc_mr 1 45935 NULL
+copy_to_45969 copy_to 3 45969 NULL
+rb_simple_read_45972 rb_simple_read 3 45972 NULL
+ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
+kobject_init_and_add_46003 kobject_init_and_add 0 46003 NULL
+sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
+fnic_reset_stats_read_46030 fnic_reset_stats_read 3 46030 NULL nohasharray
+get_free_entries_46030 get_free_entries 1 46030 &fnic_reset_stats_read_46030
+__access_remote_vm_46031 __access_remote_vm 0 46031 NULL
+snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
+__ocfs2_move_extent_46060 __ocfs2_move_extent 3-4 46060 NULL nohasharray
+dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
+sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
+il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 NULL nohasharray
+memcg_update_array_size_46111 memcg_update_array_size 1 46111 &il3945_ucode_general_stats_read_46111
+C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL
+mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
+rtw_buf_update_46138 rtw_buf_update 4 46138 NULL
+vb2_dma_sg_get_userptr_46146 vb2_dma_sg_get_userptr 3-2 46146 NULL
+__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
+twl_direction_out_46182 twl_direction_out 2 46182 NULL
+vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
+fq_resize_46195 fq_resize 2 46195 NULL
+add_conn_list_46197 add_conn_list 3-0 46197 NULL
+i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
+tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
+dsp_write_46218 dsp_write 2 46218 NULL
+hash_netiface4_expire_46226 hash_netiface4_expire 4 46226 NULL
+xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL
+mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
+ReadReg_46277 ReadReg 0 46277 NULL
+sptlrpc_req_get_ctx_46303 sptlrpc_req_get_ctx 0 46303 NULL
+sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
+__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL nohasharray
+compat_SyS_readv_46328 compat_SyS_readv 3 46328 &__hwahc_dev_set_key_46328
+iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
+smk_write_direct_46363 smk_write_direct 3 46363 NULL
+fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
+crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
+ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL
+cfs_power2_roundup_46433 cfs_power2_roundup 0-1 46433 NULL
+cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
+parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL
+il_dbgfs_clear_traffic_stats_write_46458 il_dbgfs_clear_traffic_stats_write 3 46458 NULL
+filldir64_46469 filldir64 3 46469 NULL
+fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL
+pin_code_reply_46510 pin_code_reply 4 46510 NULL
+mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
+kmsg_read_46514 kmsg_read 3 46514 NULL nohasharray
+nouveau_drm_ioctl_46514 nouveau_drm_ioctl 2 46514 &kmsg_read_46514
+nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
+dn_current_mss_46574 dn_current_mss 0 46574 NULL
+serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
+snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
+il3945_stats_flag_46606 il3945_stats_flag 3-0 46606 NULL
+vscnprintf_46617 vscnprintf 0-2 46617 NULL
+__kfifo_out_r_46623 __kfifo_out_r 3-0 46623 NULL
+request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
+pci_enable_device_46642 pci_enable_device 0 46642 NULL
+vfs_getxattr_alloc_46649 vfs_getxattr_alloc 0 46649 NULL
+e1000_tx_map_46672 e1000_tx_map 4 46672 NULL
+alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
+__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
+erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
+wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL
+irq_domain_add_simple_46734 irq_domain_add_simple 2 46734 NULL
+read_file_tx99_46741 read_file_tx99 3 46741 NULL
+ext4_count_free_46754 ext4_count_free 2 46754 NULL
+hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
+int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
+_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
+xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
+readreg_46845 readreg 0 46845 NULL
+spi_async_46857 spi_async 0 46857 NULL
+SyS_move_pages_46863 SyS_move_pages 2 46863 NULL nohasharray
+vsnprintf_46863 vsnprintf 0 46863 &SyS_move_pages_46863
+nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
+qp_memcpy_from_queue_iov_46874 qp_memcpy_from_queue_iov 5-4 46874 NULL
+lov_iocontrol_46876 lov_iocontrol 3 46876 NULL
+ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
+sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
+ieee80211_if_fmt_power_mode_46906 ieee80211_if_fmt_power_mode 3 46906 NULL
+wlcore_alloc_hw_46917 wlcore_alloc_hw 1-3 46917 NULL
+fb_write_46924 fb_write 3 46924 NULL
+__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
+qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
+SYSC_poll_46965 SYSC_poll 2 46965 NULL
+crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
+mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
+strlcat_46985 strlcat 3 46985 NULL
+bitmap_file_clear_bit_46990 bitmap_file_clear_bit 2 46990 NULL
+sel_write_bool_46996 sel_write_bool 3 46996 NULL nohasharray
+gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 &sel_write_bool_46996
+blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
+cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL
+fs_path_len_47060 fs_path_len 0 47060 NULL
+ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0 47070 NULL
+pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
+scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
+iwl_dump_nic_event_log_47089 iwl_dump_nic_event_log 0 47089 NULL
+ptlrpc_lprocfs_threads_max_seq_write_47104 ptlrpc_lprocfs_threads_max_seq_write 3 47104 NULL
+mousedev_read_47123 mousedev_read 3 47123 NULL
+acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 NULL nohasharray
+ses_recv_diag_47143 ses_recv_diag 4 47143 &acpi_ut_initialize_buffer_47143
+mxms_headerlen_47161 mxms_headerlen 0 47161 NULL
+rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
+rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL
+can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
+options_write_47243 options_write 3 47243 NULL
+portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
+ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
+gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
+vsnprintf_47291 vsnprintf 0 47291 NULL
+SYSC_semop_47292 SYSC_semop 3 47292 NULL
+tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
+nouveau_fb_create__47316 nouveau_fb_create_ 4 47316 NULL
+ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
+avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
+kvm_arch_create_memslot_47364 kvm_arch_create_memslot 3 47364 NULL nohasharray
+__output_copy_user_47364 __output_copy_user 3 47364 &kvm_arch_create_memslot_47364
+__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
+trace_options_core_read_47390 trace_options_core_read 3 47390 NULL nohasharray
+nv_rd32_47390 nv_rd32 0 47390 &trace_options_core_read_47390
+nametbl_list_47391 nametbl_list 2 47391 NULL
+dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
+pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
+lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL
+sta_vht_capa_read_47409 sta_vht_capa_read 3 47409 NULL
+crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
+lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL
+posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
+nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL
+newpart_47485 newpart 6-4 47485 NULL
+mcp23s17_read_regs_47491 mcp23s17_read_regs 4 47491 NULL
+core_sys_select_47494 core_sys_select 1 47494 NULL
+alloc_arraycache_47505 alloc_arraycache 2 47505 NULL
+unlink_simple_47506 unlink_simple 3 47506 NULL
+pstore_decompress_47510 pstore_decompress 0 47510 NULL
+__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL
+process_vm_rw_47533 process_vm_rw 3-5 47533 NULL nohasharray
+vscnprintf_47533 vscnprintf 0-2 47533 &process_vm_rw_47533
+einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL
+ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
+read_ldt_47570 read_ldt 2 47570 NULL
+isku_sysfs_read_last_set_47572 isku_sysfs_read_last_set 6 47572 NULL
+btrfs_stack_header_bytenr_47589 btrfs_stack_header_bytenr 0 47589 NULL
+ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
+sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL
+cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
+twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL
+get_size_47644 get_size 1-2 47644 NULL
+packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray
+ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700
+save_microcode_47717 save_microcode 3 47717 NULL
+bits_to_user_47733 bits_to_user 2-3 47733 NULL
+carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
+ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
+mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
+alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
+uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
+SyS_setgroups16_47780 SyS_setgroups16 1 47780 NULL
+error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
+posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL
+W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL
+lov_packmd_47810 lov_packmd 0 47810 NULL
+tree_mod_log_insert_move_47823 tree_mod_log_insert_move 5 47823 NULL
+pinconf_dbg_config_write_47835 pinconf_dbg_config_write 3 47835 NULL
+KEY_SIZE_47855 KEY_SIZE 0 47855 NULL
+vhci_read_47878 vhci_read 3 47878 NULL
+keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
+cfs_percpt_alloc_47918 cfs_percpt_alloc 2 47918 NULL
+comedi_write_47926 comedi_write 3 47926 NULL
+nvme_trans_get_blk_desc_len_47946 nvme_trans_get_blk_desc_len 0-2 47946 NULL
+gether_get_ifname_47972 gether_get_ifname 3 47972 NULL
+mempool_resize_47983 mempool_resize 2 47983 NULL nohasharray
+iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 &mempool_resize_47983
+dbg_port_buf_47990 dbg_port_buf 2 47990 NULL
+ib_umad_write_47993 ib_umad_write 3 47993 NULL
+lustre_cfg_len_48002 lustre_cfg_len 0 48002 NULL
+gdm_tty_recv_complete_48011 gdm_tty_recv_complete 2 48011 NULL
+ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
+bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
+pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
+SYSC_writev_48040 SYSC_writev 3 48040 NULL
+wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
+posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
+palmas_bulk_write_48068 palmas_bulk_write 2-3-5 48068 NULL
+disc_write_48070 disc_write 3 48070 NULL
+mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
+skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5 48102 NULL
+vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
+set_discoverable_48141 set_discoverable 4 48141 NULL
+dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
+_add_to_r4w_48152 _add_to_r4w 4 48152 NULL
+isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
+c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
+rbd_obj_method_sync_48170 rbd_obj_method_sync 8 48170 NULL
+alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
+brcmf_sdio_chip_cm3_exitdl_48192 brcmf_sdio_chip_cm3_exitdl 4 48192 NULL
+cfg80211_process_deauth_48200 cfg80211_process_deauth 3 48200 NULL
+ext4_index_trans_blocks_48205 ext4_index_trans_blocks 0-2 48205 NULL
+snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
+ll_direct_IO_26_48216 ll_direct_IO_26 4 48216 NULL
+uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
+nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
+read_file_recv_48232 read_file_recv 3 48232 NULL
+unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL
+nouveau_i2c_port_create__48240 nouveau_i2c_port_create_ 7 48240 NULL
+nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
+batadv_socket_read_48257 batadv_socket_read 3 48257 NULL
+cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
+trace_options_write_48275 trace_options_write 3 48275 NULL
+send_set_info_48288 send_set_info 7 48288 NULL
+lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
+timblogiw_read_48305 timblogiw_read 3 48305 NULL
+hash_setkey_48310 hash_setkey 3 48310 NULL
+audio_set_intf_req_48319 audio_set_intf_req 0 48319 NULL
+kvm_mmu_pte_write_48340 kvm_mmu_pte_write 2 48340 NULL
+skb_add_data_48363 skb_add_data 3 48363 NULL
+tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
+lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
+uhid_event_from_user_48417 uhid_event_from_user 2 48417 NULL
+div64_u64_rem_48418 div64_u64_rem 0-1-2 48418 NULL
+pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
+print_filtered_48442 print_filtered 2-0 48442 NULL
+tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
+compat_SyS_preadv64_48469 compat_SyS_preadv64 3 48469 NULL
+ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
+r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
+ocfs2_refcount_cow_48495 ocfs2_refcount_cow 3 48495 NULL
+send_control_msg_48498 send_control_msg 6 48498 NULL
+count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
+diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
+brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
+phantom_get_free_48514 phantom_get_free 0 48514 NULL
+drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL
+raid10_size_48571 raid10_size 0-2-3 48571 NULL
+llog_data_len_48607 llog_data_len 1 48607 NULL
+do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
+ll_rw_extents_stats_pp_seq_write_48651 ll_rw_extents_stats_pp_seq_write 3 48651 NULL
+mtd_read_48655 mtd_read 0 48655 NULL
+aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
+sm501_create_subdev_48668 sm501_create_subdev 3-4 48668 NULL
+hysdn_log_write_48694 hysdn_log_write 3 48694 NULL
+altera_drscan_48698 altera_drscan 2 48698 NULL
+kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
+recv_msg_48709 recv_msg 4 48709 NULL
+lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL
+SyS_lgetxattr_48719 SyS_lgetxattr 4 48719 NULL
+ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
+ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
+l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 NULL
+gfs2_direct_IO_48774 gfs2_direct_IO 4 48774 NULL
+il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
+twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
+atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
+azx_get_position_48841 azx_get_position 0 48841 NULL
+vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
+comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL
+suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL
+sptlrpc_cli_alloc_reqbuf_48855 sptlrpc_cli_alloc_reqbuf 0 48855 NULL
+C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray
+viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864
+__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
+crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
+joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 NULL
+xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
+msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
+sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL
+si5351_write_parameters_48940 si5351_write_parameters 2 48940 NULL
+event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
+nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
+vmci_handle_arr_create_48971 vmci_handle_arr_create 1 48971 NULL
+rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
+sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
+null_alloc_rs_49019 null_alloc_rs 2 49019 NULL
+filemap_check_errors_49022 filemap_check_errors 0 49022 NULL
+aic_inb_49023 aic_inb 0 49023 NULL
+transient_status_49027 transient_status 4 49027 NULL
+iwl_mvm_power_legacy_dbgfs_read_49038 iwl_mvm_power_legacy_dbgfs_read 4 49038 NULL
+aic7xxx_rem_scb_from_disc_list_49041 aic7xxx_rem_scb_from_disc_list 0 49041 NULL
+scsi_register_49094 scsi_register 2 49094 NULL
+compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
+xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
+ll_max_cached_mb_seq_write_49122 ll_max_cached_mb_seq_write 3 49122 NULL
+pt_read_49136 pt_read 3 49136 NULL
+ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
+f2fs_acl_count_49155 f2fs_acl_count 0-1 49155 NULL
+ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
+__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL
+ath6kl_bgscan_int_write_49178 ath6kl_bgscan_int_write 3 49178 NULL
+dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
+print_queue_49191 print_queue 4-0 49191 NULL
+root_nfs_cat_49192 root_nfs_cat 3 49192 NULL
+iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
+il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
+do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
+nouveau_therm_create__49228 nouveau_therm_create_ 4 49228 NULL
+hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
+ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
+isku_sysfs_read_keys_media_49268 isku_sysfs_read_keys_media 6 49268 NULL
+ptlrpc_check_set_49277 ptlrpc_check_set 0 49277 NULL
+rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
+viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL
+uio_read_49300 uio_read 3 49300 NULL
+isku_sysfs_read_keys_macro_49312 isku_sysfs_read_keys_macro 6 49312 NULL
+SYSC_mincore_49319 SYSC_mincore 2-1 49319 NULL
+fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL
+srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-4-3 49330 NULL
+joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
+iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
+ext4_ext_index_trans_blocks_49396 ext4_ext_index_trans_blocks 0 49396 NULL
+rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
+tnode_alloc_49407 tnode_alloc 1 49407 NULL
+samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
+compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL
+__hfsplus_getxattr_49460 __hfsplus_getxattr 0 49460 NULL
+agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
+xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
+isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
+iwl_dbgfs_disable_power_off_read_49517 iwl_dbgfs_disable_power_off_read 3 49517 NULL
+SyS_listxattr_49519 SyS_listxattr 3 49519 NULL
+emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL
+smk_write_access_49561 smk_write_access 3 49561 NULL
+alloc_chunk_49575 alloc_chunk 1 49575 NULL
+sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
+ptlrpc_request_pack_49581 ptlrpc_request_pack 0 49581 NULL
+readfifo_49583 readfifo 1 49583 NULL
+tap_write_49595 tap_write 3 49595 NULL
+isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
+btrfs_mksubvol_49616 btrfs_mksubvol 3 49616 NULL
+heap_init_49617 heap_init 2 49617 NULL
+smk_write_doi_49621 smk_write_doi 3 49621 NULL
+port_fops_read_49626 port_fops_read 3 49626 NULL
+btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
+aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 4-3 49683 NULL
+SyS_pwritev_49688 SyS_pwritev 3 49688 NULL
+__copy_from_user_nocheck_49699 __copy_from_user_nocheck 0-3 49699 NULL
+cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
+write_pool_49718 write_pool 3 49718 NULL
+kvm_mmu_notifier_invalidate_page_49723 kvm_mmu_notifier_invalidate_page 3 49723 NULL
+sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
+zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
+btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
+fuse_wr_pages_49753 fuse_wr_pages 0-1-2 49753 NULL
+key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
+fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
+w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL
+ceph_osdc_readpages_49789 ceph_osdc_readpages 0 49789 NULL
+nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
+ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
+add_uuid_49831 add_uuid 4 49831 NULL
+iraw_loop_49842 iraw_loop 0-1 49842 NULL
+twl4030_write_49846 twl4030_write 2 49846 NULL
+scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
+timeradd_entry_49850 timeradd_entry 3 49850 NULL
+fiemap_count_to_size_49869 fiemap_count_to_size 0-1 49869 NULL
+sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
+ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
+osc_brw_49896 osc_brw 4 49896 NULL
+config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL
+ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
+drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL
+l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
+dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
+isdn_read_50021 isdn_read 3 50021 NULL
+mdc_rename_pack_50023 mdc_rename_pack 4-6 50023 NULL
+ioread8_50049 ioread8 0 50049 NULL
+fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL
+__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL
+dev_set_alias_50084 dev_set_alias 3 50084 NULL
+libcfs_ioctl_popdata_50087 libcfs_ioctl_popdata 3 50087 NULL
+sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
+altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
+android_set_cntry_50100 android_set_cntry 0 50100 NULL
+read_file_slot_50111 read_file_slot 3 50111 NULL
+rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL
+jfs_direct_IO_50125 jfs_direct_IO 4 50125 NULL
+SYSC_preadv_50134 SYSC_preadv 3 50134 NULL
+copy_items_50140 copy_items 6 50140 NULL
+tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
+kmalloc_node_50163 kmalloc_node 1 50163 NULL
+rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
+ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL
+odev_update_50169 odev_update 2 50169 NULL
+ubi_resize_volume_50172 ubi_resize_volume 2 50172 NULL nohasharray
+ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 &ubi_resize_volume_50172
+cfg80211_roamed_bss_50198 cfg80211_roamed_bss 4-6 50198 NULL
+cyttsp4_probe_50201 cyttsp4_probe 4 50201 NULL
+rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
+mthca_buddy_init_50206 mthca_buddy_init 2 50206 NULL
+l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
+mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
+sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
+rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
+soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL
+SYSC_flistxattr_50307 SYSC_flistxattr 3 50307 NULL
+SYSC_sched_setaffinity_50310 SYSC_sched_setaffinity 2 50310 NULL
+soc_camera_read_50319 soc_camera_read 3 50319 NULL
+do_launder_page_50329 do_launder_page 0 50329 NULL
+nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
+lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
+snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 NULL
+tpm_read_50344 tpm_read 3 50344 NULL
+isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
+iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 NULL
+xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
+roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
+sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
+l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
+iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
+validate_acl_mac_addrs_50429 validate_acl_mac_addrs 0 50429 NULL
+btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL
+pgctrl_write_50453 pgctrl_write 3 50453 NULL
+device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL
+cfs_size_round_50472 cfs_size_round 0-1 50472 NULL
+cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
+mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL
+pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
+ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
+usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL
+fat_readpages_50582 fat_readpages 4 50582 NULL
+iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
+xillybus_write_50605 xillybus_write 3 50605 NULL
+rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
+sparse_early_usemaps_alloc_node_50623 sparse_early_usemaps_alloc_node 4 50623 NULL
+simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
+ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL
+bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
+prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL
+xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL
+dev_mem_read_50706 dev_mem_read 3 50706 NULL
+blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
+__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
+ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
+tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL
+bio_alloc_map_data_50782 bio_alloc_map_data 1-2 50782 NULL
+tpm_write_50798 tpm_write 3 50798 NULL
+write_flush_50803 write_flush 3 50803 NULL
+dvb_play_50814 dvb_play 3 50814 NULL
+dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
+SetArea_50835 SetArea 4 50835 NULL
+videobuf_dma_init_user_50839 videobuf_dma_init_user 4-3 50839 NULL
+carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
+SyS_lgetxattr_50889 SyS_lgetxattr 4 50889 NULL
+netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
+__bdev_writeseg_50903 __bdev_writeseg 4 50903 NULL
+xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
+blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
+hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
+chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
+show_device_status_50947 show_device_status 0 50947 NULL
+irq_timeout_write_50950 irq_timeout_write 3 50950 NULL
+virtio_cread16_50951 virtio_cread16 0 50951 NULL
+sdio_uart_write_50954 sdio_uart_write 3 50954 NULL
+SyS_setxattr_50957 SyS_setxattr 4 50957 NULL
+iwl_statistics_flag_50981 iwl_statistics_flag 3-0 50981 NULL
+timeout_write_50991 timeout_write 3 50991 NULL
+proc_write_51003 proc_write 3 51003 NULL
+jbd2_journal_extend_51012 jbd2_journal_extend 2 51012 NULL
+lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
+fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 NULL
+BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
+dump_midi_51040 dump_midi 3 51040 NULL
+usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL
+srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
+do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
+wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
+jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
+__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
+ti_recv_51110 ti_recv 3 51110 NULL
+uasp_prepare_r_request_51124 uasp_prepare_r_request 0 51124 NULL
+nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
+alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
+simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
+xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
+nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
+snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
+drm_property_create_51239 drm_property_create 4 51239 NULL
+__mxt_read_reg_51249 __mxt_read_reg 0 51249 NULL
+st_read_51251 st_read 3 51251 NULL
+compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
+target_alloc_sgl_51264 target_alloc_sgl 3 51264 NULL
+dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
+ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
+pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
+bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
+init_map_ipmac_51317 init_map_ipmac 5 51317 NULL
+alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
+ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
+alloc_smp_req_51337 alloc_smp_req 1 51337 NULL
+ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
+ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
+radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
+ceph_sync_read_51410 ceph_sync_read 3-0 51410 NULL
+blk_register_region_51424 blk_register_region 1-2 51424 NULL
+mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
+hfsplus_brec_read_51436 hfsplus_brec_read 0 51436 NULL
+ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
+print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
+____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
+xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
+kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL
+ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
+__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
+batadv_tt_prepare_tvlv_local_data_51568 batadv_tt_prepare_tvlv_local_data 0 51568 NULL
+ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
+aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
+table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
+extent_fiemap_51621 extent_fiemap 3 51621 NULL
+sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
+iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
+ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
+sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
+host_mapping_level_51696 host_mapping_level 0 51696 NULL
+sel_write_access_51704 sel_write_access 3 51704 NULL
+tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
+v9fs_alloc_rdir_buf_51716 v9fs_alloc_rdir_buf 2 51716 NULL
+drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
+sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
+cm4040_read_51732 cm4040_read 3 51732 NULL
+get_user_pages_fast_51751 get_user_pages_fast 0 51751 NULL
+ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
+if_write_51756 if_write 3 51756 NULL
+qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
+buffer_from_user_51826 buffer_from_user 3 51826 NULL
+ioread32_51847 ioread32 0 51847 NULL nohasharray
+read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
+do_readv_writev_51849 do_readv_writev 4 51849 NULL
+SYSC_sendto_51852 SYSC_sendto 6 51852 NULL
+bm_page_io_async_51858 bm_page_io_async 2 51858 NULL
+pointer_size_read_51863 pointer_size_read 3 51863 NULL
+get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
+user_read_51881 user_read 3 51881 NULL
+dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
+SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 NULL
+wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
+dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
+__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL
+xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
+scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
+snd_mask_min_51969 snd_mask_min 0 51969 NULL
+__blkdev_get_51972 __blkdev_get 0 51972 NULL
+get_zone_51981 get_zone 0-1 51981 NULL
+ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
+_c4iw_write_mem_dma_51991 _c4iw_write_mem_dma 3 51991 NULL
+dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
+skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
+rdmalt_52022 rdmalt 0 52022 NULL
+override_release_52032 override_release 2 52032 NULL
+end_port_52042 end_port 0 52042 NULL
+dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
+msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
+dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
+__fuse_request_alloc_52060 __fuse_request_alloc 1 52060 NULL
+isofs_readpages_52067 isofs_readpages 4 52067 NULL
+nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
+o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
+split_scan_timeout_write_52128 split_scan_timeout_write 3 52128 NULL
+retry_count_read_52129 retry_count_read 3 52129 NULL
+gdm_usb_hci_send_52138 gdm_usb_hci_send 3 52138 NULL
+sub_alloc_52140 sub_alloc 0 52140 NULL
+hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL
+htable_size_52148 htable_size 0-1 52148 NULL
+smk_write_load2_52155 smk_write_load2 3 52155 NULL
+ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
+mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
+print_prefix_52176 print_prefix 0 52176 NULL
+proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
+vmci_qp_broker_alloc_52216 vmci_qp_broker_alloc 6-5 52216 NULL
+fuse_request_alloc_52243 fuse_request_alloc 1 52243 NULL nohasharray
+xfs_iomap_eof_align_last_fsb_52243 xfs_iomap_eof_align_last_fsb 3 52243 &fuse_request_alloc_52243
+mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
+shrink_slab_52261 shrink_slab 2 52261 NULL
+sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
+handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
+kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
+read_file_reset_52310 read_file_reset 3 52310 NULL
+request_asymmetric_key_52317 request_asymmetric_key 2-4 52317 NULL
+hwflags_read_52318 hwflags_read 3 52318 NULL
+test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
+hur_len_52339 hur_len 0 52339 NULL
+bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
+copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
+iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
+hfsplus_find_attr_52374 hfsplus_find_attr 0 52374 NULL
+mq_emit_config_values_52378 mq_emit_config_values 3 52378 NULL
+isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
+jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
+aer_inject_write_52399 aer_inject_write 3 52399 NULL
+cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
+line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
+hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL
+delay_status_52431 delay_status 5 52431 NULL
+ath6kl_delete_qos_write_52435 ath6kl_delete_qos_write 3 52435 NULL
+ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
+acpi_nvs_for_each_region_52448 acpi_nvs_for_each_region 0 52448 NULL
+alauda_read_data_52452 alauda_read_data 3 52452 NULL
+ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1 52477 NULL
+usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL
+ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
+fd_do_rw_52495 fd_do_rw 3 52495 NULL
+int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
+lmv_get_easize_52504 lmv_get_easize 0 52504 NULL
+pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
+bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
+dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
+raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
+dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
+debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL
+isku_sysfs_read_macro_52587 isku_sysfs_read_macro 6 52587 NULL
+SyS_setsockopt_52610 SyS_setsockopt 5 52610 NULL
+ll_sa_entry_alloc_52611 ll_sa_entry_alloc 4 52611 NULL
+tps80031_writes_52638 tps80031_writes 3-4 52638 NULL
+brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
+nvme_queue_extra_52661 nvme_queue_extra 0-1 52661 NULL
+SYSC_gethostname_52677 SYSC_gethostname 2 52677 NULL
+nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL
+nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
+__dm_stat_bio_52722 __dm_stat_bio 3 52722 NULL
+cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
+blkcipher_next_slow_52733 blkcipher_next_slow 3-4 52733 NULL
+relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
+hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL
+carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
+ieee80211_if_read_beacon_timeout_52756 ieee80211_if_read_beacon_timeout 3 52756 NULL
+nvme_trans_ext_inq_page_52776 nvme_trans_ext_inq_page 3 52776 NULL
+pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
+ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
+mon_bin_get_event_52863 mon_bin_get_event 4-6 52863 NULL
+twl6030_gpadc_write_52867 twl6030_gpadc_write 1 52867 NULL
+qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL
+twlreg_write_52880 twlreg_write 3 52880 NULL
+pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
+cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
+kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
+dio_bio_reap_52913 dio_bio_reap 0 52913 NULL
+__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
+iblock_get_bio_52936 iblock_get_bio 3 52936 NULL
+__nodes_remap_52951 __nodes_remap 5 52951 NULL
+send_packet_52960 send_packet 4 52960 NULL
+ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
+tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
+num_node_state_52989 num_node_state 0 52989 NULL
+efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
+uasp_alloc_stream_res_53015 uasp_alloc_stream_res 0 53015 NULL
+btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
+tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
+bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
+regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL
+cfi_read_query_53066 cfi_read_query 0 53066 NULL
+iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL
+mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
+mic_virtio_copy_from_user_53107 mic_virtio_copy_from_user 3 53107 NULL
+verity_status_53120 verity_status 5 53120 NULL
+brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
+ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
+ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL
+btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL
+clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
+tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
+ptlrpc_lprocfs_req_history_max_seq_write_53243 ptlrpc_lprocfs_req_history_max_seq_write 3 53243 NULL
+hfsplus_xattr_set_posix_acl_53249 hfsplus_xattr_set_posix_acl 4 53249 NULL
+xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5 53258 NULL
+wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL
+btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
+ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL
+find_nr_power_limit_53330 find_nr_power_limit 0 53330 NULL
+gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
+vm_mmap_53339 vm_mmap 0 53339 NULL
+read_6120_creg32_53363 read_6120_creg32 0 53363 NULL
+sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL
+get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
+isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
+mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
+apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
+paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL
+ima_write_template_field_data_53475 ima_write_template_field_data 2 53475 NULL
+iowarrior_read_53483 iowarrior_read 3 53483 NULL
+osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
+do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
+snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
+dbAllocNext_53506 dbAllocNext 0 53506 NULL
+ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
+check_acl_53512 check_acl 0 53512 NULL
+nft_data_dump_53549 nft_data_dump 5 53549 NULL
+SYSC_bind_53582 SYSC_bind 3 53582 NULL
+cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL
+proc_uid_map_write_53596 proc_uid_map_write 3 53596 NULL
+pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
+___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
+xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
+ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 NULL
+nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
+fuse_fill_write_pages_53682 fuse_fill_write_pages 0-4 53682 NULL
+v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
+bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray
+igb_alloc_q_vector_53690 igb_alloc_q_vector 4-6 53690 &bdev_logical_block_size_53690
+find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
+bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
+__proc_debug_mb_53732 __proc_debug_mb 5 53732 NULL
+wdm_write_53735 wdm_write 3 53735 NULL
+amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 NULL nohasharray
+lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 &amdtp_out_stream_get_max_payload_53755
+ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
+__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
+qp_alloc_host_work_53798 qp_alloc_host_work 5-3 53798 NULL
+regmap_raw_write_53803 regmap_raw_write 2-4 53803 NULL
+lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
+nls_nullsize_53815 nls_nullsize 0 53815 NULL
+setup_data_read_53822 setup_data_read 3 53822 NULL
+pms_read_53873 pms_read 3 53873 NULL
+ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
+SyS_setgroups_53900 SyS_setgroups 1 53900 NULL
+batadv_tt_tvlv_ogm_handler_v1_53909 batadv_tt_tvlv_ogm_handler_v1 5 53909 NULL
+usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL
+ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 0 53938 NULL
+idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
+__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
+ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL
+hfsplus_attr_build_key_54013 hfsplus_attr_build_key 0 54013 NULL
+snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL
+mdc_kuc_write_54019 mdc_kuc_write 3 54019 NULL
+ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
+batadv_tt_update_orig_54049 batadv_tt_update_orig 6-4 54049 NULL
+pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
+nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
+rproc_state_read_54057 rproc_state_read 3 54057 NULL
+_malloc_54077 _malloc 1 54077 NULL
+bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
+altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL nohasharray
+lustre_posix_acl_xattr_filter_54103 lustre_posix_acl_xattr_filter 2 54103 &altera_set_ir_pre_54103
+__comedi_buf_write_alloc_54112 __comedi_buf_write_alloc 0-2 54112 NULL
+strn_len_54122 strn_len 0 54122 NULL
+isku_receive_54130 isku_receive 4 54130 NULL
+isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
+irq_blk_threshold_write_54138 irq_blk_threshold_write 3 54138 NULL
+memcpy_toiovec_54166 memcpy_toiovec 3 54166 NULL
+nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL
+p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL
+do_sys_poll_54221 do_sys_poll 2 54221 NULL
+__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
+pi_read_regr_54231 pi_read_regr 0 54231 NULL
+mcp23s08_read_regs_54246 mcp23s08_read_regs 4 54246 NULL
+reada_add_block_54247 reada_add_block 2 54247 NULL
+xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 NULL
+ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
+audio_write_54261 audio_write 4 54261 NULL nohasharray
+wusb_prf_54261 wusb_prf 7 54261 &audio_write_54261
+mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
+kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL
+tipc_multicast_54285 tipc_multicast 4 54285 NULL
+altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
+dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
+reclaim_pages_54301 reclaim_pages 3 54301 NULL
+sprintf_54306 sprintf 0 54306 NULL
+bio_add_pc_page_54319 bio_add_pc_page 4 54319 NULL
+br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
+__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
+__get_free_pages_54352 __get_free_pages 0 54352 NULL
+tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
+read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
+vfs_readlink_54368 vfs_readlink 3 54368 NULL
+do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
+intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
+ah_alloc_tmp_54378 ah_alloc_tmp 3-2 54378 NULL
+snd_pcm_oss_read2_54387 snd_pcm_oss_read2 3-0 54387 NULL
+iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
+ll_ra_count_get_54410 ll_ra_count_get 3 54410 NULL
+copy_gadget_strings_54417 copy_gadget_strings 2-3 54417 NULL
+sparse_early_mem_maps_alloc_node_54485 sparse_early_mem_maps_alloc_node 4 54485 NULL
+simple_strtoull_54493 simple_strtoull 0 54493 NULL
+btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
+cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
+rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
+vmci_transport_dgram_enqueue_54525 vmci_transport_dgram_enqueue 4 54525 NULL
+viacam_read_54526 viacam_read 3 54526 NULL
+unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
+setsockopt_54539 setsockopt 5 54539 NULL
+lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL
+SYSC_setsockopt_54561 SYSC_setsockopt 5 54561 NULL
+nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
+fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
+nvme_npages_54601 nvme_npages 0-1 54601 NULL
+irq_pkt_threshold_write_54605 irq_pkt_threshold_write 3 54605 NULL
+port_fops_write_54627 port_fops_write 3 54627 NULL
+irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
+dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
+twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL
+tdp_page_fault_54663 tdp_page_fault 2 54663 NULL
+bus_add_device_54665 bus_add_device 0 54665 NULL
+cw1200_queue_stats_init_54670 cw1200_queue_stats_init 2 54670 NULL
+bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
+evm_read_key_54674 evm_read_key 3 54674 NULL
+tipc_link_send_sections_fast_54689 tipc_link_send_sections_fast 3 54689 NULL
+__btrfs_inc_extent_ref_54706 __btrfs_inc_extent_ref 7 54706 NULL
+rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
+ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
+kzalloc_54740 kzalloc 1 54740 NULL
+wep_iv_read_54744 wep_iv_read 3 54744 NULL
+lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL
+iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
+adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL
+ldsem_atomic_update_54774 ldsem_atomic_update 1 54774 NULL
+flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
+nfsd_write_54809 nfsd_write 6 54809 NULL
+ar9287_dump_modal_eeprom_54814 ar9287_dump_modal_eeprom 3-2 54814 NULL
+crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 NULL nohasharray
+kvzalloc_54815 kvzalloc 1 54815 &crypto_tfm_ctx_alignment_54815 nohasharray
+aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 &kvzalloc_54815
+generic_perform_write_54832 generic_perform_write 3 54832 NULL
+write_rio_54837 write_rio 3 54837 NULL
+ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 NULL nohasharray
+nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 &ext3_acl_from_disk_54839
+ll_layout_conf_54841 ll_layout_conf 0 54841 NULL
+ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
+printer_read_54851 printer_read 3 54851 NULL
+alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
+broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
+prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
+iscsi_pool_init_54913 iscsi_pool_init 2-4 54913 NULL nohasharray
+kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913
+btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
+bio_add_page_54933 bio_add_page 0-3 54933 NULL
+mxms_structlen_54939 mxms_structlen 0 54939 NULL
+add_port_54941 add_port 2 54941 NULL
+ath9k_dump_btcoex_54949 ath9k_dump_btcoex 3-0 54949 NULL
+alauda_write_data_54967 alauda_write_data 3 54967 NULL
+c4_add_card_54968 c4_add_card 3 54968 NULL
+ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
+cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
+error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
+dgap_do_bios_load_55025 dgap_do_bios_load 3 55025 NULL
+apei_exec_run_55075 apei_exec_run 0 55075 NULL
+bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
+read_dma_55086 read_dma 3 55086 NULL
+rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
+crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
+filldir_55137 filldir 3 55137 NULL
+ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
+npages_to_npools_55149 npages_to_npools 0-1 55149 NULL
+ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
+mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL
+sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
+sched_feat_write_55202 sched_feat_write 3 55202 NULL
+ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL
+__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
+do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
+qxl_alloc_client_monitors_config_55216 qxl_alloc_client_monitors_config 2 55216 NULL
+nouveau_mc_create__55217 nouveau_mc_create_ 4 55217 NULL
+dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
+memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
+lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL
+persistent_ram_new_55286 persistent_ram_new 2-1 55286 NULL
+rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
+lov_get_stripecnt_55297 lov_get_stripecnt 0-3 55297 NULL
+gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
+wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
+qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 NULL
+__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL
+vme_user_read_55338 vme_user_read 3 55338 NULL
+__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 NULL nohasharray
+sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 &__wa_xfer_setup_sizes_55342
+tipc_send2name_55373 tipc_send2name 5 55373 NULL
+cw1200_sdio_align_size_55391 cw1200_sdio_align_size 2 55391 NULL
+iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
+sysfs_chmod_file_55408 sysfs_chmod_file 0 55408 NULL
+si476x_radio_read_rds_blckcnt_blob_55427 si476x_radio_read_rds_blckcnt_blob 3 55427 NULL
+sysfs_sd_setattr_55437 sysfs_sd_setattr 0 55437 NULL
+__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
+cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
+snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
+i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
+batadv_tt_entries_55487 batadv_tt_entries 0-1 55487 NULL
+ras_stride_increase_window_55501 ras_stride_increase_window 3 55501 NULL
+tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 NULL
+ea_get_55522 ea_get 3-0 55522 NULL
+buffer_size_55534 buffer_size 0 55534 NULL
+set_msr_interception_55538 set_msr_interception 2 55538 NULL
+tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
+dgap_do_config_load_55548 dgap_do_config_load 2 55548 NULL
+hash_ipport6_expire_55549 hash_ipport6_expire 4 55549 NULL
+dm_stats_list_55551 dm_stats_list 4 55551 NULL
+__vdev_disk_physio_55568 __vdev_disk_physio 4 55568 NULL
+add_partition_55588 add_partition 2 55588 NULL
+kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
+SyS_keyctl_55602 SyS_keyctl 4 55602 NULL nohasharray
+config_buf_55602 config_buf 0 55602 &SyS_keyctl_55602
+macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
+selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
+edge_tty_recv_55622 edge_tty_recv 3 55622 NULL
+pktgen_if_write_55628 pktgen_if_write 3 55628 NULL nohasharray
+reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 &pktgen_if_write_55628
+osc_obd_max_pages_per_rpc_seq_write_55636 osc_obd_max_pages_per_rpc_seq_write 3 55636 NULL
+xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
+lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
+il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
+get_info_55681 get_info 3 55681 NULL
+iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL
+echo_big_lmm_get_55690 echo_big_lmm_get 0 55690 NULL
+genl_allocate_reserve_groups_55705 genl_allocate_reserve_groups 1 55705 NULL
+pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
+ocfs2_lock_refcount_tree_55719 ocfs2_lock_refcount_tree 0 55719 NULL
+tap_pwup_write_55723 tap_pwup_write 3 55723 NULL
+__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2 55738 NULL
+set_local_name_55757 set_local_name 4 55757 NULL
+strlen_55778 strlen 0 55778 NULL
+set_spte_55783 set_spte 4-5 55783 NULL
+req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray
+conf_read_55786 conf_read 3 55786 &req_bio_endio_55786
+uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
+sb16_copy_from_user_55836 sb16_copy_from_user 10-6-7 55836 NULL
+ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
+hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
+shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
+hsc_write_55875 hsc_write 3 55875 NULL
+ramdisk_store_55885 ramdisk_store 4 55885 NULL
+pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
+hash_ip4_expire_55911 hash_ip4_expire 4 55911 NULL
+snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
+ext2_direct_IO_55932 ext2_direct_IO 4 55932 NULL
+kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL
+sel_read_policy_55947 sel_read_policy 3 55947 NULL
+ceph_get_direct_page_vector_55956 ceph_get_direct_page_vector 2 55956 NULL
+simple_read_from_buffer_55957 simple_read_from_buffer 2-5 55957 NULL
+tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
+btrfs_clone_55977 btrfs_clone 5-3 55977 NULL
+wa_xfer_create_subset_sg_55992 wa_xfer_create_subset_sg 3-2 55992 NULL
+nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL
+dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
+pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
+add_sysfs_param_56108 add_sysfs_param 0 56108 NULL
+usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
+sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
+write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL
+__i2c_transfer_56162 __i2c_transfer 0 56162 NULL
+rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
+ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 3-0 56194 NULL
+vring_add_indirect_56222 vring_add_indirect 4 56222 NULL
+ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
+do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
+fd_copyin_56247 fd_copyin 3 56247 NULL
+sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL
+il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
+ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL
+RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
+dvb_aplay_56296 dvb_aplay 3 56296 NULL
+btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
+speakup_file_write_56310 speakup_file_write 3 56310 NULL
+pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
+journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
+snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
+vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
+mite_device_bytes_transferred_56355 mite_device_bytes_transferred 0 56355 NULL
+iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 4-0 56368 NULL
+dev_read_56369 dev_read 3 56369 NULL
+ath10k_read_simulate_fw_crash_56371 ath10k_read_simulate_fw_crash 3 56371 NULL
+write_gssp_56404 write_gssp 3 56404 NULL
+ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
+do_get_write_access_56410 do_get_write_access 0 56410 NULL
+store_msg_56417 store_msg 3 56417 NULL
+pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
+fl_create_56435 fl_create 5 56435 NULL
+gnttab_map_56439 gnttab_map 2 56439 NULL
+cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL
+set_connectable_56458 set_connectable 4 56458 NULL
+osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
+putused_user_56467 putused_user 3 56467 NULL
+lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL
+calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
+crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
+ieee80211_rx_mgmt_probe_beacon_56491 ieee80211_rx_mgmt_probe_beacon 3 56491 NULL
+init_map_ip_56508 init_map_ip 5 56508 NULL
+lustre_posix_acl_xattr_reduce_space_56512 lustre_posix_acl_xattr_reduce_space 3 56512 NULL
+cfg80211_connect_result_56515 cfg80211_connect_result 4-6 56515 NULL
+ip_options_get_56538 ip_options_get 4 56538 NULL
+ll_wr_track_id_56544 ll_wr_track_id 2 56544 NULL
+alloc_apertures_56561 alloc_apertures 1 56561 NULL
+rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
+portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
+event_filter_write_56609 event_filter_write 3 56609 NULL
+nvme_trans_log_temperature_56613 nvme_trans_log_temperature 3 56613 NULL
+edac_device_create_block_56619 edac_device_create_block 0 56619 NULL
+gather_array_56641 gather_array 3 56641 NULL
+lookup_extent_backref_56644 lookup_extent_backref 9 56644 NULL
+uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
+tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL
+snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL
+dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL
+sta_flags_read_56710 sta_flags_read 3 56710 NULL
+ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
+__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
+__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL
+drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
+btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
+ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 NULL
+do_syslog_56807 do_syslog 3 56807 NULL
+mtdchar_write_56831 mtdchar_write 3 56831 NULL
+snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL
+si476x_radio_read_agc_blob_56849 si476x_radio_read_agc_blob 3 56849 NULL
+ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
+pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
+debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
+batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL
+hfsplus_find_cat_56899 hfsplus_find_cat 0 56899 NULL
+hfsplus_setxattr_56902 hfsplus_setxattr 4 56902 NULL
+strcspn_56913 strcspn 0 56913 NULL
+__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
+journal_init_revoke_56933 journal_init_revoke 2 56933 NULL
+nouveau_xtensa_create__56952 nouveau_xtensa_create_ 8 56952 NULL
+diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
+nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
+sptlrpc_secflags2str_56995 sptlrpc_secflags2str 3 56995 NULL
+vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
+btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
+aircable_process_packet_57027 aircable_process_packet 4 57027 NULL
+ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 NULL nohasharray
+skb_network_offset_57043 skb_network_offset 0 57043 &ieee80211_if_fmt_state_57043
+bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
+xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL
+cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
+sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
+pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
+tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
+altera_get_note_57099 altera_get_note 6 57099 NULL
+hpfs_readpages_57106 hpfs_readpages 4 57106 NULL
+crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
+sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
+cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
+rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 NULL nohasharray
+nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 &rds_ib_sub_signaled_57136 nohasharray
+ima_show_htable_value_57136 ima_show_htable_value 2 57136 &nl80211_send_deauth_57136
+snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
+udl_prime_create_57159 udl_prime_create 2 57159 NULL
+stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
+rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
+hash_netnet6_expire_57191 hash_netnet6_expire 4 57191 NULL
+tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
+dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL
+rsxx_cram_write_57244 rsxx_cram_write 3 57244 NULL
+ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
+oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
+alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
+lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL
+pstore_file_read_57288 pstore_file_read 3 57288 NULL
+snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
+ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
+write_file_regval_57313 write_file_regval 3 57313 NULL
+__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
+usblp_read_57342 usblp_read 3 57342 NULL
+print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
+dio_send_cur_page_57348 dio_send_cur_page 0 57348 NULL
+tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
+tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
+read_file_blob_57406 read_file_blob 3 57406 NULL
+enclosure_register_57412 enclosure_register 3 57412 NULL
+compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL
+copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
+__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
+sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 NULL
+ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL
+tipc_port_reject_sections_57478 tipc_port_reject_sections 4 57478 NULL
+bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
+skb_headlen_57501 skb_headlen 0 57501 NULL
+copy_in_user_57502 copy_in_user 3 57502 NULL
+ckhdid_printf_57505 ckhdid_printf 2 57505 NULL
+init_tag_map_57515 init_tag_map 3 57515 NULL
+il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 NULL nohasharray
+wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 &il_dbgfs_force_reset_read_57517
+cmm_read_57520 cmm_read 3 57520 NULL
+inode_permission_57531 inode_permission 0 57531 NULL
+acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL
+ptlrpc_lprocfs_hp_ratio_seq_write_57537 ptlrpc_lprocfs_hp_ratio_seq_write 3 57537 NULL
+ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL
+obd_unpackmd_57563 obd_unpackmd 0 57563 NULL
+snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
+get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL
+ldlm_cli_enqueue_local_57582 ldlm_cli_enqueue_local 11 57582 NULL
+il_dbgfs_interrupt_write_57591 il_dbgfs_interrupt_write 3 57591 NULL
+read_file_spectral_fft_period_57593 read_file_spectral_fft_period 3 57593 NULL
+tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
+sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 NULL
+mem_read_57631 mem_read 3 57631 NULL
+r3964_write_57662 r3964_write 4 57662 NULL
+proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL
+__lgwrite_57669 __lgwrite 4 57669 NULL
+f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL
+i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
+ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
+nouveau_gpio_create__57735 nouveau_gpio_create_ 4-5 57735 NULL
+pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 NULL nohasharray
+compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 &pppol2tp_recvmsg_57742
+ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
+SYSC_process_vm_writev_57776 SYSC_process_vm_writev 3-5 57776 NULL
+apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL
+ld2_57794 ld2 0 57794 NULL
+ivtv_read_57796 ivtv_read 3 57796 NULL
+bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
+copy_to_user_57835 copy_to_user 3-0 57835 NULL
+flash_read_57843 flash_read 3 57843 NULL
+kiblnd_create_tx_pool_57846 kiblnd_create_tx_pool 2 57846 NULL
+xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
+iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL
+memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
+twl_i2c_write_57923 twl_i2c_write 3-4 57923 NULL
+__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
+sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
+xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL
+key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
+ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
+ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
+do_rx_dma_57996 do_rx_dma 5 57996 NULL
+rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
+iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
+io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
+mce_async_out_58056 mce_async_out 3 58056 NULL
+ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
+dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
+cm4040_write_58079 cm4040_write 3 58079 NULL
+ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
+slhc_init_58135 slhc_init 1-2 58135 &ipv6_flowlabel_opt_58135
+garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
+ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
+btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
+read_file_debug_58256 read_file_debug 3 58256 NULL
+osc_max_dirty_mb_seq_write_58263 osc_max_dirty_mb_seq_write 3 58263 NULL
+cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
+profile_load_58267 profile_load 3 58267 NULL
+kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL
+acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
+iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
+ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
+tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
+ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 3 58331 NULL
+__copy_from_user_swizzled_58337 __copy_from_user_swizzled 2-4 58337 NULL
+SyS_migrate_pages_58348 SyS_migrate_pages 2 58348 NULL
+brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
+il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
+_drbd_md_sync_page_io_58403 _drbd_md_sync_page_io 6 58403 NULL
+kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3 58406 NULL nohasharray
+idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 &kvm_mmu_write_protect_pt_masked_58406
+i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
+capabilities_read_58457 capabilities_read 3 58457 NULL
+lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
+compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
+nv_rd08_58472 nv_rd08 0 58472 NULL
+acpi_tables_sysfs_init_58477 acpi_tables_sysfs_init 0 58477 NULL
+snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
+snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
+btrfs_cont_expand_58498 btrfs_cont_expand 2-3 58498 NULL
+rndis_add_response_58544 rndis_add_response 2 58544 NULL
+ldlm_srv_pool_shrink_58554 ldlm_srv_pool_shrink 0 58554 NULL
+wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
+scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
+get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL
+ea_read_inline_58589 ea_read_inline 0 58589 NULL
+isku_sysfs_read_keys_thumbster_58590 isku_sysfs_read_keys_thumbster 6 58590 NULL
+xip_file_read_58592 xip_file_read 3 58592 NULL
+ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL
+radeon_bo_size_58606 radeon_bo_size 0 58606 NULL
+skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
+tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
+iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL
+find_zero_58685 find_zero 0-1 58685 NULL
+uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
+tps6586x_writes_58689 tps6586x_writes 2-3 58689 NULL
+vx_send_msg_58711 vx_send_msg 0 58711 NULL
+csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL
+frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
+ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
+agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
+regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
+raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
+isku_sysfs_read_58806 isku_sysfs_read 5 58806 NULL
+ep_read_58813 ep_read 3 58813 NULL
+command_write_58841 command_write 3 58841 NULL
+ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
+gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
+esas2r_change_queue_depth_58886 esas2r_change_queue_depth 2 58886 NULL
+lprocfs_wr_pinger_recov_58914 lprocfs_wr_pinger_recov 3 58914 NULL
+print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
+pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
+sptlrpc_import_sec_adapt_58948 sptlrpc_import_sec_adapt 0 58948 NULL
+wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL
+ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL
+crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
+init_list_set_59005 init_list_set 3 59005 NULL
+ep_write_59008 ep_write 3 59008 NULL
+lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL
+SyS_preadv_59029 SyS_preadv 3 59029 NULL
+init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
+selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
+crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
+regmap_bulk_write_59049 regmap_bulk_write 2-4 59049 NULL
+sysfs_link_sibling_59078 sysfs_link_sibling 0 59078 NULL
+mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
+scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
+nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
+print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
+framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
+radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
+pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
+md_getxattr_59161 md_getxattr 0 59161 NULL
+ksize_59176 ksize 0 59176 NULL
+setup_window_59178 setup_window 4-2-5-7 59178 NULL
+ocfs2_move_extent_59187 ocfs2_move_extent 2-5 59187 NULL
+xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
+check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 NULL
+dt3155_read_59226 dt3155_read 3 59226 NULL
+paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL
+tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
+nla_len_59258 nla_len 0 59258 NULL
+drbd_bm_write_page_59290 drbd_bm_write_page 2 59290 NULL
+btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4 59304 NULL
+fd_copyout_59323 fd_copyout 3 59323 NULL
+read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
+rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
+xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 NULL
+__map_request_59350 __map_request 0 59350 NULL
+f2fs_fallocate_59377 f2fs_fallocate 3-4 59377 NULL
+pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
+journal_init_dev_59384 journal_init_dev 5 59384 NULL
+__net_get_random_once_59389 __net_get_random_once 2 59389 NULL
+isku_sysfs_read_keys_function_59412 isku_sysfs_read_keys_function 6 59412 NULL
+pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL
+vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
+SyS_sched_setaffinity_59442 SyS_sched_setaffinity 2 59442 NULL
+fs_path_ensure_buf_59445 fs_path_ensure_buf 2 59445 NULL
+ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
+mic_vringh_copy_59523 mic_vringh_copy 4 59523 NULL
+mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL
+tunables_write_59563 tunables_write 3 59563 NULL
+__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL
+write_pbl_59583 write_pbl 4 59583 NULL
+memdup_user_59590 memdup_user 2 59590 NULL
+xrcdn_free_res_59616 xrcdn_free_res 5 59616 NULL nohasharray
+mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 &xrcdn_free_res_59616
+ath6kl_endpoint_stats_write_59621 ath6kl_endpoint_stats_write 3 59621 NULL
+mtrr_write_59622 mtrr_write 3 59622 NULL
+find_first_zero_bit_59636 find_first_zero_bit 0 59636 NULL
+SyS_setdomainname_59646 SyS_setdomainname 2 59646 NULL
+hidraw_read_59650 hidraw_read 3 59650 NULL
+v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
+__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL
+iwl_dbgfs_mac_params_read_59666 iwl_dbgfs_mac_params_read 3 59666 NULL
+alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
+mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
+ioperm_get_59701 ioperm_get 4-3 59701 NULL
+prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
+ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
+fat_direct_IO_59741 fat_direct_IO 4 59741 NULL
+qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
+strnlen_59746 strnlen 0 59746 NULL
+ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
+long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
+venus_remove_59781 venus_remove 4 59781 NULL
+mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL
+C_SYSC_preadv_59801 C_SYSC_preadv 3 59801 NULL
+ipw_write_59807 ipw_write 3 59807 NULL
+scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
+ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
+gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
+regmap_raw_write_async_59849 regmap_raw_write_async 2-4 59849 NULL
+pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
+l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
+ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
+ocfs2_extend_rotate_transaction_59894 ocfs2_extend_rotate_transaction 2-3 59894 NULL
+aic7xxx_abort_waiting_scb_59932 aic7xxx_abort_waiting_scb 0 59932 NULL
+kvm_mmu_notifier_invalidate_range_start_59944 kvm_mmu_notifier_invalidate_range_start 3-4 59944 NULL
+dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 NULL nohasharray
+il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 &dapm_widget_power_read_file_59950
+il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL
+__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
+osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
+ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
+ieee80211_if_fmt_dot11MeshAwakeWindowDuration_60006 ieee80211_if_fmt_dot11MeshAwakeWindowDuration 3 60006 NULL
+rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
+mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
+osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
+xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
+bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
+do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
+vcs_size_60050 vcs_size 0 60050 NULL
+gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL
+compat_writev_60063 compat_writev 3 60063 NULL
+ath6kl_listen_int_write_60066 ath6kl_listen_int_write 3 60066 NULL
+c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
+rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
+ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
+SYSC_msgsnd_60113 SYSC_msgsnd 3 60113 NULL
+nfs_idmap_request_key_60124 nfs_idmap_request_key 2 60124 NULL
+__mutex_lock_common_60134 __mutex_lock_common 0 60134 NULL
+ld_usb_read_60156 ld_usb_read 3 60156 NULL
+jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
+init_state_60165 init_state 2 60165 NULL
+jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 NULL nohasharray
+sg_build_sgat_60179 sg_build_sgat 3 60179 &jffs2_alloc_full_dirent_60179
+fuse_async_req_send_60183 fuse_async_req_send 0-3 60183 NULL
+rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
+qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 NULL
+btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL
+irq_alloc_domain_generic_chips_60264 irq_alloc_domain_generic_chips 2-3 60264 NULL
+display_crc_ctl_write_60273 display_crc_ctl_write 3 60273 NULL
+printer_write_60276 printer_write 3 60276 NULL
+do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
+getDataLength_60301 getDataLength 0 60301 NULL
+usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL
+__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL
+dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
+mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
+ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
+driver_names_read_60399 driver_names_read 3 60399 NULL
+simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
+excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
+kmalloc_60432 kmalloc 1 60432 NULL nohasharray
+tstats_write_60432 tstats_write 3 60432 &kmalloc_60432
+snd_hda_get_num_raw_conns_60462 snd_hda_get_num_raw_conns 0 60462 NULL
+crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
+lustre_msg_early_size_60496 lustre_msg_early_size 0 60496 NULL
+v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
+nonpaging_map_60551 nonpaging_map 4 60551 NULL
+osc_lockless_truncate_seq_write_60553 osc_lockless_truncate_seq_write 3 60553 NULL
+tracing_entries_write_60563 tracing_entries_write 3 60563 NULL
+skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
+wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
+acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
+__proc_lnet_stats_60647 __proc_lnet_stats 5 60647 NULL
+if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
+ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
+vga_rcrt_60731 vga_rcrt 0 60731 NULL
+snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
+raid_status_60755 raid_status 5 60755 NULL
+sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
+opticon_write_60775 opticon_write 4 60775 NULL
+acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
+snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
+pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
+alloc_buf_60864 alloc_buf 3-2 60864 NULL
+generic_writepages_60871 generic_writepages 0 60871 NULL
+ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL
+iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
+libcfs_sock_ioctl_60915 libcfs_sock_ioctl 0 60915 NULL
+mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
+scrub_chunk_60926 scrub_chunk 5 60926 NULL
+submit_extent_page_60928 submit_extent_page 5 60928 NULL
+pti_char_write_60960 pti_char_write 3 60960 NULL
+mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 2-3 60961 NULL
+__a2mp_build_60987 __a2mp_build 3 60987 NULL
+hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
+ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
+graph_depth_write_61024 graph_depth_write 3 61024 NULL
+sdhci_pltfm_register_61031 sdhci_pltfm_register 3 61031 NULL
+lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL
+symtab_init_61050 symtab_init 2 61050 NULL
+fuse_send_write_61053 fuse_send_write 0-4 61053 NULL
+bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-2 61062 NULL
+ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
+get_derived_key_61100 get_derived_key 4 61100 NULL
+i40e_calculate_l2fpm_size_61104 i40e_calculate_l2fpm_size 0-1-2-3-4 61104 NULL
+alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
+__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL
+vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
+afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
+brcmf_sdio_chip_cr4_exitdl_61143 brcmf_sdio_chip_cr4_exitdl 4 61143 NULL
+osl_malloc_61156 osl_malloc 2 61156 NULL
+pair_device_61175 pair_device 4 61175 NULL nohasharray
+event_oom_late_read_61175 event_oom_late_read 3 61175 &pair_device_61175
+dio_bio_add_page_61178 dio_bio_add_page 0 61178 NULL
+SyS_prctl_61202 SyS_prctl 4 61202 NULL
+arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
+smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
+btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 NULL nohasharray
+find_get_pages_tag_61270 find_get_pages_tag 0 61270 &btrfs_bio_alloc_61270 nohasharray
+ifalias_store_61270 ifalias_store 4 61270 &find_get_pages_tag_61270
+vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 NULL nohasharray
+hfsplus_getxattr_finder_info_61283 hfsplus_getxattr_finder_info 0 61283 &vortex_adbdma_getlinearpos_61283
+nvme_trans_copy_to_user_61288 nvme_trans_copy_to_user 3 61288 NULL
+xfer_from_user_61307 xfer_from_user 3 61307 NULL
+xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
+C_SYSC_msgsnd_61330 C_SYSC_msgsnd 3 61330 NULL
+write_file_spectral_short_repeat_61335 write_file_spectral_short_repeat 3 61335 NULL
+st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 NULL
+rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 NULL
+__dm_get_reserved_ios_61342 __dm_get_reserved_ios 0-3-2 61342 NULL
+f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
+debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
+system_enable_write_61396 system_enable_write 3 61396 NULL
+xfs_zero_remaining_bytes_61423 xfs_zero_remaining_bytes 3 61423 NULL
+unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
+snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
+btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
+ocfs2_get_refcount_rec_61514 ocfs2_get_refcount_rec 0 61514 NULL
+erst_errno_61526 erst_errno 0 61526 NULL
+trace_options_core_write_61551 trace_options_core_write 3 61551 NULL
+dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
+parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL
+fan_proc_write_61569 fan_proc_write 3 61569 NULL
+ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
+ldlm_pool_rw_atomic_seq_write_61572 ldlm_pool_rw_atomic_seq_write 3 61572 NULL
+seq_open_private_61589 seq_open_private 3 61589 NULL
+ept_gpte_to_gfn_lvl_61591 ept_gpte_to_gfn_lvl 0-1-2 61591 NULL
+netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
+nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL
+configfs_write_file_61621 configfs_write_file 3 61621 NULL
+ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL
+i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
+snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
+resize_stripes_61650 resize_stripes 2 61650 NULL
+ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL
+insert_one_name_61668 insert_one_name 7 61668 NULL
+qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
+lock_loop_61681 lock_loop 1 61681 NULL
+__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
+filter_read_61692 filter_read 3 61692 NULL
+iov_length_61716 iov_length 0 61716 NULL
+fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
+null_alloc_reqbuf_61719 null_alloc_reqbuf 3 61719 NULL
+read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
+read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
+SyS_sendto_61763 SyS_sendto 6 61763 NULL
+mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL
+bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
+regcache_sync_block_61846 regcache_sync_block 5-4 61846 NULL
+ath9k_hw_def_dump_eeprom_61853 ath9k_hw_def_dump_eeprom 5-4 61853 NULL
+fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2 61854 NULL
+evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
+SYSC_lsetxattr_61869 SYSC_lsetxattr 4 61869 NULL
+get_fw_name_61874 get_fw_name 3 61874 NULL
+btrfs_ioctl_clone_61886 btrfs_ioctl_clone 3-4-5 61886 NULL
+lprocfs_write_frac_u64_helper_61897 lprocfs_write_frac_u64_helper 2 61897 NULL
+lov_mds_md_stripecnt_61899 lov_mds_md_stripecnt 0-1 61899 NULL
+clear_refs_write_61904 clear_refs_write 3 61904 NULL nohasharray
+import_sec_check_expire_61904 import_sec_check_expire 0 61904 &clear_refs_write_61904
+rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
+au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
+sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
+edac_device_create_instance_61940 edac_device_create_instance 0 61940 NULL
+SyS_kexec_load_61946 SyS_kexec_load 2 61946 NULL
+il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
+squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
+fix_read_error_61965 fix_read_error 4 61965 NULL
+ocfs2_quota_write_61972 ocfs2_quota_write 4-5 61972 NULL
+fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
+cow_file_range_61979 cow_file_range 3 61979 NULL
+dequeue_event_62000 dequeue_event 3 62000 NULL
+xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
+SyS_setxattr_62019 SyS_setxattr 4 62019 NULL
+jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
+SYSC_select_62024 SYSC_select 1 62024 NULL
+pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
+sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
+do_pselect_62061 do_pselect 1 62061 NULL
+pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
+jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
+btrfs_direct_IO_62114 btrfs_direct_IO 4 62114 NULL
+ip_recv_error_62117 ip_recv_error 3 62117 NULL
+generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
+llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
+kobject_add_varg_62133 kobject_add_varg 0 62133 NULL nohasharray
+qib_diag_write_62133 qib_diag_write 3 62133 &kobject_add_varg_62133
+device_add_attrs_62135 device_add_attrs 0 62135 NULL nohasharray
+ql_status_62135 ql_status 5 62135 &device_add_attrs_62135
+video_usercopy_62151 video_usercopy 2 62151 NULL
+SyS_getxattr_62166 SyS_getxattr 4 62166 NULL
+prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
+write_file_dfs_62180 write_file_dfs 3 62180 NULL
+alloc_upcall_62186 alloc_upcall 2 62186 NULL
+btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
+sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
+SYSC_setgroups16_62232 SYSC_setgroups16 1 62232 NULL
+nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
+subtract_dirty_62242 subtract_dirty 2-3 62242 NULL
+get_random_int_62279 get_random_int 0 62279 NULL
+il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
+sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
+subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
+Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
+ocfs2_xattr_buckets_per_cluster_62330 ocfs2_xattr_buckets_per_cluster 0 62330 NULL
+subseq_list_62332 subseq_list 3-0 62332 NULL
+ll_statahead_max_seq_write_62333 ll_statahead_max_seq_write 3 62333 NULL
+flash_write_62354 flash_write 3 62354 NULL
+xfpregs_set_62363 xfpregs_set 4 62363 NULL
+rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
+altera_irscan_62396 altera_irscan 2 62396 NULL
+set_ssp_62411 set_ssp 4 62411 NULL
+ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL
+pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
+test_iso_queue_62534 test_iso_queue 5 62534 NULL
+debugfs_read_62535 debugfs_read 3 62535 NULL
+sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
+qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
+link_send_sections_long_62557 link_send_sections_long 3 62557 NULL
+xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
+compat_SyS_rt_sigpending_62580 compat_SyS_rt_sigpending 2 62580 NULL
+get_subdir_62581 get_subdir 3 62581 NULL
+nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL
+tipc_port_recv_sections_62609 tipc_port_recv_sections 3 62609 NULL
+dut_mode_write_62630 dut_mode_write 3 62630 NULL
+vfs_fsync_range_62635 vfs_fsync_range 0 62635 NULL
+lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
+ocfs2_wait_for_mask_interruptible_62675 ocfs2_wait_for_mask_interruptible 0 62675 NULL
+printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
+bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
+gfs2_log_write_62717 gfs2_log_write 3 62717 NULL
+rdm_62719 rdm 0 62719 NULL
+obd_ioctl_popdata_62741 obd_ioctl_popdata 3 62741 NULL
+key_replays_read_62746 key_replays_read 3 62746 NULL
+lov_verify_lmm_62747 lov_verify_lmm 2 62747 NULL
+mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL
+ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
+C_SYSC_ipc_62776 C_SYSC_ipc 3 62776 NULL
+SyS_sched_getaffinity_62786 SyS_sched_getaffinity 2 62786 NULL
+dm_stats_account_io_62787 dm_stats_account_io 3 62787 NULL
+tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
+__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
+bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL
+xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
+rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
+read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL
+l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL
+aoechr_write_62883 aoechr_write 3 62883 NULL
+if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
+ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL
+mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
+getdqbuf_62908 getdqbuf 1 62908 NULL
+ll_statahead_agl_seq_write_62928 ll_statahead_agl_seq_write 3 62928 NULL
+agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
+kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL
+__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 NULL
+pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
+scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
+unlink1_63059 unlink1 3 63059 NULL
+xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL
+iwl_dbgfs_fw_rx_stats_read_63070 iwl_dbgfs_fw_rx_stats_read 3 63070 NULL
+sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5 63087 NULL
+iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
+ext4_chunk_trans_blocks_63123 ext4_chunk_trans_blocks 0-2 63123 NULL
+smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
+SyS_syslog_63178 SyS_syslog 3 63178 NULL
+vme_master_read_63221 vme_master_read 0 63221 NULL
+SyS_gethostname_63227 SyS_gethostname 2 63227 NULL
+ptp_read_63251 ptp_read 4 63251 NULL
+xfs_dir2_leaf_getdents_63262 xfs_dir2_leaf_getdents 3 63262 NULL
+raid5_resize_63306 raid5_resize 2 63306 NULL
+proc_info_read_63344 proc_info_read 3 63344 NULL
+ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
+idmouse_read_63374 idmouse_read 3 63374 NULL
+usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 NULL nohasharray
+edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 &usbnet_read_cmd_nopm_63388
+rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
+nouveau_event_create_63411 nouveau_event_create 1 63411 NULL
+l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
+nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
+si5351_bulk_write_63468 si5351_bulk_write 2-3 63468 NULL
+snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
+reada_find_extent_63486 reada_find_extent 2 63486 NULL
+read_kcore_63488 read_kcore 3 63488 NULL
+snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
+efx_mcdi_rpc_async_63529 efx_mcdi_rpc_async 4-5 63529 NULL
+ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4 63534 NULL
+write_file_spectral_period_63536 write_file_spectral_period 3 63536 NULL
+if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
+append_to_buffer_63550 append_to_buffer 3 63550 NULL
+kvm_write_guest_page_63555 kvm_write_guest_page 5 63555 NULL
+rproc_alloc_63577 rproc_alloc 5 63577 NULL
+write_debug_level_63613 write_debug_level 3 63613 NULL
+symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
+proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
+ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
+ldlm_cli_enqueue_63657 ldlm_cli_enqueue 8 63657 NULL
+hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
+vbi_read_63673 vbi_read 3 63673 NULL
+write_file_spectral_fft_period_63696 write_file_spectral_fft_period 3 63696 NULL
+nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
+btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4 63720 NULL
+selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
+snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
+snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
+spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
+mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
+copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
+prepare_copy_63826 prepare_copy 2 63826 NULL
+sel_write_load_63830 sel_write_load 3 63830 NULL
+ll_readlink_63836 ll_readlink 3 63836 NULL
+proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
+xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 NULL
+uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL
+snd_compr_write_63923 snd_compr_write 3 63923 NULL
+afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
+__team_options_register_63941 __team_options_register 3 63941 NULL
+macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
+sysfs_add_one_63969 sysfs_add_one 0 63969 NULL
+set_bredr_63975 set_bredr 4 63975 NULL
+construct_key_and_link_63985 construct_key_and_link 3 63985 NULL
+rs_extent_to_bm_page_63996 rs_extent_to_bm_page 0-1 63996 NULL
+read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
+hfsplus_security_setxattr_64009 hfsplus_security_setxattr 4 64009 NULL
+SyS_rt_sigpending_64018 SyS_rt_sigpending 2 64018 NULL
+dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
+SyS_fsetxattr_64039 SyS_fsetxattr 4 64039 NULL
+get_u8_64076 get_u8 0 64076 NULL
+xilly_malloc_64077 xilly_malloc 2 64077 NULL
+sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
+vmci_handle_arr_get_size_64088 vmci_handle_arr_get_size 0 64088 NULL
+lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
+SyS_set_mempolicy_64096 SyS_set_mempolicy 3 64096 NULL
+SyS_mq_timedsend_64107 SyS_mq_timedsend 3 64107 NULL
+rdma_addr_size_64116 rdma_addr_size 0 64116 NULL
+do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
+bypass_wd_write_64120 bypass_wd_write 3 64120 NULL
+ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL
+init_bch_64130 init_bch 1-2 64130 NULL
+ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
+read_div_64147 read_div 0 64147 NULL
+dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
+cpumask_scnprintf_64170 cpumask_scnprintf 0-2 64170 NULL
+xfs_vm_direct_IO_64223 xfs_vm_direct_IO 4 64223 NULL
+read_pulse_64227 read_pulse 0-3 64227 NULL
+ea_len_64229 ea_len 0 64229 NULL
+io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
+btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
+sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL
+xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 NULL nohasharray
+event_id_read_64288 event_id_read 3 64288 &xfs_dir_cilookup_result_64288
+ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
+snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL
+error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
+sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
+ts_write_64336 ts_write 3 64336 NULL
+usbtmc_write_64340 usbtmc_write 3 64340 NULL
+bnx2x_vfop_mcast_cmd_64354 bnx2x_vfop_mcast_cmd 5 64354 NULL
+user_regset_copyin_64360 user_regset_copyin 7 64360 NULL
+wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
+reg_create_64372 reg_create 5 64372 NULL
+ilo_write_64378 ilo_write 3 64378 NULL
+btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
+vmcs_readl_64381 vmcs_readl 0 64381 NULL
+nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
+ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
+pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
+rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
+snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
+keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
+oom_adj_write_64428 oom_adj_write 3 64428 NULL
+read_file_spectral_short_repeat_64431 read_file_spectral_short_repeat 3 64431 NULL
+ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
+single_open_size_64483 single_open_size 4 64483 NULL
+p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
+msg_data_sz_64503 msg_data_sz 0 64503 NULL
+remove_uuid_64505 remove_uuid 4 64505 NULL
+crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
+opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
+iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL
+ses_send_diag_64527 ses_send_diag 4 64527 NULL
+prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
+SyS_bind_64544 SyS_bind 3 64544 NULL
+rbd_obj_read_sync_64554 rbd_obj_read_sync 4-3 64554 NULL
+__btrfs_prealloc_file_range_64557 __btrfs_prealloc_file_range 3 64557 NULL
+__spi_sync_64561 __spi_sync 0 64561 NULL nohasharray
+ll_max_rw_chunk_seq_write_64561 ll_max_rw_chunk_seq_write 3 64561 &__spi_sync_64561
+__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
+kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL
+do_erase_64574 do_erase 4 64574 NULL
+fanotify_write_64623 fanotify_write 3 64623 NULL
+regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL
+ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray
+tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661
+efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
+rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
+nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
+sec_bulk_write_64691 sec_bulk_write 2-3 64691 NULL
+snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
+dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
+atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
+i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
+AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL
+squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
+bio_map_kern_64751 bio_map_kern 3 64751 NULL
+rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
+message_for_md_64777 message_for_md 5 64777 NULL
+isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
+regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL
+nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
+rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
+proc_projid_map_write_64810 proc_projid_map_write 3 64810 NULL
+megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
+ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
+do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
+altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
+lprocfs_write_u64_helper_64880 lprocfs_write_u64_helper 2 64880 NULL
+ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
+ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
+ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
+traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
+suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
+crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
+insert_dent_65034 insert_dent 7 65034 NULL
+snd_hda_get_pin_label_65035 snd_hda_get_pin_label 5 65035 NULL
+ext4_ind_trans_blocks_65053 ext4_ind_trans_blocks 0-2 65053 NULL
+pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL
+__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
+batadv_socket_write_65083 batadv_socket_write 3 65083 NULL
+ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
+ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 3-0 65090 NULL
+uasp_alloc_cmd_65097 uasp_alloc_cmd 0 65097 NULL
+generic_ocp_write_65107 generic_ocp_write 4 65107 NULL
+rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
+print_endpoint_stat_65232 print_endpoint_stat 3-4-0 65232 NULL
+whci_n_caps_65247 whci_n_caps 0 65247 NULL
+kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
+compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
+mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
+redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
+get_var_len_65304 get_var_len 0 65304 NULL
+unpack_array_65318 unpack_array 0 65318 NULL
+pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL
+dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
+dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
+alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
+SyS_writev_65372 SyS_writev 3 65372 NULL
+__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
+trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
+__read_vmcore_65402 __read_vmcore 2 65402 NULL
+usb_ep_enable_65405 usb_ep_enable 0 65405 NULL
+ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 3-4 65410 NULL
+device_add_groups_65423 device_add_groups 0 65423 NULL
+xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
+usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
+il_dbgfs_wd_timeout_write_65464 il_dbgfs_wd_timeout_write 3 65464 NULL
+clear_user_65470 clear_user 2 65470 NULL
+dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 NULL
+lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9 65493 NULL
+nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
+tree_mod_log_eb_copy_65535 tree_mod_log_eb_copy 6 65535 NULL
diff -ruNp linux-3.13.11/tools/gcc/size_overflow_hash_aux.data linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_hash_aux.data
--- linux-3.13.11/tools/gcc/size_overflow_hash_aux.data	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_hash_aux.data	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,92 @@
+spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL
+zfs_lookup_2144 zfs_lookup 0 2144 NULL
+mappedread_2627 mappedread 2 2627 NULL
+vdev_disk_dio_alloc_2957 vdev_disk_dio_alloc 1 2957 NULL
+nv_alloc_pushpage_spl_4286 nv_alloc_pushpage_spl 2 4286 NULL
+zpl_xattr_get_4574 zpl_xattr_get 0 4574 NULL
+sa_replace_all_by_template_5699 sa_replace_all_by_template 3 5699 NULL
+dmu_write_6048 dmu_write 4-3 6048 NULL
+dmu_buf_hold_array_6095 dmu_buf_hold_array 4-3 6095 NULL
+update_pages_6225 update_pages 2-3 6225 NULL
+bio_nr_pages_7117 bio_nr_pages 0-2 7117 NULL
+dmu_buf_hold_array_by_bonus_8562 dmu_buf_hold_array_by_bonus 3-2 8562 NULL
+zpios_dmu_write_8858 zpios_dmu_write 4-5 8858 NULL
+ddi_copyout_9401 ddi_copyout 3 9401 NULL
+avl_numnodes_12384 avl_numnodes 0 12384 NULL
+dmu_write_uio_dnode_12473 dmu_write_uio_dnode 3 12473 NULL
+dmu_xuio_init_12866 dmu_xuio_init 2 12866 NULL
+zpl_read_common_14389 zpl_read_common 0 14389 NULL
+dmu_snapshot_realname_14632 dmu_snapshot_realname 4 14632 NULL
+kmem_alloc_debug_14852 kmem_alloc_debug 1 14852 NULL
+kmalloc_node_nofail_15151 kmalloc_node_nofail 1 15151 NULL
+dmu_write_uio_16351 dmu_write_uio 4 16351 NULL
+zfs_log_write_16524 zfs_log_write 6-5 16524 NULL
+sa_build_layouts_16910 sa_build_layouts 3 16910 NULL
+dsl_dir_namelen_17053 dsl_dir_namelen 0 17053 NULL
+kcopy_copy_to_user_17336 kcopy_copy_to_user 5 17336 NULL
+sa_add_layout_entry_17507 sa_add_layout_entry 3 17507 NULL
+sa_attr_table_setup_18029 sa_attr_table_setup 3 18029 NULL
+uiocopy_18680 uiocopy 2 18680 NULL
+dmu_buf_hold_array_by_dnode_19125 dmu_buf_hold_array_by_dnode 2-3 19125 NULL
+zpl_acl_from_xattr_21141 zpl_acl_from_xattr 2 21141 NULL
+dsl_pool_tx_assign_init_22518 dsl_pool_tx_assign_init 2 22518 NULL
+nvlist_lookup_byte_array_22527 nvlist_lookup_byte_array 0 22527 NULL
+sa_replace_all_by_template_locked_22533 sa_replace_all_by_template_locked 3 22533 NULL
+tsd_hash_table_init_22559 tsd_hash_table_init 1 22559 NULL
+spa_vdev_remove_aux_23966 spa_vdev_remove_aux 4 23966 NULL
+zpl_xattr_acl_set_access_24129 zpl_xattr_acl_set_access 4 24129 NULL
+dmu_assign_arcbuf_24622 dmu_assign_arcbuf 2 24622 NULL
+zap_lookup_norm_25166 zap_lookup_norm 9 25166 NULL
+dmu_prealloc_25456 dmu_prealloc 4-3 25456 NULL
+kmalloc_nofail_26347 kmalloc_nofail 1 26347 NULL
+zfsctl_snapshot_zpath_27578 zfsctl_snapshot_zpath 2 27578 NULL
+zpios_dmu_read_30015 zpios_dmu_read 4-5 30015 NULL
+splat_write_30943 splat_write 3 30943 NULL
+zpl_xattr_get_sa_31183 zpl_xattr_get_sa 0 31183 NULL
+dmu_read_uio_31467 dmu_read_uio 4 31467 NULL
+zfs_replay_fuids_31479 zfs_replay_fuids 4 31479 NULL
+spa_history_log_to_phys_31632 spa_history_log_to_phys 0-1 31632 NULL
+__zpl_xattr_get_32601 __zpl_xattr_get 0 32601 NULL
+proc_copyout_string_34049 proc_copyout_string 2 34049 NULL
+nv_alloc_sleep_spl_34544 nv_alloc_sleep_spl 2 34544 NULL
+nv_alloc_nosleep_spl_34761 nv_alloc_nosleep_spl 2 34761 NULL
+zap_leaf_array_match_36922 zap_leaf_array_match 4 36922 NULL
+copyinstr_36980 copyinstr 3 36980 NULL
+zpl_xattr_acl_set_default_37864 zpl_xattr_acl_set_default 4 37864 NULL
+splat_read_38116 splat_read 3 38116 NULL
+sa_setup_38756 sa_setup 4 38756 NULL
+vdev_disk_physio_39898 vdev_disk_physio 3 39898 NULL
+arc_buf_size_39982 arc_buf_size 0 39982 NULL
+kzalloc_nofail_40719 kzalloc_nofail 1 40719 NULL
+fuidstr_to_sid_40777 fuidstr_to_sid 4 40777 NULL
+vdev_raidz_matrix_reconstruct_40852 vdev_raidz_matrix_reconstruct 2-3 40852 NULL
+sa_find_layout_40892 sa_find_layout 4 40892 NULL
+zpl_xattr_get_dir_41918 zpl_xattr_get_dir 0 41918 NULL
+zfs_sa_get_xattr_42600 zfs_sa_get_xattr 0 42600 NULL
+zpl_xattr_acl_set_42808 zpl_xattr_acl_set 4 42808 NULL
+xdr_dec_array_43091 xdr_dec_array 5 43091 NULL
+dsl_dataset_namelen_43136 dsl_dataset_namelen 0 43136 NULL
+kcopy_write_43683 kcopy_write 3 43683 NULL
+uiomove_44355 uiomove 2 44355 NULL
+dmu_read_44418 dmu_read 4-3 44418 NULL
+ddi_copyin_44846 ddi_copyin 3 44846 NULL
+kcopy_do_get_45061 kcopy_do_get 5 45061 NULL
+copyin_45945 copyin 3 45945 NULL
+zil_itx_create_46555 zil_itx_create 2 46555 NULL
+dmu_write_uio_dbuf_48064 dmu_write_uio_dbuf 3 48064 NULL
+blk_rq_pos_48233 blk_rq_pos 0 48233 NULL
+spa_history_write_49650 spa_history_write 3 49650 NULL
+kcopy_copy_pages_to_user_49823 kcopy_copy_pages_to_user 3-4 49823 NULL
+zfs_log_write_50162 zfs_log_write 6-5 50162 NULL
+i_fm_alloc_51038 i_fm_alloc 2 51038 NULL
+copyout_51409 copyout 3 51409 NULL
+zvol_log_write_54898 zvol_log_write 4-3 54898 NULL
+zfs_acl_node_alloc_55641 zfs_acl_node_alloc 1 55641 NULL
+get_nvlist_56685 get_nvlist 2 56685 NULL
+zprop_get_numprops_56820 zprop_get_numprops 0 56820 NULL
+splat_taskq_test4_common_59829 splat_taskq_test4_common 5 59829 NULL
+zfs_replay_domain_cnt_61399 zfs_replay_domain_cnt 0 61399 NULL
+zpios_write_61823 zpios_write 3 61823 NULL
+proc_copyin_string_62019 proc_copyin_string 4 62019 NULL
+random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 NULL
+zpios_read_64734 zpios_read 3 64734 NULL
diff -ruNp linux-3.13.11/tools/gcc/size_overflow_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_plugin.c
--- linux-3.13.11/tools/gcc/size_overflow_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/size_overflow_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,4166 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
+ *
+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
+ *
+ * Usage:
+ * $ # for 4.5/4.6/C based 4.7
+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu99 -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
+ * $ # for C++ based 4.7/4.8+
+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu++98 -fno-rtti -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
+ *
+ * $ gcc -fplugin=./size_overflow_plugin.so test.c -O2
+ */
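+
+/*
+ * Illustrative example (the function name below is hypothetical, not taken
+ * from the kernel tree): a parameter that carries a size is marked so this
+ * plugin recomputes it with double precision, e.g.
+ *
+ *	void *alloc_buf(unsigned long flags, size_t len) __attribute__((size_overflow(2)));
+ *
+ * Functions that are not annotated in the source are listed in the
+ * size_overflow_hash(_aux).data tables instead, included below through the
+ * generated size_overflow_hash.h / size_overflow_hash_aux.h headers.
+ */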
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info size_overflow_plugin_info = {
+	.version	= "20140407",
+	.help		= "no-size-overflow\tturn off size overflow checking\n",
+};
+
+#define BEFORE_STMT true
+#define AFTER_STMT false
+#define CREATE_NEW_VAR NULL_TREE
+#define CODES_LIMIT 32
+#define MAX_PARAM 31
+#define VEC_LEN 128
+#define RET_CHECK NULL_TREE
+#define CANNOT_FIND_ARG 32
+#define WRONG_NODE 32
+#define NOT_INTENTIONAL_ASM NULL
+#define MIN_CHECK true
+#define MAX_CHECK false
+
+#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
+#define YES_ASM_STR "# size_overflow MARK_YES "
+#define OK_ASM_STR "# size_overflow "
+
+struct size_overflow_hash {
+	const struct size_overflow_hash * const next;
+	const char * const name;
+	const unsigned int param;
+};
+
+#include "size_overflow_hash.h"
+#include "size_overflow_hash_aux.h"
+
+enum mark {
+	MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
+};
+
+static unsigned int call_count;
+
+enum stmt_flags {
+	MY_STMT, NO_CAST_CHECK, VISITED_STMT, NO_FLAGS
+};
+
+struct visited {
+	struct visited *next;
+	const_tree fndecl;
+	unsigned int num;
+};
+
+struct next_cgraph_node {
+	struct next_cgraph_node *next;
+	struct cgraph_node *current_function;
+	tree callee_fndecl;
+	unsigned int num;
+};
+
+struct interesting_node {
+	struct interesting_node *next;
+	gimple first_stmt;
+	const_tree fndecl;
+	tree node;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, gc) *last_nodes;
+#else
+	vec<tree, va_gc> *last_nodes;
+#endif
+	unsigned int num;
+	enum mark intentional_attr_decl;
+	enum mark intentional_attr_cur_fndecl;
+	gimple intentional_mark_from_gimple;
+};
+
+static tree report_size_overflow_decl;
+
+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
+
+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
+static tree get_size_overflow_type(gimple stmt, const_tree node);
+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
+	unsigned int arg_count;
+	enum tree_code code = TREE_CODE(*node);
+
+	switch (code) {
+	case FUNCTION_DECL:
+		arg_count = type_num_arguments(TREE_TYPE(*node));
+		break;
+	case FUNCTION_TYPE:
+	case METHOD_TYPE:
+		arg_count = type_num_arguments(*node);
+		break;
+	default:
+		*no_add_attrs = true;
+		error("%s: %qE attribute only applies to functions", __func__, name);
+		return NULL_TREE;
+	}
+
+	for (; args; args = TREE_CHAIN(args)) {
+		tree position = TREE_VALUE(args);
+		if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
+			error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
+			*no_add_attrs = true;
+		}
+	}
+	return NULL_TREE;
+}
+
+static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
+	unsigned int arg_count;
+	enum tree_code code = TREE_CODE(*node);
+
+	switch (code) {
+	case FUNCTION_DECL:
+		arg_count = type_num_arguments(TREE_TYPE(*node));
+		break;
+	case FUNCTION_TYPE:
+	case METHOD_TYPE:
+		arg_count = type_num_arguments(*node);
+		break;
+	case FIELD_DECL:
+		return NULL_TREE;
+	default:
+		*no_add_attrs = true;
+		error("%qE attribute only applies to functions", name);
+		return NULL_TREE;
+	}
+
+	if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
+		return NULL_TREE;
+
+	for (; args; args = TREE_CHAIN(args)) {
+		tree position = TREE_VALUE(args);
+		if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
+			error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
+			*no_add_attrs = true;
+		}
+	}
+	return NULL_TREE;
+}
+
+static struct attribute_spec size_overflow_attr = {
+	.name				= "size_overflow",
+	.min_length			= 1,
+	.max_length			= -1,
+	.decl_required			= true,
+	.type_required			= false,
+	.function_type_required		= false,
+	.handler			= handle_size_overflow_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity		= false
+#endif
+};
+
+static struct attribute_spec intentional_overflow_attr = {
+	.name				= "intentional_overflow",
+	.min_length			= 1,
+	.max_length			= -1,
+	.decl_required			= true,
+	.type_required			= false,
+	.function_type_required		= false,
+	.handler			= handle_intentional_overflow_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity		= false
+#endif
+};
+
+static void register_attributes(void __unused *event_data, void __unused *data)
+{
+	register_attribute(&size_overflow_attr);
+	register_attribute(&intentional_overflow_attr);
+}
+
+static enum stmt_flags get_stmt_flag(gimple stmt)
+{
+	bool bit_1, bit_2;
+
+	bit_1 = gimple_plf(stmt, GF_PLF_1);
+	bit_2 = gimple_plf(stmt, GF_PLF_2);
+
+	if (!bit_1 && !bit_2)
+		return NO_FLAGS;
+	if (bit_1 && bit_2)
+		return MY_STMT;
+	if (!bit_1 && bit_2)
+		return VISITED_STMT;
+	return NO_CAST_CHECK;
+}
+
+static void set_stmt_flag(gimple stmt, enum stmt_flags new_flag)
+{
+	bool bit_1, bit_2;
+
+	switch (new_flag) {
+	case NO_FLAGS:
+		bit_1 = bit_2 = false;
+		break;
+	case MY_STMT:
+		bit_1 = bit_2 = true;
+		break;
+	case VISITED_STMT:
+		bit_1 = false;
+		bit_2 = true;
+		break;
+	case NO_CAST_CHECK:
+		bit_1 = true;
+		bit_2 = false;
+		break;
+	default:
+		gcc_unreachable();
+	}
+
+	gimple_set_plf(stmt, GF_PLF_1, bit_1);
+	gimple_set_plf(stmt, GF_PLF_2, bit_2);
+}
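+
+/*
+ * Flag encoding used by get_stmt_flag()/set_stmt_flag(): the two gimple
+ * pass-local flags are interpreted as (GF_PLF_1, GF_PLF_2) = (0,0) NO_FLAGS,
+ * (1,1) MY_STMT, (0,1) VISITED_STMT, (1,0) NO_CAST_CHECK.
+ */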
+
+static bool is_bool(const_tree node)
+{
+	const_tree type;
+
+	if (node == NULL_TREE)
+		return false;
+
+	type = TREE_TYPE(node);
+	if (!INTEGRAL_TYPE_P(type))
+		return false;
+	if (TREE_CODE(type) == BOOLEAN_TYPE)
+		return true;
+	if (TYPE_PRECISION(type) == 1)
+		return true;
+	return false;
+}
+
+static bool skip_types(const_tree var)
+{
+	tree type;
+	enum tree_code code;
+
+	if (is_gimple_constant(var))
+		return true;
+
+	switch (TREE_CODE(var)) {
+		case ADDR_EXPR:
+#if BUILDING_GCC_VERSION >= 4006
+		case MEM_REF:
+#endif
+		case ARRAY_REF:
+		case BIT_FIELD_REF:
+		case INDIRECT_REF:
+		case TARGET_MEM_REF:
+		case COMPONENT_REF:
+		case VAR_DECL:
+		case VIEW_CONVERT_EXPR:
+			return true;
+		default:
+			break;
+	}
+
+	code = TREE_CODE(var);
+	gcc_assert(code == SSA_NAME || code == PARM_DECL);
+
+	type = TREE_TYPE(var);
+	switch (TREE_CODE(type)) {
+		case INTEGER_TYPE:
+		case ENUMERAL_TYPE:
+			return false;
+		case BOOLEAN_TYPE:
+			return is_bool(var);
+		default:
+			return true;
+	}
+}
+
+static inline gimple get_def_stmt(const_tree node)
+{
+	gcc_assert(node != NULL_TREE);
+
+	if (skip_types(node))
+		return NULL;
+
+	if (TREE_CODE(node) != SSA_NAME)
+		return NULL;
+	return SSA_NAME_DEF_STMT(node);
+}
+
+static unsigned char get_tree_code(const_tree type)
+{
+	switch (TREE_CODE(type)) {
+	case ARRAY_TYPE:
+		return 0;
+	case BOOLEAN_TYPE:
+		return 1;
+	case ENUMERAL_TYPE:
+		return 2;
+	case FUNCTION_TYPE:
+		return 3;
+	case INTEGER_TYPE:
+		return 4;
+	case POINTER_TYPE:
+		return 5;
+	case RECORD_TYPE:
+		return 6;
+	case UNION_TYPE:
+		return 7;
+	case VOID_TYPE:
+		return 8;
+	case REAL_TYPE:
+		return 9;
+	case VECTOR_TYPE:
+		return 10;
+	case REFERENCE_TYPE:
+		return 11;
+	case OFFSET_TYPE:
+		return 12;
+	case COMPLEX_TYPE:
+		return 13;
+	default:
+		debug_tree((tree)type);
+		gcc_unreachable();
+	}
+}
+
+struct function_hash {
+	size_t tree_codes_len;
+	unsigned char tree_codes[CODES_LIMIT];
+	const_tree fndecl;
+	unsigned int hash;
+};
+
+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
+static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
+{
+#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
+#define cwmixa( in ) { cwfold( in, m, k, h ); }
+#define cwmixb( in ) { cwfold( in, n, h, k ); }
+
+	unsigned int m = 0x57559429;
+	unsigned int n = 0x5052acdb;
+	const unsigned int *key4 = (const unsigned int *)key;
+	unsigned int h = len;
+	unsigned int k = len + seed + n;
+	unsigned long long p;
+
+	while (len >= 8) {
+		cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
+		len -= 8;
+	}
+	if (len >= 4) {
+		cwmixb(key4[0]) key4 += 1;
+		len -= 4;
+	}
+	if (len)
+		cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
+	cwmixb(h ^ (k + n));
+	return k ^ h;
+
+#undef cwfold
+#undef cwmixa
+#undef cwmixb
+}
+
+static void set_hash(const char *fn_name, struct function_hash *fn_hash_data)
+{
+	unsigned int fn, codes, seed = 0;
+
+	fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff;
+	codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff;
+
+	fn_hash_data->hash = fn ^ codes;
+}
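+
+/*
+ * set_hash() folds the function name and the encoded return/parameter tree
+ * codes into two 16-bit CrapWow values and xors them; get_function_hash()
+ * below uses the result to index size_overflow_hash[] and
+ * size_overflow_hash_aux[] and then walks the per-bucket chain by name.
+ */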
+
+static void set_node_codes(const_tree type, struct function_hash *fn_hash_data)
+{
+	gcc_assert(type != NULL_TREE);
+	gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type);
+
+	while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) {
+		fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type);
+		fn_hash_data->tree_codes_len++;
+		type = TREE_TYPE(type);
+	}
+}
+
+static void set_result_codes(const_tree node, struct function_hash *fn_hash_data)
+{
+	const_tree result;
+
+	gcc_assert(node != NULL_TREE);
+
+	if (DECL_P(node)) {
+		result = DECL_RESULT(node);
+		if (result != NULL_TREE)
+			return set_node_codes(TREE_TYPE(result), fn_hash_data);
+		return set_result_codes(TREE_TYPE(node), fn_hash_data);
+	}
+
+	gcc_assert(TYPE_P(node));
+
+	if (TREE_CODE(node) == FUNCTION_TYPE)
+		return set_result_codes(TREE_TYPE(node), fn_hash_data);
+
+	return set_node_codes(node, fn_hash_data);
+}
+
+static void set_function_codes(struct function_hash *fn_hash_data)
+{
+	const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl);
+	enum tree_code code = TREE_CODE(type);
+
+	gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
+
+	set_result_codes(fn_hash_data->fndecl, fn_hash_data);
+
+	for (arg = TYPE_ARG_TYPES(type); arg != NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg))
+		set_node_codes(TREE_VALUE(arg), fn_hash_data);
+}
+
+static const struct size_overflow_hash *get_proper_hash_chain(const struct size_overflow_hash *entry, const char *func_name)
+{
+	while (entry) {
+		if (!strcmp(entry->name, func_name))
+			return entry;
+		entry = entry->next;
+	}
+	return NULL;
+}
+
+static const struct size_overflow_hash *get_function_hash(const_tree fndecl)
+{
+	const struct size_overflow_hash *entry;
+	struct function_hash fn_hash_data;
+	const char *func_name;
+
+	// skip builtins __builtin_constant_p
+	if (DECL_BUILT_IN(fndecl))
+		return NULL;
+
+	fn_hash_data.fndecl = fndecl;
+	fn_hash_data.tree_codes_len = 0;
+
+	set_function_codes(&fn_hash_data);
+	gcc_assert(fn_hash_data.tree_codes_len != 0);
+
+	func_name = DECL_NAME_POINTER(fn_hash_data.fndecl);
+	set_hash(func_name, &fn_hash_data);
+
+	entry = size_overflow_hash[fn_hash_data.hash];
+	entry = get_proper_hash_chain(entry, func_name);
+	if (entry)
+		return entry;
+	entry = size_overflow_hash_aux[fn_hash_data.hash];
+	return get_proper_hash_chain(entry, func_name);
+}
+
+static void print_missing_msg(const_tree func, unsigned int argnum)
+{
+	location_t loc;
+	const char *curfunc;
+	struct function_hash fn_hash_data;
+
+	fn_hash_data.fndecl = DECL_ORIGIN(func);
+	fn_hash_data.tree_codes_len = 0;
+
+	loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl);
+	curfunc = DECL_NAME_POINTER(fn_hash_data.fndecl);
+
+	set_function_codes(&fn_hash_data);
+	set_hash(curfunc, &fn_hash_data);
+
+	inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
+}
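+
+/*
+ * The "+%s+%u+%u+" suffix printed above appears to follow the layout of the
+ * entries in size_overflow_hash(_aux).data, e.g. "zpios_read_64734 zpios_read
+ * 3 64734 NULL" (name_hash, name, marked parameter, hash), so a reported
+ * function can be turned into a new hash table entry.
+ */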
+
+static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
+{
+	tree var;
+	unsigned int argnum = 1;
+
+	if (TREE_CODE(arg) == SSA_NAME)
+		arg = SSA_NAME_VAR(arg);
+
+	for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
+		if (!operand_equal_p(arg, var, 0) && strcmp(DECL_NAME_POINTER(var), DECL_NAME_POINTER(arg)))
+			continue;
+		if (!skip_types(var))
+			return argnum;
+	}
+
+	return CANNOT_FIND_ARG;
+}
+
+static tree create_new_var(tree type)
+{
+	tree new_var = create_tmp_var(type, "cicus");
+
+	add_referenced_var(new_var);
+	return new_var;
+}
+
+static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
+{
+	gimple assign;
+	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+	tree type = TREE_TYPE(rhs1);
+	tree lhs = create_new_var(type);
+
+	gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
+	assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
+	gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
+
+	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+	set_stmt_flag(assign, MY_STMT);
+	return assign;
+}
+
+static tree cast_a_tree(tree type, tree var)
+{
+	gcc_assert(type != NULL_TREE);
+	gcc_assert(var != NULL_TREE);
+	gcc_assert(fold_convertible_p(type, var));
+
+	return fold_convert(type, var);
+}
+
+static tree get_lhs(const_gimple stmt)
+{
+	switch (gimple_code(stmt)) {
+	case GIMPLE_ASSIGN:
+	case GIMPLE_CALL:
+		return gimple_get_lhs(stmt);
+	case GIMPLE_PHI:
+		return gimple_phi_result(stmt);
+	default:
+		return NULL_TREE;
+	}
+}
+
+static bool skip_cast(tree dst_type, const_tree rhs, bool force)
+{
+	const_gimple def_stmt = get_def_stmt(rhs);
+
+	if (force)
+		return false;
+
+	if (is_gimple_constant(rhs))
+		return false;
+
+	if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
+		return false;
+
+	if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
+		return false;
+
+	// DI type can be on 32 bit (from create_assign) but overflow type stays DI
+	if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
+		return false;
+
+	return true;
+}
+
+static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
+{
+	gimple assign, def_stmt;
+
+	gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
+	if (gsi_end_p(*gsi) && before == AFTER_STMT)
+		gcc_unreachable();
+
+	def_stmt = get_def_stmt(rhs);
+	if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && get_stmt_flag(def_stmt) == MY_STMT)
+		return def_stmt;
+
+	if (lhs == CREATE_NEW_VAR)
+		lhs = create_new_var(dst_type);
+
+	assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
+
+	if (!gsi_end_p(*gsi)) {
+		location_t loc = gimple_location(gsi_stmt(*gsi));
+		gimple_set_location(assign, loc);
+	}
+
+	gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
+
+	if (before)
+		gsi_insert_before(gsi, assign, GSI_NEW_STMT);
+	else
+		gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+	update_stmt(assign);
+	return assign;
+}
+
+static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
+{
+	gimple_stmt_iterator gsi;
+	tree lhs;
+	gimple new_stmt;
+
+	if (rhs == NULL_TREE)
+		return NULL_TREE;
+
+	gsi = gsi_for_stmt(stmt);
+	new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
+	set_stmt_flag(new_stmt, MY_STMT);
+
+	lhs = get_lhs(new_stmt);
+	gcc_assert(lhs != NULL_TREE);
+	return lhs;
+}
+
+static tree cast_to_TI_type(gimple stmt, tree node)
+{
+	gimple_stmt_iterator gsi;
+	gimple cast_stmt;
+	tree type = TREE_TYPE(node);
+
+	if (types_compatible_p(type, intTI_type_node))
+		return node;
+
+	gsi = gsi_for_stmt(stmt);
+	cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
+	set_stmt_flag(cast_stmt, MY_STMT);
+	return gimple_assign_lhs(cast_stmt);
+}
+
+static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
+{
+	tree lhs, new_lhs;
+	gimple_stmt_iterator gsi;
+
+	if (rhs1 == NULL_TREE) {
+		debug_gimple_stmt(oldstmt);
+		error("%s: rhs1 is NULL_TREE", __func__);
+		gcc_unreachable();
+	}
+
+	switch (gimple_code(oldstmt)) {
+	case GIMPLE_ASM:
+		lhs = rhs1;
+		break;
+	case GIMPLE_CALL:
+	case GIMPLE_ASSIGN:
+		lhs = gimple_get_lhs(oldstmt);
+		break;
+	default:
+		debug_gimple_stmt(oldstmt);
+		gcc_unreachable();
+	}
+
+	gsi = gsi_for_stmt(oldstmt);
+	pointer_set_insert(visited, oldstmt);
+	if (lookup_stmt_eh_lp(oldstmt) != 0) {
+		basic_block next_bb, cur_bb;
+		const_edge e;
+
+		gcc_assert(before == false);
+		gcc_assert(stmt_can_throw_internal(oldstmt));
+		gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
+		gcc_assert(!gsi_end_p(gsi));
+
+		cur_bb = gimple_bb(oldstmt);
+		next_bb = cur_bb->next_bb;
+		e = find_edge(cur_bb, next_bb);
+		gcc_assert(e != NULL);
+		gcc_assert(e->flags & EDGE_FALLTHRU);
+
+		gsi = gsi_after_labels(next_bb);
+		gcc_assert(!gsi_end_p(gsi));
+
+		before = true;
+		oldstmt = gsi_stmt(gsi);
+	}
+
+	new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
+	return new_lhs;
+}
+
+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+	gimple stmt;
+	gimple_stmt_iterator gsi;
+	tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
+
+	if (get_stmt_flag(oldstmt) == MY_STMT)
+		return lhs;
+
+	if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
+		rhs1 = gimple_assign_rhs1(oldstmt);
+		rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
+	}
+	if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
+		rhs2 = gimple_assign_rhs2(oldstmt);
+		rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
+	}
+
+	stmt = gimple_copy(oldstmt);
+	gimple_set_location(stmt, gimple_location(oldstmt));
+	set_stmt_flag(stmt, MY_STMT);
+
+	if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
+		gimple_assign_set_rhs_code(stmt, MULT_EXPR);
+
+	size_overflow_type = get_size_overflow_type(oldstmt, node);
+
+	new_var = create_new_var(size_overflow_type);
+	new_var = make_ssa_name(new_var, stmt);
+	gimple_assign_set_lhs(stmt, new_var);
+
+	if (rhs1 != NULL_TREE)
+		gimple_assign_set_rhs1(stmt, rhs1);
+
+	if (rhs2 != NULL_TREE)
+		gimple_assign_set_rhs2(stmt, rhs2);
+#if BUILDING_GCC_VERSION >= 4006
+	if (rhs3 != NULL_TREE)
+		gimple_assign_set_rhs3(stmt, rhs3);
+#endif
+	gimple_set_vuse(stmt, gimple_vuse(oldstmt));
+	gimple_set_vdef(stmt, gimple_vdef(oldstmt));
+
+	gsi = gsi_for_stmt(oldstmt);
+	gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
+	update_stmt(stmt);
+	pointer_set_insert(visited, oldstmt);
+	return gimple_assign_lhs(stmt);
+}
+
+static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
+{
+	gimple assign;
+	gimple_stmt_iterator gsi;
+	basic_block first_bb;
+
+	gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
+
+	if (bb->index == 0) {
+		first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
+		gcc_assert(dom_info_available_p(CDI_DOMINATORS));
+		set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
+		bb = first_bb;
+	}
+
+	gsi = gsi_after_labels(bb);
+	assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
+	set_stmt_flag(assign, MY_STMT);
+
+	return gimple_assign_lhs(assign);
+}
+
+static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
+{
+	gimple_stmt_iterator gsi;
+	gimple assign, def_stmt = get_def_stmt(new_arg);
+
+	if (gimple_code(def_stmt) == GIMPLE_PHI) {
+		gsi = gsi_after_labels(gimple_bb(def_stmt));
+		assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
+	} else {
+		gsi = gsi_for_stmt(def_stmt);
+		assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
+	}
+
+	set_stmt_flag(assign, MY_STMT);
+	return gimple_assign_lhs(assign);
+}
+
+static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
+{
+	basic_block bb;
+	gimple_stmt_iterator gsi;
+	const_gimple def_stmt;
+	gimple assign;
+
+	def_stmt = get_def_stmt(arg);
+	bb = gimple_bb(def_stmt);
+	gcc_assert(bb->index != 0);
+	gsi = gsi_after_labels(bb);
+
+	assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
+	set_stmt_flag(assign, MY_STMT);
+	return gimple_assign_lhs(assign);
+}
+
+static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
+{
+	tree size_overflow_type;
+	tree arg;
+	const_gimple def_stmt;
+
+	if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
+		return new_arg;
+
+	arg = gimple_phi_arg_def(oldstmt, i);
+	def_stmt = get_def_stmt(arg);
+	gcc_assert(def_stmt != NULL);
+	size_overflow_type = get_size_overflow_type(oldstmt, arg);
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_PHI:
+		return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
+	case GIMPLE_NOP: {
+		basic_block bb;
+
+		bb = gimple_phi_arg_edge(oldstmt, i)->src;
+		return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
+	}
+	case GIMPLE_ASM: {
+		gimple_stmt_iterator gsi;
+		gimple assign, stmt = get_def_stmt(arg);
+
+		gsi = gsi_for_stmt(stmt);
+		assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
+		set_stmt_flag(assign, MY_STMT);
+		return gimple_assign_lhs(assign);
+	}
+	default:
+		gcc_assert(new_arg != NULL_TREE);
+		gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
+		return use_phi_ssa_name(ssa_name_var, new_arg);
+	}
+}
+
+static gimple overflow_create_phi_node(gimple oldstmt, tree result)
+{
+	basic_block bb;
+	gimple phi;
+	gimple_seq seq;
+	gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
+
+	bb = gsi_bb(gsi);
+
+	if (result == NULL_TREE) {
+		tree old_result = gimple_phi_result(oldstmt);
+		tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
+
+		result = create_new_var(size_overflow_type);
+	}
+
+	phi = create_phi_node(result, bb);
+	gimple_phi_set_result(phi, make_ssa_name(result, phi));
+	seq = phi_nodes(bb);
+	gsi = gsi_last(seq);
+	gsi_remove(&gsi, false);
+
+	gsi = gsi_for_stmt(oldstmt);
+	gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
+	gimple_set_bb(phi, bb);
+	set_stmt_flag(phi, MY_STMT);
+	return phi;
+}
+
+#if BUILDING_GCC_VERSION <= 4007
+static tree create_new_phi_node(VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
+#else
+static tree create_new_phi_node(vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
+#endif
+{
+	gimple new_phi;
+	unsigned int i;
+	tree arg, result;
+	location_t loc = gimple_location(oldstmt);
+
+#if BUILDING_GCC_VERSION <= 4007
+	gcc_assert(!VEC_empty(tree, *args));
+#else
+	gcc_assert(!args->is_empty());
+#endif
+
+	new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
+	result = gimple_phi_result(new_phi);
+	ssa_name_var = SSA_NAME_VAR(result);
+
+#if BUILDING_GCC_VERSION <= 4007
+	FOR_EACH_VEC_ELT(tree, *args, i, arg) {
+#else
+	FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
+#endif
+		arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
+		add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
+	}
+
+#if BUILDING_GCC_VERSION <= 4007
+	VEC_free(tree, heap, *args);
+#else
+	vec_free(args);
+#endif
+	update_stmt(new_phi);
+	return result;
+}
+
+static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
+{
+	tree ssa_name_var = NULL_TREE;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, heap) *args = NULL;
+#else
+	vec<tree, va_heap, vl_embed> *args = NULL;
+#endif
+	gimple oldstmt = get_def_stmt(orig_result);
+	unsigned int i, len = gimple_phi_num_args(oldstmt);
+
+	pointer_set_insert(visited, oldstmt);
+	for (i = 0; i < len; i++) {
+		tree arg, new_arg;
+
+		arg = gimple_phi_arg_def(oldstmt, i);
+		new_arg = expand(visited, caller_node, arg);
+
+		if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
+			ssa_name_var = SSA_NAME_VAR(new_arg);
+
+		if (is_gimple_constant(arg)) {
+			tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
+
+			new_arg = cast_a_tree(size_overflow_type, arg);
+		}
+
+#if BUILDING_GCC_VERSION <= 4007
+		VEC_safe_push(tree, heap, args, new_arg);
+#else
+		vec_safe_push(args, new_arg);
+#endif
+	}
+
+#if BUILDING_GCC_VERSION <= 4007
+	return create_new_phi_node(&args, ssa_name_var, oldstmt);
+#else
+	return create_new_phi_node(args, ssa_name_var, oldstmt);
+#endif
+}
+
+static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
+{
+	gimple assign;
+	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+	tree origtype = TREE_TYPE(orig_rhs);
+
+	gcc_assert(is_gimple_assign(stmt));
+
+	assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
+	set_stmt_flag(assign, MY_STMT);
+	return gimple_assign_lhs(assign);
+}
+
+static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
+{
+	const_tree rhs1, lhs, rhs1_type, lhs_type;
+	enum machine_mode lhs_mode, rhs_mode;
+	gimple def_stmt = get_def_stmt(no_const_rhs);
+
+	if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+		return false;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	lhs = gimple_assign_lhs(def_stmt);
+	rhs1_type = TREE_TYPE(rhs1);
+	lhs_type = TREE_TYPE(lhs);
+	rhs_mode = TYPE_MODE(rhs1_type);
+	lhs_mode = TYPE_MODE(lhs_type);
+	if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
+		return false;
+
+	return true;
+}
+
+static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
+{
+	tree rhs1 = gimple_assign_rhs1(stmt);
+	tree lhs = gimple_assign_lhs(stmt);
+	const_tree rhs1_type = TREE_TYPE(rhs1);
+	const_tree lhs_type = TREE_TYPE(lhs);
+
+	if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	return create_assign(visited, stmt, rhs1, AFTER_STMT);
+}
+
+static bool no_uses(tree node)
+{
+	imm_use_iterator imm_iter;
+	use_operand_p use_p;
+
+	FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
+		const_gimple use_stmt = USE_STMT(use_p);
+
+		if (use_stmt == NULL)
+			return true;
+		if (is_gimple_debug(use_stmt))
+			continue;
+		return false;
+	}
+	return true;
+}
+
+// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
+static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
+{
+	tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
+	gimple def_stmt = get_def_stmt(lhs);
+
+	if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+		return false;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs_type = TREE_TYPE(rhs1);
+	lhs_type = TREE_TYPE(lhs);
+	if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
+		return false;
+	if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
+		return false;
+
+	def_stmt = get_def_stmt(rhs1);
+	if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
+		return false;
+
+	if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
+		return false;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+	if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
+		return false;
+
+	if (is_gimple_constant(rhs2))
+		not_const_rhs = rhs1;
+	else
+		not_const_rhs = rhs2;
+
+	return no_uses(not_const_rhs);
+}
+
+static bool skip_lhs_cast_check(const_gimple stmt)
+{
+	const_tree rhs = gimple_assign_rhs1(stmt);
+	const_gimple def_stmt = get_def_stmt(rhs);
+
+	// 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
+	if (gimple_code(def_stmt) == GIMPLE_ASM)
+		return true;
+
+	if (is_const_plus_unsigned_signed_truncation(rhs))
+		return true;
+
+	return false;
+}
+
+static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
+{
+	bool cast_lhs, cast_rhs;
+	tree lhs = gimple_assign_lhs(stmt);
+	tree rhs = gimple_assign_rhs1(stmt);
+	const_tree lhs_type = TREE_TYPE(lhs);
+	const_tree rhs_type = TREE_TYPE(rhs);
+	enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
+	enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
+	unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
+	unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
+
+	static bool check_lhs[3][4] = {
+		// ss    su     us     uu
+		{ false, true,  true,  false }, // lhs > rhs
+		{ false, false, false, false }, // lhs = rhs
+		{ true,  true,  true,  true  }, // lhs < rhs
+	};
+
+	static bool check_rhs[3][4] = {
+		// ss    su     us     uu
+		{ true,  false, true,  true  }, // lhs > rhs
+		{ true,  false, true,  true  }, // lhs = rhs
+		{ true,  false, true,  true  }, // lhs < rhs
+	};
+
+	// skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
+	if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	if (lhs_size > rhs_size) {
+		cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+		cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+	} else if (lhs_size == rhs_size) {
+		cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+		cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+	} else {
+		cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+		cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
+	}
+
+	if (!cast_lhs && !cast_rhs)
+		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+
+	if (cast_lhs && !skip_lhs_cast_check(stmt))
+		check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
+
+	if (cast_rhs)
+		check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
+
+	return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+}
+
+static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
+{
+	tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
+
+	if (get_stmt_flag(stmt) == MY_STMT)
+		return lhs;
+
+	rhs1 = gimple_assign_rhs1(stmt);
+	if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	new_rhs1 = expand(visited, caller_node, rhs1);
+
+	if (new_rhs1 == NULL_TREE)
+		return create_cast_assign(visited, stmt);
+
+	if (get_stmt_flag(stmt) == NO_CAST_CHECK)
+		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+
+	if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
+		tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
+
+		new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
+		check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+	}
+
+	if (!gimple_assign_cast_p(stmt))
+		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+
+	return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
+}
+
+static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
+{
+	tree rhs1, lhs = gimple_assign_lhs(stmt);
+	gimple def_stmt = get_def_stmt(lhs);
+
+	gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
+	rhs1 = gimple_assign_rhs1(def_stmt);
+
+	if (is_gimple_constant(rhs1))
+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
+
+	switch (TREE_CODE(rhs1)) {
+	case SSA_NAME:
+		return handle_unary_rhs(visited, caller_node, def_stmt);
+	case ARRAY_REF:
+	case BIT_FIELD_REF:
+	case ADDR_EXPR:
+	case COMPONENT_REF:
+	case INDIRECT_REF:
+#if BUILDING_GCC_VERSION >= 4006
+	case MEM_REF:
+#endif
+	case TARGET_MEM_REF:
+	case VIEW_CONVERT_EXPR:
+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
+	case PARM_DECL:
+	case VAR_DECL:
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	default:
+		debug_gimple_stmt(def_stmt);
+		debug_tree(rhs1);
+		gcc_unreachable();
+	}
+}
+
+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
+{
+	gimple cond_stmt;
+	gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
+
+	cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
+	gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
+	update_stmt(cond_stmt);
+}
+
+static tree create_string_param(tree string)
+{
+	tree i_type, a_type;
+	const int length = TREE_STRING_LENGTH(string);
+
+	gcc_assert(length > 0);
+
+	i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
+	a_type = build_array_type(char_type_node, i_type);
+
+	TREE_TYPE(string) = a_type;
+	TREE_CONSTANT(string) = 1;
+	TREE_READONLY(string) = 1;
+
+	return build1(ADDR_EXPR, ptr_type_node, string);
+}
+
+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
+{
+	gimple func_stmt;
+	const_gimple def_stmt;
+	const_tree loc_line;
+	tree loc_file, ssa_name, current_func;
+	expanded_location xloc;
+	char *ssa_name_buf;
+	int len;
+	struct cgraph_edge *edge;
+	struct cgraph_node *callee_node;
+	int frequency;
+	gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
+
+	def_stmt = get_def_stmt(arg);
+	xloc = expand_location(gimple_location(def_stmt));
+
+	if (!gimple_has_location(def_stmt)) {
+		xloc = expand_location(gimple_location(stmt));
+		if (!gimple_has_location(stmt))
+			xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
+	}
+
+	loc_line = build_int_cstu(unsigned_type_node, xloc.line);
+
+	loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
+	loc_file = create_string_param(loc_file);
+
+	current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
+	current_func = create_string_param(current_func);
+
+	gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
+	call_count++;
+	len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
+	gcc_assert(len > 0);
+	ssa_name = build_string(len + 1, ssa_name_buf);
+	free(ssa_name_buf);
+	ssa_name = create_string_param(ssa_name);
+
+	// void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
+	func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
+	gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+
+	callee_node = cgraph_get_create_node(report_size_overflow_decl);
+	frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
+
+	edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
+	gcc_assert(edge != NULL);
+}
+
+static void __unused print_the_code_insertions(const_gimple stmt)
+{
+	location_t loc = gimple_location(stmt);
+
+	inform(loc, "Integer size_overflow check applied here.");
+}
+
+static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
+{
+	basic_block cond_bb, join_bb, bb_true;
+	edge e;
+	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+	cond_bb = gimple_bb(stmt);
+	if (before)
+		gsi_prev(&gsi);
+	if (gsi_end_p(gsi))
+		e = split_block_after_labels(cond_bb);
+	else
+		e = split_block(cond_bb, gsi_stmt(gsi));
+	cond_bb = e->src;
+	join_bb = e->dest;
+	e->flags = EDGE_FALSE_VALUE;
+	e->probability = REG_BR_PROB_BASE;
+
+	bb_true = create_empty_bb(cond_bb);
+	make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
+	make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
+	make_edge(bb_true, join_bb, EDGE_FALLTHRU);
+
+	gcc_assert(dom_info_available_p(CDI_DOMINATORS));
+	set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
+	set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
+
+	if (current_loops != NULL) {
+		gcc_assert(cond_bb->loop_father == join_bb->loop_father);
+		add_bb_to_loop(bb_true, cond_bb->loop_father);
+	}
+
+	insert_cond(cond_bb, arg, cond_code, type_value);
+	insert_cond_result(caller_node, bb_true, stmt, arg, min);
+
+//	print_the_code_insertions(stmt);
+}
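+
+/*
+ * Shape of the control flow built by insert_check_size_overflow():
+ *
+ *	cond_bb:  if (arg <cond_code> type_value) goto bb_true; else goto join_bb;
+ *	bb_true:  call report_size_overflow(file, line, func, ssa_name), then fall through
+ *	join_bb:  execution continues with the original statement
+ */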
+
+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
+{
+	const_tree rhs_type = TREE_TYPE(rhs);
+	tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
+
+	gcc_assert(rhs_type != NULL_TREE);
+	if (TREE_CODE(rhs_type) == POINTER_TYPE)
+		return;
+
+	gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
+
+	if (is_const_plus_unsigned_signed_truncation(rhs))
+		return;
+
+	type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
+	// typemax (-1) < typemin (0)
+	if (TREE_OVERFLOW(type_max))
+		return;
+
+	type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
+
+	cast_rhs_type = TREE_TYPE(cast_rhs);
+	type_max_type = TREE_TYPE(type_max);
+	gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
+
+	insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
+
+	// special case: get_size_overflow_type(), 32, u64->s
+	if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
+		return;
+
+	type_min_type = TREE_TYPE(type_min);
+	gcc_assert(types_compatible_p(type_max_type, type_min_type));
+	insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
+static bool is_lt_signed_type_max(const_tree rhs)
+{
+	const_tree new_type, type_max, type = TREE_TYPE(rhs);
+
+	if (!TYPE_UNSIGNED(type))
+		return true;
+
+	switch (TYPE_MODE(type)) {
+	case QImode:
+		new_type = intQI_type_node;
+		break;
+	case HImode:
+		new_type = intHI_type_node;
+		break;
+	case SImode:
+		new_type = intSI_type_node;
+		break;
+	case DImode:
+		new_type = intDI_type_node;
+		break;
+	default:
+		debug_tree((tree)type);
+		gcc_unreachable();
+	}
+
+	type_max = TYPE_MAX_VALUE(new_type);
+	if (!tree_int_cst_lt(type_max, rhs))
+		return true;
+
+	return false;
+}
+
+static bool is_gt_zero(const_tree rhs)
+{
+	const_tree type = TREE_TYPE(rhs);
+
+	if (TYPE_UNSIGNED(type))
+		return true;
+
+	if (!tree_int_cst_lt(rhs, integer_zero_node))
+		return true;
+
+	return false;
+}
+
+static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
+{
+	if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
+		return false;
+	if (!is_gimple_constant(rhs))
+		return false;
+
+	// If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
+//	if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
+//		return false;
+
+	return true;
+}
+
+static tree get_def_stmt_rhs(const_tree var)
+{
+	tree rhs1, def_stmt_rhs1;
+	gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
+
+	def_stmt = get_def_stmt(var);
+	if (!gimple_assign_cast_p(def_stmt))
+		return NULL_TREE;
+	gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && get_stmt_flag(def_stmt) == MY_STMT && gimple_assign_cast_p(def_stmt));
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs1_def_stmt = get_def_stmt(rhs1);
+	if (!gimple_assign_cast_p(rhs1_def_stmt))
+		return rhs1;
+
+	def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
+	def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
+
+	switch (gimple_code(def_stmt_rhs1_def_stmt)) {
+	case GIMPLE_CALL:
+	case GIMPLE_NOP:
+	case GIMPLE_ASM:
+	case GIMPLE_PHI:
+		return def_stmt_rhs1;
+	case GIMPLE_ASSIGN:
+		return rhs1;
+	default:
+		debug_gimple_stmt(def_stmt_rhs1_def_stmt);
+		gcc_unreachable();
+	}
+}
+
+static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
+{
+	tree new_rhs, orig_rhs;
+	void (*gimple_assign_set_rhs)(gimple, tree);
+	tree rhs1 = gimple_assign_rhs1(stmt);
+	tree rhs2 = gimple_assign_rhs2(stmt);
+	tree lhs = gimple_assign_lhs(stmt);
+
+	if (!check_overflow)
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	if (change_rhs == NULL_TREE)
+		return create_assign(visited, stmt, lhs, AFTER_STMT);
+
+	if (new_rhs2 == NULL_TREE) {
+		orig_rhs = rhs1;
+		gimple_assign_set_rhs = &gimple_assign_set_rhs1;
+	} else {
+		orig_rhs = rhs2;
+		gimple_assign_set_rhs = &gimple_assign_set_rhs2;
+	}
+
+	check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
+
+	new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
+	gimple_assign_set_rhs(stmt, new_rhs);
+	update_stmt(stmt);
+
+	return create_assign(visited, stmt, lhs, AFTER_STMT);
+}
+
+static bool is_subtraction_special(const_gimple stmt)
+{
+	gimple rhs1_def_stmt, rhs2_def_stmt;
+	const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
+	enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
+	const_tree rhs1 = gimple_assign_rhs1(stmt);
+	const_tree rhs2 = gimple_assign_rhs2(stmt);
+
+	if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
+		return false;
+
+	gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
+
+	if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
+		return false;
+
+	rhs1_def_stmt = get_def_stmt(rhs1);
+	rhs2_def_stmt = get_def_stmt(rhs2);
+	if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
+		return false;
+
+	rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
+	rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
+	rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
+	rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
+	rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
+	rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
+	rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
+	rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
+	if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
+		return false;
+	if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
+		return false;
+
+	set_stmt_flag(rhs1_def_stmt, NO_CAST_CHECK);
+	set_stmt_flag(rhs2_def_stmt, NO_CAST_CHECK);
+	return true;
+}
+
+static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
+{
+	tree new_rhs1, new_rhs2;
+	tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
+	gimple assign, stmt = get_def_stmt(lhs);
+	tree rhs1 = gimple_assign_rhs1(stmt);
+	tree rhs2 = gimple_assign_rhs2(stmt);
+
+	if (!is_subtraction_special(stmt))
+		return NULL_TREE;
+
+	new_rhs1 = expand(visited, caller_node, rhs1);
+	new_rhs2 = expand(visited, caller_node, rhs2);
+
+	new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
+	new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
+
+	if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
+		return NULL_TREE;
+
+	if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
+		new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
+		new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
+	}
+
+	assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
+	new_lhs = gimple_assign_lhs(assign);
+	check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
+
+	return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
+{
+	const_gimple def_stmt;
+
+	if (TREE_CODE(rhs) != SSA_NAME)
+		return false;
+
+	if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
+		return false;
+
+	def_stmt = get_def_stmt(rhs);
+	if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
+		return false;
+
+	return true;
+}
+
+static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
+{
+	tree rhs1, rhs2, new_lhs;
+	gimple def_stmt = get_def_stmt(lhs);
+	tree new_rhs1 = NULL_TREE;
+	tree new_rhs2 = NULL_TREE;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+
+	/* no DImode/TImode division in the 32/64 bit kernel */
+	switch (gimple_assign_rhs_code(def_stmt)) {
+	case RDIV_EXPR:
+	case TRUNC_DIV_EXPR:
+	case CEIL_DIV_EXPR:
+	case FLOOR_DIV_EXPR:
+	case ROUND_DIV_EXPR:
+	case TRUNC_MOD_EXPR:
+	case CEIL_MOD_EXPR:
+	case FLOOR_MOD_EXPR:
+	case ROUND_MOD_EXPR:
+	case EXACT_DIV_EXPR:
+	case POINTER_PLUS_EXPR:
+	case BIT_AND_EXPR:
+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
+	default:
+		break;
+	}
+
+	new_lhs = handle_integer_truncation(visited, caller_node, lhs);
+	if (new_lhs != NULL_TREE)
+		return new_lhs;
+
+	if (TREE_CODE(rhs1) == SSA_NAME)
+		new_rhs1 = expand(visited, caller_node, rhs1);
+	if (TREE_CODE(rhs2) == SSA_NAME)
+		new_rhs2 = expand(visited, caller_node, rhs2);
+
+	if (is_a_neg_overflow(def_stmt, rhs2))
+		return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
+	if (is_a_neg_overflow(def_stmt, rhs1))
+		return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
+
+
+	if (is_a_constant_overflow(def_stmt, rhs2))
+		return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
+	if (is_a_constant_overflow(def_stmt, rhs1))
+		return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
+
+	// the const is between 0 and (signed) MAX
+	if (is_gimple_constant(rhs1))
+		new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
+	if (is_gimple_constant(rhs2))
+		new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
+
+	return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
+}
+
+#if BUILDING_GCC_VERSION >= 4006
+static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
+{
+	if (is_gimple_constant(rhs))
+		return cast_a_tree(size_overflow_type, rhs);
+	if (TREE_CODE(rhs) != SSA_NAME)
+		return NULL_TREE;
+	return expand(visited, caller_node, rhs);
+}
+
+static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
+{
+	tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
+	gimple def_stmt = get_def_stmt(lhs);
+
+	size_overflow_type = get_size_overflow_type(def_stmt, lhs);
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+	rhs3 = gimple_assign_rhs3(def_stmt);
+	new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
+	new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
+	new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
+
+	return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
+}
+#endif
+
+static tree get_size_overflow_type(gimple stmt, const_tree node)
+{
+	const_tree type;
+	tree new_type;
+
+	gcc_assert(node != NULL_TREE);
+
+	type = TREE_TYPE(node);
+
+	if (get_stmt_flag(stmt) == MY_STMT)
+		return TREE_TYPE(node);
+
+	switch (TYPE_MODE(type)) {
+	case QImode:
+		new_type = intHI_type_node;
+		break;
+	case HImode:
+		new_type = intSI_type_node;
+		break;
+	case SImode:
+		new_type = intDI_type_node;
+		break;
+	case DImode:
+		if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
+			new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
+		else
+			new_type = intTI_type_node;
+		break;
+	case TImode:
+		gcc_assert(!TYPE_UNSIGNED(type));
+		new_type = intTI_type_node;
+		break;
+	default:
+		debug_tree((tree)node);
+		error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
+		gcc_unreachable();
+	}
+
+	if (TYPE_QUALS(type) != 0)
+		return build_qualified_type(new_type, TYPE_QUALS(type));
+	return new_type;
+}
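+
+/*
+ * Widening map used by get_size_overflow_type(): QImode -> intHI, HImode ->
+ * intSI, SImode -> intDI, DImode -> intTI (kept at DImode when LONG_TYPE_SIZE
+ * is 32 bits), TImode stays intTI; qualifiers of the original type are
+ * preserved.
+ */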
+
+static tree expand_visited(gimple def_stmt)
+{
+	const_gimple next_stmt;
+	gimple_stmt_iterator gsi;
+	enum gimple_code code = gimple_code(def_stmt);
+
+	if (code == GIMPLE_ASM)
+		return NULL_TREE;
+
+	gsi = gsi_for_stmt(def_stmt);
+	gsi_next(&gsi);
+
+	if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
+		return NULL_TREE;
+	gcc_assert(!gsi_end_p(gsi));
+	next_stmt = gsi_stmt(gsi);
+
+	if (gimple_code(def_stmt) == GIMPLE_PHI && get_stmt_flag((gimple)next_stmt) != MY_STMT)
+		return NULL_TREE;
+	gcc_assert(get_stmt_flag((gimple)next_stmt) == MY_STMT);
+
+	return get_lhs(next_stmt);
+}
+
+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
+{
+	gimple def_stmt;
+
+	def_stmt = get_def_stmt(lhs);
+
+	if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
+		return NULL_TREE;
+
+	if (get_stmt_flag(def_stmt) == MY_STMT)
+		return lhs;
+
+	if (pointer_set_contains(visited, def_stmt))
+		return expand_visited(def_stmt);
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_PHI:
+		return handle_phi(visited, caller_node, lhs);
+	case GIMPLE_CALL:
+	case GIMPLE_ASM:
+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
+	case GIMPLE_ASSIGN:
+		switch (gimple_num_ops(def_stmt)) {
+		case 2:
+			return handle_unary_ops(visited, caller_node, def_stmt);
+		case 3:
+			return handle_binary_ops(visited, caller_node, lhs);
+#if BUILDING_GCC_VERSION >= 4006
+		case 4:
+			return handle_ternary_ops(visited, caller_node, lhs);
+#endif
+		}
+	default:
+		debug_gimple_stmt(def_stmt);
+		error("%s: unknown gimple code", __func__);
+		gcc_unreachable();
+	}
+}
+
+static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
+{
+	const_gimple assign;
+	tree orig_type = TREE_TYPE(orig_node);
+	gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+	assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
+	return gimple_assign_lhs(assign);
+}
+
+static void change_orig_node(struct interesting_node *cur_node, tree new_node)
+{
+	void (*set_rhs)(gimple, tree);
+	gimple stmt = cur_node->first_stmt;
+	const_tree orig_node = cur_node->node;
+
+	switch (gimple_code(stmt)) {
+	case GIMPLE_RETURN:
+		gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
+		break;
+	case GIMPLE_CALL:
+		gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
+		break;
+	case GIMPLE_ASSIGN:
+		switch (cur_node->num) {
+		case 1:
+			set_rhs = &gimple_assign_set_rhs1;
+			break;
+		case 2:
+			set_rhs = &gimple_assign_set_rhs2;
+			break;
+#if BUILDING_GCC_VERSION >= 4006
+		case 3:
+			set_rhs = &gimple_assign_set_rhs3;
+			break;
+#endif
+		default:
+			gcc_unreachable();
+		}
+
+		set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
+		break;
+	default:
+		debug_gimple_stmt(stmt);
+		gcc_unreachable();
+	}
+
+	update_stmt(stmt);
+}
+
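+// Translate argnum of the original function onto the matching argument position of the (possibly cloned) fndecl.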
+static unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
+{
+	const struct size_overflow_hash *hash;
+	unsigned int new_argnum;
+	tree arg;
+	const_tree origarg;
+
+	if (argnum == 0)
+		return argnum;
+
+	hash = get_function_hash(fndecl);
+	if (hash && hash->param & (1U << argnum))
+		return argnum;
+
+	if (DECL_EXTERNAL(fndecl))
+		return argnum;
+
+	origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl));
+	argnum--;
+	while (origarg && argnum) {
+		origarg = TREE_CHAIN(origarg);
+		argnum--;
+	}
+	gcc_assert(argnum == 0);
+	gcc_assert(origarg != NULL_TREE);
+
+	for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++)
+		if (operand_equal_p(origarg, arg, 0) || !strcmp(DECL_NAME_POINTER(origarg), DECL_NAME_POINTER(arg)))
+			return new_argnum;
+
+	return CANNOT_FIND_ARG;
+}
+
+// Don't want to duplicate entries in next_cgraph_node
+static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
+{
+	const_tree new_callee_fndecl;
+	struct next_cgraph_node *cur_node;
+
+	if (fndecl == RET_CHECK)
+		new_callee_fndecl = NODE_DECL(node);
+	else
+		new_callee_fndecl = fndecl;
+
+	for (cur_node = head; cur_node; cur_node = cur_node->next) {
+		if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
+			continue;
+		if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
+			continue;
+		if (num == cur_node->num)
+			return true;
+	}
+	return false;
+}
+
+/* Add a next_cgraph_node into the list for handle_function().
+ * handle_function()  iterates over all the next cgraph nodes and
+ * starts the overflow check insertion process.
+ */
+static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
+{
+	struct next_cgraph_node *new_node;
+
+	if (is_in_next_cgraph_node(head, node, fndecl, num))
+		return head;
+
+	new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
+	new_node->current_function = node;
+	new_node->next = NULL;
+	new_node->num = num;
+	if (fndecl == RET_CHECK)
+		new_node->callee_fndecl = NODE_DECL(node);
+	else
+		new_node->callee_fndecl = fndecl;
+
+	if (!head)
+		return new_node;
+
+	new_node->next = head;
+	return new_node;
+}
+
+static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
+{
+	struct cgraph_edge *e;
+
+	if (num == 0)
+		return create_new_next_cgraph_node(head, node, RET_CHECK, num);
+
+	for (e = node->callers; e; e = e->next_caller) {
+		tree fndecl = gimple_call_fndecl(e->call_stmt);
+
+		gcc_assert(fndecl != NULL_TREE);
+		head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
+	}
+
+	return head;
+}
+
+static bool is_a_return_check(const_tree node)
+{
+	if (TREE_CODE(node) == FUNCTION_DECL)
+		return true;
+
+	gcc_assert(TREE_CODE(node) == PARM_DECL);
+	return false;
+}
+
+static bool is_in_hash_table(const_tree fndecl, unsigned int num)
+{
+	const struct size_overflow_hash *hash;
+
+	hash = get_function_hash(fndecl);
+	if (hash && (hash->param & (1U << num)))
+		return true;
+	return false;
+}
+
+struct missing_functions {
+	struct missing_functions *next;
+	const_tree node;
+	tree fndecl;
+};
+
+static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
+{
+	struct missing_functions *new_function;
+
+	new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
+	new_function->node = node;
+	new_function->next = NULL;
+
+	if (TREE_CODE(node) == FUNCTION_DECL)
+		new_function->fndecl = node;
+	else
+		new_function->fndecl = current_function_decl;
+	gcc_assert(new_function->fndecl);
+
+	if (!missing_fn_head)
+		return new_function;
+
+	new_function->next = missing_fn_head;
+	return new_function;
+}
+
+/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
+ * If the function is missing everywhere then print the missing message into stderr.
+ */
+static bool is_missing_function(const_tree orig_fndecl, unsigned int num)
+{
+	switch (DECL_FUNCTION_CODE(orig_fndecl)) {
+#if BUILDING_GCC_VERSION >= 4008
+	case BUILT_IN_BSWAP16:
+#endif
+	case BUILT_IN_BSWAP32:
+	case BUILT_IN_BSWAP64:
+	case BUILT_IN_EXPECT:
+	case BUILT_IN_MEMCMP:
+		return false;
+	default:
+		break;
+	}
+
+	// skip test.c
+	if (strcmp(DECL_NAME_POINTER(current_function_decl), "coolmalloc")) {
+		if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl)))
+			warning(0, "unnecessary size_overflow attribute on: %s\n", DECL_NAME_POINTER(orig_fndecl));
+	}
+
+	if (is_in_hash_table(orig_fndecl, num))
+		return false;
+
+	print_missing_msg(orig_fndecl, num);
+	return true;
+}
+
+// Get the argnum of a function decl, if node is a return then the argnum is 0
+static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
+{
+	if (is_a_return_check(node))
+		return 0;
+	else
+		return find_arg_number_tree(node, orig_fndecl);
+}
+
+/* If the function is missing from the hash table and it is a static function
+ * then create a next_cgraph_node from it for handle_function()
+ */
+static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
+{
+	unsigned int num;
+	const_tree orig_fndecl;
+	struct cgraph_node *next_node = NULL;
+
+	orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
+
+	num = get_function_num(missing_fn_head->node, orig_fndecl);
+	if (num == CANNOT_FIND_ARG)
+		return cnodes;
+
+	if (!is_missing_function(orig_fndecl, num))
+		return cnodes;
+
+	next_node = cgraph_get_node(missing_fn_head->fndecl);
+	if (next_node && next_node->local.local)
+		cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
+	return cnodes;
+}
+
+/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
+ * into the next_cgraph_node list. They will be the next interesting returns or callees.
+ */
+static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
+{
+	unsigned int i;
+	tree node;
+	struct missing_functions *cur, *missing_fn_head = NULL;
+
+#if BUILDING_GCC_VERSION <= 4007
+	FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
+#else
+	FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
+#endif
+		switch (TREE_CODE(node)) {
+		case PARM_DECL:
+			if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
+				break;
+		case FUNCTION_DECL:
+			missing_fn_head = create_new_missing_function(missing_fn_head, node);
+			break;
+		default:
+			break;
+		}
+	}
+
+	while (missing_fn_head) {
+		cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
+
+		cur = missing_fn_head->next;
+		free(missing_fn_head);
+		missing_fn_head = cur;
+	}
+
+	return cnodes;
+}
+
+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
+{
+	gimple phi = get_def_stmt(result);
+	unsigned int i, n = gimple_phi_num_args(phi);
+
+	pointer_set_insert(visited, phi);
+	for (i = 0; i < n; i++) {
+		const_tree arg = gimple_phi_arg_def(phi, i);
+
+		set_conditions(visited, interesting_conditions, arg);
+	}
+}
+
+enum conditions {
+	FROM_CONST, NOT_UNARY, CAST
+};
+
+// Search for constants, cast assignments and binary/ternary assignments
+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
+{
+	gimple def_stmt = get_def_stmt(lhs);
+
+	if (is_gimple_constant(lhs)) {
+		interesting_conditions[FROM_CONST] = true;
+		return;
+	}
+
+	if (!def_stmt)
+		return;
+
+	if (pointer_set_contains(visited, def_stmt))
+		return;
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_NOP:
+	case GIMPLE_CALL:
+	case GIMPLE_ASM:
+		return;
+	case GIMPLE_PHI:
+		return walk_phi_set_conditions(visited, interesting_conditions, lhs);
+	case GIMPLE_ASSIGN:
+		if (gimple_num_ops(def_stmt) == 2) {
+			const_tree rhs = gimple_assign_rhs1(def_stmt);
+
+			if (gimple_assign_cast_p(def_stmt))
+				interesting_conditions[CAST] = true;
+
+			return set_conditions(visited, interesting_conditions, rhs);
+		} else {
+			interesting_conditions[NOT_UNARY] = true;
+			return;
+		}
+	default:
+		debug_gimple_stmt(def_stmt);
+		gcc_unreachable();
+	}
+}
+
+// determine whether duplication will be necessary or not.
+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
+{
+	struct pointer_set_t *visited;
+
+	if (gimple_assign_cast_p(cur_node->first_stmt))
+		interesting_conditions[CAST] = true;
+	else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
+		interesting_conditions[NOT_UNARY] = true;
+
+	visited = pointer_set_create();
+	set_conditions(visited, interesting_conditions, cur_node->node);
+	pointer_set_destroy(visited);
+}
+
+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
+{
+	gimple assign;
+	gimple_stmt_iterator gsi;
+
+	// already removed
+	if (gimple_bb(asm_stmt) == NULL)
+		return;
+	gsi = gsi_for_stmt(asm_stmt);
+
+	assign = gimple_build_assign(lhs, rhs);
+	gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
+	SSA_NAME_DEF_STMT(lhs) = assign;
+
+	gsi_remove(&gsi, true);
+}
+
+// Get the field decl of a component ref for intentional_overflow checking
+static const_tree search_field_decl(const_tree comp_ref)
+{
+	const_tree field = NULL_TREE;
+	unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
+
+	for (i = 0; i < len; i++) {
+		field = TREE_OPERAND(comp_ref, i);
+		if (TREE_CODE(field) == FIELD_DECL)
+			break;
+	}
+	gcc_assert(TREE_CODE(field) == FIELD_DECL);
+	return field;
+}
+
+/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
+ * stmt is a return otherwise it is the callee function.
+ */
+static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
+{
+	const_tree fndecl;
+
+	if (argnum == 0)
+		fndecl = current_function_decl;
+	else
+		fndecl = gimple_call_fndecl(stmt);
+
+	if (fndecl == NULL_TREE)
+		return NULL_TREE;
+
+	return DECL_ORIGIN(fndecl);
+}
+
+/* Get the param of the intentional_overflow attribute.
+ *   * 0: MARK_NOT_INTENTIONAL
+ *   * 1..MAX_PARAM: MARK_YES
+ *   * -1: MARK_TURN_OFF
+ */
+static tree get_attribute_param(const_tree decl)
+{
+	const_tree attr;
+
+	if (decl == NULL_TREE)
+		return NULL_TREE;
+
+	attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
+	if (!attr || !TREE_VALUE(attr))
+		return NULL_TREE;
+
+	return TREE_VALUE(attr);
+}
+
+// MARK_TURN_OFF
+static bool is_turn_off_intentional_attr(const_tree decl)
+{
+	const_tree param_head;
+
+	param_head = get_attribute_param(decl);
+	if (param_head == NULL_TREE)
+		return false;
+
+	if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
+		return true;
+	return false;
+}
+
+// MARK_NOT_INTENTIONAL
+static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
+{
+	const_tree param_head;
+
+	if (argnum == 0)
+		return false;
+
+	param_head = get_attribute_param(decl);
+	if (param_head == NULL_TREE)
+		return false;
+
+	if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
+		return true;
+	return false;
+}
+
+// MARK_YES
+static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
+{
+	tree param, param_head;
+
+	if (argnum == 0)
+		return false;
+
+	param_head = get_attribute_param(decl);
+	for (param = param_head; param; param = TREE_CHAIN(param))
+		if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
+			return true;
+	return false;
+}
+
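+// Return the asm template string of stmt if it is a GIMPLE_ASM, otherwise NULL.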
+static const char *get_asm_string(const_gimple stmt)
+{
+	if (!stmt)
+		return NULL;
+	if (gimple_code(stmt) != GIMPLE_ASM)
+		return NULL;
+
+	return gimple_asm_string(stmt);
+}
+
+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
+{
+	const char *str;
+
+	str = get_asm_string(stmt);
+	if (!str)
+		return false;
+	return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
+}
+
+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
+{
+	const char *str;
+
+	str = get_asm_string(stmt);
+	if (!str)
+		return false;
+	return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
+}
+
+static bool is_size_overflow_asm(const_gimple stmt)
+{
+	const char *str;
+
+	str = get_asm_string(stmt);
+	if (!str)
+		return false;
+	return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
+}
+
+static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
+{
+	location_t loc;
+
+	if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
+		return;
+
+	if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
+		return;
+
+	loc = DECL_SOURCE_LOCATION(decl);
+	inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
+}
+
+/* Get the type of the intentional_overflow attribute of a node
+ *  * MARK_TURN_OFF
+ *  * MARK_YES
+ *  * MARK_NO
+ *  * MARK_NOT_INTENTIONAL
+ */
+static enum mark get_intentional_attr_type(const_tree node)
+{
+	const_tree cur_decl;
+
+	if (node == NULL_TREE)
+		return MARK_NO;
+
+	switch (TREE_CODE(node)) {
+	case COMPONENT_REF:
+		cur_decl = search_field_decl(node);
+		if (is_turn_off_intentional_attr(cur_decl))
+			return MARK_TURN_OFF;
+		if (is_end_intentional_intentional_attr(cur_decl, 1))
+			return MARK_YES;
+		break;
+	case PARM_DECL: {
+		unsigned int argnum;
+
+		cur_decl = DECL_ORIGIN(current_function_decl);
+		argnum = find_arg_number_tree(node, cur_decl);
+		if (argnum == CANNOT_FIND_ARG)
+			return MARK_NO;
+		if (is_yes_intentional_attr(cur_decl, argnum))
+			return MARK_YES;
+		if (is_end_intentional_intentional_attr(cur_decl, argnum))
+			return MARK_NOT_INTENTIONAL;
+		break;
+	}
+	case FUNCTION_DECL:
+		if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
+			return MARK_TURN_OFF;
+		break;
+	default:
+		break;
+	}
+	return MARK_NO;
+}
+
+// Search for the intentional_overflow attribute on the last nodes
+static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
+{
+	unsigned int i;
+	tree last_node;
+	enum mark mark = MARK_NO;
+
+#if BUILDING_GCC_VERSION <= 4007
+	FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
+#else
+	FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
+#endif
+		mark = get_intentional_attr_type(last_node);
+		if (mark != MARK_NO)
+			break;
+	}
+	return mark;
+}
+
+/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
+ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
+ */
+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
+{
+	if (!cur_node->intentional_mark_from_gimple)
+		return false;
+
+	if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
+		cur_node->intentional_attr_cur_fndecl = MARK_YES;
+	else
+		cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
+
+	// skip param decls
+	if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
+		return true;
+	return true;
+}
+
+/* Search intentional_overflow attribute on caller and on callee too.
+ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes
+ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
+ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
+*/
+static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
+{
+	const_tree fndecl;
+
+	if (is_intentional_attribute_from_gimple(cur_node))
+		return;
+
+	if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
+		cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
+		return;
+	}
+
+	if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
+		cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
+		return;
+	}
+
+	if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
+		return;
+
+	fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
+	if (is_turn_off_intentional_attr(fndecl)) {
+		cur_node->intentional_attr_decl = MARK_TURN_OFF;
+		return;
+	}
+
+	if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
+		cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
+	else if (is_yes_intentional_attr(fndecl, cur_node->num))
+		cur_node->intentional_attr_decl = MARK_YES;
+
+	cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
+	print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
+}
+
+// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
+static bool skip_asm(const_tree arg)
+{
+	gimple def_stmt = get_def_stmt(arg);
+
+	if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+		return false;
+
+	def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
+	return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
+}
+
+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
+{
+	gimple phi = get_def_stmt(result);
+	unsigned int i, n = gimple_phi_num_args(phi);
+
+	pointer_set_insert(visited, phi);
+	for (i = 0; i < n; i++) {
+		tree arg = gimple_phi_arg_def(phi, i);
+
+		walk_use_def(visited, cur_node, arg);
+	}
+}
+
+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
+{
+	gimple def_stmt = get_def_stmt(lhs);
+	tree rhs1, rhs2;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+
+	walk_use_def(visited, cur_node, rhs1);
+	walk_use_def(visited, cur_node, rhs2);
+}
+
+static void insert_last_node(struct interesting_node *cur_node, tree node)
+{
+	unsigned int i;
+	tree element;
+	enum tree_code code;
+
+	gcc_assert(node != NULL_TREE);
+
+	if (is_gimple_constant(node))
+		return;
+
+	code = TREE_CODE(node);
+	if (code == VAR_DECL) {
+		node = DECL_ORIGIN(node);
+		code = TREE_CODE(node);
+	}
+
+	if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
+		return;
+
+#if BUILDING_GCC_VERSION <= 4007
+	FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
+#else
+	FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
+#endif
+		if (operand_equal_p(node, element, 0))
+			return;
+	}
+
+#if BUILDING_GCC_VERSION <= 4007
+	gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
+	VEC_safe_push(tree, gc, cur_node->last_nodes, node);
+#else
+	gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
+	vec_safe_push(cur_node->last_nodes, node);
+#endif
+}
+
+// a size_overflow asm stmt in the control flow doesn't stop the recursion
+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
+{
+	if (!is_size_overflow_asm(stmt))
+		walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+}
+
+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
+ * and component refs (for checking the intentional_overflow attribute).
+ */
+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
+{
+	const_gimple def_stmt;
+
+	if (TREE_CODE(lhs) != SSA_NAME) {
+		insert_last_node(cur_node, lhs);
+		return;
+	}
+
+	def_stmt = get_def_stmt(lhs);
+	if (!def_stmt)
+		return;
+
+	if (pointer_set_insert(visited, def_stmt))
+		return;
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_NOP:
+		return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+	case GIMPLE_ASM:
+		return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
+	case GIMPLE_CALL: {
+		tree fndecl = gimple_call_fndecl(def_stmt);
+
+		if (fndecl == NULL_TREE)
+			return;
+		insert_last_node(cur_node, fndecl);
+		return;
+	}
+	case GIMPLE_PHI:
+		return walk_use_def_phi(visited, cur_node, lhs);
+	case GIMPLE_ASSIGN:
+		switch (gimple_num_ops(def_stmt)) {
+		case 2:
+			return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
+		case 3:
+			return walk_use_def_binary(visited, cur_node, lhs);
+		}
+	default:
+		debug_gimple_stmt((gimple)def_stmt);
+		error("%s: unknown gimple code", __func__);
+		gcc_unreachable();
+	}
+}
+
+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
+static void set_last_nodes(struct interesting_node *cur_node)
+{
+	struct pointer_set_t *visited;
+
+	visited = pointer_set_create();
+	walk_use_def(visited, cur_node, cur_node->node);
+	pointer_set_destroy(visited);
+}
+
+enum precond {
+	NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
+};
+
+/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
+ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
+ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume that it is some kind of error code.
+ */
+static enum precond check_preconditions(struct interesting_node *cur_node)
+{
+	bool interesting_conditions[3] = {false, false, false};
+
+	set_last_nodes(cur_node);
+
+	check_intentional_attribute_ipa(cur_node);
+	if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
+		return NO_ATTRIBUTE_SEARCH;
+
+	search_interesting_conditions(cur_node, interesting_conditions);
+
+	// error code
+	if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
+		return NO_ATTRIBUTE_SEARCH;
+
+	// unnecessary overflow check
+	if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
+		return NO_CHECK_INSERT;
+
+	if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
+		return NO_CHECK_INSERT;
+
+	return NONE;
+}
+
+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
+ */
+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
+{
+	enum precond ret;
+	struct pointer_set_t *visited;
+	tree new_node, orig_node = cur_node->node;
+
+	ret = check_preconditions(cur_node);
+	if (ret == NO_ATTRIBUTE_SEARCH)
+		return cnodes;
+
+	cnodes = search_overflow_attribute(cnodes, cur_node);
+
+	if (ret == NO_CHECK_INSERT)
+		return cnodes;
+
+	visited = pointer_set_create();
+	new_node = expand(visited, caller_node, orig_node);
+	pointer_set_destroy(visited);
+
+	if (new_node == NULL_TREE)
+		return cnodes;
+
+	change_orig_node(cur_node, new_node);
+	check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
+
+	return cnodes;
+}
+
+// Check visited interesting nodes.
+static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
+{
+	struct interesting_node *cur;
+
+	for (cur = head; cur; cur = cur->next) {
+		if (!operand_equal_p(node, cur->node, 0))
+			continue;
+		if (num != cur->num)
+			continue;
+		if (first_stmt == cur->first_stmt)
+			return true;
+	}
+	return false;
+}
+
+/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
+   first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
+   last_nodes: they are the last stmts in the recursion (they have no def_stmt). They are useful in the missing size_overflow attribute check and
+               the intentional_overflow attribute check. They are collected by set_last_nodes().
+   num: arg count of a call stmt or 0 when it is a ret
+   node: the recursion starts from here, it is a call arg or a return value
+   fndecl: the fndecl of the interesting node; when the node is an arg it is the fndecl of the callee function, otherwise it is the fndecl of the caller (current_function_decl)
+   intentional_attr_decl: intentional_overflow attribute of the callee function
+   intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
+   intentional_mark_from_gimple: the intentional overflow type of the size_overflow asm stmt from gimple if it exists
+ */
+static struct interesting_node *create_new_interesting_node(struct interesting_node
*head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
+{
+	struct interesting_node *new_node;
+	tree fndecl;
+	enum gimple_code code;
+
+	gcc_assert(node != NULL_TREE);
+	code = gimple_code(first_stmt);
+	gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
+
+	if (num == CANNOT_FIND_ARG)
+		return head;
+
+	if (skip_types(node))
+		return head;
+
+	if (skip_asm(node))
+		return head;
+
+	if (is_gimple_call(first_stmt))
+		fndecl = gimple_call_fndecl(first_stmt);
+	else
+		fndecl = current_function_decl;
+
+	if (fndecl == NULL_TREE)
+		return head;
+
+	if (is_in_interesting_node(head, first_stmt, node, num))
+		return head;
+
+	new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
+
+	new_node->next = NULL;
+	new_node->first_stmt = first_stmt;
+#if BUILDING_GCC_VERSION <= 4007
+	new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
+#else
+	vec_alloc(new_node->last_nodes, VEC_LEN);
+#endif
+	new_node->num = num;
+	new_node->node = node;
+	new_node->fndecl = fndecl;
+	new_node->intentional_attr_decl = MARK_NO;
+	new_node->intentional_attr_cur_fndecl = MARK_NO;
+	new_node->intentional_mark_from_gimple = asm_stmt;
+
+	if (!head)
+		return new_node;
+
+	new_node->next = head;
+	return new_node;
+}
+
+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
+ */
+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
+{
+	struct next_cgraph_node *cur_node;
+	tree ret = gimple_return_retval(stmt);
+
+	if (ret == NULL_TREE)
+		return head;
+
+	for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
+		if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
+			continue;
+		if (cur_node->num == 0)
+			head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
+	}
+
+	return head;
+}
+
+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the call stmt is in the next cgraph node list then it's an interesting call.
+ */
+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
+{
+	unsigned int argnum;
+	tree arg;
+	const_tree fndecl;
+	struct next_cgraph_node *cur_node;
+
+	fndecl = gimple_call_fndecl(stmt);
+	if (fndecl == NULL_TREE)
+		return head;
+
+	for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
+		if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
+			continue;
+		argnum = get_correct_arg_count(cur_node->num, fndecl);
+		gcc_assert(argnum != CANNOT_FIND_ARG);
+		if (argnum == 0)
+			continue;
+
+		arg = gimple_call_arg(stmt, argnum - 1);
+		head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
+	}
+
+	return head;
+}
+
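+// Return ret_count when node is the given rhs operand and its type is not skipped, otherwise WRONG_NODE.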
+static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
+{
+	if (!operand_equal_p(orig_node, node, 0))
+		return WRONG_NODE;
+	if (skip_types(node))
+		return WRONG_NODE;
+	return ret_count;
+}
+
+// Get the index of the rhs node in an assignment
+static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
+{
+	const_tree rhs1, rhs2;
+	unsigned int ret;
+
+	gcc_assert(stmt);
+	gcc_assert(is_gimple_assign(stmt));
+
+	rhs1 = gimple_assign_rhs1(stmt);
+	gcc_assert(rhs1 != NULL_TREE);
+
+	switch (gimple_num_ops(stmt)) {
+	case 2:
+		return check_ops(node, rhs1, 1);
+	case 3:
+		ret = check_ops(node, rhs1, 1);
+		if (ret != WRONG_NODE)
+			return ret;
+
+		rhs2 = gimple_assign_rhs2(stmt);
+		gcc_assert(rhs2 != NULL_TREE);
+		return check_ops(node, rhs2, 2);
+	default:
+		gcc_unreachable();
+	}
+}
+
+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
+{
+	unsigned int i;
+
+	if (gimple_call_fndecl(stmt) == NULL_TREE)
+		return CANNOT_FIND_ARG;
+
+	for (i = 0; i < gimple_call_num_args(stmt); i++) {
+		tree node;
+
+		node = gimple_call_arg(stmt, i);
+		if (!operand_equal_p(arg, node, 0))
+			continue;
+		if (!skip_types(node))
+			return i + 1;
+	}
+
+	return CANNOT_FIND_ARG;
+}
+
+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
+ * any of return, call or assignment stmts (because of inlining).
+ */
+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
+{
+	use_operand_p use_p;
+	imm_use_iterator imm_iter;
+	unsigned int argnum;
+
+	gcc_assert(TREE_CODE(node) == SSA_NAME);
+
+	if (pointer_set_insert(visited, node))
+		return head;
+
+	FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
+		gimple stmt = USE_STMT(use_p);
+
+		if (stmt == NULL)
+			return head;
+		if (is_gimple_debug(stmt))
+			continue;
+
+		switch (gimple_code(stmt)) {
+		case GIMPLE_CALL:
+			argnum = find_arg_number_gimple(node, stmt);
+			head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+			break;
+		case GIMPLE_RETURN:
+			head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
+			break;
+		case GIMPLE_ASSIGN:
+			argnum = get_assign_ops_count(stmt, node);
+			head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+			break;
+		case GIMPLE_PHI: {
+			tree result = gimple_phi_result(stmt);
+			head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
+			break;
+		}
+		case GIMPLE_ASM:
+			if (gimple_asm_noutputs(stmt) != 0)
+				break;
+			if (!is_size_overflow_asm(stmt))
+				break;
+			head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
+			break;
+		case GIMPLE_COND:
+		case GIMPLE_SWITCH:
+			break;
+		default:
+			debug_gimple_stmt(stmt);
+			gcc_unreachable();
+			break;
+		}
+	}
+	return head;
+}
+
+static void remove_size_overflow_asm(gimple stmt)
+{
+	gimple_stmt_iterator gsi;
+	tree input, output;
+
+	if (!is_size_overflow_asm(stmt))
+		return;
+
+	if (gimple_asm_noutputs(stmt) == 0) {
+		gsi = gsi_for_stmt(stmt);
+		ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
+		gsi_remove(&gsi, true);
+		return;
+	}
+
+	input = gimple_asm_input_op(stmt, 0);
+	output = gimple_asm_output_op(stmt, 0);
+	replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
+}
+
+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
+ */
+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
+{
+	const_tree output;
+	struct pointer_set_t *visited;
+	gimple intentional_asm = NOT_INTENTIONAL_ASM;
+
+	if (!is_size_overflow_asm(stmt))
+		return head;
+
+	if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
+		intentional_asm = stmt;
+
+	gcc_assert(gimple_asm_ninputs(stmt) == 1);
+
+	if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
+		return head;
+
+	if (gimple_asm_noutputs(stmt) == 0) {
+		const_tree input;
+
+		if (!is_size_overflow_intentional_asm_turn_off(stmt))
+			return head;
+
+		input = gimple_asm_input_op(stmt, 0);
+		remove_size_overflow_asm(stmt);
+		if (is_gimple_constant(TREE_VALUE(input)))
+			return head;
+		visited = pointer_set_create();
+		head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
+		pointer_set_destroy(visited);
+		return head;
+	}
+
+	if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
+		remove_size_overflow_asm(stmt);
+
+	visited = pointer_set_create();
+	output = gimple_asm_output_op(stmt, 0);
+	head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
+	pointer_set_destroy(visited);
+	return head;
+}
+
+/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
+ * or a call stmt or a return stmt and store them in the interesting_node list
+ */
+static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
+{
+	basic_block bb;
+	struct interesting_node *head = NULL;
+
+	FOR_ALL_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			enum gimple_code code;
+			gimple stmt = gsi_stmt(gsi);
+
+			code = gimple_code(stmt);
+
+			if (code == GIMPLE_ASM)
+				head = handle_stmt_by_size_overflow_asm(stmt, head);
+
+			if (!next_node)
+				continue;
+			if (code == GIMPLE_CALL)
+				head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
+			if (code == GIMPLE_RETURN)
+				head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
+		}
+	}
+	return head;
+}
+
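+// Enter fndecl: push its struct function, compute dominance info and set current_function_decl.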
+static void set_current_function_decl(tree fndecl)
+{
+	gcc_assert(fndecl != NULL_TREE);
+
+	push_cfun(DECL_STRUCT_FUNCTION(fndecl));
+	calculate_dominance_info(CDI_DOMINATORS);
+	current_function_decl = fndecl;
+}
+
+static void unset_current_function_decl(void)
+{
+	free_dominance_info(CDI_DOMINATORS);
+	pop_cfun();
+	current_function_decl = NULL_TREE;
+}
+
+static void free_interesting_node(struct interesting_node *head)
+{
+	struct interesting_node *cur;
+
+	while (head) {
+		cur = head->next;
+#if BUILDING_GCC_VERSION <= 4007
+		VEC_free(tree, gc, head->last_nodes);
+#else
+		vec_free(head->last_nodes);
+#endif
+		free(head);
+		head = cur;
+	}
+}
+
+static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
+{
+	struct visited *new_visited;
+
+	new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
+	new_visited->fndecl = cur_node->fndecl;
+	new_visited->num = cur_node->num;
+	new_visited->next = NULL;
+
+	if (!head)
+		return new_visited;
+
+	new_visited->next = head;
+	return new_visited;
+}
+
+/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
+ * it is a visited function.
+ */
+static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
+{
+	struct visited *cur;
+
+	if (!head)
+		return false;
+
+	if (get_stmt_flag(cur_node->first_stmt) != VISITED_STMT)
+		return false;
+
+	for (cur = head; cur; cur = cur->next) {
+		if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
+			continue;
+		if (cur_node->num == cur->num)
+			return true;
+	}
+	return false;
+}
+
+static void free_next_cgraph_node(struct next_cgraph_node *head)
+{
+	struct next_cgraph_node *cur;
+
+	while (head) {
+		cur = head->next;
+		free(head);
+		head = cur;
+	}
+}
+
+static void remove_all_size_overflow_asm(void)
+{
+	basic_block bb;
+
+	FOR_ALL_BB_FN(bb, cfun) {
+		gimple_stmt_iterator si;
+
+		for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
+			remove_size_overflow_asm(gsi_stmt(si));
+	}
+}
+
+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
+ * the newly collected interesting functions (they are interesting if there is control flow between
+ * the interesting stmts and them).
+ */
+static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
+{
+	struct interesting_node *head, *cur_node;
+	struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
+
+	set_current_function_decl(NODE_DECL(node));
+	call_count = 0;
+
+	head = collect_interesting_stmts(next_node);
+
+	for (cur_node = head; cur_node; cur_node = cur_node->next) {
+		if (is_visited_function(visited, cur_node))
+			continue;
+		cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
+		set_stmt_flag(cur_node->first_stmt, VISITED_STMT);
+		visited = insert_visited_function(visited, cur_node);
+	}
+
+	free_interesting_node(head);
+	remove_all_size_overflow_asm();
+	unset_current_function_decl();
+
+	for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
+		visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
+
+	free_next_cgraph_node(cnodes_head);
+	return visited;
+}
+
+static void free_visited(struct visited *head)
+{
+	struct visited *cur;
+
+	while (head) {
+		cur = head->next;
+		free(head);
+		head = cur;
+	}
+}
+
+// erase the local flag
+static void set_plf_false(void)
+{
+	basic_block bb;
+
+	FOR_ALL_BB_FN(bb, cfun) {
+		gimple_stmt_iterator si;
+
+		for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
+			set_stmt_flag(gsi_stmt(si), NO_FLAGS);
+		for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
+			set_stmt_flag(gsi_stmt(si), NO_FLAGS);
+	}
+}
+
+// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
+static unsigned int search_function(void)
+{
+	struct cgraph_node *node;
+	struct visited *visited = NULL;
+
+	FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
+		set_current_function_decl(NODE_DECL(node));
+		set_plf_false();
+		unset_current_function_decl();
+	}
+
+	FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
+		gcc_assert(cgraph_function_flags_ready);
+#if BUILDING_GCC_VERSION <= 4007
+		gcc_assert(node->reachable);
+#endif
+
+		visited = handle_function(node, NULL, visited);
+	}
+
+	free_visited(visited);
+	return 0;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data ipa_pass_data = {
+#else
+static struct ipa_opt_pass_d ipa_pass = {
+	.pass = {
+#endif
+		.type			= SIMPLE_IPA_PASS,
+		.name			= "size_overflow",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= search_function,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
+#if BUILDING_GCC_VERSION < 4009
+	},
+	.generate_summary		= NULL,
+	.write_summary			= NULL,
+	.read_summary			= NULL,
+#if BUILDING_GCC_VERSION >= 4006
+	.write_optimization_summary	= NULL,
+	.read_optimization_summary	= NULL,
+#endif
+	.stmt_fixup			= NULL,
+	.function_transform_todo_flags_start		= 0,
+	.function_transform		= NULL,
+	.variable_transform		= NULL,
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class ipa_pass : public ipa_opt_pass_d {
+public:
+	ipa_pass() : ipa_opt_pass_d(ipa_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
+	unsigned int execute() { return search_function(); }
+};
+}
+#endif
+
+static struct opt_pass *make_ipa_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new ipa_pass();
+#else
+	return &ipa_pass.pass;
+#endif
+}
+
+// data for the size_overflow asm stmt
+struct asm_data {
+	gimple def_stmt;
+	tree input;
+	tree output;
+};
+
+#if BUILDING_GCC_VERSION <= 4007
+static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
+#else
+static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
+#endif
+{
+	tree list;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, gc) *vec_list = NULL;
+#else
+	vec<tree, va_gc> *vec_list = NULL;
+#endif
+
+	list = build_tree_list(NULL_TREE, string);
+	list = chainon(NULL_TREE, build_tree_list(list, io));
+#if BUILDING_GCC_VERSION <= 4007
+	VEC_safe_push(tree, gc, vec_list, list);
+#else
+	vec_safe_push(vec_list, list);
+#endif
+	return vec_list;
+}
+
+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
+{
+	gimple asm_stmt;
+	gimple_stmt_iterator gsi;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, gc) *input, *output = NULL;
+#else
+	vec<tree, va_gc> *input, *output = NULL;
+#endif
+
+	input = create_asm_io_list(str_input, asm_data->input);
+
+	if (asm_data->output)
+		output = create_asm_io_list(str_output, asm_data->output);
+
+	asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
+	gsi = gsi_for_stmt(asm_data->def_stmt);
+	gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
+
+	if (asm_data->output)
+		SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
+}
+
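+// Make the call/assign def_stmt define the new asm input SSA name instead of its original lhs.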
+static void replace_call_lhs(const struct asm_data *asm_data)
+{
+	gimple_set_lhs(asm_data->def_stmt, asm_data->input);
+	update_stmt(asm_data->def_stmt);
+	SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
+}
+
+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
+{
+	enum mark cur_fndecl_attr;
+	gimple phi = get_def_stmt(result);
+	unsigned int i, n = gimple_phi_num_args(phi);
+
+	pointer_set_insert(visited, phi);
+	for (i = 0; i < n; i++) {
+		tree arg = gimple_phi_arg_def(phi, i);
+
+		cur_fndecl_attr = search_intentional(visited, arg);
+		if (cur_fndecl_attr != MARK_NO)
+			return cur_fndecl_attr;
+	}
+	return MARK_NO;
+}
+
+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
+{
+	enum mark cur_fndecl_attr;
+	const_tree rhs1, rhs2;
+	gimple def_stmt = get_def_stmt(lhs);
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+
+	cur_fndecl_attr = search_intentional(visited, rhs1);
+	if (cur_fndecl_attr != MARK_NO)
+		return cur_fndecl_attr;
+	return search_intentional(visited, rhs2);
+}
+
+// Look up the intentional_overflow attribute on the caller and the callee functions.
+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
+{
+	const_gimple def_stmt;
+
+	if (TREE_CODE(lhs) != SSA_NAME)
+		return get_intentional_attr_type(lhs);
+
+	def_stmt = get_def_stmt(lhs);
+	if (!def_stmt)
+		return MARK_NO;
+
+	if (pointer_set_contains(visited, def_stmt))
+		return MARK_NO;
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_NOP:
+		return search_intentional(visited, SSA_NAME_VAR(lhs));
+	case GIMPLE_ASM:
+		if (is_size_overflow_intentional_asm_turn_off(def_stmt))
+			return MARK_TURN_OFF;
+		return MARK_NO;
+	case GIMPLE_CALL:
+		return MARK_NO;
+	case GIMPLE_PHI:
+		return search_intentional_phi(visited, lhs);
+	case GIMPLE_ASSIGN:
+		switch (gimple_num_ops(def_stmt)) {
+		case 2:
+			return search_intentional(visited, gimple_assign_rhs1(def_stmt));
+		case 3:
+			return search_intentional_binary(visited, lhs);
+		}
+	case GIMPLE_RETURN:
+		return MARK_NO;
+	default:
+		debug_gimple_stmt((gimple)def_stmt);
+		error("%s: unknown gimple code", __func__);
+		gcc_unreachable();
+	}
+}
+
+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
+{
+	const_tree fndecl;
+	struct pointer_set_t *visited;
+	enum mark cur_fndecl_attr, decl_attr = MARK_NO;
+
+	fndecl = get_interesting_orig_fndecl(stmt, argnum);
+	if (is_end_intentional_intentional_attr(fndecl, argnum))
+		decl_attr = MARK_NOT_INTENTIONAL;
+	else if (is_yes_intentional_attr(fndecl, argnum))
+		decl_attr = MARK_YES;
+	else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
+		return MARK_TURN_OFF;
+	}
+
+	visited = pointer_set_create();
+	cur_fndecl_attr = search_intentional(visited, arg);
+	pointer_set_destroy(visited);
+
+	switch (cur_fndecl_attr) {
+	case MARK_NO:
+	case MARK_TURN_OFF:
+		return cur_fndecl_attr;
+	default:
+		print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
+		return MARK_YES;
+	}
+}
+
+static void check_missing_size_overflow_attribute(tree var)
+{
+	tree orig_fndecl;
+	unsigned int num;
+
+	if (is_a_return_check(var))
+		orig_fndecl = DECL_ORIGIN(var);
+	else
+		orig_fndecl = DECL_ORIGIN(current_function_decl);
+
+	num = get_function_num(var, orig_fndecl);
+	if (num == CANNOT_FIND_ARG)
+		return;
+
+	is_missing_function(orig_fndecl, num);
+}
+
+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
+{
+	gimple phi = get_def_stmt(result);
+	unsigned int i, n = gimple_phi_num_args(phi);
+
+	pointer_set_insert(visited, phi);
+	for (i = 0; i < n; i++) {
+		tree arg = gimple_phi_arg_def(phi, i);
+
+		search_size_overflow_attribute(visited, arg);
+	}
+}
+
+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
+{
+	const_gimple def_stmt = get_def_stmt(lhs);
+	tree rhs1, rhs2;
+
+	rhs1 = gimple_assign_rhs1(def_stmt);
+	rhs2 = gimple_assign_rhs2(def_stmt);
+
+	search_size_overflow_attribute(visited, rhs1);
+	search_size_overflow_attribute(visited, rhs2);
+}
+
+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
+{
+	const_gimple def_stmt;
+
+	if (TREE_CODE(lhs) == PARM_DECL) {
+		check_missing_size_overflow_attribute(lhs);
+		return;
+	}
+
+	def_stmt = get_def_stmt(lhs);
+	if (!def_stmt)
+		return;
+
+	if (pointer_set_insert(visited, def_stmt))
+		return;
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_NOP:
+		return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
+	case GIMPLE_ASM:
+		return;
+	case GIMPLE_CALL: {
+		tree fndecl = gimple_call_fndecl(def_stmt);
+
+		if (fndecl == NULL_TREE)
+			return;
+		check_missing_size_overflow_attribute(fndecl);
+		return;
+	}
+	case GIMPLE_PHI:
+		return search_size_overflow_attribute_phi(visited, lhs);
+	case GIMPLE_ASSIGN:
+		switch (gimple_num_ops(def_stmt)) {
+		case 2:
+			return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
+		case 3:
+			return search_size_overflow_attribute_binary(visited, lhs);
+		}
+	default:
+		debug_gimple_stmt((gimple)def_stmt);
+		error("%s: unknown gimple code", __func__);
+		gcc_unreachable();
+	}
+}
+
+// Search missing entries in the hash table (invoked from the gimple pass)
+static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
+{
+	tree fndecl = NULL_TREE;
+	tree lhs;
+	struct pointer_set_t *visited;
+
+	if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
+		return;
+
+	if (num == 0) {
+		gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
+		lhs = gimple_return_retval(stmt);
+	} else {
+		gcc_assert(is_gimple_call(stmt));
+		lhs = gimple_call_arg(stmt, num - 1);
+		fndecl = gimple_call_fndecl(stmt);
+	}
+
+	if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
+		return;
+
+	visited = pointer_set_create();
+	search_size_overflow_attribute(visited, lhs);
+	pointer_set_destroy(visited);
+}
+
+static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
+{
+	gimple_stmt_iterator gsi;
+	gimple assign;
+
+	assign = gimple_build_assign(asm_data->input, asm_data->output);
+	gsi = gsi_for_stmt(stmt);
+	gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+	asm_data->def_stmt = assign;
+
+	asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
+	asm_data->output = make_ssa_name(asm_data->output, stmt);
+	if (gimple_code(stmt) == GIMPLE_RETURN)
+		gimple_return_set_retval(stmt, asm_data->output);
+	else
+		gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
+	update_stmt(stmt);
+}
+
+static char *create_asm_comment(unsigned int argnum, const_gimple stmt, const char *mark_str)
+{
+	const char *fn_name;
+	char *asm_comment;
+	unsigned int len;
+
+	if (argnum == 0)
+		fn_name = DECL_NAME_POINTER(current_function_decl);
+	else
+		fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
+
+	len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
+	gcc_assert(len > 0);
+
+	return asm_comment;
+}
+
+static const char *convert_mark_to_str(enum mark mark)
+{
+	switch (mark) {
+	case MARK_NO:
+		return OK_ASM_STR;
+	case MARK_YES:
+	case MARK_NOT_INTENTIONAL:
+		return YES_ASM_STR;
+	case MARK_TURN_OFF:
+		return TURN_OFF_ASM_STR;
+	}
+
+	gcc_unreachable();
+}
+
+/* Create the input of the size_overflow asm stmt.
+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
+ *   __asm__("# size_overflow MARK_YES" :  : "rm" size_1(D));
+ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
+ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
+ */
+static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
+{
+	if (!asm_data->def_stmt) {
+		asm_data->input = NULL_TREE;
+		return;
+	}
+
+	asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
+	asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
+
+	switch (gimple_code(asm_data->def_stmt)) {
+	case GIMPLE_ASSIGN:
+	case GIMPLE_CALL:
+		replace_call_lhs(asm_data);
+		break;
+	case GIMPLE_PHI:
+		create_output_from_phi(stmt, argnum, asm_data);
+		break;
+	case GIMPLE_NOP: {
+		enum mark mark;
+		const char *mark_str;
+		char *asm_comment;
+
+		mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
+
+		asm_data->input = asm_data->output;
+		asm_data->output = NULL;
+		asm_data->def_stmt = stmt;
+
+		mark_str = convert_mark_to_str(mark);
+		asm_comment = create_asm_comment(argnum, stmt, mark_str);
+
+		create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data);
+		free(asm_comment);
+		asm_data->input = NULL_TREE;
+		break;
+	}
+	case GIMPLE_ASM:
+		if (is_size_overflow_asm(asm_data->def_stmt)) {
+			asm_data->input = NULL_TREE;
+			break;
+		}
+	default:
+		debug_gimple_stmt(asm_data->def_stmt);
+		gcc_unreachable();
+	}
+}
+
+/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
+ * is of the right kind create the appropriate size_overflow asm stmts:
+ *   __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
+ *   __asm__("# size_overflow MARK_YES" :  : "rm" size_1(D));
+ */
+static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
+{
+	struct asm_data asm_data;
+	const char *mark_str;
+	char *asm_comment;
+	enum mark mark;
+
+	if (is_gimple_constant(output_node))
+		return;
+
+	asm_data.output = output_node;
+	mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
+	if (mark != MARK_TURN_OFF)
+		search_missing_size_overflow_attribute_gimple(stmt, argnum);
+
+	asm_data.def_stmt = get_def_stmt(asm_data.output);
+	if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
+		return;
+
+	create_asm_input(stmt, argnum, &asm_data);
+	if (asm_data.input == NULL_TREE)
+		return;
+
+	mark_str = convert_mark_to_str(mark);
+	asm_comment = create_asm_comment(argnum, stmt, mark_str);
+	create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+	free(asm_comment);
+}
+
+// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
+static bool create_mark_asm(gimple stmt, enum mark mark)
+{
+	struct asm_data asm_data;
+	const char *asm_str;
+
+	switch (mark) {
+	case MARK_TURN_OFF:
+		asm_str = TURN_OFF_ASM_STR;
+		break;
+	case MARK_NOT_INTENTIONAL:
+	case MARK_YES:
+		asm_str = YES_ASM_STR;
+		break;
+	default:
+		gcc_unreachable();
+	}
+
+	asm_data.def_stmt = stmt;
+	asm_data.output = gimple_call_lhs(stmt);
+
+	if (asm_data.output == NULL_TREE) {
+		asm_data.input = gimple_call_arg(stmt, 0);
+		if (is_gimple_constant(asm_data.input))
+			return false;
+		asm_data.output = NULL;
+		create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data);
+		return true;
+	}
+
+	create_asm_input(stmt, 0, &asm_data);
+	gcc_assert(asm_data.input != NULL_TREE);
+
+	create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+	return true;
+}
+
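+// True if node is defined by a cast assignment.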
+static bool is_from_cast(const_tree node)
+{
+	gimple def_stmt = get_def_stmt(node);
+
+	if (!def_stmt)
+		return false;
+
+	if (gimple_assign_cast_p(def_stmt))
+		return true;
+
+	return false;
+}
+
+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
+static bool skip_ptr_minus(gimple stmt)
+{
+	const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
+
+	if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
+		return false;
+
+	rhs1 = gimple_assign_rhs1(stmt);
+	if (!is_from_cast(rhs1))
+		return false;
+
+	rhs2 = gimple_assign_rhs2(stmt);
+	if (!is_from_cast(rhs2))
+		return false;
+
+	ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
+	ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
+
+	if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
+		return false;
+
+	create_mark_asm(stmt, MARK_YES);
+	return true;
+}
+
+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
+{
+	gimple def_stmt;
+
+	def_stmt = get_def_stmt(lhs);
+	if (!def_stmt)
+		return;
+
+	if (pointer_set_insert(visited, def_stmt))
+		return;
+
+	switch (gimple_code(def_stmt)) {
+	case GIMPLE_NOP:
+	case GIMPLE_ASM:
+	case GIMPLE_CALL:
+		break;
+	case GIMPLE_PHI: {
+		unsigned int i, n = gimple_phi_num_args(def_stmt);
+
+		pointer_set_insert(visited, def_stmt);
+
+		for (i = 0; i < n; i++) {
+			tree arg = gimple_phi_arg_def(def_stmt, i);
+
+			walk_use_def_ptr(visited, arg);
+		}
+	}
+	case GIMPLE_ASSIGN:
+		switch (gimple_num_ops(def_stmt)) {
+		case 2:
+			walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
+			return;
+		case 3:
+			if (skip_ptr_minus(def_stmt))
+				return;
+
+			walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
+			walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
+			return;
+		default:
+			return;
+		}
+	default:
+		debug_gimple_stmt((gimple)def_stmt);
+		error("%s: unknown gimple code", __func__);
+		gcc_unreachable();
+	}
+}
+
+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
+{
+	struct pointer_set_t *visited;
+
+	visited = pointer_set_create();
+	walk_use_def_ptr(visited, arg);
+	pointer_set_destroy(visited);
+}
+
+// Determine the return value and insert the asm stmt to mark the return stmt.
+static void insert_asm_ret(gimple stmt)
+{
+	tree ret;
+
+	ret = gimple_return_retval(stmt);
+	create_size_overflow_asm(stmt, ret, 0);
+}
+
+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
+{
+	tree arg;
+	unsigned int argnum;
+
+	argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
+	gcc_assert(argnum != 0);
+	if (argnum == CANNOT_FIND_ARG)
+		return;
+
+	arg = gimple_call_arg(stmt, argnum - 1);
+	gcc_assert(arg != NULL_TREE);
+
+	// skip all ptr - ptr expressions
+	insert_mark_not_intentional_asm_at_ptr(arg);
+
+	create_size_overflow_asm(stmt, arg, argnum);
+}
+
+// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
+static void set_argnum_attribute(const_tree attr, bool *argnums)
+{
+	unsigned int argnum;
+	tree attr_value;
+
+	for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
+		argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
+		argnums[argnum] = true;
+	}
+}
+
+// If a function arg or the return value is in the hash table then set its index in the array.
+static void set_argnum_hash(tree fndecl, bool *argnums)
+{
+	unsigned int num;
+	const struct size_overflow_hash *hash;
+
+	hash = get_function_hash(DECL_ORIGIN(fndecl));
+	if (!hash)
+		return;
+
+	for (num = 0; num <= MAX_PARAM; num++) {
+		if (!(hash->param & (1U << num)))
+			continue;
+
+		argnums[num] = true;
+	}
+}
+
+static bool is_all_the_argnums_empty(bool *argnums)
+{
+	unsigned int i;
+
+	for (i = 0; i <= MAX_PARAM; i++)
+		if (argnums[i])
+			return false;
+	return true;
+}
+
+// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
+static void search_interesting_args(tree fndecl, bool *argnums)
+{
+	const_tree attr;
+
+	set_argnum_hash(fndecl, argnums);
+	if (!is_all_the_argnums_empty(argnums))
+		return;
+
+	attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
+	if (attr && TREE_VALUE(attr))
+		set_argnum_attribute(attr, argnums);
+}
+
+/*
+ * Look up the intentional_overflow attribute that turns off ipa based duplication
+ * on the callee function.
+ */
+static bool is_mark_turn_off_attribute(gimple stmt)
+{
+	enum mark mark;
+	const_tree fndecl = gimple_call_fndecl(stmt);
+
+	mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
+	if (mark == MARK_TURN_OFF)
+		return true;
+	return false;
+}
+
+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
+static void handle_interesting_function(gimple stmt)
+{
+	unsigned int argnum;
+	tree fndecl;
+	bool orig_argnums[MAX_PARAM + 1] = {false};
+
+	if (gimple_call_num_args(stmt) == 0)
+		return;
+	fndecl = gimple_call_fndecl(stmt);
+	if (fndecl == NULL_TREE)
+		return;
+	fndecl = DECL_ORIGIN(fndecl);
+
+	if (is_mark_turn_off_attribute(stmt)) {
+		create_mark_asm(stmt, MARK_TURN_OFF);
+		return;
+	}
+
+	search_interesting_args(fndecl, orig_argnums);
+
+	for (argnum = 1; argnum < MAX_PARAM; argnum++)
+		if (orig_argnums[argnum])
+			insert_asm_arg(stmt, argnum);
+}
+
+// If the return value of the caller function is in the hash table (its index is 0) then mark the return stmt with an asm stmt
+static void handle_interesting_ret(gimple stmt)
+{
+	bool orig_argnums[MAX_PARAM + 1] = {false};
+
+	search_interesting_args(current_function_decl, orig_argnums);
+
+	if (orig_argnums[0])
+		insert_asm_ret(stmt);
+}
+
+// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
+static unsigned int search_interesting_functions(void)
+{
+	basic_block bb;
+
+	FOR_ALL_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			gimple stmt = gsi_stmt(gsi);
+
+			if (is_size_overflow_asm(stmt))
+				continue;
+
+			if (is_gimple_call(stmt))
+				handle_interesting_function(stmt);
+			else if (gimple_code(stmt) == GIMPLE_RETURN)
+				handle_interesting_ret(stmt);
+		}
+	}
+	return 0;
+}
+
+/*
+ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
+ * this pass inserts asm stmts to mark the interesting args
+ * that the ipa pass will detect and insert the size overflow checks for.
+ */
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data insert_size_overflow_asm_pass_data = {
+#else
+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "insert_size_overflow_asm",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= search_interesting_functions,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= PROP_cfg,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class insert_size_overflow_asm_pass : public gimple_opt_pass {
+public:
+	insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
+	unsigned int execute() { return search_interesting_functions(); }
+};
+}
+#endif
+
+static struct opt_pass *make_insert_size_overflow_asm_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new insert_size_overflow_asm_pass();
+#else
+	return &insert_size_overflow_asm_pass.pass;
+#endif
+}
+
+// Create the noreturn report_size_overflow() function decl.
+static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
+	tree const_char_ptr_type_node;
+	tree fntype;
+
+	const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
+
+	// void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
+	fntype = build_function_type_list(void_type_node,
+					  const_char_ptr_type_node,
+					  unsigned_type_node,
+					  const_char_ptr_type_node,
+					  const_char_ptr_type_node,
+					  NULL_TREE);
+	report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
+
+	DECL_ASSEMBLER_NAME(report_size_overflow_decl);
+	TREE_PUBLIC(report_size_overflow_decl) = 1;
+	DECL_EXTERNAL(report_size_overflow_decl) = 1;
+	DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
+	TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
+static unsigned int dump_functions(void)
+{
+	struct cgraph_node *node;
+
+	FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
+		basic_block bb;
+
+		push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
+		current_function_decl = NODE_DECL(node);
+
+		fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
+
+		FOR_ALL_BB_FN(bb, cfun) {
+			gimple_stmt_iterator si;
+
+			fprintf(stderr, "<bb %u>:\n", bb->index);
+			for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
+				debug_gimple_stmt(gsi_stmt(si));
+			for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
+				debug_gimple_stmt(gsi_stmt(si));
+			fprintf(stderr, "\n");
+		}
+
+		fprintf(stderr, "-------------------------------------------------------------------------\n");
+
+		pop_cfun();
+		current_function_decl = NULL_TREE;
+	}
+
+	fprintf(stderr, "###############################################################################\n");
+
+	return 0;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data dump_pass_data = {
+#else
+static struct ipa_opt_pass_d dump_pass = {
+	.pass = {
+#endif
+		.type			= SIMPLE_IPA_PASS,
+		.name			= "dump",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= dump_functions,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= 0,
+#if BUILDING_GCC_VERSION < 4009
+	},
+	.generate_summary		= NULL,
+	.write_summary			= NULL,
+	.read_summary			= NULL,
+#if BUILDING_GCC_VERSION >= 4006
+	.write_optimization_summary	= NULL,
+	.read_optimization_summary	= NULL,
+#endif
+	.stmt_fixup			= NULL,
+	.function_transform_todo_flags_start		= 0,
+	.function_transform		= NULL,
+	.variable_transform		= NULL,
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class dump_pass : public ipa_opt_pass_d {
+public:
+	dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
+	unsigned int execute() { return dump_functions(); }
+};
+}
+#endif
+
+static struct opt_pass *make_dump_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new dump_pass();
+#else
+	return &dump_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	int i;
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	bool enable = true;
+	struct register_pass_info insert_size_overflow_asm_pass_info;
+	struct register_pass_info __unused dump_before_pass_info;
+	struct register_pass_info __unused dump_after_pass_info;
+	struct register_pass_info ipa_pass_info;
+	static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
+		{
+			.base = &report_size_overflow_decl,
+			.nelt = 1,
+			.stride = sizeof(report_size_overflow_decl),
+			.cb = &gt_ggc_mx_tree_node,
+			.pchw = &gt_pch_nx_tree_node
+		},
+		LAST_GGC_ROOT_TAB
+	};
+
+	insert_size_overflow_asm_pass_info.pass				= make_insert_size_overflow_asm_pass();
+	insert_size_overflow_asm_pass_info.reference_pass_name		= "ssa";
+	insert_size_overflow_asm_pass_info.ref_pass_instance_number	= 1;
+	insert_size_overflow_asm_pass_info.pos_op			= PASS_POS_INSERT_AFTER;
+
+	dump_before_pass_info.pass			= make_dump_pass();
+	dump_before_pass_info.reference_pass_name	= "increase_alignment";
+	dump_before_pass_info.ref_pass_instance_number	= 1;
+	dump_before_pass_info.pos_op			= PASS_POS_INSERT_BEFORE;
+
+	ipa_pass_info.pass			= make_ipa_pass();
+	ipa_pass_info.reference_pass_name	= "increase_alignment";
+	ipa_pass_info.ref_pass_instance_number	= 1;
+	ipa_pass_info.pos_op			= PASS_POS_INSERT_BEFORE;
+
+	dump_after_pass_info.pass			= make_dump_pass();
+	dump_after_pass_info.reference_pass_name	= "increase_alignment";
+	dump_after_pass_info.ref_pass_instance_number	= 1;
+	dump_after_pass_info.pos_op			= PASS_POS_INSERT_BEFORE;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "no-size-overflow")) {
+			enable = false;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+	if (enable) {
+		register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
+		register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
+//		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
+//		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
+	}
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+	return 0;
+}
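
A note on the decl built in size_overflow_start_unit() above: it matches the prototype given in the comment, void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var), and is marked TREE_THIS_VOLATILE, i.e. the plugin treats the call as noreturn. The real handler is supplied by the kernel side of the patch; the stand-in below is only a hedged user-space sketch of that calling convention (everything except the prototype is illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in only; the kernel provides its own implementation.
 * Declared noreturn to match the TREE_THIS_VOLATILE marking of the decl. */
__attribute__((noreturn))
void report_size_overflow(const char *loc_file, unsigned int loc_line,
			  const char *current_func, const char *ssa_var)
{
	fprintf(stderr, "size overflow detected in %s() at %s:%u, ssa name %s\n",
		current_func, loc_file, loc_line, ssa_var);
	abort();
}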
diff -ruNp linux-3.13.11/tools/gcc/stackleak_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/stackleak_plugin.c
--- linux-3.13.11/tools/gcc/stackleak_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/stackleak_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to help implement various PaX features
+ *
+ * - track lowest stack pointer
+ *
+ * TODO:
+ * - initialize all local variables
+ *
+ * BUGS:
+ * - none known
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static int track_frame_size = -1;
+static const char track_function[] = "pax_track_stack";
+static const char check_function[] = "pax_check_alloca";
+static tree track_function_decl, check_function_decl;
+static bool init_locals;
+
+static struct plugin_info stackleak_plugin_info = {
+	.version	= "201402131920",
+	.help		= "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
+//			  "initialize-locals\t\tforcibly initialize all stack frames\n"
+};
+
+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
+{
+	gimple check_alloca;
+	tree alloca_size;
+
+	// insert call to void pax_check_alloca(unsigned long size)
+	alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
+	check_alloca = gimple_build_call(check_function_decl, 1, alloca_size);
+	gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
+}
+
+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
+{
+	gimple track_stack;
+
+	// insert call to void pax_track_stack(void)
+	track_stack = gimple_build_call(track_function_decl, 0);
+	gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
+}
+
+static bool is_alloca(gimple stmt)
+{
+	if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
+		return true;
+
+#if BUILDING_GCC_VERSION >= 4007
+	if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+		return true;
+#endif
+
+	return false;
+}
+
+static unsigned int execute_stackleak_tree_instrument(void)
+{
+	basic_block bb, entry_bb;
+	bool prologue_instrumented = false, is_leaf = true;
+
+	entry_bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb;
+
+	// 1. loop through BBs and GIMPLE statements
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			gimple stmt;
+
+			stmt = gsi_stmt(gsi);
+
+			if (is_gimple_call(stmt))
+				is_leaf = false;
+
+			// gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
+			if (!is_alloca(stmt))
+				continue;
+
+			// 2. insert stack overflow check before each __builtin_alloca call
+			stackleak_check_alloca(&gsi);
+
+			// 3. insert track call after each __builtin_alloca call
+			stackleak_add_instrumentation(&gsi);
+			if (bb == entry_bb)
+				prologue_instrumented = true;
+		}
+	}
+
+	// special cases for some bad linux code: taking the address of static inline functions will materialize them
+	// but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
+	// will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
+	// case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
+	if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
+		return 0;
+	if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
+		return 0;
+
+	// 4. insert track call at the beginning
+	if (!prologue_instrumented) {
+		gimple_stmt_iterator gsi;
+
+		bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
+		if (dom_info_available_p(CDI_DOMINATORS))
+			set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
+		gsi = gsi_start_bb(bb);
+		stackleak_add_instrumentation(&gsi);
+	}
+
+	return 0;
+}
+
+static unsigned int execute_stackleak_final(void)
+{
+	rtx insn, next;
+
+	if (cfun->calls_alloca)
+		return 0;
+
+	// keep calls only if function frame is big enough
+	if (get_frame_size() >= track_frame_size)
+		return 0;
+
+	// 1. find pax_track_stack calls
+	for (insn = get_insns(); insn; insn = next) {
+		// rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
+		rtx body;
+
+		next = NEXT_INSN(insn);
+		if (!CALL_P(insn))
+			continue;
+		body = PATTERN(insn);
+		if (GET_CODE(body) != CALL)
+			continue;
+		body = XEXP(body, 0);
+		if (GET_CODE(body) != MEM)
+			continue;
+		body = XEXP(body, 0);
+		if (GET_CODE(body) != SYMBOL_REF)
+			continue;
+//		if (strcmp(XSTR(body, 0), track_function))
+		if (SYMBOL_REF_DECL(body) != track_function_decl)
+			continue;
+//		warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
+		// 2. delete call
+		delete_insn_and_edges(insn);
+#if BUILDING_GCC_VERSION >= 4007
+		if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
+			insn = next;
+			next = NEXT_INSN(insn);
+			delete_insn_and_edges(insn);
+		}
+#endif
+	}
+
+//	print_simple_rtl(stderr, get_insns());
+//	print_rtl(stderr, get_insns());
+//	warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
+
+	return 0;
+}
+
+static bool gate_stackleak_track_stack(void)
+{
+	return track_frame_size >= 0;
+}
+
+static void stackleak_start_unit(void *gcc_data, void *user_data)
+{
+	tree fntype;
+
+	// void pax_track_stack(void)
+	fntype = build_function_type_list(void_type_node, NULL_TREE);
+	track_function_decl = build_fn_decl(track_function, fntype);
+	DECL_ASSEMBLER_NAME(track_function_decl); // for LTO
+	TREE_PUBLIC(track_function_decl) = 1;
+	DECL_EXTERNAL(track_function_decl) = 1;
+	DECL_ARTIFICIAL(track_function_decl) = 1;
+
+	// void pax_check_alloca(unsigned long)
+	fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
+	check_function_decl = build_fn_decl(check_function, fntype);
+	DECL_ASSEMBLER_NAME(check_function_decl); // for LTO
+	TREE_PUBLIC(check_function_decl) = 1;
+	DECL_EXTERNAL(check_function_decl) = 1;
+	DECL_ARTIFICIAL(check_function_decl) = 1;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data stackleak_tree_instrument_pass_data = {
+#else
+static struct gimple_opt_pass stackleak_tree_instrument_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "stackleak_tree_instrument",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= gate_stackleak_track_stack,
+		.execute		= execute_stackleak_tree_instrument,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= PROP_gimple_leh | PROP_cfg,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa | TODO_rebuild_cgraph_edges
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data stackleak_final_rtl_opt_pass_data = {
+#else
+static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
+	.pass = {
+#endif
+		.type			= RTL_PASS,
+		.name			= "stackleak_final",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= true,
+		.has_execute		= true,
+#else
+		.gate			= gate_stackleak_track_stack,
+		.execute		= execute_stackleak_final,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= 0,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_dump_func
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class stackleak_tree_instrument_pass : public gimple_opt_pass {
+public:
+	stackleak_tree_instrument_pass() : gimple_opt_pass(stackleak_tree_instrument_pass_data, g) {}
+	bool gate() { return gate_stackleak_track_stack(); }
+	unsigned int execute() { return execute_stackleak_tree_instrument(); }
+};
+
+class stackleak_final_rtl_opt_pass : public rtl_opt_pass {
+public:
+	stackleak_final_rtl_opt_pass() : rtl_opt_pass(stackleak_final_rtl_opt_pass_data, g) {}
+	bool gate() { return gate_stackleak_track_stack(); }
+	unsigned int execute() { return execute_stackleak_final(); }
+};
+}
+#endif
+
+static struct opt_pass *make_stackleak_tree_instrument_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new stackleak_tree_instrument_pass();
+#else
+	return &stackleak_tree_instrument_pass.pass;
+#endif
+}
+
+static struct opt_pass *make_stackleak_final_rtl_opt_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new stackleak_final_rtl_opt_pass();
+#else
+	return &stackleak_final_rtl_opt_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+	struct register_pass_info stackleak_tree_instrument_pass_info;
+	struct register_pass_info stackleak_final_pass_info;
+	static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
+		{
+			.base = &track_function_decl,
+			.nelt = 1,
+			.stride = sizeof(track_function_decl),
+			.cb = &gt_ggc_mx_tree_node,
+			.pchw = &gt_pch_nx_tree_node
+		},
+		{
+			.base = &check_function_decl,
+			.nelt = 1,
+			.stride = sizeof(check_function_decl),
+			.cb = &gt_ggc_mx_tree_node,
+			.pchw = &gt_pch_nx_tree_node
+		},
+		LAST_GGC_ROOT_TAB
+	};
+
+	stackleak_tree_instrument_pass_info.pass			= make_stackleak_tree_instrument_pass();
+//	stackleak_tree_instrument_pass_info.reference_pass_name		= "tree_profile";
+	stackleak_tree_instrument_pass_info.reference_pass_name		= "optimized";
+	stackleak_tree_instrument_pass_info.ref_pass_instance_number	= 1;
+	stackleak_tree_instrument_pass_info.pos_op 			= PASS_POS_INSERT_BEFORE;
+
+	stackleak_final_pass_info.pass				= make_stackleak_final_rtl_opt_pass();
+	stackleak_final_pass_info.reference_pass_name		= "final";
+	stackleak_final_pass_info.ref_pass_instance_number	= 1;
+	stackleak_final_pass_info.pos_op 			= PASS_POS_INSERT_BEFORE;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "track-lowest-sp")) {
+			if (!argv[i].value) {
+				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+				continue;
+			}
+			track_frame_size = atoi(argv[i].value);
+			if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
+				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+			continue;
+		}
+		if (!strcmp(argv[i].key, "initialize-locals")) {
+			if (argv[i].value) {
+				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+				continue;
+			}
+			init_locals = true;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
+	register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_stackleak);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
+
+	return 0;
+}
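
To make the effect of the two stackleak passes concrete: the GIMPLE pass brackets every __builtin_alloca with the check/track calls and adds a prologue pax_track_stack() call when the entry block had none, while the later RTL pass deletes the track calls again in functions that neither call alloca nor reach the track-lowest-sp frame-size threshold. A hedged C-level sketch follows; the stubs only mirror the prototypes declared in stackleak_start_unit(), and the function itself is made up, not kernel code:

#include <alloca.h>
#include <string.h>

void pax_track_stack(void) { }				/* kernel version records the lowest stack pointer */
void pax_check_alloca(unsigned long size) { (void)size; }	/* kernel version bounds-checks the allocation */

void copy_name(char *dst, const char *src, unsigned long n)
{
	char *buf;

	pax_check_alloca(n);	/* inserted before the __builtin_alloca call */
	buf = alloca(n);
	pax_track_stack();	/* inserted after it */
	memcpy(buf, src, n);
	memcpy(dst, buf, n);
}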
diff -ruNp linux-3.13.11/tools/gcc/structleak_plugin.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/structleak_plugin.c
--- linux-3.13.11/tools/gcc/structleak_plugin.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/gcc/structleak_plugin.c	2014-07-09 12:00:16.000000000 +0200
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2013-2014 by PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to forcibly initialize certain local variables that could
+ * otherwise leak kernel stack to userland if they aren't properly initialized
+ * by later code
+ *
+ * Homepage: http://pax.grsecurity.net/
+ *
+ * Usage:
+ * $ # for 4.5/4.6/C based 4.7
+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
+ * $ # for C++ based 4.7/4.8+
+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
+ * $ gcc -fplugin=./structleak_plugin.so test.c -O2
+ *
+ * TODO: eliminate redundant initializers
+ *       increase type coverage
+ */
+
+#include "gcc-common.h"
+
+// unused C type flag in all versions 4.5-4.9
+#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info structleak_plugin_info = {
+	.version	= "201401260140",
+	.help		= "disable\tdo not activate plugin\n",
+};
+
+static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
+{
+	*no_add_attrs = true;
+
+	// check for types? for now accept everything linux has to offer
+	if (TREE_CODE(*node) != FIELD_DECL)
+		return NULL_TREE;
+
+	*no_add_attrs = false;
+	return NULL_TREE;
+}
+
+static struct attribute_spec user_attr = {
+	.name			= "user",
+	.min_length		= 0,
+	.max_length		= 0,
+	.decl_required		= false,
+	.type_required		= false,
+	.function_type_required	= false,
+	.handler		= handle_user_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+	.affects_type_identity	= true
+#endif
+};
+
+static void register_attributes(void *event_data, void *data)
+{
+	register_attribute(&user_attr);
+//	register_attribute(&force_attr);
+}
+
+static tree get_field_type(tree field)
+{
+	return strip_array_types(TREE_TYPE(field));
+}
+
+static bool is_userspace_type(tree type)
+{
+	tree field;
+
+	for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
+		tree fieldtype = get_field_type(field);
+		enum tree_code code = TREE_CODE(fieldtype);
+
+		if (code == RECORD_TYPE || code == UNION_TYPE)
+			if (is_userspace_type(fieldtype))
+				return true;
+
+		if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
+			return true;
+	}
+	return false;
+}
+
+static void finish_type(void *event_data, void *data)
+{
+	tree type = (tree)event_data;
+
+	if (TYPE_USERSPACE(type))
+		return;
+
+	if (is_userspace_type(type))
+		TYPE_USERSPACE(type) = 1;
+}
+
+static void initialize(tree var)
+{
+	basic_block bb;
+	gimple_stmt_iterator gsi;
+	tree initializer;
+	gimple init_stmt;
+
+	// this is the original entry bb before the forced split
+	// TODO: check further BBs in case more splits occurred before us
+	bb = ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb->next_bb;
+
+	// first check if the variable is already initialized, warn otherwise
+	for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+		gimple stmt = gsi_stmt(gsi);
+		tree rhs1;
+
+		// we're looking for an assignment of a single rhs...
+		if (!gimple_assign_single_p(stmt))
+			continue;
+		rhs1 = gimple_assign_rhs1(stmt);
+#if BUILDING_GCC_VERSION >= 4007
+		// ... of a non-clobbering expression...
+		if (TREE_CLOBBER_P(rhs1))
+			continue;
+#endif
+		// ... to our variable...
+		if (gimple_get_lhs(stmt) != var)
+			continue;
+		// if it's an initializer then we're good
+		if (TREE_CODE(rhs1) == CONSTRUCTOR)
+			return;
+	}
+
+	// these aren't the 0days you're looking for
+//	inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
+
+	// build the initializer expression
+	initializer = build_constructor(TREE_TYPE(var), NULL);
+
+	// build the initializer stmt
+	init_stmt = gimple_build_assign(var, initializer);
+	gsi = gsi_start_bb(ENTRY_BLOCK_PTR_FOR_FN(cfun)->next_bb);
+	gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
+	update_stmt(init_stmt);
+}
+
+static unsigned int handle_function(void)
+{
+	basic_block bb;
+	unsigned int ret = 0;
+	tree var;
+	unsigned int i;
+
+	// split the first bb where we can put the forced initializers
+	bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
+	if (dom_info_available_p(CDI_DOMINATORS))
+		set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
+
+	// enumerate all local variables and forcibly initialize our targets
+	FOR_EACH_LOCAL_DECL(cfun, i, var) {
+		tree type = TREE_TYPE(var);
+
+		gcc_assert(DECL_P(var));
+		if (!auto_var_in_fn_p(var, current_function_decl))
+			continue;
+
+		// only care about structure types
+		if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
+			continue;
+
+		// if the type is of interest, examine the variable
+		if (TYPE_USERSPACE(type))
+			initialize(var);
+	}
+
+	return ret;
+}
+
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data structleak_pass_data = {
+#else
+static struct gimple_opt_pass structleak_pass = {
+	.pass = {
+#endif
+		.type			= GIMPLE_PASS,
+		.name			= "structleak",
+#if BUILDING_GCC_VERSION >= 4008
+		.optinfo_flags		= OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 4009
+		.has_gate		= false,
+		.has_execute		= true,
+#else
+		.gate			= NULL,
+		.execute		= handle_function,
+		.sub			= NULL,
+		.next			= NULL,
+		.static_pass_number	= 0,
+#endif
+		.tv_id			= TV_NONE,
+		.properties_required	= PROP_cfg,
+		.properties_provided	= 0,
+		.properties_destroyed	= 0,
+		.todo_flags_start	= 0,
+		.todo_flags_finish	= TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
+#if BUILDING_GCC_VERSION < 4009
+	}
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+class structleak_pass : public gimple_opt_pass {
+public:
+	structleak_pass() : gimple_opt_pass(structleak_pass_data, g) {}
+	unsigned int execute() { return handle_function(); }
+};
+}
+#endif
+
+static struct opt_pass *make_structleak_pass(void)
+{
+#if BUILDING_GCC_VERSION >= 4009
+	return new structleak_pass();
+#else
+	return &structleak_pass.pass;
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	int i;
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	bool enable = true;
+	struct register_pass_info structleak_pass_info;
+
+	structleak_pass_info.pass			= make_structleak_pass();
+	structleak_pass_info.reference_pass_name	= "ssa";
+	structleak_pass_info.ref_pass_instance_number	= 1;
+	structleak_pass_info.pos_op			= PASS_POS_INSERT_AFTER;
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	if (strcmp(lang_hooks.name, "GNU C")) {
+		inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
+		enable = false;
+	}
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "disable")) {
+			enable = false;
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
+	if (enable) {
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
+		register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
+	}
+	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
+	return 0;
+}
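
As a usage illustration for the test.c invocation shown in the header comment: a hedged example of a type the pass treats as "userspace". The struct and function below are made up; only the attribute name "user" comes from the plugin. With the plugin loaded, the local req is assigned an empty CONSTRUCTOR in the split entry block, i.e. zeroed, even though the source never initializes it; without the plugin gcc merely ignores the unknown attribute with a warning:

struct request {
	void *buf __attribute__((user));	/* FIELD_DECL accepted by handle_user_attribute() */
	unsigned long len;
};

unsigned long do_request(void *dst)
{
	struct request req;	/* forcibly initialized by the structleak pass */

	req.buf = dst;
	return req.len;		/* would otherwise read an uninitialized stack slot */
}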
diff -ruNp linux-3.13.11/tools/lib/lk/Makefile linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/lib/lk/Makefile
--- linux-3.13.11/tools/lib/lk/Makefile	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/lib/lk/Makefile	2014-07-09 12:00:16.000000000 +0200
@@ -13,7 +13,7 @@ LIB_OBJS += $(OUTPUT)debugfs.o
 
 LIBFILE = liblk.a
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
+CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
 EXTLIBS = -lelf -lpthread -lrt -lm
 ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
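
The -U_FORTIFY_SOURCE inserted ahead of -D_FORTIFY_SOURCE=2 matters on toolchains (hardened distributions, for instance) that already predefine the macro: redefining a macro to a different value draws a preprocessor warning, which the -Werror in these CFLAGS would turn into a build failure, whereas undefining it first is always clean. A minimal, purely illustrative preprocessor sketch of the same effect:

#define _FORTIFY_SOURCE 1	/* stands in for a toolchain-provided definition */
#define _FORTIFY_SOURCE 2	/* warning: "_FORTIFY_SOURCE" redefined */

#undef _FORTIFY_SOURCE		/* what -U_FORTIFY_SOURCE does on the command line */
#define _FORTIFY_SOURCE 2	/* no warning this time */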
diff -ruNp linux-3.13.11/tools/perf/util/include/asm/alternative-asm.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/perf/util/include/asm/alternative-asm.h
--- linux-3.13.11/tools/perf/util/include/asm/alternative-asm.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/perf/util/include/asm/alternative-asm.h	2014-07-09 12:00:16.000000000 +0200
@@ -5,4 +5,7 @@
 
 #define altinstruction_entry #
 
+	.macro pax_force_retaddr rip=0, reload=0
+	.endm
+
 #endif
diff -ruNp linux-3.13.11/tools/perf/util/include/linux/compiler.h linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/perf/util/include/linux/compiler.h
--- linux-3.13.11/tools/perf/util/include/linux/compiler.h	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/tools/perf/util/include/linux/compiler.h	2014-07-09 12:00:16.000000000 +0200
@@ -27,4 +27,12 @@
 # define __weak			__attribute__((weak))
 #endif
 
+#ifndef __size_overflow
+# define __size_overflow(...)
+#endif
+
+#ifndef __intentional_overflow
+# define __intentional_overflow(...)
+#endif
+
 #endif
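
These no-op fallbacks let the perf tools parse kernel-style prototypes carrying the size_overflow/intentional_overflow annotations that the gcc plugins above act on, without any plugin being loaded. A hedged example; the function names and argument positions are made up, only the macro names come from this hunk:

#ifndef __size_overflow
# define __size_overflow(...)
#endif
#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif

/* With the fallbacks in effect both lines reduce to plain prototypes. */
void *example_alloc(unsigned long size) __size_overflow(1);
long example_sub(long a, long b) __intentional_overflow(-1);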
diff -ruNp linux-3.13.11/virt/kvm/ioapic.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/virt/kvm/ioapic.c
--- linux-3.13.11/virt/kvm/ioapic.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/virt/kvm/ioapic.c	2014-07-09 12:00:16.000000000 +0200
@@ -306,7 +306,7 @@ static int ioapic_deliver(struct kvm_ioa
 		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
 				ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi = ret;
+		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
 	} else
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
 
diff -ruNp linux-3.13.11/virt/kvm/kvm_main.c linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/virt/kvm/kvm_main.c
--- linux-3.13.11/virt/kvm/kvm_main.c	2014-04-23 01:49:33.000000000 +0200
+++ linux-3.13.11-vs2.3.6.11-grsec3.0_201404182111/virt/kvm/kvm_main.c	2014-07-09 12:00:16.000000000 +0200
@@ -76,12 +76,17 @@ LIST_HEAD(vm_list);
 
 static cpumask_var_t cpus_hardware_enabled;
 static int kvm_usage_count = 0;
-static atomic_t hardware_enable_failed;
+static atomic_unchecked_t hardware_enable_failed;
 
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
-static __read_mostly struct preempt_ops kvm_preempt_ops;
+static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
+static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
+static struct preempt_ops kvm_preempt_ops = {
+	.sched_in = kvm_sched_in,
+	.sched_out = kvm_sched_out,
+};
 
 struct dentry *kvm_debugfs_dir;
 
@@ -751,7 +756,7 @@ int __kvm_set_memory_region(struct kvm *
 	/* We can read the guest memory with __xxx_user() later on. */
 	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
 	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-	     !access_ok(VERIFY_WRITE,
+	     !access_ok_noprefault(VERIFY_WRITE,
 			(void __user *)(unsigned long)mem->userspace_addr,
 			mem->memory_size)))
 		goto out;
@@ -1615,9 +1620,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached)
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
+	int r;
+	unsigned long addr;
 
-	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr))
+		return -EFAULT;
+	r = __clear_user((void __user *)addr + offset, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty(kvm, gfn);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1872,7 +1885,7 @@ static int kvm_vcpu_release(struct inode
 	return 0;
 }
 
-static struct file_operations kvm_vcpu_fops = {
+static file_operations_no_const kvm_vcpu_fops __read_only = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
 #ifdef CONFIG_COMPAT
@@ -2532,7 +2545,7 @@ out:
 }
 #endif
 
-static struct file_operations kvm_vm_fops = {
+static file_operations_no_const kvm_vm_fops __read_only = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
 #ifdef CONFIG_COMPAT
@@ -2632,7 +2645,7 @@ out:
 	return r;
 }
 
-static struct file_operations kvm_chardev_ops = {
+static file_operations_no_const kvm_chardev_ops __read_only = {
 	.unlocked_ioctl = kvm_dev_ioctl,
 	.compat_ioctl   = kvm_dev_ioctl,
 	.llseek		= noop_llseek,
@@ -2658,7 +2671,7 @@ static void hardware_enable_nolock(void
 
 	if (r) {
 		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
-		atomic_inc(&hardware_enable_failed);
+		atomic_inc_unchecked(&hardware_enable_failed);
 		printk(KERN_INFO "kvm: enabling virtualization on "
 				 "CPU%d failed\n", cpu);
 	}
@@ -2714,10 +2727,10 @@ static int hardware_enable_all(void)
 
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
-		atomic_set(&hardware_enable_failed, 0);
+		atomic_set_unchecked(&hardware_enable_failed, 0);
 		on_each_cpu(hardware_enable_nolock, NULL, 1);
 
-		if (atomic_read(&hardware_enable_failed)) {
+		if (atomic_read_unchecked(&hardware_enable_failed)) {
 			hardware_disable_all_nolock();
 			r = -EBUSY;
 		}
@@ -3148,7 +3161,7 @@ static void kvm_sched_out(struct preempt
 	kvm_arch_vcpu_put(vcpu);
 }
 
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		  struct module *module)
 {
 	int r;
@@ -3195,7 +3208,7 @@ int kvm_init(void *opaque, unsigned vcpu
 	if (!vcpu_align)
 		vcpu_align = __alignof__(struct kvm_vcpu);
 	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
-					   0, NULL);
+					   SLAB_USERCOPY, NULL);
 	if (!kvm_vcpu_cache) {
 		r = -ENOMEM;
 		goto out_free_3;
@@ -3205,9 +3218,11 @@ int kvm_init(void *opaque, unsigned vcpu
 	if (r)
 		goto out_free;
 
+	pax_open_kernel();
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
+	pax_close_kernel();
 
 	r = misc_register(&kvm_dev);
 	if (r) {
@@ -3217,9 +3232,6 @@ int kvm_init(void *opaque, unsigned vcpu
 
 	register_syscore_ops(&kvm_syscore_ops);
 
-	kvm_preempt_ops.sched_in = kvm_sched_in;
-	kvm_preempt_ops.sched_out = kvm_sched_out;
-
 	r = kvm_init_debug();
 	if (r) {
 		printk(KERN_ERR "kvm: create debugfs files failed\n");