Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              |   15
-rw-r--r--  lib/Kconfig.debug        | 1078
-rw-r--r--  lib/Kconfig.kgdb         |    4
-rw-r--r--  lib/Makefile             |    6
-rw-r--r--  lib/clz_ctz.c            |   58
-rw-r--r--  lib/crc-t10dif.c         |   73
-rw-r--r--  lib/decompress.c         |    5
-rw-r--r--  lib/decompress_unlz4.c   |  187
-rw-r--r--  lib/lz4/Makefile         |    3
-rw-r--r--  lib/lz4/lz4_compress.c   |  443
-rw-r--r--  lib/lz4/lz4_decompress.c |  326
-rw-r--r--  lib/lz4/lz4defs.h        |  156
-rw-r--r--  lib/lz4/lz4hc_compress.c |  539
-rw-r--r--  lib/scatterlist.c        |  133
-rw-r--r--  lib/vsprintf.c           |  126
15 files changed, 2565 insertions(+), 587 deletions(-)
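For orientation before the patch body: a minimal, hypothetical sketch of how kernel code might drive the lz4_compress()/lz4_decompress_unknownoutputsize() API this series introduces. lz4_roundtrip(), the vmalloc() buffer sizing, and the error handling are illustrative only, not part of the patch; the signatures, lz4_compressbound() and LZ4_MEM_COMPRESS are taken from the diff below.

	#include <linux/lz4.h>
	#include <linux/vmalloc.h>
	#include <linux/errno.h>

	/* Hypothetical round-trip through the new LZ4 library API. */
	static int lz4_roundtrip(const unsigned char *src, size_t src_len)
	{
		size_t comp_len = 0, decomp_len = src_len;
		void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
		unsigned char *comp = vmalloc(lz4_compressbound(src_len));
		unsigned char *decomp = vmalloc(src_len);
		int ret = -ENOMEM;

		if (!wrkmem || !comp || !decomp)
			goto out;

		/* Returns 0 on success; comp_len receives the compressed size. */
		ret = lz4_compress(src, src_len, comp, &comp_len, wrkmem);
		if (ret)
			goto out;

		/* Returns < 0 on error; decomp_len receives the decoded size. */
		ret = lz4_decompress_unknownoutputsize(comp, comp_len,
						       decomp, &decomp_len);
	out:
		vfree(decomp);
		vfree(comp);
		vfree(wrkmem);
		return ret;
	}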
diff --git a/lib/Kconfig b/lib/Kconfig index 5a5203ded0dd..35da51359d40 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -66,6 +66,8 @@ config CRC16 config CRC_T10DIF tristate "CRC calculation for the T10 Data Integrity Field" + select CRYPTO + select CRYPTO_CRCT10DIF help This option is only needed if a module that's not in the kernel tree needs to calculate CRC checks for use with the @@ -192,6 +194,15 @@ config LZO_COMPRESS config LZO_DECOMPRESS tristate +config LZ4_COMPRESS + tristate + +config LZ4HC_COMPRESS + tristate + +config LZ4_DECOMPRESS + tristate + source "lib/xz/Kconfig" # @@ -216,6 +227,10 @@ config DECOMPRESS_LZO select LZO_DECOMPRESS tristate +config DECOMPRESS_LZ4 + select LZ4_DECOMPRESS + tristate + # # Generic allocator support is selected if needed # diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7154f799541a..98ac17ed6222 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1,3 +1,4 @@ +menu "printk and dmesg options" config PRINTK_TIME bool "Show timing information on printks" @@ -25,6 +26,123 @@ config DEFAULT_MESSAGE_LOGLEVEL that are auditing their logs closely may want to set it to a lower priority. +config BOOT_PRINTK_DELAY + bool "Delay each boot printk message by N milliseconds" + depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY + help + This build option allows you to read kernel boot messages + by inserting a short delay after each one. The delay is + specified in milliseconds on the kernel command line, + using "boot_delay=N". + + It is likely that you would also need to use "lpj=M" to preset + the "loops per jiffie" value. + See a previous boot log for the "lpj" value to use for your + system, and then set "lpj=M" before setting "boot_delay=N". + NOTE: Using this option may adversely affect SMP systems. + I.e., processors other than the first one may not boot up. + BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect + what it believes to be lockup conditions. + +config DYNAMIC_DEBUG + bool "Enable dynamic printk() support" + default n + depends on PRINTK + depends on DEBUG_FS + help + + Compiles debug level messages into the kernel, which would not + otherwise be available at runtime. These messages can then be + enabled/disabled based on various levels of scope - per source file, + function, module, format string, and line number. This mechanism + implicitly compiles in all pr_debug() and dev_dbg() calls, which + enlarges the kernel text size by about 2%. + + If a source file is compiled with DEBUG flag set, any + pr_debug() calls in it are enabled by default, but can be + disabled at runtime as below. Note that DEBUG flag is + turned on by many CONFIG_*DEBUG* options. + + Usage: + + Dynamic debugging is controlled via the 'dynamic_debug/control' file, + which is contained in the 'debugfs' filesystem. Thus, the debugfs + filesystem must first be mounted before making use of this feature. + We refer the control file as: <debugfs>/dynamic_debug/control. This + file contains a list of the debug statements that can be enabled. 
The + format for each line of the file is: + + filename:lineno [module]function flags format + + filename : source file of the debug statement + lineno : line number of the debug statement + module : module that contains the debug statement + function : function that contains the debug statement + flags : '=p' means the line is turned 'on' for printing + format : the format used for the debug statement + + From a live system: + + nullarbor:~ # cat <debugfs>/dynamic_debug/control + # filename:lineno [module]function flags format + fs/aio.c:222 [aio]__put_ioctx =_ "__put_ioctx:\040freeing\040%p\012" + fs/aio.c:248 [aio]ioctx_alloc =_ "ENOMEM:\040nr_events\040too\040high\012" + fs/aio.c:1770 [aio]sys_io_cancel =_ "calling\040cancel\012" + + Example usage: + + // enable the message at line 1603 of file svcsock.c + nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' > + <debugfs>/dynamic_debug/control + + // enable all the messages in file svcsock.c + nullarbor:~ # echo -n 'file svcsock.c +p' > + <debugfs>/dynamic_debug/control + + // enable all the messages in the NFS server module + nullarbor:~ # echo -n 'module nfsd +p' > + <debugfs>/dynamic_debug/control + + // enable all 12 messages in the function svc_process() + nullarbor:~ # echo -n 'func svc_process +p' > + <debugfs>/dynamic_debug/control + + // disable all 12 messages in the function svc_process() + nullarbor:~ # echo -n 'func svc_process -p' > + <debugfs>/dynamic_debug/control + + See Documentation/dynamic-debug-howto.txt for additional information. + +endmenu # "printk and dmesg options" + +menu "Compile-time checks and compiler options" + +config DEBUG_INFO + bool "Compile the kernel with debug info" + depends on DEBUG_KERNEL + help + If you say Y here the resulting kernel image will include + debugging info resulting in a larger kernel image. + This adds debug symbols to the kernel and modules (gcc -g), and + is needed if you intend to use kernel crashdump or binary object + tools like crash, kgdb, LKCD, gdb, etc on the kernel. + Say Y here only if you plan to debug the kernel. + + If unsure, say N. + +config DEBUG_INFO_REDUCED + bool "Reduce debugging information" + depends on DEBUG_INFO + help + If you say Y here gcc is instructed to generate less debugging + information for structure types. This means that tools that + need full debugging information (like kgdb or systemtap) won't + be happy. But if you merely need debugging information to + resolve line numbers there is no loss. Advantage is that + build directory object sizes shrink dramatically over a full + DEBUG_INFO build and compile times are reduced too. + Only works with newer gcc versions. + config ENABLE_WARN_DEPRECATED bool "Enable __deprecated logic" default y @@ -52,20 +170,6 @@ config FRAME_WARN Setting it to 0 disables the warning. Requires gcc 4.4 -config MAGIC_SYSRQ - bool "Magic SysRq key" - depends on !UML - help - If you say Y here, you will have some control over the system even - if the system crashes for example during kernel debugging (e.g., you - will be able to flush the buffer cache to disk, reboot the system - immediately or dump some status information). This is accomplished - by pressing various keys while holding SysRq (Alt+PrintScreen). It - also works on a serial console (on PC hardware at least), if you - send a BREAK and then within 5 seconds a command keypress. The - keys are documented in <file:Documentation/sysrq.txt>. Don't say Y - unless you really know what this hack does. 
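To anchor the DYNAMIC_DEBUG help text above: a hypothetical pr_debug() call site of the kind the control file toggles (example_probe() and its message are invented for the example).

	#include <linux/printk.h>

	static int example_probe(void)
	{
		/*
		 * Compiled in when CONFIG_DYNAMIC_DEBUG=y but off by default;
		 * enabled at runtime with:
		 *   echo 'func example_probe +p' > <debugfs>/dynamic_debug/control
		 */
		pr_debug("probing, flags=%#x\n", 0);
		return 0;
	}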
- config STRIP_ASM_SYMS bool "Strip assembler-generated symbols during link" default n @@ -156,12 +260,341 @@ config DEBUG_SECTION_MISMATCH - Enable verbose reporting from modpost in order to help resolve the section mismatches that are reported. +# +# Select this config option from the architecture Kconfig, if it +# is preferred to always offer frame pointers as a config +# option on the architecture (regardless of KERNEL_DEBUG): +# +config ARCH_WANT_FRAME_POINTERS + bool + help + +config FRAME_POINTER + bool "Compile the kernel with frame pointers" + depends on DEBUG_KERNEL && \ + (CRIS || M68K || FRV || UML || \ + AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \ + ARCH_WANT_FRAME_POINTERS + default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS + help + If you say Y here the resulting kernel image will be slightly + larger and slower, but it gives very useful debugging information + in case of kernel bugs. (precise oopses/stacktraces/warnings) + +config DEBUG_FORCE_WEAK_PER_CPU + bool "Force weak per-cpu definitions" + depends on DEBUG_KERNEL + help + s390 and alpha require percpu variables in modules to be + defined weak to work around addressing range issue which + puts the following two restrictions on percpu variable + definitions. + + 1. percpu symbols must be unique whether static or not + 2. percpu variables can't be defined inside a function + + To ensure that generic code follows the above rules, this + option forces all percpu variables to be defined as weak. + +endmenu # "Compiler options" + +config MAGIC_SYSRQ + bool "Magic SysRq key" + depends on !UML + help + If you say Y here, you will have some control over the system even + if the system crashes for example during kernel debugging (e.g., you + will be able to flush the buffer cache to disk, reboot the system + immediately or dump some status information). This is accomplished + by pressing various keys while holding SysRq (Alt+PrintScreen). It + also works on a serial console (on PC hardware at least), if you + send a BREAK and then within 5 seconds a command keypress. The + keys are documented in <file:Documentation/sysrq.txt>. Don't say Y + unless you really know what this hack does. + config DEBUG_KERNEL bool "Kernel debugging" help Say Y here if you are developing drivers or trying to debug and identify kernel problems. +menu "Memory Debugging" + +source mm/Kconfig.debug + +config DEBUG_OBJECTS + bool "Debug object operations" + depends on DEBUG_KERNEL + help + If you say Y here, additional code will be inserted into the + kernel to track the life time of various objects and validate + the operations on those objects. + +config DEBUG_OBJECTS_SELFTEST + bool "Debug objects selftest" + depends on DEBUG_OBJECTS + help + This enables the selftest of the object debug code. + +config DEBUG_OBJECTS_FREE + bool "Debug objects in freed memory" + depends on DEBUG_OBJECTS + help + This enables checks whether a k/v free operation frees an area + which contains an object which has not been deactivated + properly. This can make kmalloc/kfree-intensive workloads + much slower. + +config DEBUG_OBJECTS_TIMERS + bool "Debug timer objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + timer routines to track the life time of timer objects and + validate the timer operations. 
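A hedged illustration of the lifetime bug the DEBUG_OBJECTS_TIMERS option above is meant to catch; struct foo and foo_destroy() are invented for the sketch.

	#include <linux/timer.h>
	#include <linux/slab.h>

	struct foo {
		struct timer_list timer;	/* set up elsewhere and armed */
	};

	/*
	 * Hypothetical bug: the object is freed while its timer may still be
	 * pending. With timer debugobjects enabled, this is flagged as a free
	 * of an active object instead of the stale timer later firing into
	 * freed memory.
	 */
	static void foo_destroy(struct foo *f)
	{
		/* missing del_timer_sync(&f->timer); */
		kfree(f);
	}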
+ +config DEBUG_OBJECTS_WORK + bool "Debug work objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + work queue routines to track the life time of work objects and + validate the work operations. + +config DEBUG_OBJECTS_RCU_HEAD + bool "Debug RCU callbacks objects" + depends on DEBUG_OBJECTS + help + Enable this to turn on debugging of RCU list heads (call_rcu() usage). + +config DEBUG_OBJECTS_PERCPU_COUNTER + bool "Debug percpu counter objects" + depends on DEBUG_OBJECTS + help + If you say Y here, additional code will be inserted into the + percpu counter routines to track the life time of percpu counter + objects and validate the percpu counter operations. + +config DEBUG_OBJECTS_ENABLE_DEFAULT + int "debug_objects bootup default value (0-1)" + range 0 1 + default "1" + depends on DEBUG_OBJECTS + help + Debug objects boot parameter default value + +config DEBUG_SLAB + bool "Debug slab memory allocations" + depends on DEBUG_KERNEL && SLAB && !KMEMCHECK + help + Say Y here to have the kernel do limited verification on memory + allocation as well as poisoning memory on free to catch use of freed + memory. This can make kmalloc/kfree-intensive workloads much slower. + +config DEBUG_SLAB_LEAK + bool "Memory leak debugging" + depends on DEBUG_SLAB + +config SLUB_DEBUG_ON + bool "SLUB debugging on by default" + depends on SLUB && SLUB_DEBUG && !KMEMCHECK + default n + help + Boot with debugging on by default. SLUB boots by default with + the runtime debug capabilities switched off. Enabling this is + equivalent to specifying the "slub_debug" parameter on boot. + There is no support for more fine grained debug control like + possible with slub_debug=xxx. SLUB debugging may be switched + off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying + "slub_debug=-". + +config SLUB_STATS + default n + bool "Enable SLUB performance statistics" + depends on SLUB && SYSFS + help + SLUB statistics are useful to debug SLUBs allocation behavior in + order find ways to optimize the allocator. This should never be + enabled for production use since keeping statistics slows down + the allocator by a few percentage points. The slabinfo command + supports the determination of the most active slabs to figure + out which slabs are relevant to a particular load. + Try running: slabinfo -DA + +config HAVE_DEBUG_KMEMLEAK + bool + +config DEBUG_KMEMLEAK + bool "Kernel memory leak detector" + depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK + select DEBUG_FS + select STACKTRACE if STACKTRACE_SUPPORT + select KALLSYMS + select CRC32 + help + Say Y here if you want to enable the memory leak + detector. The memory allocation/freeing is traced in a way + similar to the Boehm's conservative garbage collector, the + difference being that the orphan objects are not freed but + only shown in /sys/kernel/debug/kmemleak. Enabling this + feature will introduce an overhead to memory + allocations. See Documentation/kmemleak.txt for more + details. + + Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances + of finding leaks due to the slab objects poisoning. + + In order to access the kmemleak file, debugfs needs to be + mounted (usually at /sys/kernel/debug). + +config DEBUG_KMEMLEAK_EARLY_LOG_SIZE + int "Maximum kmemleak early log entries" + depends on DEBUG_KMEMLEAK + range 200 40000 + default 400 + help + Kmemleak must track all the memory allocations to avoid + reporting false positives. 
Since memory may be allocated or + freed before kmemleak is initialised, an early log buffer is + used to store these actions. If kmemleak reports "early log + buffer exceeded", please increase this value. + +config DEBUG_KMEMLEAK_TEST + tristate "Simple test for the kernel memory leak detector" + depends on DEBUG_KMEMLEAK && m + help + This option enables a module that explicitly leaks memory. + + If unsure, say N. + +config DEBUG_KMEMLEAK_DEFAULT_OFF + bool "Default kmemleak to off" + depends on DEBUG_KMEMLEAK + help + Say Y here to disable kmemleak by default. It can then be enabled + on the command line via kmemleak=on. + +config DEBUG_STACK_USAGE + bool "Stack utilization instrumentation" + depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG + help + Enables the display of the minimum amount of free stack which each + task has ever had available in the sysrq-T and sysrq-P debug output. + + This option will slow down process creation somewhat. + +config DEBUG_VM + bool "Debug VM" + depends on DEBUG_KERNEL + help + Enable this to turn on extended checks in the virtual-memory system + that may impact performance. + + If unsure, say N. + +config DEBUG_VM_RB + bool "Debug VM red-black trees" + depends on DEBUG_VM + help + Enable this to turn on more extended checks in the virtual-memory + system that may impact performance. + + If unsure, say N. + +config DEBUG_VIRTUAL + bool "Debug VM translations" + depends on DEBUG_KERNEL && X86 + help + Enable some costly sanity checks in virtual to page code. This can + catch mistakes with virt_to_page() and friends. + + If unsure, say N. + +config DEBUG_NOMMU_REGIONS + bool "Debug the global anon/private NOMMU mapping region tree" + depends on DEBUG_KERNEL && !MMU + help + This option causes the global tree of anonymous and private mapping + regions to be regularly checked for invalid topology. + +config DEBUG_MEMORY_INIT + bool "Debug memory initialisation" if EXPERT + default !EXPERT + help + Enable this for additional checks during memory initialisation. + The sanity checks verify aspects of the VM such as the memory model + and other information provided by the architecture. Verbose + information will be printed at KERN_DEBUG loglevel depending + on the mminit_loglevel= command-line option. + + If unsure, say Y + +config MEMORY_NOTIFIER_ERROR_INJECT + tristate "Memory hotplug notifier error injection module" + depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION + help + This option provides the ability to inject artificial errors to + memory hotplug notifier chain callbacks. It is controlled through + debugfs interface under /sys/kernel/debug/notifier-error-inject/memory + + If the notifier call chain should be failed with some events + notified, write the error code to "actions/<notifier event>/error". + + Example: Inject memory hotplug offline error (-12 == -ENOMEM) + + # cd /sys/kernel/debug/notifier-error-inject/memory + # echo -12 > actions/MEM_GOING_OFFLINE/error + # echo offline > /sys/devices/system/memory/memoryXXX/state + bash: echo: write error: Cannot allocate memory + + To compile this code as a module, choose M here: the module will + be called memory-notifier-error-inject. + + If unsure, say N. + +config DEBUG_PER_CPU_MAPS + bool "Debug access to per_cpu maps" + depends on DEBUG_KERNEL + depends on SMP + help + Say Y to verify that the per_cpu map being accessed has + been set up. This adds a fair amount of code to kernel memory + and decreases performance. + + Say N if unsure. 
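And a minimal sketch of the orphaned allocation that DEBUG_KMEMLEAK, described above, reports; leaky_init() is hypothetical.

	#include <linux/slab.h>

	/*
	 * The only reference to the allocation is lost on return, so nothing
	 * can ever kfree() it. Kmemleak's scan finds no live pointer to the
	 * object and lists it in /sys/kernel/debug/kmemleak.
	 */
	static void leaky_init(void)
	{
		void *buf = kmalloc(128, GFP_KERNEL);

		if (!buf)
			return;
		/* ... buf is used here but never stored or freed ... */
	}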
+ +config DEBUG_HIGHMEM + bool "Highmem debugging" + depends on DEBUG_KERNEL && HIGHMEM + help + This options enables addition error checking for high memory systems. + Disable for production systems. + +config HAVE_DEBUG_STACKOVERFLOW + bool + +config DEBUG_STACKOVERFLOW + bool "Check for stack overflows" + depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW + ---help--- + Say Y here if you want to check for overflows of kernel, IRQ + and exception stacks (if your archicture uses them). This + option will show detailed messages if free stack space drops + below a certain limit. + + These kinds of bugs usually occur when call-chains in the + kernel get too deep, especially when interrupts are + involved. + + Use this in cases where you see apparently random memory + corruption, especially if it appears in 'struct thread_info' + + If in doubt, say "N". + +source "lib/Kconfig.kmemcheck" + +endmenu # "Memory Debugging" + config DEBUG_SHIRQ bool "Debug shared IRQ handlers" depends on DEBUG_KERNEL && GENERIC_HARDIRQS @@ -171,6 +604,8 @@ config DEBUG_SHIRQ Drivers ought to be able to handle interrupts coming in at those points; some don't and need to be caught. +menu "Debug Lockups and Hangs" + config LOCKUP_DETECTOR bool "Detect Hard and Soft Lockups" depends on DEBUG_KERNEL && !S390 @@ -242,25 +677,6 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC default 1 if BOOTPARAM_SOFTLOCKUP_PANIC -config PANIC_ON_OOPS - bool "Panic on Oops" - help - Say Y here to enable the kernel to panic when it oopses. This - has the same effect as setting oops=panic on the kernel command - line. - - This feature is useful to ensure that the kernel does not do - anything erroneous after an oops which could result in data - corruption or other issues. - - Say N if unsure. - -config PANIC_ON_OOPS_VALUE - int - range 0 1 - default 0 if !PANIC_ON_OOPS - default 1 if PANIC_ON_OOPS - config DETECT_HUNG_TASK bool "Detect Hung Tasks" depends on DEBUG_KERNEL @@ -315,6 +731,27 @@ config BOOTPARAM_HUNG_TASK_PANIC_VALUE default 0 if !BOOTPARAM_HUNG_TASK_PANIC default 1 if BOOTPARAM_HUNG_TASK_PANIC +endmenu # "Debug lockups and hangs" + +config PANIC_ON_OOPS + bool "Panic on Oops" + help + Say Y here to enable the kernel to panic when it oopses. This + has the same effect as setting oops=panic on the kernel command + line. + + This feature is useful to ensure that the kernel does not do + anything erroneous after an oops which could result in data + corruption or other issues. + + Say N if unsure. + +config PANIC_ON_OOPS_VALUE + int + range 0 1 + default 0 if !PANIC_ON_OOPS + default 1 if PANIC_ON_OOPS + config SCHED_DEBUG bool "Collect scheduler debugging info" depends on DEBUG_KERNEL && PROC_FS @@ -350,158 +787,6 @@ config TIMER_STATS (it defaults to deactivated on bootup and will only be activated if some application like powertop activates it explicitly). -config DEBUG_OBJECTS - bool "Debug object operations" - depends on DEBUG_KERNEL - help - If you say Y here, additional code will be inserted into the - kernel to track the life time of various objects and validate - the operations on those objects. - -config DEBUG_OBJECTS_SELFTEST - bool "Debug objects selftest" - depends on DEBUG_OBJECTS - help - This enables the selftest of the object debug code. - -config DEBUG_OBJECTS_FREE - bool "Debug objects in freed memory" - depends on DEBUG_OBJECTS - help - This enables checks whether a k/v free operation frees an area - which contains an object which has not been deactivated - properly. 
This can make kmalloc/kfree-intensive workloads - much slower. - -config DEBUG_OBJECTS_TIMERS - bool "Debug timer objects" - depends on DEBUG_OBJECTS - help - If you say Y here, additional code will be inserted into the - timer routines to track the life time of timer objects and - validate the timer operations. - -config DEBUG_OBJECTS_WORK - bool "Debug work objects" - depends on DEBUG_OBJECTS - help - If you say Y here, additional code will be inserted into the - work queue routines to track the life time of work objects and - validate the work operations. - -config DEBUG_OBJECTS_RCU_HEAD - bool "Debug RCU callbacks objects" - depends on DEBUG_OBJECTS - help - Enable this to turn on debugging of RCU list heads (call_rcu() usage). - -config DEBUG_OBJECTS_PERCPU_COUNTER - bool "Debug percpu counter objects" - depends on DEBUG_OBJECTS - help - If you say Y here, additional code will be inserted into the - percpu counter routines to track the life time of percpu counter - objects and validate the percpu counter operations. - -config DEBUG_OBJECTS_ENABLE_DEFAULT - int "debug_objects bootup default value (0-1)" - range 0 1 - default "1" - depends on DEBUG_OBJECTS - help - Debug objects boot parameter default value - -config DEBUG_SLAB - bool "Debug slab memory allocations" - depends on DEBUG_KERNEL && SLAB && !KMEMCHECK - help - Say Y here to have the kernel do limited verification on memory - allocation as well as poisoning memory on free to catch use of freed - memory. This can make kmalloc/kfree-intensive workloads much slower. - -config DEBUG_SLAB_LEAK - bool "Memory leak debugging" - depends on DEBUG_SLAB - -config SLUB_DEBUG_ON - bool "SLUB debugging on by default" - depends on SLUB && SLUB_DEBUG && !KMEMCHECK - default n - help - Boot with debugging on by default. SLUB boots by default with - the runtime debug capabilities switched off. Enabling this is - equivalent to specifying the "slub_debug" parameter on boot. - There is no support for more fine grained debug control like - possible with slub_debug=xxx. SLUB debugging may be switched - off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying - "slub_debug=-". - -config SLUB_STATS - default n - bool "Enable SLUB performance statistics" - depends on SLUB && SYSFS - help - SLUB statistics are useful to debug SLUBs allocation behavior in - order find ways to optimize the allocator. This should never be - enabled for production use since keeping statistics slows down - the allocator by a few percentage points. The slabinfo command - supports the determination of the most active slabs to figure - out which slabs are relevant to a particular load. - Try running: slabinfo -DA - -config HAVE_DEBUG_KMEMLEAK - bool - -config DEBUG_KMEMLEAK - bool "Kernel memory leak detector" - depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK - select DEBUG_FS - select STACKTRACE if STACKTRACE_SUPPORT - select KALLSYMS - select CRC32 - help - Say Y here if you want to enable the memory leak - detector. The memory allocation/freeing is traced in a way - similar to the Boehm's conservative garbage collector, the - difference being that the orphan objects are not freed but - only shown in /sys/kernel/debug/kmemleak. Enabling this - feature will introduce an overhead to memory - allocations. See Documentation/kmemleak.txt for more - details. - - Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances - of finding leaks due to the slab objects poisoning. - - In order to access the kmemleak file, debugfs needs to be - mounted (usually at /sys/kernel/debug). 
- -config DEBUG_KMEMLEAK_EARLY_LOG_SIZE - int "Maximum kmemleak early log entries" - depends on DEBUG_KMEMLEAK - range 200 40000 - default 400 - help - Kmemleak must track all the memory allocations to avoid - reporting false positives. Since memory may be allocated or - freed before kmemleak is initialised, an early log buffer is - used to store these actions. If kmemleak reports "early log - buffer exceeded", please increase this value. - -config DEBUG_KMEMLEAK_TEST - tristate "Simple test for the kernel memory leak detector" - depends on DEBUG_KMEMLEAK && m - help - This option enables a module that explicitly leaks memory. - - If unsure, say N. - -config DEBUG_KMEMLEAK_DEFAULT_OFF - bool "Default kmemleak to off" - depends on DEBUG_KMEMLEAK - help - Say Y here to disable kmemleak by default. It can then be enabled - on the command line via kmemleak=on. - config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT @@ -512,6 +797,8 @@ config DEBUG_PREEMPT if kernel code uses it in a preemption-unsafe way. Also, the kernel will detect preemption count underflows. +menu "Lock Debugging (spinlocks, mutexes, etc...)" + config DEBUG_RT_MUTEXES bool "RT Mutex debugging, deadlock detection" depends on DEBUG_KERNEL && RT_MUTEXES @@ -654,12 +941,6 @@ config DEBUG_LOCKDEP additional runtime checks to debug itself, at the price of more runtime overhead. -config TRACE_IRQFLAGS - bool - help - Enables hooks to interrupt enabling and disabling for - either tracing or lock debugging. - config DEBUG_ATOMIC_SLEEP bool "Sleep inside atomic section checking" select PREEMPT_COUNT @@ -681,18 +962,17 @@ config DEBUG_LOCKING_API_SELFTESTS The following locking APIs are covered: spinlocks, rwlocks, mutexes and rwsems. -config STACKTRACE - bool - depends on STACKTRACE_SUPPORT +endmenu # lock debugging -config DEBUG_STACK_USAGE - bool "Stack utilization instrumentation" - depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG +config TRACE_IRQFLAGS + bool help - Enables the display of the minimum amount of free stack which each - task has ever had available in the sysrq-T and sysrq-P debug output. + Enables hooks to interrupt enabling and disabling for + either tracing or lock debugging. - This option will slow down process creation somewhat. +config STACKTRACE + bool + depends on STACKTRACE_SUPPORT config DEBUG_KOBJECT bool "kobject debugging" @@ -701,13 +981,6 @@ config DEBUG_KOBJECT If you say Y here, some extra kobject debugging messages will be sent to the syslog. -config DEBUG_HIGHMEM - bool "Highmem debugging" - depends on DEBUG_KERNEL && HIGHMEM - help - This options enables addition error checking for high memory systems. - Disable for production systems. - config HAVE_DEBUG_BUGVERBOSE bool @@ -720,66 +993,6 @@ config DEBUG_BUGVERBOSE of the BUG call as well as the EIP and oops trace. This aids debugging but costs about 70-100K of memory. -config DEBUG_INFO - bool "Compile the kernel with debug info" - depends on DEBUG_KERNEL - help - If you say Y here the resulting kernel image will include - debugging info resulting in a larger kernel image. - This adds debug symbols to the kernel and modules (gcc -g), and - is needed if you intend to use kernel crashdump or binary object - tools like crash, kgdb, LKCD, gdb, etc on the kernel. - Say Y here only if you plan to debug the kernel. - - If unsure, say N. 
- -config DEBUG_INFO_REDUCED - bool "Reduce debugging information" - depends on DEBUG_INFO - help - If you say Y here gcc is instructed to generate less debugging - information for structure types. This means that tools that - need full debugging information (like kgdb or systemtap) won't - be happy. But if you merely need debugging information to - resolve line numbers there is no loss. Advantage is that - build directory object sizes shrink dramatically over a full - DEBUG_INFO build and compile times are reduced too. - Only works with newer gcc versions. - -config DEBUG_VM - bool "Debug VM" - depends on DEBUG_KERNEL - help - Enable this to turn on extended checks in the virtual-memory system - that may impact performance. - - If unsure, say N. - -config DEBUG_VM_RB - bool "Debug VM red-black trees" - depends on DEBUG_VM - help - Enable this to turn on more extended checks in the virtual-memory - system that may impact performance. - - If unsure, say N. - -config DEBUG_VIRTUAL - bool "Debug VM translations" - depends on DEBUG_KERNEL && X86 - help - Enable some costly sanity checks in virtual to page code. This can - catch mistakes with virt_to_page() and friends. - - If unsure, say N. - -config DEBUG_NOMMU_REGIONS - bool "Debug the global anon/private NOMMU mapping region tree" - depends on DEBUG_KERNEL && !MMU - help - This option causes the global tree of anonymous and private mapping - regions to be regularly checked for invalid topology. - config DEBUG_WRITECOUNT bool "Debug filesystem writers count" depends on DEBUG_KERNEL @@ -790,18 +1003,6 @@ config DEBUG_WRITECOUNT If unsure, say N. -config DEBUG_MEMORY_INIT - bool "Debug memory initialisation" if EXPERT - default !EXPERT - help - Enable this for additional checks during memory initialisation. - The sanity checks verify aspects of the VM such as the memory model - and other information provided by the architecture. Verbose - information will be printed at KERN_DEBUG loglevel depending - on the mminit_loglevel= command-line option. - - If unsure, say Y - config DEBUG_LIST bool "Debug linked list manipulation" depends on DEBUG_KERNEL @@ -811,15 +1012,6 @@ config DEBUG_LIST If unsure, say N. -config TEST_LIST_SORT - bool "Linked list sorting test" - depends on DEBUG_KERNEL - help - Enable this to turn on 'list_sort()' function test. This test is - executed only once during system boot, so affects only boot time. - - If unsure, say N. - config DEBUG_SG bool "Debug SG table operations" depends on DEBUG_KERNEL @@ -855,45 +1047,6 @@ config DEBUG_CREDENTIALS If unsure, say N. -# -# Select this config option from the architecture Kconfig, if it -# is preferred to always offer frame pointers as a config -# option on the architecture (regardless of KERNEL_DEBUG): -# -config ARCH_WANT_FRAME_POINTERS - bool - help - -config FRAME_POINTER - bool "Compile the kernel with frame pointers" - depends on DEBUG_KERNEL && \ - (CRIS || M68K || FRV || UML || \ - AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \ - ARCH_WANT_FRAME_POINTERS - default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS - help - If you say Y here the resulting kernel image will be slightly - larger and slower, but it gives very useful debugging information - in case of kernel bugs. 
(precise oopses/stacktraces/warnings) - -config BOOT_PRINTK_DELAY - bool "Delay each boot printk message by N milliseconds" - depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY - help - This build option allows you to read kernel boot messages - by inserting a short delay after each one. The delay is - specified in milliseconds on the kernel command line, - using "boot_delay=N". - - It is likely that you would also need to use "lpj=M" to preset - the "loops per jiffie" value. - See a previous boot log for the "lpj" value to use for your - system, and then set "lpj=M" before setting "boot_delay=N". - NOTE: Using this option may adversely affect SMP systems. - I.e., processors other than the first one may not boot up. - BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect - what it believes to be lockup conditions. - menu "RCU Debugging" config PROVE_RCU @@ -1032,33 +1185,6 @@ config RCU_TRACE endmenu # "RCU Debugging" -config KPROBES_SANITY_TEST - bool "Kprobes sanity tests" - depends on DEBUG_KERNEL - depends on KPROBES - default n - help - This option provides for testing basic kprobes functionality on - boot. A sample kprobe, jprobe and kretprobe are inserted and - verified for functionality. - - Say N if you are unsure. - -config BACKTRACE_SELF_TEST - tristate "Self test for the backtrace code" - depends on DEBUG_KERNEL - default n - help - This option provides a kernel module that can be used to test - the kernel stack backtrace code. This option is not useful - for distributions or general kernels, but only for kernel - developers working on architecture code. - - Note that if you want to also test saved backtraces, you will - have to enable STACKTRACE as well. - - Say N if you are unsure. - config DEBUG_BLOCK_EXT_DEVT bool "Force extended block device numbers and spread them" depends on DEBUG_KERNEL @@ -1086,47 +1212,6 @@ config DEBUG_BLOCK_EXT_DEVT Say N if you are unsure. -config DEBUG_FORCE_WEAK_PER_CPU - bool "Force weak per-cpu definitions" - depends on DEBUG_KERNEL - help - s390 and alpha require percpu variables in modules to be - defined weak to work around addressing range issue which - puts the following two restrictions on percpu variable - definitions. - - 1. percpu symbols must be unique whether static or not - 2. percpu variables can't be defined inside a function - - To ensure that generic code follows the above rules, this - option forces all percpu variables to be defined as weak. - -config DEBUG_PER_CPU_MAPS - bool "Debug access to per_cpu maps" - depends on DEBUG_KERNEL - depends on SMP - help - Say Y to verify that the per_cpu map being accessed has - been set up. This adds a fair amount of code to kernel memory - and decreases performance. - - Say N if unsure. - -config LKDTM - tristate "Linux Kernel Dump Test Tool Module" - depends on DEBUG_FS - depends on BLOCK - default n - help - This module enables testing of the different dumping mechanisms by - inducing system failures at predefined crash points. - If you don't need it: say N - Choose M here to compile this code as a module. The module will be - called lkdtm. - - Documentation on how to use the module can be found in - Documentation/fault-injection/provoke-crashes.txt - config NOTIFIER_ERROR_INJECTION tristate "Notifier error injection" depends on DEBUG_KERNEL @@ -1186,29 +1271,6 @@ config PM_NOTIFIER_ERROR_INJECT If unsure, say N. 
-config MEMORY_NOTIFIER_ERROR_INJECT - tristate "Memory hotplug notifier error injection module" - depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION - help - This option provides the ability to inject artificial errors to - memory hotplug notifier chain callbacks. It is controlled through - debugfs interface under /sys/kernel/debug/notifier-error-inject/memory - - If the notifier call chain should be failed with some events - notified, write the error code to "actions/<notifier event>/error". - - Example: Inject memory hotplug offline error (-12 == -ENOMEM) - - # cd /sys/kernel/debug/notifier-error-inject/memory - # echo -12 > actions/MEM_GOING_OFFLINE/error - # echo offline > /sys/devices/system/memory/memoryXXX/state - bash: echo: write error: Cannot allocate memory - - To compile this code as a module, choose M here: the module will - be called memory-notifier-error-inject. - - If unsure, say N. - config OF_RECONFIG_NOTIFIER_ERROR_INJECT tristate "OF reconfig notifier error injection module" depends on OF_DYNAMIC && NOTIFIER_ERROR_INJECTION @@ -1285,7 +1347,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT depends on !X86_64 select STACKTRACE - select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND + select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND help Provide stacktrace filter for fault-injection capabilities @@ -1323,9 +1385,61 @@ config DEBUG_STRICT_USER_COPY_CHECKS If unsure, say N. -source mm/Kconfig.debug source kernel/trace/Kconfig +menu "Runtime Testing" + +config LKDTM + tristate "Linux Kernel Dump Test Tool Module" + depends on DEBUG_FS + depends on BLOCK + default n + help + This module enables testing of the different dumping mechanisms by + inducing system failures at predefined crash points. + If you don't need it: say N + Choose M here to compile this code as a module. The module will be + called lkdtm. + + Documentation on how to use the module can be found in + Documentation/fault-injection/provoke-crashes.txt + +config TEST_LIST_SORT + bool "Linked list sorting test" + depends on DEBUG_KERNEL + help + Enable this to turn on 'list_sort()' function test. This test is + executed only once during system boot, so affects only boot time. + + If unsure, say N. + +config KPROBES_SANITY_TEST + bool "Kprobes sanity tests" + depends on DEBUG_KERNEL + depends on KPROBES + default n + help + This option provides for testing basic kprobes functionality on + boot. A sample kprobe, jprobe and kretprobe are inserted and + verified for functionality. + + Say N if you are unsure. + +config BACKTRACE_SELF_TEST + tristate "Self test for the backtrace code" + depends on DEBUG_KERNEL + default n + help + This option provides a kernel module that can be used to test + the kernel stack backtrace code. This option is not useful + for distributions or general kernels, but only for kernel + developers working on architecture code. + + Note that if you want to also test saved backtraces, you will + have to enable STACKTRACE as well. + + Say N if you are unsure. + config RBTREE_TEST tristate "Red-Black tree test" depends on m && DEBUG_KERNEL @@ -1339,6 +1453,34 @@ config INTERVAL_TREE_TEST help A benchmark measuring the performance of the interval tree library +config ATOMIC64_SELFTEST + bool "Perform an atomic64_t self-test at boot" + help + Enable this option to test the atomic64_t functions at boot. + + If unsure, say N. 
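For reference, the flavor of operation the ATOMIC64_SELFTEST option above exercises at boot, sketched with invented values (the real test is built from lib/ when the option is enabled).

	#include <linux/atomic.h>
	#include <linux/bug.h>

	/* Illustrative only: basic atomic64_t operations and their
	 * expected results. */
	static void atomic64_demo(void)
	{
		atomic64_t v = ATOMIC64_INIT(0);

		atomic64_add(10, &v);
		/* cmpxchg returns the old value; here it must see 10. */
		WARN_ON(atomic64_cmpxchg(&v, 10, 42) != 10);
		WARN_ON(atomic64_read(&v) != 42);
	}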
+ +config ASYNC_RAID6_TEST + tristate "Self test for hardware accelerated raid6 recovery" + depends on ASYNC_RAID6_RECOV + select ASYNC_MEMCPY + ---help--- + This is a one-shot self test that permutes through the + recovery of all the possible two disk failure scenarios for a + N-disk array. Recovery is performed with the asynchronous + raid6 recovery routines, and will optionally use an offload + engine if one is available. + + If unsure, say N. + +config TEST_STRING_HELPERS + tristate "Test functions located in the string_helpers module at runtime" + +config TEST_KSTRTOX + tristate "Test kstrto*() family of functions at runtime" + +endmenu # runtime tests + config PROVIDE_OHCI1394_DMA_INIT bool "Remote debugging over FireWire early on boot" depends on PCI && X86 @@ -1388,75 +1530,6 @@ config BUILD_DOCSRC Say N if you are unsure. -config DYNAMIC_DEBUG - bool "Enable dynamic printk() support" - default n - depends on PRINTK - depends on DEBUG_FS - help - - Compiles debug level messages into the kernel, which would not - otherwise be available at runtime. These messages can then be - enabled/disabled based on various levels of scope - per source file, - function, module, format string, and line number. This mechanism - implicitly compiles in all pr_debug() and dev_dbg() calls, which - enlarges the kernel text size by about 2%. - - If a source file is compiled with DEBUG flag set, any - pr_debug() calls in it are enabled by default, but can be - disabled at runtime as below. Note that DEBUG flag is - turned on by many CONFIG_*DEBUG* options. - - Usage: - - Dynamic debugging is controlled via the 'dynamic_debug/control' file, - which is contained in the 'debugfs' filesystem. Thus, the debugfs - filesystem must first be mounted before making use of this feature. - We refer the control file as: <debugfs>/dynamic_debug/control. This - file contains a list of the debug statements that can be enabled. The - format for each line of the file is: - - filename:lineno [module]function flags format - - filename : source file of the debug statement - lineno : line number of the debug statement - module : module that contains the debug statement - function : function that contains the debug statement - flags : '=p' means the line is turned 'on' for printing - format : the format used for the debug statement - - From a live system: - - nullarbor:~ # cat <debugfs>/dynamic_debug/control - # filename:lineno [module]function flags format - fs/aio.c:222 [aio]__put_ioctx =_ "__put_ioctx:\040freeing\040%p\012" - fs/aio.c:248 [aio]ioctx_alloc =_ "ENOMEM:\040nr_events\040too\040high\012" - fs/aio.c:1770 [aio]sys_io_cancel =_ "calling\040cancel\012" - - Example usage: - - // enable the message at line 1603 of file svcsock.c - nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' > - <debugfs>/dynamic_debug/control - - // enable all the messages in file svcsock.c - nullarbor:~ # echo -n 'file svcsock.c +p' > - <debugfs>/dynamic_debug/control - - // enable all the messages in the NFS server module - nullarbor:~ # echo -n 'module nfsd +p' > - <debugfs>/dynamic_debug/control - - // enable all 12 messages in the function svc_process() - nullarbor:~ # echo -n 'func svc_process +p' > - <debugfs>/dynamic_debug/control - - // disable all 12 messages in the function svc_process() - nullarbor:~ # echo -n 'func svc_process -p' > - <debugfs>/dynamic_debug/control - - See Documentation/dynamic-debug-howto.txt for additional information. 
- config DMA_API_DEBUG bool "Enable debugging of DMA-API usage" depends on HAVE_DMA_API_DEBUG @@ -1468,34 +1541,7 @@ config DMA_API_DEBUG This option causes a performance degredation. Use only if you want to debug device drivers. If unsure, say N. -config ATOMIC64_SELFTEST - bool "Perform an atomic64_t self-test at boot" - help - Enable this option to test the atomic64_t functions at boot. - - If unsure, say N. - -config ASYNC_RAID6_TEST - tristate "Self test for hardware accelerated raid6 recovery" - depends on ASYNC_RAID6_RECOV - select ASYNC_MEMCPY - ---help--- - This is a one-shot self test that permutes through the - recovery of all the possible two disk failure scenarios for a - N-disk array. Recovery is performed with the asynchronous - raid6 recovery routines, and will optionally use an offload - engine if one is available. - - If unsure, say N. - source "samples/Kconfig" source "lib/Kconfig.kgdb" -source "lib/Kconfig.kmemcheck" - -config TEST_STRING_HELPERS - tristate "Test functions located in the string_helpers module at runtime" - -config TEST_KSTRTOX - tristate "Test kstrto*() family of functions at runtime" diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 140e87824173..358eb81fa28d 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -64,8 +64,8 @@ config KGDB_LOW_LEVEL_TRAP default n help This will add an extra call back to kgdb for the breakpoint - exception handler on which will will allow kgdb to step - through a notify handler. + exception handler which will allow kgdb to step through a + notify handler. config KGDB_KDB bool "KGDB_KDB: include kdb frontend for kgdb" diff --git a/lib/Makefile b/lib/Makefile index c09e38eca87a..7baccfd8a4e9 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -23,7 +23,7 @@ lib-y += kobject.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o @@ -75,6 +75,9 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ obj-$(CONFIG_BCH) += bch.o obj-$(CONFIG_LZO_COMPRESS) += lzo/ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ +obj-$(CONFIG_LZ4_COMPRESS) += lz4/ +obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ +obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ obj-$(CONFIG_XZ_DEC) += xz/ obj-$(CONFIG_RAID6_PQ) += raid6/ @@ -83,6 +86,7 @@ lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o +lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c new file mode 100644 index 000000000000..a8f8379eb49f --- /dev/null +++ b/lib/clz_ctz.c @@ -0,0 +1,58 @@ +/* + * lib/clz_ctz.c + * + * Copyright (C) 2013 Chanho Min <chanho.min@lge.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. 
+ */ + +#include <linux/export.h> +#include <linux/kernel.h> + +int __weak __ctzsi2(int val) +{ + return __ffs(val); +} +EXPORT_SYMBOL(__ctzsi2); + +int __weak __clzsi2(int val) +{ + return 32 - fls(val); +} +EXPORT_SYMBOL(__clzsi2); + +#if BITS_PER_LONG == 32 + +int __weak __clzdi2(long val) +{ + return 32 - fls((int)val); +} +EXPORT_SYMBOL(__clzdi2); + +int __weak __ctzdi2(long val) +{ + return __ffs((u32)val); +} +EXPORT_SYMBOL(__ctzdi2); + +#elif BITS_PER_LONG == 64 + +int __weak __clzdi2(long val) +{ + return 64 - fls64((u64)val); +} +EXPORT_SYMBOL(__clzdi2); + +int __weak __ctzdi2(long val) +{ + return __ffs64((u64)val); +} +EXPORT_SYMBOL(__ctzdi2); + +#else +#error BITS_PER_LONG not 32 or 64 +#endif diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index fbbd66ed86cd..fe3428c07b47 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -11,57 +11,44 @@ #include <linux/types.h> #include <linux/module.h> #include <linux/crc-t10dif.h> +#include <linux/err.h> +#include <linux/init.h> +#include <crypto/hash.h> -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; +static struct crypto_shash *crct10dif_tfm; __u16 crc_t10dif(const unsigned char *buffer, size_t len) { - __u16 crc = 0; - unsigned int i; + struct { + struct shash_desc shash; + char ctx[2]; + } desc; + int err; + + 
desc.shash.tfm = crct10dif_tfm; + desc.shash.flags = 0; + *(__u16 *)desc.ctx = 0; - for (i = 0 ; i < len ; i++) - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; + err = crypto_shash_update(&desc.shash, buffer, len); + BUG_ON(err); - return crc; + return *(__u16 *)desc.ctx; } EXPORT_SYMBOL(crc_t10dif); +static int __init crc_t10dif_mod_init(void) +{ + crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); + return PTR_RET(crct10dif_tfm); +} + +static void __exit crc_t10dif_mod_fini(void) +{ + crypto_free_shash(crct10dif_tfm); +} + +module_init(crc_t10dif_mod_init); +module_exit(crc_t10dif_mod_fini); + MODULE_DESCRIPTION("T10 DIF CRC calculation"); MODULE_LICENSE("GPL"); diff --git a/lib/decompress.c b/lib/decompress.c index f8fdedaf7b3d..4d1cd0397aab 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -11,6 +11,7 @@ #include <linux/decompress/unxz.h> #include <linux/decompress/inflate.h> #include <linux/decompress/unlzo.h> +#include <linux/decompress/unlz4.h> #include <linux/types.h> #include <linux/string.h> @@ -31,6 +32,9 @@ #ifndef CONFIG_DECOMPRESS_LZO # define unlzo NULL #endif +#ifndef CONFIG_DECOMPRESS_LZ4 +# define unlz4 NULL +#endif struct compress_format { unsigned char magic[2]; @@ -45,6 +49,7 @@ static const struct compress_format compressed_formats[] __initconst = { { {0x5d, 0x00}, "lzma", unlzma }, { {0xfd, 0x37}, "xz", unxz }, { {0x89, 0x4c}, "lzo", unlzo }, + { {0x02, 0x21}, "lz4", unlz4 }, { {0, 0}, NULL, NULL } }; diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c new file mode 100644 index 000000000000..3e67cfad16ad --- /dev/null +++ b/lib/decompress_unlz4.c @@ -0,0 +1,187 @@ +/* + * Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifdef STATIC +#define PREBOOT +#include "lz4/lz4_decompress.c" +#else +#include <linux/decompress/unlz4.h> +#endif +#include <linux/types.h> +#include <linux/lz4.h> +#include <linux/decompress/mm.h> +#include <linux/compiler.h> + +#include <asm/unaligned.h> + +/* + * Note: Uncompressed chunk size is used in the compressor side + * (userspace side for compression). + * It is hardcoded because there is not proper way to extract it + * from the binary stream which is generated by the preliminary + * version of LZ4 tool so far. 
+ */ +#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20) +#define ARCHIVE_MAGICNUMBER 0x184C2102 + +STATIC inline int INIT unlz4(u8 *input, int in_len, + int (*fill) (void *, unsigned int), + int (*flush) (void *, unsigned int), + u8 *output, int *posp, + void (*error) (char *x)) +{ + int ret = -1; + size_t chunksize = 0; + size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE; + u8 *inp; + u8 *inp_start; + u8 *outp; + int size = in_len; +#ifdef PREBOOT + size_t out_len = get_unaligned_le32(input + in_len); +#endif + size_t dest_len; + + + if (output) { + outp = output; + } else if (!flush) { + error("NULL output pointer and no flush function provided"); + goto exit_0; + } else { + outp = large_malloc(uncomp_chunksize); + if (!outp) { + error("Could not allocate output buffer"); + goto exit_0; + } + } + + if (input && fill) { + error("Both input pointer and fill function provided,"); + goto exit_1; + } else if (input) { + inp = input; + } else if (!fill) { + error("NULL input pointer and missing fill function"); + goto exit_1; + } else { + inp = large_malloc(lz4_compressbound(uncomp_chunksize)); + if (!inp) { + error("Could not allocate input buffer"); + goto exit_1; + } + } + inp_start = inp; + + if (posp) + *posp = 0; + + if (fill) + fill(inp, 4); + + chunksize = get_unaligned_le32(inp); + if (chunksize == ARCHIVE_MAGICNUMBER) { + inp += 4; + size -= 4; + } else { + error("invalid header"); + goto exit_2; + } + + if (posp) + *posp += 4; + + for (;;) { + + if (fill) + fill(inp, 4); + + chunksize = get_unaligned_le32(inp); + if (chunksize == ARCHIVE_MAGICNUMBER) { + inp += 4; + size -= 4; + if (posp) + *posp += 4; + continue; + } + inp += 4; + size -= 4; + + if (posp) + *posp += 4; + + if (fill) { + if (chunksize > lz4_compressbound(uncomp_chunksize)) { + error("chunk length is longer than allocated"); + goto exit_2; + } + fill(inp, chunksize); + } +#ifdef PREBOOT + if (out_len >= uncomp_chunksize) { + dest_len = uncomp_chunksize; + out_len -= dest_len; + } else + dest_len = out_len; + ret = lz4_decompress(inp, &chunksize, outp, dest_len); +#else + dest_len = uncomp_chunksize; + ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, + &dest_len); +#endif + if (ret < 0) { + error("Decoding failed"); + goto exit_2; + } + + if (flush && flush(outp, dest_len) != dest_len) + goto exit_2; + if (output) + outp += dest_len; + if (posp) + *posp += chunksize; + + size -= chunksize; + + if (size == 0) + break; + else if (size < 0) { + error("data corrupted"); + goto exit_2; + } + + inp += chunksize; + if (fill) + inp = inp_start; + } + + ret = 0; +exit_2: + if (!input) + large_free(inp_start); +exit_1: + if (!output) + large_free(outp); +exit_0: + return ret; +} + +#ifdef PREBOOT +STATIC int INIT decompress(unsigned char *buf, int in_len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *posp, + void(*error)(char *x) + ) +{ + return unlz4(buf, in_len - 4, fill, flush, output, posp, error); +} +#endif diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile new file mode 100644 index 000000000000..8085d04e9309 --- /dev/null +++ b/lib/lz4/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o +obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o +obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c new file mode 100644 index 000000000000..fd94058bd7f9 --- /dev/null +++ b/lib/lz4/lz4_compress.c @@ -0,0 +1,443 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 
2011-2012, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at : + * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + * - LZ4 source repository : http://code.google.com/p/lz4/ + * + * Changed for kernel use by: + * Chanho Min <chanho.min@lge.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/lz4.h> +#include <asm/unaligned.h> +#include "lz4defs.h" + +/* + * LZ4_compressCtx : + * ----------------- + * Compress 'isize' bytes from 'source' into an output buffer 'dest' of + * maximum size 'maxOutputSize'. * If it cannot achieve it, compression + * will stop, and result of the function will be zero. 
+ * return : the number of bytes written in buffer 'dest', or 0 if the + * compression fails + */ +static inline int lz4_compressctx(void *ctx, + const char *source, + char *dest, + int isize, + int maxoutputsize) +{ + HTYPE *hashtable = (HTYPE *)ctx; + const u8 *ip = (u8 *)source; +#if LZ4_ARCH64 + const BYTE * const base = ip; +#else + const int base = 0; +#endif + const u8 *anchor = ip; + const u8 *const iend = ip + isize; + const u8 *const mflimit = iend - MFLIMIT; + #define MATCHLIMIT (iend - LASTLITERALS) + + u8 *op = (u8 *) dest; + u8 *const oend = op + maxoutputsize; + int length; + const int skipstrength = SKIPSTRENGTH; + u32 forwardh; + int lastrun; + + /* Init */ + if (isize < MINLENGTH) + goto _last_literals; + + memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); + + /* First Byte */ + hashtable[LZ4_HASH_VALUE(ip)] = ip - base; + ip++; + forwardh = LZ4_HASH_VALUE(ip); + + /* Main Loop */ + for (;;) { + int findmatchattempts = (1U << skipstrength) + 3; + const u8 *forwardip = ip; + const u8 *ref; + u8 *token; + + /* Find a match */ + do { + u32 h = forwardh; + int step = findmatchattempts++ >> skipstrength; + ip = forwardip; + forwardip = ip + step; + + if (unlikely(forwardip > mflimit)) + goto _last_literals; + + forwardh = LZ4_HASH_VALUE(forwardip); + ref = base + hashtable[h]; + hashtable[h] = ip - base; + } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); + + /* Catch up */ + while ((ip > anchor) && (ref > (u8 *)source) && + unlikely(ip[-1] == ref[-1])) { + ip--; + ref--; + } + + /* Encode Literal length */ + length = (int)(ip - anchor); + token = op++; + /* check output limit */ + if (unlikely(op + length + (2 + 1 + LASTLITERALS) + + (length >> 8) > oend)) + return 0; + + if (length >= (int)RUN_MASK) { + int len; + *token = (RUN_MASK << ML_BITS); + len = length - RUN_MASK; + for (; len > 254 ; len -= 255) + *op++ = 255; + *op++ = (u8)len; + } else + *token = (length << ML_BITS); + + /* Copy Literals */ + LZ4_BLINDCOPY(anchor, op, length); +_next_match: + /* Encode Offset */ + LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); + + /* Start Counting */ + ip += MINMATCH; + /* MinMatch verified */ + ref += MINMATCH; + anchor = ip; + while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) { + #if LZ4_ARCH64 + u64 diff = A64(ref) ^ A64(ip); + #else + u32 diff = A32(ref) ^ A32(ip); + #endif + if (!diff) { + ip += STEPSIZE; + ref += STEPSIZE; + continue; + } + ip += LZ4_NBCOMMONBYTES(diff); + goto _endcount; + } + #if LZ4_ARCH64 + if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { + ip += 4; + ref += 4; + } + #endif + if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { + ip += 2; + ref += 2; + } + if ((ip < MATCHLIMIT) && (*ref == *ip)) + ip++; +_endcount: + /* Encode MatchLength */ + length = (int)(ip - anchor); + /* Check output limit */ + if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend)) + return 0; + if (length >= (int)ML_MASK) { + *token += ML_MASK; + length -= ML_MASK; + for (; length > 509 ; length -= 510) { + *op++ = 255; + *op++ = 255; + } + if (length > 254) { + length -= 255; + *op++ = 255; + } + *op++ = (u8)length; + } else + *token += length; + + /* Test end of chunk */ + if (ip > mflimit) { + anchor = ip; + break; + } + + /* Fill table */ + hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; + + /* Test next position */ + ref = base + hashtable[LZ4_HASH_VALUE(ip)]; + hashtable[LZ4_HASH_VALUE(ip)] = ip - base; + if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { + token = op++; + *token = 0; + goto _next_match; + } + + /* Prepare next loop 
*/ + anchor = ip++; + forwardh = LZ4_HASH_VALUE(ip); + } + +_last_literals: + /* Encode Last Literals */ + lastrun = (int)(iend - anchor); + if (((char *)op - dest) + lastrun + 1 + + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize) + return 0; + + if (lastrun >= (int)RUN_MASK) { + *op++ = (RUN_MASK << ML_BITS); + lastrun -= RUN_MASK; + for (; lastrun > 254 ; lastrun -= 255) + *op++ = 255; + *op++ = (u8)lastrun; + } else + *op++ = (lastrun << ML_BITS); + memcpy(op, anchor, iend - anchor); + op += iend - anchor; + + /* End */ + return (int)(((char *)op) - dest); +} + +static inline int lz4_compress64kctx(void *ctx, + const char *source, + char *dest, + int isize, + int maxoutputsize) +{ + u16 *hashtable = (u16 *)ctx; + const u8 *ip = (u8 *) source; + const u8 *anchor = ip; + const u8 *const base = ip; + const u8 *const iend = ip + isize; + const u8 *const mflimit = iend - MFLIMIT; + #define MATCHLIMIT (iend - LASTLITERALS) + + u8 *op = (u8 *) dest; + u8 *const oend = op + maxoutputsize; + int len, length; + const int skipstrength = SKIPSTRENGTH; + u32 forwardh; + int lastrun; + + /* Init */ + if (isize < MINLENGTH) + goto _last_literals; + + memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); + + /* First Byte */ + ip++; + forwardh = LZ4_HASH64K_VALUE(ip); + + /* Main Loop */ + for (;;) { + int findmatchattempts = (1U << skipstrength) + 3; + const u8 *forwardip = ip; + const u8 *ref; + u8 *token; + + /* Find a match */ + do { + u32 h = forwardh; + int step = findmatchattempts++ >> skipstrength; + ip = forwardip; + forwardip = ip + step; + + if (forwardip > mflimit) + goto _last_literals; + + forwardh = LZ4_HASH64K_VALUE(forwardip); + ref = base + hashtable[h]; + hashtable[h] = (u16)(ip - base); + } while (A32(ref) != A32(ip)); + + /* Catch up */ + while ((ip > anchor) && (ref > (u8 *)source) + && (ip[-1] == ref[-1])) { + ip--; + ref--; + } + + /* Encode Literal length */ + length = (int)(ip - anchor); + token = op++; + /* Check output limit */ + if (unlikely(op + length + (2 + 1 + LASTLITERALS) + + (length >> 8) > oend)) + return 0; + if (length >= (int)RUN_MASK) { + *token = (RUN_MASK << ML_BITS); + len = length - RUN_MASK; + for (; len > 254 ; len -= 255) + *op++ = 255; + *op++ = (u8)len; + } else + *token = (length << ML_BITS); + + /* Copy Literals */ + LZ4_BLINDCOPY(anchor, op, length); + +_next_match: + /* Encode Offset */ + LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); + + /* Start Counting */ + ip += MINMATCH; + /* MinMatch verified */ + ref += MINMATCH; + anchor = ip; + + while (ip < MATCHLIMIT - (STEPSIZE - 1)) { + #if LZ4_ARCH64 + u64 diff = A64(ref) ^ A64(ip); + #else + u32 diff = A32(ref) ^ A32(ip); + #endif + + if (!diff) { + ip += STEPSIZE; + ref += STEPSIZE; + continue; + } + ip += LZ4_NBCOMMONBYTES(diff); + goto _endcount; + } + #if LZ4_ARCH64 + if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { + ip += 4; + ref += 4; + } + #endif + if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { + ip += 2; + ref += 2; + } + if ((ip < MATCHLIMIT) && (*ref == *ip)) + ip++; +_endcount: + + /* Encode MatchLength */ + len = (int)(ip - anchor); + /* Check output limit */ + if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)) + return 0; + if (len >= (int)ML_MASK) { + *token += ML_MASK; + len -= ML_MASK; + for (; len > 509 ; len -= 510) { + *op++ = 255; + *op++ = 255; + } + if (len > 254) { + len -= 255; + *op++ = 255; + } + *op++ = (u8)len; + } else + *token += len; + + /* Test end of chunk */ + if (ip > mflimit) { + anchor = ip; + break; + } + + /* Fill table */ + 
hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base); + + /* Test next position */ + ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; + hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base); + if (A32(ref) == A32(ip)) { + token = op++; + *token = 0; + goto _next_match; + } + + /* Prepare next loop */ + anchor = ip++; + forwardh = LZ4_HASH64K_VALUE(ip); + } + +_last_literals: + /* Encode Last Literals */ + lastrun = (int)(iend - anchor); + if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend) + return 0; + if (lastrun >= (int)RUN_MASK) { + *op++ = (RUN_MASK << ML_BITS); + lastrun -= RUN_MASK; + for (; lastrun > 254 ; lastrun -= 255) + *op++ = 255; + *op++ = (u8)lastrun; + } else + *op++ = (lastrun << ML_BITS); + memcpy(op, anchor, iend - anchor); + op += iend - anchor; + /* End */ + return (int)(((char *)op) - dest); +} + +int lz4_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem) +{ + int ret = -1; + int out_len = 0; + + if (src_len < LZ4_64KLIMIT) + out_len = lz4_compress64kctx(wrkmem, src, dst, src_len, + lz4_compressbound(src_len)); + else + out_len = lz4_compressctx(wrkmem, src, dst, src_len, + lz4_compressbound(src_len)); + + if (out_len < 0) + goto exit; + + *dst_len = out_len; + + return 0; +exit: + return ret; +} +EXPORT_SYMBOL_GPL(lz4_compress); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZ4 compressor"); diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c new file mode 100644 index 000000000000..d3414eae73a1 --- /dev/null +++ b/lib/lz4/lz4_decompress.c @@ -0,0 +1,326 @@ +/* + * LZ4 Decompressor for Linux kernel + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * + * Based on LZ4 implementation by Yann Collet. + * + * LZ4 - Fast LZ compression algorithm + * Copyright (C) 2011-2012, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * You can contact the author at : + * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + * - LZ4 source repository : http://code.google.com/p/lz4/ + */ + +#ifndef STATIC +#include <linux/module.h> +#include <linux/kernel.h> +#endif +#include <linux/lz4.h> + +#include <asm/unaligned.h> + +#include "lz4defs.h" + +static int lz4_uncompress(const char *source, char *dest, int osize) +{ + const BYTE *ip = (const BYTE *) source; + const BYTE *ref; + BYTE *op = (BYTE *) dest; + BYTE * const oend = op + osize; + BYTE *cpy; + unsigned token; + size_t length; + size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; +#if LZ4_ARCH64 + size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; +#endif + + while (1) { + + /* get runlength */ + token = *ip++; + length = (token >> ML_BITS); + if (length == RUN_MASK) { + size_t len; + + len = *ip++; + for (; len == 255; length += 255) + len = *ip++; + length += len; + } + + /* copy literals */ + cpy = op + length; + if (unlikely(cpy > oend - COPYLENGTH)) { + /* + * Error: not enough place for another match + * (min 4) + 5 literals + */ + if (cpy != oend) + goto _output_error; + + memcpy(op, ip, length); + ip += length; + break; /* EOF */ + } + LZ4_WILDCOPY(ip, op, cpy); + ip -= (op - cpy); + op = cpy; + + /* get offset */ + LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); + ip += 2; + + /* Error: offset create reference outside destination buffer */ + if (unlikely(ref < (BYTE *const) dest)) + goto _output_error; + + /* get matchlength */ + length = token & ML_MASK; + if (length == ML_MASK) { + for (; *ip == 255; length += 255) + ip++; + length += *ip++; + } + + /* copy repeated sequence */ + if (unlikely((op - ref) < STEPSIZE)) { +#if LZ4_ARCH64 + size_t dec64 = dec64table[op - ref]; +#else + const int dec64 = 0; +#endif + op[0] = ref[0]; + op[1] = ref[1]; + op[2] = ref[2]; + op[3] = ref[3]; + op += 4; + ref += 4; + ref -= dec32table[op-ref]; + PUT4(ref, op); + op += STEPSIZE - 4; + ref -= dec64; + } else { + LZ4_COPYSTEP(ref, op); + } + cpy = op + length - (STEPSIZE - 4); + if (cpy > (oend - COPYLENGTH)) { + + /* Error: request to write beyond destination buffer */ + if (cpy > oend) + goto _output_error; + LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); + while (op < cpy) + *op++ = *ref++; + op = cpy; + /* + * Check EOF (should never happen, since last 5 bytes + * are supposed to be literals) + */ + if (op == oend) + goto _output_error; + continue; + } + LZ4_SECURECOPY(ref, op, cpy); + op = cpy; /* correction */ + } + /* end of decoding */ + return (int) (((char *)ip) - source); + + /* write overflow error detected */ +_output_error: + return (int) (-(((char *)ip) - source)); +} + +static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, + int isize, size_t maxoutputsize) +{ + const BYTE *ip = (const BYTE *) source; + const BYTE *const iend = ip + isize; + const BYTE *ref; + + + BYTE *op = (BYTE *) dest; + BYTE * const oend = op + maxoutputsize; + BYTE *cpy; + + size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; +#if LZ4_ARCH64 + size_t dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; +#endif + + /* Main Loop */ + while (ip < iend) { + + unsigned token; + size_t length; + + /* get runlength */ + token = *ip++; + length = (token >> ML_BITS); + if (length == RUN_MASK) { + int s = 255; + while ((ip < iend) && (s == 255)) { + s = *ip++; + length += s; + } + } + /* copy literals */ + cpy = op + length; + if ((cpy > oend - COPYLENGTH) || + (ip + length > iend - COPYLENGTH)) { + + if (cpy > oend) + goto _output_error;/* writes beyond buffer */ + + if (ip + 
length != iend) + goto _output_error;/* + * Error: LZ4 format requires + * to consume all input + * at this stage + */ + memcpy(op, ip, length); + op += length; + break;/* Necessarily EOF, due to parsing restrictions */ + } + LZ4_WILDCOPY(ip, op, cpy); + ip -= (op - cpy); + op = cpy; + + /* get offset */ + LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); + ip += 2; + if (ref < (BYTE * const) dest) + goto _output_error; + /* + * Error : offset creates reference + * outside of destination buffer + */ + + /* get matchlength */ + length = (token & ML_MASK); + if (length == ML_MASK) { + while (ip < iend) { + int s = *ip++; + length += s; + if (s == 255) + continue; + break; + } + } + + /* copy repeated sequence */ + if (unlikely((op - ref) < STEPSIZE)) { +#if LZ4_ARCH64 + size_t dec64 = dec64table[op - ref]; +#else + const int dec64 = 0; +#endif + op[0] = ref[0]; + op[1] = ref[1]; + op[2] = ref[2]; + op[3] = ref[3]; + op += 4; + ref += 4; + ref -= dec32table[op - ref]; + PUT4(ref, op); + op += STEPSIZE - 4; + ref -= dec64; + } else { + LZ4_COPYSTEP(ref, op); + } + cpy = op + length - (STEPSIZE-4); + if (cpy > oend - COPYLENGTH) { + if (cpy > oend) + goto _output_error; /* write outside of buf */ + + LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); + while (op < cpy) + *op++ = *ref++; + op = cpy; + /* + * Check EOF (should never happen, since last 5 bytes + * are supposed to be literals) + */ + if (op == oend) + goto _output_error; + continue; + } + LZ4_SECURECOPY(ref, op, cpy); + op = cpy; /* correction */ + } + /* end of decoding */ + return (int) (((char *) op) - dest); + + /* write overflow error detected */ +_output_error: + return (int) (-(((char *) ip) - source)); +} + +int lz4_decompress(const char *src, size_t *src_len, char *dest, + size_t actual_dest_len) +{ + int ret = -1; + int input_len = 0; + + input_len = lz4_uncompress(src, dest, actual_dest_len); + if (input_len < 0) + goto exit_0; + *src_len = input_len; + + return 0; +exit_0: + return ret; +} +#ifndef STATIC +EXPORT_SYMBOL_GPL(lz4_decompress); +#endif + +int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, + char *dest, size_t *dest_len) +{ + int ret = -1; + int out_len = 0; + + out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len, + *dest_len); + if (out_len < 0) + goto exit_0; + *dest_len = out_len; + + return 0; +exit_0: + return ret; +} +#ifndef STATIC +EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZ4 Decompressor"); +#endif diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h new file mode 100644 index 000000000000..abcecdc2d0f2 --- /dev/null +++ b/lib/lz4/lz4defs.h @@ -0,0 +1,156 @@ +/* + * lz4defs.h -- architecture specific defines + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* + * Detects 64 bits mode + */ +#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \ + || defined(__ppc64__) || defined(__LP64__)) +#define LZ4_ARCH64 1 +#else +#define LZ4_ARCH64 0 +#endif + +/* + * Architecture-specific macros + */ +#define BYTE u8 +typedef struct _U16_S { u16 v; } U16_S; +typedef struct _U32_S { u32 v; } U32_S; +typedef struct _U64_S { u64 v; } U64_S; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ + || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \ + && defined(ARM_EFFICIENT_UNALIGNED_ACCESS) + +#define A16(x) (((U16_S *)(x))->v) +#define A32(x) (((U32_S *)(x))->v) +#define A64(x) (((U64_S *)(x))->v) + +#define PUT4(s, d) (A32(d) = A32(s)) +#define PUT8(s, d) (A64(d) = A64(s)) +#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ + do { \ + A16(p) = v; \ + p += 2; \ + } while (0) +#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v)) +#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v)) +#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v)) + +#define PUT4(s, d) \ + put_unaligned(get_unaligned((const u32 *) s), (u32 *) d) +#define PUT8(s, d) \ + put_unaligned(get_unaligned((const u64 *) s), (u64 *) d) + +#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \ + do { \ + put_unaligned(v, (u16 *)(p)); \ + p += 2; \ + } while (0) +#endif + +#define COPYLENGTH 8 +#define ML_BITS 4 +#define ML_MASK ((1U << ML_BITS) - 1) +#define RUN_BITS (8 - ML_BITS) +#define RUN_MASK ((1U << RUN_BITS) - 1) +#define MEMORY_USAGE 14 +#define MINMATCH 4 +#define SKIPSTRENGTH 6 +#define LASTLITERALS 5 +#define MFLIMIT (COPYLENGTH + MINMATCH) +#define MINLENGTH (MFLIMIT + 1) +#define MAXD_LOG 16 +#define MAXD (1 << MAXD_LOG) +#define MAXD_MASK (u32)(MAXD - 1) +#define MAX_DISTANCE (MAXD - 1) +#define HASH_LOG (MAXD_LOG - 1) +#define HASHTABLESIZE (1 << HASH_LOG) +#define MAX_NB_ATTEMPTS 256 +#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) +#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1)) +#define HASHLOG64K ((MEMORY_USAGE - 2) + 1) +#define HASH64KTABLESIZE (1U << HASHLOG64K) +#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ + ((MINMATCH * 8) - (MEMORY_USAGE-2))) +#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \ + ((MINMATCH * 8) - HASHLOG64K)) +#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ + ((MINMATCH * 8) - HASH_LOG)) + +#if LZ4_ARCH64/* 64-bit */ +#define STEPSIZE 8 + +#define LZ4_COPYSTEP(s, d) \ + do { \ + PUT8(s, d); \ + d += 8; \ + s += 8; \ + } while (0) + +#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d) + +#define LZ4_SECURECOPY(s, d, e) \ + do { \ + if (d < e) { \ + LZ4_WILDCOPY(s, d, e); \ + } \ + } while (0) +#define HTYPE u32 + +#ifdef __BIG_ENDIAN +#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3) +#else +#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3) +#endif + +#else /* 32-bit */ +#define STEPSIZE 4 + +#define LZ4_COPYSTEP(s, d) \ + do { \ + PUT4(s, d); \ + d += 4; \ + s += 4; \ + } while (0) + +#define LZ4_COPYPACKET(s, d) \ + do { \ + LZ4_COPYSTEP(s, d); \ + LZ4_COPYSTEP(s, d); \ + } while (0) + +#define LZ4_SECURECOPY LZ4_WILDCOPY +#define HTYPE const u8* + +#ifdef __BIG_ENDIAN +#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3) +#else +#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3) +#endif + +#endif + +#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ + (d = s - get_unaligned_le16(p)) + +#define LZ4_WILDCOPY(s, d, e) \ + do { \ + LZ4_COPYPACKET(s, d); \ + } while (d < e) + +#define LZ4_BLINDCOPY(s, d, l) \ + do { \ + u8 *e = (d) + l; \ + 
LZ4_WILDCOPY(s, d, e); \ + d = e; \ + } while (0) diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c new file mode 100644 index 000000000000..eb1a74f5e368 --- /dev/null +++ b/lib/lz4/lz4hc_compress.c @@ -0,0 +1,539 @@ +/* + * LZ4 HC - High Compression Mode of LZ4 + * Copyright (C) 2011-2012, Yann Collet. + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at : + * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + * - LZ4 source repository : http://code.google.com/p/lz4/ + * + * Changed for kernel use by: + * Chanho Min <chanho.min@lge.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/lz4.h> +#include <asm/unaligned.h> +#include "lz4defs.h" + +struct lz4hc_data { + const u8 *base; + HTYPE hashtable[HASHTABLESIZE]; + u16 chaintable[MAXD]; + const u8 *nexttoupdate; +} __attribute__((__packed__)); + +static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base) +{ + memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable)); + memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable)); + +#if LZ4_ARCH64 + hc4->nexttoupdate = base + 1; +#else + hc4->nexttoupdate = base; +#endif + hc4->base = base; + return 1; +} + +/* Update chains up to ip (excluded) */ +static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) +{ + u16 *chaintable = hc4->chaintable; + HTYPE *hashtable = hc4->hashtable; +#if LZ4_ARCH64 + const BYTE * const base = hc4->base; +#else + const int base = 0; +#endif + + while (hc4->nexttoupdate < ip) { + const u8 *p = hc4->nexttoupdate; + size_t delta = p - (hashtable[HASH_VALUE(p)] + base); + if (delta > MAX_DISTANCE) + delta = MAX_DISTANCE; + chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta; + hashtable[HASH_VALUE(p)] = (p) - base; + hc4->nexttoupdate++; + } +} + +static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2, + const u8 *const matchlimit) +{ + const u8 *p1t = p1; + + while (p1t < matchlimit - (STEPSIZE - 1)) { +#if LZ4_ARCH64 + u64 diff = A64(p2) ^ A64(p1t); +#else + u32 diff = A32(p2) ^ A32(p1t); +#endif + if (!diff) { + p1t += STEPSIZE; + p2 += STEPSIZE; + continue; + } + p1t += LZ4_NBCOMMONBYTES(diff); + return p1t - p1; + } 
+#if LZ4_ARCH64 + if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) { + p1t += 4; + p2 += 4; + } +#endif + + if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) { + p1t += 2; + p2 += 2; + } + if ((p1t < matchlimit) && (*p2 == *p1t)) + p1t++; + return p1t - p1; +} + +static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, + const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) +{ + u16 *const chaintable = hc4->chaintable; + HTYPE *const hashtable = hc4->hashtable; + const u8 *ref; +#if LZ4_ARCH64 + const BYTE * const base = hc4->base; +#else + const int base = 0; +#endif + int nbattempts = MAX_NB_ATTEMPTS; + size_t repl = 0, ml = 0; + u16 delta; + + /* HC4 match finder */ + lz4hc_insert(hc4, ip); + ref = hashtable[HASH_VALUE(ip)] + base; + + /* potential repetition */ + if (ref >= ip-4) { + /* confirmed */ + if (A32(ref) == A32(ip)) { + delta = (u16)(ip-ref); + repl = ml = lz4hc_commonlength(ip + MINMATCH, + ref + MINMATCH, matchlimit) + MINMATCH; + *matchpos = ref; + } + ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; + } + + while ((ref >= ip - MAX_DISTANCE) && nbattempts) { + nbattempts--; + if (*(ref + ml) == *(ip + ml)) { + if (A32(ref) == A32(ip)) { + size_t mlt = + lz4hc_commonlength(ip + MINMATCH, + ref + MINMATCH, matchlimit) + MINMATCH; + if (mlt > ml) { + ml = mlt; + *matchpos = ref; + } + } + } + ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; + } + + /* Complete table */ + if (repl) { + const BYTE *ptr = ip; + const BYTE *end; + end = ip + repl - (MINMATCH-1); + /* Pre-Load */ + while (ptr < end - delta) { + chaintable[(size_t)(ptr) & MAXD_MASK] = delta; + ptr++; + } + do { + chaintable[(size_t)(ptr) & MAXD_MASK] = delta; + /* Head of chain */ + hashtable[HASH_VALUE(ptr)] = (ptr) - base; + ptr++; + } while (ptr < end); + hc4->nexttoupdate = end; + } + + return (int)ml; +} + +static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, + const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, + const u8 **matchpos, const u8 **startpos) +{ + u16 *const chaintable = hc4->chaintable; + HTYPE *const hashtable = hc4->hashtable; +#if LZ4_ARCH64 + const BYTE * const base = hc4->base; +#else + const int base = 0; +#endif + const u8 *ref; + int nbattempts = MAX_NB_ATTEMPTS; + int delta = (int)(ip - startlimit); + + /* First Match */ + lz4hc_insert(hc4, ip); + ref = hashtable[HASH_VALUE(ip)] + base; + + while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) + && (nbattempts)) { + nbattempts--; + if (*(startlimit + longest) == *(ref - delta + longest)) { + if (A32(ref) == A32(ip)) { + const u8 *reft = ref + MINMATCH; + const u8 *ipt = ip + MINMATCH; + const u8 *startt = ip; + + while (ipt < matchlimit-(STEPSIZE - 1)) { + #if LZ4_ARCH64 + u64 diff = A64(reft) ^ A64(ipt); + #else + u32 diff = A32(reft) ^ A32(ipt); + #endif + + if (!diff) { + ipt += STEPSIZE; + reft += STEPSIZE; + continue; + } + ipt += LZ4_NBCOMMONBYTES(diff); + goto _endcount; + } + #if LZ4_ARCH64 + if ((ipt < (matchlimit - 3)) + && (A32(reft) == A32(ipt))) { + ipt += 4; + reft += 4; + } + ipt += 2; + #endif + if ((ipt < (matchlimit - 1)) + && (A16(reft) == A16(ipt))) { + reft += 2; + } + if ((ipt < matchlimit) && (*reft == *ipt)) + ipt++; +_endcount: + reft = ref; + + while ((startt > startlimit) + && (reft > hc4->base) + && (startt[-1] == reft[-1])) { + startt--; + reft--; + } + + if ((ipt - startt) > longest) { + longest = (int)(ipt - startt); + *matchpos = reft; + *startpos = startt; + } + } + } + ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; + 
} + return longest; +} + +static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, + int ml, const u8 *ref) +{ + int length, len; + u8 *token; + + /* Encode Literal length */ + length = (int)(*ip - *anchor); + token = (*op)++; + if (length >= (int)RUN_MASK) { + *token = (RUN_MASK << ML_BITS); + len = length - RUN_MASK; + for (; len > 254 ; len -= 255) + *(*op)++ = 255; + *(*op)++ = (u8)len; + } else + *token = (length << ML_BITS); + + /* Copy Literals */ + LZ4_BLINDCOPY(*anchor, *op, length); + + /* Encode Offset */ + LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); + + /* Encode MatchLength */ + len = (int)(ml - MINMATCH); + if (len >= (int)ML_MASK) { + *token += ML_MASK; + len -= ML_MASK; + for (; len > 509 ; len -= 510) { + *(*op)++ = 255; + *(*op)++ = 255; + } + if (len > 254) { + len -= 255; + *(*op)++ = 255; + } + *(*op)++ = (u8)len; + } else + *token += len; + + /* Prepare next loop */ + *ip += ml; + *anchor = *ip; + + return 0; +} + +static int lz4_compresshcctx(struct lz4hc_data *ctx, + const char *source, + char *dest, + int isize) +{ + const u8 *ip = (const u8 *)source; + const u8 *anchor = ip; + const u8 *const iend = ip + isize; + const u8 *const mflimit = iend - MFLIMIT; + const u8 *const matchlimit = (iend - LASTLITERALS); + + u8 *op = (u8 *)dest; + + int ml, ml2, ml3, ml0; + const u8 *ref = NULL; + const u8 *start2 = NULL; + const u8 *ref2 = NULL; + const u8 *start3 = NULL; + const u8 *ref3 = NULL; + const u8 *start0; + const u8 *ref0; + int lastrun; + + ip++; + + /* Main Loop */ + while (ip < mflimit) { + ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); + if (!ml) { + ip++; + continue; + } + + /* saved, in case we would skip too much */ + start0 = ip; + ref0 = ref; + ml0 = ml; +_search2: + if (ip+ml < mflimit) + ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, + ip + 1, matchlimit, ml, &ref2, &start2); + else + ml2 = ml; + /* No better match */ + if (ml2 == ml) { + lz4_encodesequence(&ip, &op, &anchor, ml, ref); + continue; + } + + if (start0 < ip) { + /* empirical */ + if (start2 < ip + ml0) { + ip = start0; + ref = ref0; + ml = ml0; + } + } + /* + * Here, start0==ip + * First Match too small : removed + */ + if ((start2 - ip) < 3) { + ml = ml2; + ip = start2; + ref = ref2; + goto _search2; + } + +_search3: + /* + * Currently we have : + * ml2 > ml1, and + * ip1+3 <= ip2 (usually < ip1+ml1) + */ + if ((start2 - ip) < OPTIMAL_ML) { + int correction; + int new_ml = ml; + if (new_ml > OPTIMAL_ML) + new_ml = OPTIMAL_ML; + if (ip + new_ml > start2 + ml2 - MINMATCH) + new_ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = new_ml - (int)(start2 - ip); + if (correction > 0) { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + /* + * Now, we have start2 = ip+new_ml, + * with new_ml=min(ml, OPTIMAL_ML=18) + */ + if (start2 + ml2 < mflimit) + ml3 = lz4hc_insertandgetwidermatch(ctx, + start2 + ml2 - 3, start2, matchlimit, + ml2, &ref3, &start3); + else + ml3 = ml2; + + /* No better match : 2 sequences to encode */ + if (ml3 == ml2) { + /* ip & ref are known; Now for ml */ + if (start2 < ip+ml) + ml = (int)(start2 - ip); + + /* Now, encode 2 sequences */ + lz4_encodesequence(&ip, &op, &anchor, ml, ref); + ip = start2; + lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); + continue; + } + + /* Not enough space for match 2 : remove it */ + if (start3 < ip + ml + 3) { + /* + * can write Seq1 immediately ==> Seq2 is removed, + * so Seq3 becomes Seq1 + */ + if (start3 >= (ip + ml)) { + if (start2 < ip + ml) { + int 
correction = + (int)(ip + ml - start2); + start2 += correction; + ref2 += correction; + ml2 -= correction; + if (ml2 < MINMATCH) { + start2 = start3; + ref2 = ref3; + ml2 = ml3; + } + } + + lz4_encodesequence(&ip, &op, &anchor, ml, ref); + ip = start3; + ref = ref3; + ml = ml3; + + start0 = start2; + ref0 = ref2; + ml0 = ml2; + goto _search2; + } + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + goto _search3; + } + + /* + * OK, now we have 3 ascending matches; let's write at least + * the first one ip & ref are known; Now for ml + */ + if (start2 < ip + ml) { + if ((start2 - ip) < (int)ML_MASK) { + int correction; + if (ml > OPTIMAL_ML) + ml = OPTIMAL_ML; + if (ip + ml > start2 + ml2 - MINMATCH) + ml = (int)(start2 - ip) + ml2 + - MINMATCH; + correction = ml - (int)(start2 - ip); + if (correction > 0) { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } else + ml = (int)(start2 - ip); + } + lz4_encodesequence(&ip, &op, &anchor, ml, ref); + + ip = start2; + ref = ref2; + ml = ml2; + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + + goto _search3; + } + + /* Encode Last Literals */ + lastrun = (int)(iend - anchor); + if (lastrun >= (int)RUN_MASK) { + *op++ = (RUN_MASK << ML_BITS); + lastrun -= RUN_MASK; + for (; lastrun > 254 ; lastrun -= 255) + *op++ = 255; + *op++ = (u8) lastrun; + } else + *op++ = (lastrun << ML_BITS); + memcpy(op, anchor, iend - anchor); + op += iend - anchor; + /* End */ + return (int) (((char *)op) - dest); +} + +int lz4hc_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem) +{ + int ret = -1; + int out_len = 0; + + struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem; + lz4hc_init(hc4, (const u8 *)src); + out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src, + (char *)dst, (int)src_len); + + if (out_len < 0) + goto exit; + + *dst_len = out_len; + return 0; + +exit: + return ret; +} +EXPORT_SYMBOL_GPL(lz4hc_compress); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZ4HC compressor"); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index a1cf8cae60e7..a685c8a79578 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -247,13 +247,15 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, struct scatterlist *sg, *prv; unsigned int left; + memset(table, 0, sizeof(*table)); + + if (nents == 0) + return -EINVAL; #ifndef ARCH_HAS_SG_CHAIN if (WARN_ON_ONCE(nents > max_ents)) return -EINVAL; #endif - memset(table, 0, sizeof(*table)); - left = nents; prv = NULL; do { @@ -453,6 +455,65 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, } EXPORT_SYMBOL(sg_miter_start); +static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) +{ + if (!miter->__remaining) { + struct scatterlist *sg; + unsigned long pgoffset; + + if (!__sg_page_iter_next(&miter->piter)) + return false; + + sg = miter->piter.sg; + pgoffset = miter->piter.sg_pgoffset; + + miter->__offset = pgoffset ? 0 : sg->offset; + miter->__remaining = sg->offset + sg->length - + (pgoffset << PAGE_SHIFT) - miter->__offset; + miter->__remaining = min_t(unsigned long, miter->__remaining, + PAGE_SIZE - miter->__offset); + } + + return true; +} + +/** + * sg_miter_skip - reposition mapping iterator + * @miter: sg mapping iter to be skipped + * @offset: number of bytes to plus the current location + * + * Description: + * Sets the offset of @miter to its current location plus @offset bytes. + * If mapping iterator @miter has been proceeded by sg_miter_next(), this + * stops @miter. 
+ * + * Context: + * Don't care if @miter is stopped, or not proceeded yet. + * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set. + * + * Returns: + * true if @miter contains the valid mapping. false if end of sg + * list is reached. + */ +static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) +{ + sg_miter_stop(miter); + + while (offset) { + off_t consumed; + + if (!sg_miter_get_next_page(miter)) + return false; + + consumed = min_t(off_t, offset, miter->__remaining); + miter->__offset += consumed; + miter->__remaining -= consumed; + offset -= consumed; + } + + return true; +} + /** * sg_miter_next - proceed mapping iterator to the next mapping * @miter: sg mapping iter to proceed @@ -478,22 +539,9 @@ bool sg_miter_next(struct sg_mapping_iter *miter) * Get to the next page if necessary. * __remaining, __offset is adjusted by sg_miter_stop */ - if (!miter->__remaining) { - struct scatterlist *sg; - unsigned long pgoffset; - - if (!__sg_page_iter_next(&miter->piter)) - return false; - - sg = miter->piter.sg; - pgoffset = miter->piter.sg_pgoffset; + if (!sg_miter_get_next_page(miter)) + return false; - miter->__offset = pgoffset ? 0 : sg->offset; - miter->__remaining = sg->offset + sg->length - - (pgoffset << PAGE_SHIFT) - miter->__offset; - miter->__remaining = min_t(unsigned long, miter->__remaining, - PAGE_SIZE - miter->__offset); - } miter->page = sg_page_iter_page(&miter->piter); miter->consumed = miter->length = miter->__remaining; @@ -552,14 +600,16 @@ EXPORT_SYMBOL(sg_miter_stop); * @nents: Number of SG entries * @buf: Where to copy from * @buflen: The number of bytes to copy - * @to_buffer: transfer direction (non zero == from an sg list to a - * buffer, 0 == from a buffer to an sg list + * @skip: Number of bytes to skip before copying + * @to_buffer: transfer direction (true == from an sg list to a + * buffer, false == from a buffer to an sg list * * Returns the number of copied bytes. * **/ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, int to_buffer) + void *buf, size_t buflen, off_t skip, + bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; @@ -573,6 +623,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, sg_miter_start(&miter, sgl, nents, sg_flags); + if (!sg_miter_skip(&miter, skip)) + return false; + local_irq_save(flags); while (sg_miter_next(&miter) && offset < buflen) { @@ -607,7 +660,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen) { - return sg_copy_buffer(sgl, nents, buf, buflen, 0); + return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); } EXPORT_SYMBOL(sg_copy_from_buffer); @@ -624,6 +677,42 @@ EXPORT_SYMBOL(sg_copy_from_buffer); size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen) { - return sg_copy_buffer(sgl, nents, buf, buflen, 1); + return sg_copy_buffer(sgl, nents, buf, buflen, 0, true); } EXPORT_SYMBOL(sg_copy_to_buffer); + +/** + * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @skip: Number of bytes to skip before copying + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. 
+ * + **/ +size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); +} +EXPORT_SYMBOL(sg_pcopy_from_buffer); + +/** + * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy to + * @skip: Number of bytes to skip before copying + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. + * + **/ +size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); +} +EXPORT_SYMBOL(sg_pcopy_to_buffer); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e149c6416384..739a36366b79 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -670,7 +670,7 @@ static noinline_for_stack char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec, const char *fmt) { - int i, len = 1; /* if we pass '%ph[CDN]', field witdh remains + int i, len = 1; /* if we pass '%ph[CDN]', field width remains negative value, fallback to the default */ char separator; @@ -923,6 +923,103 @@ char *ip4_addr_string(char *buf, char *end, const u8 *addr, } static noinline_for_stack +char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa, + struct printf_spec spec, const char *fmt) +{ + bool have_p = false, have_s = false, have_f = false, have_c = false; + char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") + + sizeof(":12345") + sizeof("/123456789") + + sizeof("%1234567890")]; + char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr); + const u8 *addr = (const u8 *) &sa->sin6_addr; + char fmt6[2] = { fmt[0], '6' }; + u8 off = 0; + + fmt++; + while (isalpha(*++fmt)) { + switch (*fmt) { + case 'p': + have_p = true; + break; + case 'f': + have_f = true; + break; + case 's': + have_s = true; + break; + case 'c': + have_c = true; + break; + } + } + + if (have_p || have_s || have_f) { + *p = '['; + off = 1; + } + + if (fmt6[0] == 'I' && have_c) + p = ip6_compressed_string(ip6_addr + off, addr); + else + p = ip6_string(ip6_addr + off, addr, fmt6); + + if (have_p || have_s || have_f) + *p++ = ']'; + + if (have_p) { + *p++ = ':'; + p = number(p, pend, ntohs(sa->sin6_port), spec); + } + if (have_f) { + *p++ = '/'; + p = number(p, pend, ntohl(sa->sin6_flowinfo & + IPV6_FLOWINFO_MASK), spec); + } + if (have_s) { + *p++ = '%'; + p = number(p, pend, sa->sin6_scope_id, spec); + } + *p = '\0'; + + return string(buf, end, ip6_addr, spec); +} + +static noinline_for_stack +char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa, + struct printf_spec spec, const char *fmt) +{ + bool have_p = false; + char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")]; + char *pend = ip4_addr + sizeof(ip4_addr); + const u8 *addr = (const u8 *) &sa->sin_addr.s_addr; + char fmt4[3] = { fmt[0], '4', 0 }; + + fmt++; + while (isalpha(*++fmt)) { + switch (*fmt) { + case 'p': + have_p = true; + break; + case 'h': + case 'l': + case 'n': + case 'b': + fmt4[2] = *fmt; + break; + } + } + + p = ip4_string(ip4_addr, addr, fmt4); + if (have_p) { + *p++ = ':'; + p = number(p, pend, ntohs(sa->sin_port), spec); + } + *p = '\0'; + + return string(buf, end, ip4_addr, spec); +} + +static noinline_for_stack char *uuid_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { @@ -1007,11 +1104,17 @@ int kptr_restrict __read_mostly; * - 
'I' [46] for IPv4/IPv6 addresses printed in the usual way
 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
+ * [S][pfs]
+ * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
+ * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
 * IPv6 omits the colons (01020304...0f)
 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
- * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
- * - 'I6c' for IPv6 addresses printed as specified by
+ * [S][pfs]
+ * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
+ * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
+ * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
+ * - 'I[6S]c' for IPv6 addresses printed as specified by
 * http://tools.ietf.org/html/rfc5952
 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
@@ -1093,6 +1196,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 return ip6_addr_string(buf, end, ptr, spec, fmt);
 case '4':
 return ip4_addr_string(buf, end, ptr, spec, fmt);
+ case 'S': {
+ const union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } *sa = ptr;
+
+ switch (sa->raw.sa_family) {
+ case AF_INET:
+ return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
+ case AF_INET6:
+ return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
+ default:
+ return string(buf, end, "(invalid address)", spec);
+ }}
 }
 break;
 case 'U':
@@ -1370,6 +1488,8 @@ qualifier:
 * %pI6 print an IPv6 address with colons
 * %pi6 print an IPv6 address without colons
 * %pI6c print an IPv6 address as specified by RFC 5952
+ * %pIS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
+ * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
 * case.
 * %*ph[CDN] a variable-length hex string with a separator (supports up to 64
 * bytes of the input)
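
A minimal usage sketch for the %pIS specifier documented above, assuming only what the vsprintf.c hunks add; the log_peer() helper and its format choice are illustrative, not part of the patch:

#include <linux/printk.h>
#include <linux/socket.h>

/* Hypothetical helper: log a peer address without branching on family.
 * %pIS dispatches on sa->sa_family, so one format string covers both
 * AF_INET and AF_INET6. The 'p' suffix appends the port and 'c' asks
 * for RFC 5952 compression (IPv6 only), yielding e.g. "10.0.0.1:22"
 * or "[2001:db8::1]:22".
 */
static void log_peer(const struct sockaddr *sa)
{
	pr_info("connection from %pISpc\n", sa);
}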
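
The scatterlist hunks add skip-aware variants of the existing copy helpers. A sketch of the kind of caller they target, with hypothetical names (read_payload, hdrlen):

#include <linux/scatterlist.h>

/* Hypothetical: copy 'buflen' payload bytes out of an SG list whose
 * first 'hdrlen' bytes are a header that may straddle several entries.
 * sg_pcopy_to_buffer() performs the skip internally instead of making
 * the caller walk the list; it returns the number of bytes copied.
 */
static size_t read_payload(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen, off_t hdrlen)
{
	return sg_pcopy_to_buffer(sgl, nents, buf, buflen, hdrlen);
}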
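
Finally, a hedged round trip through the new LZ4 wrappers. lz4_compressbound() and LZ4_MEM_COMPRESS are assumed to come from <linux/lz4.h>, which this series adds outside the lib/-only diffstat shown here; lz4_roundtrip() itself is a sketch, not kernel code:

#include <linux/errno.h>
#include <linux/lz4.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Compress 'len' bytes, decompress them back, and verify the result. */
static int lz4_roundtrip(const unsigned char *data, size_t len)
{
	size_t comp_len = 0, src_used = 0;
	unsigned char *comp = vmalloc(lz4_compressbound(len));
	unsigned char *out = vmalloc(len);
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* scratch hash table */
	int ret = -ENOMEM;

	if (!comp || !out || !wrkmem)
		goto out_free;

	/* lz4_compress() returns 0 on success and stores the compressed
	 * size in comp_len; 'comp' must hold at least
	 * lz4_compressbound(len) bytes, matching the bound the wrapper
	 * passes as maxoutputsize. */
	ret = -EINVAL;
	if (lz4_compress(data, len, comp, &comp_len, wrkmem))
		goto out_free;

	/* The uncompressed size must be known up front ('len' here);
	 * src_used reports how many compressed bytes were consumed. */
	if (lz4_decompress((const char *)comp, &src_used, (char *)out, len))
		goto out_free;

	ret = memcmp(data, out, len) ? -EINVAL : 0;
out_free:
	vfree(wrkmem);
	vfree(out);
	vfree(comp);
	return ret;
}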