566 files changed, 15527 insertions, 8260 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd index 4d55a1888981..db1ad7e34fc3 100644 --- a/Documentation/ABI/testing/sysfs-class-mtd +++ b/Documentation/ABI/testing/sysfs-class-mtd @@ -123,3 +123,54 @@ Description: half page, or a quarter page). In the case of ECC NOR, it is the ECC block size. + +What: /sys/class/mtd/mtdX/ecc_strength +Date: April 2012 +KernelVersion: 3.4 +Contact: linux-mtd@lists.infradead.org +Description: + Maximum number of bit errors that the device is capable of + correcting within each region covering an ecc step. This will + always be a non-negative integer. Note that some devices will + have multiple ecc steps within each writesize region. + + In the case of devices lacking any ECC capability, it is 0. + +What: /sys/class/mtd/mtdX/bitflip_threshold +Date: April 2012 +KernelVersion: 3.4 +Contact: linux-mtd@lists.infradead.org +Description: + This allows the user to examine and adjust the criteria by which + mtd returns -EUCLEAN from mtd_read(). If the maximum number of + bit errors that were corrected on any single region comprising + an ecc step (as reported by the driver) equals or exceeds this + value, -EUCLEAN is returned. Otherwise, absent an error, 0 is + returned. Higher layers (e.g., UBI) use this return code as an + indication that an erase block may be degrading and should be + scrutinized as a candidate for being marked as bad. + + The initial value may be specified by the flash device driver. + If not, then the default value is ecc_strength. + + The introduction of this feature brings a subtle change to the + meaning of the -EUCLEAN return code. Previously, it was + interpreted to mean simply "one or more bit errors were + corrected". Its new interpretation can be phrased as "a + dangerously high number of bit errors were corrected on one or + more regions comprising an ecc step". The precise definition of + "dangerously high" can be adjusted by the user with + bitflip_threshold. Users are discouraged from doing this, + however, unless they know what they are doing and have intimate + knowledge of the properties of their device. Broadly speaking, + bitflip_threshold should be low enough to detect genuine erase + block degradation, but high enough to avoid the consequences of + a persistent return value of -EUCLEAN on devices where sticky + bitflips occur. Note that if bitflip_threshold exceeds + ecc_strength, -EUCLEAN is never returned by mtd_read(). + Conversely, if bitflip_threshold is zero, -EUCLEAN is always + returned, absent a hard error. + + This is generally applicable only to NAND flash devices with ECC + capability. It is ignored on devices lacking ECC capability; + i.e., devices for which ecc_strength is zero. diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index 0c674be0d3c6..e0aedb7a7827 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -1119,8 +1119,6 @@ in this page</entry> These constants are defined in nand.h. They are ored together to describe the chip functionality. 
<programlisting> -/* Chip can not auto increment pages */ -#define NAND_NO_AUTOINCR 0x00000001 /* Buswitdh is 16 bit */ #define NAND_BUSWIDTH_16 0x00000002 /* Device supports partial programming without padding */ diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS index 888ae7b83ae4..a564ceea9e98 100644 --- a/Documentation/arm/OMAP/DSS +++ b/Documentation/arm/OMAP/DSS @@ -47,6 +47,51 @@ flexible way to enable non-common multi-display configuration. In addition to modelling the hardware overlays, omapdss supports virtual overlays and overlay managers. These can be used when updating a display with CPU or system DMA. +omapdss driver support for audio +-------------------------------- +There exist several display technologies and standards that support audio as +well. Hence, it is relevant to update the DSS device driver to provide an audio +interface that may be used by an audio driver or any other driver interested in +the functionality. + +The audio_enable function is intended to prepare the relevant +IP for playback (e.g., enabling an audio FIFO, taking in/out of reset +some IP, enabling companion chips, etc). It is intended to be called before +audio_start. The audio_disable function performs the reverse operation and is +intended to be called after audio_stop. + +While a given DSS device driver may support audio, it is possible that for +certain configurations audio is not supported (e.g., an HDMI display using a +VESA video timing). The audio_supported function is intended to query whether +the current configuration of the display supports audio. + +The audio_config function is intended to configure all the relevant audio +parameters of the display. In order to make the function independent of any +specific DSS device driver, a struct omap_dss_audio is defined. Its purpose +is to contain all the required parameters for audio configuration. At the +moment, such structure contains pointers to IEC-60958 channel status word +and CEA-861 audio infoframe structures. This should be enough to support +HDMI and DisplayPort, as both are based on CEA-861 and IEC-60958. + +The audio_enable/disable, audio_config and audio_supported functions could be +implemented as functions that may sleep. Hence, they should not be called +while holding a spinlock or a readlock. + +The audio_start/audio_stop function is intended to effectively start/stop audio +playback after the configuration has taken place. These functions are designed +to be used in an atomic context. Hence, audio_start should return quickly and be +called only after all the needed resources for audio playback (audio FIFOs, +DMA channels, companion chips, etc) have been enabled to begin data transfers. +audio_stop is designed to only stop the audio transfers. The resources used +for playback are released using audio_disable. + +The enum omap_dss_audio_state may be used to help the implementations of +the interface to keep track of the audio state. The initial state is _DISABLED; +then, the state transitions to _CONFIGURED, and then, when it is ready to +play audio, to _ENABLED. The state _PLAYING is used when the audio is being +rendered. 
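The expected call ordering can be sketched as follows. This is only an illustration of the sequence suggested by the state transitions above, assuming a display device handle dssdev whose driver exposes callbacks with exactly these names; the surrounding structure and member names are placeholders, not quoted from the omapdss headers.

/*
 * Illustrative only: walks the states described above
 * (_DISABLED -> _CONFIGURED -> _ENABLED -> _PLAYING).
 * "struct omap_dss_device" with a ->driver member holding the audio
 * callbacks is an assumption of this sketch.
 */
static int example_play_audio(struct omap_dss_device *dssdev,
			      struct omap_dss_audio *audio)
{
	int r;

	/* audio_supported/config/enable may sleep: never call them
	 * while holding a spinlock. */
	if (!dssdev->driver->audio_supported(dssdev))
		return -ENODEV;

	r = dssdev->driver->audio_config(dssdev, audio);  /* -> _CONFIGURED */
	if (r)
		return r;

	r = dssdev->driver->audio_enable(dssdev);         /* -> _ENABLED */
	if (r)
		return r;

	/* Safe in atomic context: all resources were set up above. */
	return dssdev->driver->audio_start(dssdev);       /* -> _PLAYING */
}

static void example_stop_audio(struct omap_dss_device *dssdev)
{
	dssdev->driver->audio_stop(dssdev);     /* atomic, stops transfers */
	dssdev->driver->audio_disable(dssdev);  /* may sleep, releases resources */
}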
+ + Panel and controller drivers ---------------------------- @@ -156,6 +201,7 @@ timings Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw) "pal" and "ntsc" panel_name tear_elim Tearing elimination 0=off, 1=on +output_type Output type (video encoder only): "composite" or "svideo" There are also some debugfs files at <debugfs>/omapdss/ which show information about clocks and registers. diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index 3370bc4d7b98..f5cfc62b7ad3 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -287,6 +287,17 @@ iii) Messages the current transaction id is when you change it with this compare-and-swap message. + reserve_metadata_snap + + Reserve a copy of the data mapping btree for use by userland. + This allows userland to inspect the mappings as they were when + this message was executed. Use the pool's status command to + get the root block associated with the metadata snapshot. + + release_metadata_snap + + Release a previously reserved copy of the data mapping btree. + 'thin' target ------------- diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt new file mode 100644 index 000000000000..1a5bbd346d22 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt @@ -0,0 +1,33 @@ +* Freescale General-Purpose Media Interface (GPMI) + +The GPMI nand controller provides an interface to control the +NAND flash chips. We support only one NAND chip now. + +Required properties: + - compatible : should be "fsl,<chip>-gpmi-nand" + - reg : should contain registers location and length for gpmi and bch. + - reg-names: Should contain the reg names "gpmi-nand" and "bch" + - interrupts : The first is the DMA interrupt number for GPMI. + The second is the BCH interrupt number. + - interrupt-names : The interrupt names "gpmi-dma", "bch"; + - fsl,gpmi-dma-channel : Should contain the dma channel it uses. + +The device tree may optionally contain sub-nodes describing partitions of the +address space. See partition.txt for more detail. + +Examples: + +gpmi-nand@8000c000 { + compatible = "fsl,imx28-gpmi-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg-names = "gpmi-nand", "bch"; + interrupts = <88>, <41>; + interrupt-names = "gpmi-dma", "bch"; + fsl,gpmi-dma-channel = <4>; + + partition@0 { + ... 
+ }; +}; diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.txt b/Documentation/devicetree/bindings/mtd/mxc-nand.txt new file mode 100644 index 000000000000..b5833d11c7be --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/mxc-nand.txt @@ -0,0 +1,19 @@ +* Freescale's mxc_nand + +Required properties: +- compatible: "fsl,imxXX-nand" +- reg: address range of the nfc block +- interrupts: irq to be used +- nand-bus-width: see nand.txt +- nand-ecc-mode: see nand.txt +- nand-on-flash-bbt: see nand.txt + +Example: + + nand@d8000000 { + compatible = "fsl,imx27-nand"; + reg = <0xd8000000 0x1000>; + interrupts = <29>; + nand-bus-width = <8>; + nand-ecc-mode = "hw"; + }; diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index d449e632e6a0..8e2da1e06e3b 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -61,6 +61,7 @@ ata *); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); + void (*update_time)(struct inode *, struct timespec *, int); locking rules: all may block @@ -87,6 +88,8 @@ getxattr: no listxattr: no removexattr: yes fiemap: no +update_time: no + Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on victim. cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem. diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index ef19f91a0f12..efd23f481704 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -363,6 +363,7 @@ struct inode_operations { ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); + void (*update_time)(struct inode *, struct timespec *, int); }; Again, all methods are called without any locks being held, unless @@ -471,6 +472,9 @@ otherwise noted. removexattr: called by the VFS to remove an extended attribute from a file. This method is called by removexattr(2) system call. + update_time: called by the VFS to update a specific time or the i_version of + an inode. If this is not defined the VFS will update the inode itself + and call mark_inode_dirty_sync. The Address Space Object ======================== diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c45513d806ab..a92c5ebf373e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted. sched_debug [KNL] Enables verbose scheduler debug messages. + skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate + xtime_lock contention on larger systems, and/or RCU lock + contention on all systems with CONFIG_MAXSMP set. + Format: { "0" | "1" } + 0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1" + 1 -- enable. + Note: increases power consumption, thus should only be + enabled if running jitter sensitive (HPC/RT) workloads. + security= [SECURITY] Choose a security module to enable at boot. 
If this boot parameter is not specified, only the first security module asking for security registration will be diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt new file mode 100644 index 000000000000..37067cf455f4 --- /dev/null +++ b/Documentation/vm/frontswap.txt @@ -0,0 +1,278 @@ +Frontswap provides a "transcendent memory" interface for swap pages. +In some environments, dramatic performance savings may be obtained because +swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk. + +(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends" +and the only necessary changes to the core kernel for transcendent memory; +all other supporting code -- the "backends" -- is implemented as drivers. +See the LWN.net article "Transcendent memory in a nutshell" for a detailed +overview of frontswap and related kernel parts: +https://lwn.net/Articles/454795/ ) + +Frontswap is so named because it can be thought of as the opposite of +a "backing" store for a swap device. The storage is assumed to be +a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming +to the requirements of transcendent memory (such as Xen's "tmem", or +in-kernel compressed memory, aka "zcache", or future RAM-like devices); +this pseudo-RAM device is not directly accessible or addressable by the +kernel and is of unknown and possibly time-varying size. The driver +links itself to frontswap by calling frontswap_register_ops to set the +frontswap_ops funcs appropriately and the functions it provides must +conform to certain policies as follows: + +An "init" prepares the device to receive frontswap pages associated +with the specified swap device number (aka "type"). A "store" will +copy the page to transcendent memory and associate it with the type and +offset associated with the page. A "load" will copy the page, if found, +from transcendent memory into kernel memory, but will NOT remove the page +from from transcendent memory. An "invalidate_page" will remove the page +from transcendent memory and an "invalidate_area" will remove ALL pages +associated with the swap type (e.g., like swapoff) and notify the "device" +to refuse further stores with that swap type. + +Once a page is successfully stored, a matching load on the page will normally +succeed. So when the kernel finds itself in a situation where it needs +to swap out a page, it first attempts to use frontswap. If the store returns +success, the data has been successfully saved to transcendent memory and +a disk write and, if the data is later read back, a disk read are avoided. +If a store returns failure, transcendent memory has rejected the data, and the +page can be written to swap as usual. + +If a backend chooses, frontswap can be configured as a "writethrough +cache" by calling frontswap_writethrough(). In this mode, the reduction +in swap device writes is lost (and also a non-trivial performance advantage) +in order to allow the backend to arbitrarily "reclaim" space used to +store frontswap pages to more completely manage its memory usage. + +Note that if a page is stored and the page already exists in transcendent memory +(a "duplicate" store), either the store succeeds and the data is overwritten, +or the store fails AND the page is invalidated. This ensures stale data may +never be obtained from frontswap. + +If properly configured, monitoring of frontswap is done via debugfs in +the /sys/kernel/debug/frontswap directory. 
The effectiveness of +frontswap can be measured (across all swap devices) with: + +failed_stores - how many store attempts have failed +loads - how many loads were attempted (all should succeed) +succ_stores - how many store attempts have succeeded +invalidates - how many invalidates were attempted + +A backend implementation may provide additional metrics. + +FAQ + +1) Where's the value? + +When a workload starts swapping, performance falls through the floor. +Frontswap significantly increases performance in many such workloads by +providing a clean, dynamic interface to read and write swap pages to +"transcendent memory" that is otherwise not directly addressable to the kernel. +This interface is ideal when data is transformed to a different form +and size (such as with compression) or secretly moved (as might be +useful for write-balancing for some RAM-like devices). Swap pages (and +evicted page-cache pages) are a great use for this kind of slower-than-RAM- +but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and +cleancache) interface to transcendent memory provides a nice way to read +and write -- and indirectly "name" -- the pages. + +Frontswap -- and cleancache -- with a fairly small impact on the kernel, +provides a huge amount of flexibility for more dynamic, flexible RAM +utilization in various system configurations: + +In the single kernel case, aka "zcache", pages are compressed and +stored in local memory, thus increasing the total anonymous pages +that can be safely kept in RAM. Zcache essentially trades off CPU +cycles used in compression/decompression for better memory utilization. +Benchmarks have shown little or no impact when memory pressure is +low while providing a significant performance improvement (25%+) +on some workloads under high memory pressure. + +"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory +support for clustered systems. Frontswap pages are locally compressed +as in zcache, but then "remotified" to another system's RAM. This +allows RAM to be dynamically load-balanced back-and-forth as needed, +i.e. when system A is overcommitted, it can swap to system B, and +vice versa. RAMster can also be configured as a memory server so +many servers in a cluster can swap, dynamically as needed, to a single +server configured with a large amount of RAM... without pre-configuring +how much of the RAM is available for each of the clients! + +In the virtual case, the whole point of virtualization is to statistically +multiplex physical resources acrosst the varying demands of multiple +virtual machines. This is really hard to do with RAM and efforts to do +it well with no kernel changes have essentially failed (except in some +well-publicized special-case workloads). +Specifically, the Xen Transcendent Memory backend allows otherwise +"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple +virtual machines, but the pages can be compressed and deduplicated to +optimize RAM utilization. And when guest OS's are induced to surrender +underutilized RAM (e.g. with "selfballooning"), sudden unexpected +memory pressure may result in swapping; frontswap allows those pages +to be swapped to and from hypervisor RAM (if overall host system memory +conditions allow), thus mitigating the potentially awful performance impact +of unplanned swapping. + +A KVM implementation is underway and has been RFC'ed to lkml. And, +using frontswap, investigation is also underway on the use of NVM as +a memory extension technology. 
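To make the backend side concrete, a minimal sketch of a backend hooking itself into frontswap is shown below. Only the operation names (init, store, load, invalidate_page, invalidate_area) and the frontswap_register_ops entry point come from the description above; the exact prototypes and struct layout in include/linux/frontswap.h are assumptions of this sketch.

/*
 * Illustrative frontswap backend skeleton; prototypes are assumed,
 * not quoted from the header.
 */
#include <linux/frontswap.h>
#include <linux/module.h>

/* Prepare to receive pages for the swap device number "type". */
static void example_init(unsigned type)
{
}

/* Return 0 only if the page has been copied and its persistence is
 * guaranteed; on failure the kernel writes the page to the real swap
 * device instead. */
static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* this sketch never accepts a page */
}

/* Copy the previously stored page back into "page" without removing it
 * from the backend. */
static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was ever stored */
}

/* Drop the single page at (type, offset). */
static void example_invalidate_page(unsigned type, pgoff_t offset)
{
}

/* Drop every page for "type", e.g. at swapoff time. */
static void example_invalidate_area(unsigned type)
{
}

static struct frontswap_ops example_ops = {
	.init			= example_init,
	.store			= example_store,
	.load			= example_load,
	.invalidate_page	= example_invalidate_page,
	.invalidate_area	= example_invalidate_area,
};

static int __init example_backend_init(void)
{
	frontswap_register_ops(&example_ops);
	return 0;
}
module_init(example_backend_init);

Whether such a backend accepts any given store remains entirely at its discretion, which is exactly the dynamicity discussed in the rest of this FAQ.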
+ +2) Sure there may be performance advantages in some situations, but + what's the space/time overhead of frontswap? + +If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into +nothingness and the only overhead is a few extra bytes per swapon'ed +swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend" +registers, there is one extra global variable compared to zero for +every swap page read or written. If CONFIG_FRONTSWAP is enabled +AND a frontswap backend registers AND the backend fails every "store" +request (i.e. provides no memory despite claiming it might), +CPU overhead is still negligible -- and since every frontswap fail +precedes a swap page write-to-disk, the system is highly likely +to be I/O bound and using a small fraction of a percent of a CPU +will be irrelevant anyway. + +As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend +registers, one bit is allocated for every swap page for every swap +device that is swapon'd. This is added to the EIGHT bits (which +was sixteen until about 2.6.34) that the kernel already allocates +for every swap page for every swap device that is swapon'd. (Hugh +Dickins has observed that frontswap could probably steal one of +the existing eight bits, but let's worry about that minor optimization +later.) For very large swap disks (which are rare) on a standard +4K pagesize, this is 1MB per 32GB swap. + +When swap pages are stored in transcendent memory instead of written +out to disk, there is a side effect that this may create more memory +pressure that can potentially outweigh the other advantages. A +backend, such as zcache, must implement policies to carefully (but +dynamically) manage memory limits to ensure this doesn't happen. + +3) OK, how about a quick overview of what this frontswap patch does + in terms that a kernel hacker can grok? + +Let's assume that a frontswap "backend" has registered during +kernel initialization; this registration indicates that this +frontswap backend has access to some "memory" that is not directly +accessible by the kernel. Exactly how much memory it provides is +entirely dynamic and random. + +Whenever a swap-device is swapon'd frontswap_init() is called, +passing the swap device number (aka "type") as a parameter. +This notifies frontswap to expect attempts to "store" swap pages +associated with that number. + +Whenever the swap subsystem is readying a page to write to a swap +device (c.f swap_writepage()), frontswap_store is called. Frontswap +consults with the frontswap backend and if the backend says it does NOT +have room, frontswap_store returns -1 and the kernel swaps the page +to the swap device as normal. Note that the response from the frontswap +backend is unpredictable to the kernel; it may choose to never accept a +page, it could accept every ninth page, or it might accept every +page. But if the backend does accept a page, the data from the page +has already been copied and associated with the type and offset, +and the backend guarantees the persistence of the data. In this case, +frontswap sets a bit in the "frontswap_map" for the swap device +corresponding to the page offset on the swap device to which it would +otherwise have written the data. + +When the swap subsystem needs to swap-in a page (swap_readpage()), +it first calls frontswap_load() which checks the frontswap_map to +see if the page was earlier accepted by the frontswap backend. If +it was, the page of data is filled from the frontswap backend and +the swap-in is complete. 
If not, the normal swap-in code is +executed to obtain the page of data from the real swap device. + +So every time the frontswap backend accepts a page, a swap device read +and (potentially) a swap device write are replaced by a "frontswap backend +store" and (possibly) a "frontswap backend loads", which are presumably much +faster. + +4) Can't frontswap be configured as a "special" swap device that is + just higher priority than any real swap device (e.g. like zswap, + or maybe swap-over-nbd/NFS)? + +No. First, the existing swap subsystem doesn't allow for any kind of +swap hierarchy. Perhaps it could be rewritten to accomodate a hierarchy, +but this would require fairly drastic changes. Even if it were +rewritten, the existing swap subsystem uses the block I/O layer which +assumes a swap device is fixed size and any page in it is linearly +addressable. Frontswap barely touches the existing swap subsystem, +and works around the constraints of the block I/O subsystem to provide +a great deal of flexibility and dynamicity. + +For example, the acceptance of any swap page by the frontswap backend is +entirely unpredictable. This is critical to the definition of frontswap +backends because it grants completely dynamic discretion to the +backend. In zcache, one cannot know a priori how compressible a page is. +"Poorly" compressible pages can be rejected, and "poorly" can itself be +defined dynamically depending on current memory constraints. + +Further, frontswap is entirely synchronous whereas a real swap +device is, by definition, asynchronous and uses block I/O. The +block I/O layer is not only unnecessary, but may perform "optimizations" +that are inappropriate for a RAM-oriented device including delaying +the write of some pages for a significant amount of time. Synchrony is +required to ensure the dynamicity of the backend and to avoid thorny race +conditions that would unnecessarily and greatly complicate frontswap +and/or the block I/O subsystem. That said, only the initial "store" +and "load" operations need be synchronous. A separate asynchronous thread +is free to manipulate the pages stored by frontswap. For example, +the "remotification" thread in RAMster uses standard asynchronous +kernel sockets to move compressed frontswap pages to a remote machine. +Similarly, a KVM guest-side implementation could do in-guest compression +and use "batched" hypercalls. + +In a virtualized environment, the dynamicity allows the hypervisor +(or host OS) to do "intelligent overcommit". For example, it can +choose to accept pages only until host-swapping might be imminent, +then force guests to do their own swapping. + +There is a downside to the transcendent memory specifications for +frontswap: Since any "store" might fail, there must always be a real +slot on a real swap device to swap the page. Thus frontswap must be +implemented as a "shadow" to every swapon'd device with the potential +capability of holding every page that the swap device might have held +and the possibility that it might hold no pages at all. This means +that frontswap cannot contain more pages than the total of swapon'd +swap devices. For example, if NO swap device is configured on some +installation, frontswap is useless. Swapless portable devices +can still use frontswap but a backend for such devices must configure +some kind of "ghost" swap device and ensure that it is never used. + +5) Why this weird definition about "duplicate stores"? 
If a page + has been previously successfully stored, can't it always be + successfully overwritten? + +Nearly always it can, but no, sometimes it cannot. Consider an example +where data is compressed and the original 4K page has been compressed +to 1K. Now an attempt is made to overwrite the page with data that +is non-compressible and so would take the entire 4K. But the backend +has no more space. In this case, the store must be rejected. Whenever +frontswap rejects a store that would overwrite, it also must invalidate +the old data and ensure that it is no longer accessible. Since the +swap subsystem then writes the new data to the read swap device, +this is the correct course of action to ensure coherency. + +6) What is frontswap_shrink for? + +When the (non-frontswap) swap subsystem swaps out a page to a real +swap device, that page is only taking up low-value pre-allocated disk +space. But if frontswap has placed a page in transcendent memory, that +page may be taking up valuable real estate. The frontswap_shrink +routine allows code outside of the swap subsystem to force pages out +of the memory managed by frontswap and back into kernel-addressable memory. +For example, in RAMster, a "suction driver" thread will attempt +to "repatriate" pages sent to a remote machine back to the local machine; +this is driven using the frontswap_shrink mechanism when memory pressure +subsides. + +7) Why does the frontswap patch create the new include file swapfile.h? + +The frontswap code depends on some swap-subsystem-internal data +structures that have, over the years, moved back and forth between +static and global. This seemed a reasonable compromise: Define +them as global but declare them in a new include file that isn't +included by the large number of source files that include swap.h. + +Dan Magenheimer, last updated April 9, 2012 diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt index 6752870c4970..b0c6d1bbb434 100644 --- a/Documentation/vm/slub.txt +++ b/Documentation/vm/slub.txt @@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists slabs that have data in them. See "slabinfo -h" for more options when running the command. slabinfo can be compiled with -gcc -o slabinfo tools/slub/slabinfo.c +gcc -o slabinfo tools/vm/slabinfo.c Some of the modes of operation of slabinfo require that slub debugging be enabled on the command line. F.e. no tracking information will be diff --git a/Documentation/x86/efi-stub.txt b/Documentation/x86/efi-stub.txt new file mode 100644 index 000000000000..44e6bb6ead10 --- /dev/null +++ b/Documentation/x86/efi-stub.txt @@ -0,0 +1,65 @@ + The EFI Boot Stub + --------------------------- + +On the x86 platform, a bzImage can masquerade as a PE/COFF image, +thereby convincing EFI firmware loaders to load it as an EFI +executable. The code that modifies the bzImage header, along with the +EFI-specific entry point that the firmware loader jumps to are +collectively known as the "EFI boot stub", and live in +arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c, +respectively. + +By using the EFI boot stub it's possible to boot a Linux kernel +without the use of a conventional EFI boot loader, such as grub or +elilo. Since the EFI boot stub performs the jobs of a boot loader, in +a certain sense it *IS* the boot loader. + +The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option. 
+ + +**** How to install bzImage.efi + +The bzImage located in arch/x86/boot/bzImage must be copied to the EFI +System Partiion (ESP) and renamed with the extension ".efi". Without +the extension the EFI firmware loader will refuse to execute it. It's +not possible to execute bzImage.efi from the usual Linux file systems +because EFI firmware doesn't have support for them. + + +**** Passing kernel parameters from the EFI shell + +Arguments to the kernel can be passed after bzImage.efi, e.g. + + fs0:> bzImage.efi console=ttyS0 root=/dev/sda4 + + +**** The "initrd=" option + +Like most boot loaders, the EFI stub allows the user to specify +multiple initrd files using the "initrd=" option. This is the only EFI +stub-specific command line parameter, everything else is passed to the +kernel when it boots. + +The path to the initrd file must be an absolute path from the +beginning of the ESP, relative path names do not work. Also, the path +is an EFI-style path and directory elements must be separated with +backslashes (\). For example, given the following directory layout, + +fs0:> + Kernels\ + bzImage.efi + initrd-large.img + + Ramdisks\ + initrd-small.img + initrd-medium.img + +to boot with the initrd-large.img file if the current working +directory is fs0:\Kernels, the following command must be used, + + fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img + +Notice how bzImage.efi can be specified with a relative path. That's +because the image we're executing is interpreted by the EFI shell, +which understands relative paths, whereas the rest of the command line +is passed to bzImage.efi. diff --git a/MAINTAINERS b/MAINTAINERS index 55f0fda602ec..6a52bb4a4fc7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2930,6 +2930,13 @@ F: Documentation/power/freezing-of-tasks.txt F: include/linux/freezer.h F: kernel/freezer.c +FRONTSWAP API +M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> +L: linux-kernel@vger.kernel.org +S: Maintained +F: mm/frontswap.c +F: include/linux/frontswap.h + FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS M: David Howells <dhowells@redhat.com> L: linux-cachefs@redhat.com @@ -1,7 +1,7 @@ VERSION = 3 -PATCHLEVEL = 4 +PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h index 24779fc95994..5a8a48320efe 100644 --- a/arch/alpha/include/asm/posix_types.h +++ b/arch/alpha/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned int __kernel_ino_t; #define __kernel_ino_t __kernel_ino_t -typedef unsigned int __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ #include <asm-generic/posix_types.h> diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index f6db3032ddf0..a8c97d42ec8e 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -226,7 +226,6 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs, if (__get_user(set.sig[0], &sc->sc_mask)) goto give_sigsegv; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(sc, regs, sw)) @@ -261,7 +260,6 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw)) @@ -468,12 +466,9 @@ static inline void 
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, struct pt_regs * regs, struct switch_stack *sw) { - sigset_t *oldset = ¤t->blocked; + sigset_t *oldset = sigmask_to_save(); int ret; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs, sw); else @@ -483,12 +478,7 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, force_sigsegv(sig, current); return; } - block_sigmask(ka, sig); - /* A signal was successfully delivered, and the - saved sigmask was stored on the signal frame, - and will be restored by sigreturn. So we can - simply clear the restore sigmask flag. */ - clear_thread_flag(TIF_RESTORE_SIGMASK); + signal_delivered(sig, info, ka, regs, 0); } static inline void @@ -572,9 +562,7 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw, } /* If there's no signal to deliver, we just restore the saved mask. */ - if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) - set_current_blocked(¤t->saved_sigmask); - + restore_saved_sigmask(); if (single_stepping) ptrace_set_bpt(current); /* re-set breakpoint */ } diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi index 881bc3987844..4ad5160018cb 100644 --- a/arch/arm/boot/dts/db8500.dtsi +++ b/arch/arm/boot/dts/db8500.dtsi @@ -58,6 +58,8 @@ "st,nomadik-gpio"; reg = <0x8012e000 0x80>; interrupts = <0 119 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -69,6 +71,8 @@ "st,nomadik-gpio"; reg = <0x8012e080 0x80>; interrupts = <0 120 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -80,6 +84,8 @@ "st,nomadik-gpio"; reg = <0x8000e000 0x80>; interrupts = <0 121 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -91,6 +97,8 @@ "st,nomadik-gpio"; reg = <0x8000e080 0x80>; interrupts = <0 122 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -102,6 +110,8 @@ "st,nomadik-gpio"; reg = <0x8000e100 0x80>; interrupts = <0 123 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -113,6 +123,8 @@ "st,nomadik-gpio"; reg = <0x8000e180 0x80>; interrupts = <0 124 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -124,6 +136,8 @@ "st,nomadik-gpio"; reg = <0x8011e000 0x80>; interrupts = <0 125 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -135,6 +149,8 @@ "st,nomadik-gpio"; reg = <0x8011e080 0x80>; interrupts = <0 126 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; @@ -146,12 +162,18 @@ "st,nomadik-gpio"; reg = <0xa03fe000 0x80>; interrupts = <0 127 0x4>; + interrupt-controller; + #interrupt-cells = <2>; supports-sleepmode; gpio-controller; #gpio-cells = <2>; gpio-bank = <8>; }; + pinctrl { + compatible = "stericsson,nmk_pinctrl"; + }; + usb@a03e0000 { compatible = "stericsson,db8500-musb", "mentor,musb"; @@ -169,20 +191,195 @@ prcmu@80157000 { compatible = "stericsson,db8500-prcmu"; reg = <0x80157000 0x1000>; - interrupts = <46 47>; + interrupts = <0 47 0x4>; #address-cells = <1>; #size-cells = <1>; ranges; - prcmu-timer-4@80157450 { + prcmu-timer-4@80157450 { compatible = 
"stericsson,db8500-prcmu-timer-4"; reg = <0x80157450 0xC>; }; + db8500-prcmu-regulators { + compatible = "stericsson,db8500-prcmu-regulator"; + + // DB8500_REGULATOR_VAPE + db8500_vape_reg: db8500_vape { + regulator-name = "db8500-vape"; + regulator-always-on; + }; + + // DB8500_REGULATOR_VARM + db8500_varm_reg: db8500_varm { + regulator-name = "db8500-varm"; + }; + + // DB8500_REGULATOR_VMODEM + db8500_vmodem_reg: db8500_vmodem { + regulator-name = "db8500-vmodem"; + }; + + // DB8500_REGULATOR_VPLL + db8500_vpll_reg: db8500_vpll { + regulator-name = "db8500-vpll"; + }; + + // DB8500_REGULATOR_VSMPS1 + db8500_vsmps1_reg: db8500_vsmps1 { + regulator-name = "db8500-vsmps1"; + }; + + // DB8500_REGULATOR_VSMPS2 + db8500_vsmps2_reg: db8500_vsmps2 { + regulator-name = "db8500-vsmps2"; + }; + + // DB8500_REGULATOR_VSMPS3 + db8500_vsmps3_reg: db8500_vsmps3 { + regulator-name = "db8500-vsmps3"; + }; + + // DB8500_REGULATOR_VRF1 + db8500_vrf1_reg: db8500_vrf1 { + regulator-name = "db8500-vrf1"; + }; + + // DB8500_REGULATOR_SWITCH_SVAMMDSP + db8500_sva_mmdsp_reg: db8500_sva_mmdsp { + regulator-name = "db8500-sva-mmdsp"; + }; + + // DB8500_REGULATOR_SWITCH_SVAMMDSPRET + db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret { + regulator-name = "db8500-sva-mmdsp-ret"; + }; + + // DB8500_REGULATOR_SWITCH_SVAPIPE + db8500_sva_pipe_reg: db8500_sva_pipe { + regulator-name = "db8500_sva_pipe"; + }; + + // DB8500_REGULATOR_SWITCH_SIAMMDSP + db8500_sia_mmdsp_reg: db8500_sia_mmdsp { + regulator-name = "db8500_sia_mmdsp"; + }; + + // DB8500_REGULATOR_SWITCH_SIAMMDSPRET + db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret { + regulator-name = "db8500-sia-mmdsp-ret"; + }; + + // DB8500_REGULATOR_SWITCH_SIAPIPE + db8500_sia_pipe_reg: db8500_sia_pipe { + regulator-name = "db8500-sia-pipe"; + }; + + // DB8500_REGULATOR_SWITCH_SGA + db8500_sga_reg: db8500_sga { + regulator-name = "db8500-sga"; + vin-supply = <&db8500_vape_reg>; + }; + + // DB8500_REGULATOR_SWITCH_B2R2_MCDE + db8500_b2r2_mcde_reg: db8500_b2r2_mcde { + regulator-name = "db8500-b2r2-mcde"; + vin-supply = <&db8500_vape_reg>; + }; + + // DB8500_REGULATOR_SWITCH_ESRAM12 + db8500_esram12_reg: db8500_esram12 { + regulator-name = "db8500-esram12"; + }; + + // DB8500_REGULATOR_SWITCH_ESRAM12RET + db8500_esram12_ret_reg: db8500_esram12_ret { + regulator-name = "db8500-esram12-ret"; + }; + + // DB8500_REGULATOR_SWITCH_ESRAM34 + db8500_esram34_reg: db8500_esram34 { + regulator-name = "db8500-esram34"; + }; + + // DB8500_REGULATOR_SWITCH_ESRAM34RET + db8500_esram34_ret_reg: db8500_esram34_ret { + regulator-name = "db8500-esram34-ret"; + }; + }; + ab8500@5 { compatible = "stericsson,ab8500"; reg = <5>; /* mailbox 5 is i2c */ interrupts = <0 40 0x4>; + + ab8500-regulators { + compatible = "stericsson,ab8500-regulator"; + + // supplies to the display/camera + ab8500_ldo_aux1_reg: ab8500_ldo_aux1 { + regulator-name = "V-DISPLAY"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2900000>; + regulator-boot-on; + /* BUG: If turned off MMC will be affected. 
*/ + regulator-always-on; + }; + + // supplies to the on-board eMMC + ab8500_ldo_aux2_reg: ab8500_ldo_aux2 { + regulator-name = "V-eMMC1"; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <3300000>; + }; + + // supply for VAUX3; SDcard slots + ab8500_ldo_aux3_reg: ab8500_ldo_aux3 { + regulator-name = "V-MMC-SD"; + regulator-min-microvolt = <1100000>; + regulator-max-microvolt = <3300000>; + }; + + // supply for v-intcore12; VINTCORE12 LDO + ab8500_ldo_initcore_reg: ab8500_ldo_initcore { + regulator-name = "V-INTCORE"; + }; + + // supply for tvout; gpadc; TVOUT LDO + ab8500_ldo_tvout_reg: ab8500_ldo_tvout { + regulator-name = "V-TVOUT"; + }; + + // supply for ab8500-usb; USB LDO + ab8500_ldo_usb_reg: ab8500_ldo_usb { + regulator-name = "dummy"; + }; + + // supply for ab8500-vaudio; VAUDIO LDO + ab8500_ldo_audio_reg: ab8500_ldo_audio { + regulator-name = "V-AUD"; + }; + + // supply for v-anamic1 VAMic1-LDO + ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 { + regulator-name = "V-AMIC1"; + }; + + // supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1 + ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 { + regulator-name = "V-AMIC2"; + }; + + // supply for v-dmic; VDMIC LDO + ab8500_ldo_dmic_reg: ab8500_ldo_dmic { + regulator-name = "V-DMIC"; + }; + + // supply for U8500 CSI/DSI; VANA LDO + ab8500_ldo_ana_reg: ab8500_ldo_ana { + regulator-name = "V-CSI/DSI"; + }; + }; }; }; @@ -235,7 +432,8 @@ status = "disabled"; // Add one of these for each child device - cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>; + cs-gpios = <&gpio0 31 0x4 &gpio4 14 0x4 &gpio4 16 0x4 + &gpio6 22 0x4 &gpio7 0 0x4>; }; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 2b1a166d41f9..386c769c38d1 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -213,5 +213,14 @@ status = "disabled"; }; }; + nand@d8000000 { + #address-cells = <1>; + #size-cells = <1>; + + compatible = "fsl,imx27-nand"; + reg = <0xd8000000 0x1000>; + interrupts = <29>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts index d99dc04f0d91..ec3c33975110 100644 --- a/arch/arm/boot/dts/snowball.dts +++ b/arch/arm/boot/dts/snowball.dts @@ -20,6 +20,16 @@ reg = <0x00000000 0x20000000>; }; + en_3v3_reg: en_3v3 { + compatible = "regulator-fixed"; + regulator-name = "en-3v3-fixed-supply"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpios = <&gpio0 26 0x4>; // 26 + startup-delay-us = <5000>; + enable-active-high; + }; + gpio_keys { compatible = "gpio-keys"; #address-cells = <1>; @@ -30,35 +40,35 @@ wakeup = <1>; linux,code = <2>; label = "userpb"; - gpios = <&gpio1 0 0>; + gpios = <&gpio1 0 0x4>; }; button@2 { debounce_interval = <50>; wakeup = <1>; linux,code = <3>; label = "extkb1"; - gpios = <&gpio4 23 0>; + gpios = <&gpio4 23 0x4>; }; button@3 { debounce_interval = <50>; wakeup = <1>; linux,code = <4>; label = "extkb2"; - gpios = <&gpio4 24 0>; + gpios = <&gpio4 24 0x4>; }; button@4 { debounce_interval = <50>; wakeup = <1>; linux,code = <5>; label = "extkb3"; - gpios = <&gpio5 1 0>; + gpios = <&gpio5 1 0x4>; }; button@5 { debounce_interval = <50>; wakeup = <1>; linux,code = <6>; label = "extkb4"; - gpios = <&gpio5 2 0>; + gpios = <&gpio5 2 0x4>; }; }; @@ -66,12 +76,11 @@ compatible = "gpio-leds"; used-led { label = "user_led"; - gpios = <&gpio4 14>; + gpios = <&gpio4 14 0x4>; }; }; soc-u9500 { - external-bus@50000000 { status = "okay"; @@ -80,6 +89,9 @@ reg = <0 0x10000>; interrupts 
= <12 0x1>; interrupt-parent = <&gpio4>; + vdd33a-supply = <&en_3v3_reg>; + vddvario-supply = <&db8500_vape_reg>; + reg-shift = <1>; reg-io-width = <2>; @@ -91,11 +103,13 @@ sdi@80126000 { status = "enabled"; - cd-gpios = <&gpio6 26>; + vmmc-supply = <&ab8500_ldo_aux3_reg>; + cd-gpios = <&gpio6 26 0x4>; // 218 }; sdi@80114000 { status = "enabled"; + vmmc-supply = <&ab8500_ldo_aux2_reg>; }; uart@80120000 { @@ -114,7 +128,7 @@ tc3589x@42 { //compatible = "tc3589x"; reg = <0x42>; - interrupts = <25>; + gpios = <&gpio6 25 0x4>; interrupt-parent = <&gpio6>; }; tps61052@33 { diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 7e84f453e8a6..2d4f661d1cf6 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -75,6 +75,7 @@ CONFIG_AB5500_CORE=y CONFIG_AB8500_CORE=y CONFIG_REGULATOR=y CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y # CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y CONFIG_AB8500_USB=y diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h index efdf99045d87..d2de9cbbcd9b 100644 --- a/arch/arm/include/asm/posix_types.h +++ b/arch/arm/include/asm/posix_types.h @@ -22,9 +22,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 63f327dd5198..fd2392a17ac1 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -22,8 +22,6 @@ #include "signal.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * For ARM syscalls, we encode the syscall number into the instruction. */ @@ -210,10 +208,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); - if (err == 0) { - sigdelsetmask(&set, ~_BLOCKABLE); + if (err == 0) set_current_blocked(&set); - } __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); @@ -528,13 +524,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, /* * OK, we're invoking a handler */ -static int +static void handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, - struct pt_regs * regs) + siginfo_t *info, struct pt_regs *regs) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; + sigset_t *oldset = sigmask_to_save(); int usig = sig; int ret; @@ -559,17 +555,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, if (ret != 0) { force_sigsegv(sig, tsk); - return ret; + return; } - - /* - * Block the signal if we were successful. - */ - block_sigmask(ka, sig); - - tracehook_signal_handler(sig, info, ka, regs, 0); - - return 0; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -617,8 +605,6 @@ static void do_signal(struct pt_regs *regs, int syscall) */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - sigset_t *oldset; - /* * Depending on the signal settings we may need to revert the * decision to restart the system call. 
But skip this if a @@ -635,20 +621,7 @@ static void do_signal(struct pt_regs *regs, int syscall) clear_thread_flag(TIF_SYSCALL_RESTARTSYS); } - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } + handle_signal(signr, &ka, &info, regs); return; } @@ -663,11 +636,7 @@ static void do_signal(struct pt_regs *regs, int syscall) set_thread_flag(TIF_SYSCALL_RESTARTSYS); } - /* If there's no signal to deliver, we just put the saved sigmask - * back. - */ - if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) - set_current_blocked(¤t->saved_sigmask); + restore_saved_sigmask(); } asmlinkage void diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c index eb282378fa78..01abd3516a77 100644 --- a/arch/arm/mach-ep93xx/snappercl15.c +++ b/arch/arm/mach-ep93xx/snappercl15.c @@ -82,8 +82,6 @@ static int snappercl15_nand_dev_ready(struct mtd_info *mtd) return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY); } -static const char *snappercl15_nand_part_probes[] = {"cmdlinepart", NULL}; - static struct mtd_partition snappercl15_nand_parts[] = { { .name = "Kernel", @@ -100,10 +98,8 @@ static struct mtd_partition snappercl15_nand_parts[] = { static struct platform_nand_data snappercl15_nand_data = { .chip = { .nr_chips = 1, - .part_probe_types = snappercl15_nand_part_probes, .partitions = snappercl15_nand_parts, .nr_partitions = ARRAY_SIZE(snappercl15_nand_parts), - .options = NAND_NO_AUTOINCR, .chip_delay = 25, }, .ctrl = { diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index d4ef339d961e..75cab2d7ec73 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c @@ -105,8 +105,6 @@ static int ts72xx_nand_device_ready(struct mtd_info *mtd) return !!(__raw_readb(addr) & 0x20); } -static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL }; - #define TS72XX_BOOTROM_PART_SIZE (SZ_16K) #define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) @@ -134,7 +132,6 @@ static struct platform_nand_data ts72xx_nand_data = { .nr_chips = 1, .chip_offset = 0, .chip_delay = 15, - .part_probe_types = ts72xx_nand_part_probes, .partitions = ts72xx_nand_parts, .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts), }, diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c index 972983e392bc..656f8fc9addd 100644 --- a/arch/arm/mach-exynos/mach-nuri.c +++ b/arch/arm/mach-exynos/mach-nuri.c @@ -237,25 +237,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = { #else /* Frame Buffer */ static struct s3c_fb_pd_win nuri_fb_win0 = { - .win_mode = { - .left_margin = 64, - .right_margin = 16, - .upper_margin = 64, - .lower_margin = 1, - .hsync_len = 48, - .vsync_len = 3, - .xres = 1024, - .yres = 600, - .refresh = 60, - }, .max_bpp = 24, .default_bpp = 16, + .xres = 1024, + .yres = 600, .virtual_x = 1024, .virtual_y = 2 * 600, }; +static struct fb_videomode nuri_lcd_timing = { + .left_margin = 64, + .right_margin = 16, + .upper_margin = 64, + .lower_margin = 1, + .hsync_len = 48, + .vsync_len = 3, + .xres = 1024, + .yres = 600, + .refresh = 60, +}; + static struct s3c_fb_platdata nuri_fb_pdata __initdata = { .win[0] = 
&nuri_fb_win0, + .vtiming = &nuri_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | VIDCON0_CLKSEL_LCD, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c index a7f7fd567dde..f5572be9d7bf 100644 --- a/arch/arm/mach-exynos/mach-origen.c +++ b/arch/arm/mach-exynos/mach-origen.c @@ -604,24 +604,28 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = { }; #else static struct s3c_fb_pd_win origen_fb_win0 = { - .win_mode = { - .left_margin = 64, - .right_margin = 16, - .upper_margin = 64, - .lower_margin = 16, - .hsync_len = 48, - .vsync_len = 3, - .xres = 1024, - .yres = 600, - }, + .xres = 1024, + .yres = 600, .max_bpp = 32, .default_bpp = 24, .virtual_x = 1024, .virtual_y = 2 * 600, }; +static struct fb_videomode origen_lcd_timing = { + .left_margin = 64, + .right_margin = 16, + .upper_margin = 64, + .lower_margin = 16, + .hsync_len = 48, + .vsync_len = 3, + .xres = 1024, + .yres = 600, +}; + static struct s3c_fb_platdata origen_lcd_pdata __initdata = { .win[0] = &origen_fb_win0, + .vtiming = &origen_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | VIDCON1_INV_VCLK, diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c index 70df1a0c2118..262e9e446a96 100644 --- a/arch/arm/mach-exynos/mach-smdkv310.c +++ b/arch/arm/mach-exynos/mach-smdkv310.c @@ -178,22 +178,26 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = { }; #else static struct s3c_fb_pd_win smdkv310_fb_win0 = { - .win_mode = { - .left_margin = 13, - .right_margin = 8, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, - .max_bpp = 32, - .default_bpp = 24, + .max_bpp = 32, + .default_bpp = 24, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode smdkv310_lcd_timing = { + .left_margin = 13, + .right_margin = 8, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = { .win[0] = &smdkv310_fb_win0, + .vtiming = &smdkv310_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = exynos4_fimd0_gpio_setup_24bpp, diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 083b44de9c10..cd92fa86ba41 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -843,25 +843,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = { #else /* Frame Buffer */ static struct s3c_fb_pd_win universal_fb_win0 = { - .win_mode = { - .left_margin = 16, - .right_margin = 16, - .upper_margin = 2, - .lower_margin = 28, - .hsync_len = 2, - .vsync_len = 1, - .xres = 480, - .yres = 800, - .refresh = 55, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 480, + .yres = 800, .virtual_x = 480, .virtual_y = 2 * 800, }; +static struct fb_videomode universal_lcd_timing = { + .left_margin = 16, + .right_margin = 16, + .upper_margin = 2, + .lower_margin = 28, + .hsync_len = 2, + .vsync_len = 1, + .xres = 480, + .yres = 800, + .refresh = 55, +}; + static struct s3c_fb_platdata universal_lcd_pdata __initdata = { .win[0] = &universal_fb_win0, + .vtiming = &universal_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | VIDCON0_CLKSEL_LCD, .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN diff --git 
a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c index ed38d03c61f2..eee0cc8d92a4 100644 --- a/arch/arm/mach-imx/imx27-dt.c +++ b/arch/arm/mach-imx/imx27-dt.c @@ -29,6 +29,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = { OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL), OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL), OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL), + OF_DEV_AUXDATA("fsl,imx27-nand", MX27_NFC_BASE_ADDR, "mxc_nand.0", NULL), { /* sentinel */ } }; diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index 3d742aee1773..108a9d3f382d 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -60,8 +60,6 @@ static struct platform_device ixdp425_flash = { #if defined(CONFIG_MTD_NAND_PLATFORM) || \ defined(CONFIG_MTD_NAND_PLATFORM_MODULE) -const char *part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition ixdp425_partitions[] = { { .name = "ixp400 NAND FS 0", @@ -100,8 +98,6 @@ static struct platform_nand_data ixdp425_flash_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 30, - .options = NAND_NO_AUTOINCR, - .part_probe_types = part_probes, .partitions = ixdp425_partitions, .nr_partitions = ARRAY_SIZE(ixdp425_partitions), }, diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c index 58cacafcf662..2e8d3e176bc7 100644 --- a/arch/arm/mach-nomadik/board-nhk8815.c +++ b/arch/arm/mach-nomadik/board-nhk8815.c @@ -111,7 +111,7 @@ static struct nomadik_nand_platform_data nhk8815_nand_data = { .parts = nhk8815_partitions, .nparts = ARRAY_SIZE(nhk8815_partitions), .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \ - | NAND_NO_READRDY | NAND_NO_AUTOINCR, + | NAND_NO_READRDY, .init = nhk8815_nand_init, }; diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c index c7364fdbda05..6872f3fd400f 100644 --- a/arch/arm/mach-omap1/board-fsample.c +++ b/arch/arm/mach-omap1/board-fsample.c @@ -192,14 +192,11 @@ static int nand_dev_ready(struct mtd_info *mtd) return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN); } -static const char *part_probes[] = { "cmdlinepart", NULL }; - static struct platform_nand_data nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .options = NAND_SAMSUNG_LP_OPTIONS, - .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c index 7e503686f7af..a28e989a63f4 100644 --- a/arch/arm/mach-omap1/board-h2.c +++ b/arch/arm/mach-omap1/board-h2.c @@ -186,8 +186,6 @@ static int h2_nand_dev_ready(struct mtd_info *mtd) return gpio_get_value(H2_NAND_RB_GPIO_PIN); } -static const char *h2_part_probes[] = { "cmdlinepart", NULL }; - static struct platform_nand_data h2_nand_platdata = { .chip = { .nr_chips = 1, @@ -195,7 +193,6 @@ static struct platform_nand_data h2_nand_platdata = { .nr_partitions = ARRAY_SIZE(h2_nand_partitions), .partitions = h2_nand_partitions, .options = NAND_SAMSUNG_LP_OPTIONS, - .part_probe_types = h2_part_probes, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c index 9fb03f189d93..108a8640fc6f 100644 --- a/arch/arm/mach-omap1/board-h3.c +++ b/arch/arm/mach-omap1/board-h3.c @@ -188,8 +188,6 @@ static int nand_dev_ready(struct mtd_info *mtd) return gpio_get_value(H3_NAND_RB_GPIO_PIN); } -static const char 
*part_probes[] = { "cmdlinepart", NULL }; - static struct platform_nand_data nand_platdata = { .chip = { .nr_chips = 1, @@ -197,7 +195,6 @@ static struct platform_nand_data nand_platdata = { .nr_partitions = ARRAY_SIZE(nand_partitions), .partitions = nand_partitions, .options = NAND_SAMSUNG_LP_OPTIONS, - .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c index f2cb24387c22..703d55ecffe2 100644 --- a/arch/arm/mach-omap1/board-perseus2.c +++ b/arch/arm/mach-omap1/board-perseus2.c @@ -150,14 +150,11 @@ static int nand_dev_ready(struct mtd_info *mtd) return gpio_get_value(P2_NAND_RB_GPIO_PIN); } -static const char *part_probes[] = { "cmdlinepart", NULL }; - static struct platform_nand_data nand_data = { .chip = { .nr_chips = 1, .chip_offset = 0, .options = NAND_SAMSUNG_LP_OPTIONS, - .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = omap1_nand_cmd_ctl, diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index db5a88a36c63..54d49ddb9b81 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -180,16 +180,133 @@ static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) omap4_dsi_mux_pads(dsi_id, 0); } +static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput) +{ + return omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, tput); +} + +static struct platform_device *create_dss_pdev(const char *pdev_name, + int pdev_id, const char *oh_name, void *pdata, int pdata_len, + struct platform_device *parent) +{ + struct platform_device *pdev; + struct omap_device *od; + struct omap_hwmod *ohs[1]; + struct omap_hwmod *oh; + int r; + + oh = omap_hwmod_lookup(oh_name); + if (!oh) { + pr_err("Could not look up %s\n", oh_name); + r = -ENODEV; + goto err; + } + + pdev = platform_device_alloc(pdev_name, pdev_id); + if (!pdev) { + pr_err("Could not create pdev for %s\n", pdev_name); + r = -ENOMEM; + goto err; + } + + if (parent != NULL) + pdev->dev.parent = &parent->dev; + + if (pdev->id != -1) + dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); + else + dev_set_name(&pdev->dev, "%s", pdev->name); + + ohs[0] = oh; + od = omap_device_alloc(pdev, ohs, 1, NULL, 0); + if (!od) { + pr_err("Could not alloc omap_device for %s\n", pdev_name); + r = -ENOMEM; + goto err; + } + + r = platform_device_add_data(pdev, pdata, pdata_len); + if (r) { + pr_err("Could not set pdata for %s\n", pdev_name); + goto err; + } + + r = omap_device_register(pdev); + if (r) { + pr_err("Could not register omap_device for %s\n", pdev_name); + goto err; + } + + return pdev; + +err: + return ERR_PTR(r); +} + +static struct platform_device *create_simple_dss_pdev(const char *pdev_name, + int pdev_id, void *pdata, int pdata_len, + struct platform_device *parent) +{ + struct platform_device *pdev; + int r; + + pdev = platform_device_alloc(pdev_name, pdev_id); + if (!pdev) { + pr_err("Could not create pdev for %s\n", pdev_name); + r = -ENOMEM; + goto err; + } + + if (parent != NULL) + pdev->dev.parent = &parent->dev; + + if (pdev->id != -1) + dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); + else + dev_set_name(&pdev->dev, "%s", pdev->name); + + r = platform_device_add_data(pdev, pdata, pdata_len); + if (r) { + pr_err("Could not set pdata for %s\n", pdev_name); + goto err; + } + + r = omap_device_register(pdev); + if (r) { + pr_err("Could not register omap_device for %s\n", pdev_name); + goto err; + } + + return pdev; + +err: + return 
ERR_PTR(r); +} + int __init omap_display_init(struct omap_dss_board_info *board_data) { int r = 0; - struct omap_hwmod *oh; struct platform_device *pdev; int i, oh_count; - struct omap_display_platform_data pdata; const struct omap_dss_hwmod_data *curr_dss_hwmod; + struct platform_device *dss_pdev; + + /* create omapdss device */ + + board_data->dsi_enable_pads = omap_dsi_enable_pads; + board_data->dsi_disable_pads = omap_dsi_disable_pads; + board_data->get_context_loss_count = omap_pm_get_dev_context_loss_count; + board_data->set_min_bus_tput = omap_dss_set_min_bus_tput; + + omap_display_device.dev.platform_data = board_data; + + r = platform_device_register(&omap_display_device); + if (r < 0) { + pr_err("Unable to register omapdss device\n"); + return r; + } - memset(&pdata, 0, sizeof(pdata)); + /* create devices for dss hwmods */ if (cpu_is_omap24xx()) { curr_dss_hwmod = omap2_dss_hwmod_data; @@ -202,39 +319,58 @@ int __init omap_display_init(struct omap_dss_board_info *board_data) oh_count = ARRAY_SIZE(omap4_dss_hwmod_data); } - if (board_data->dsi_enable_pads == NULL) - board_data->dsi_enable_pads = omap_dsi_enable_pads; - if (board_data->dsi_disable_pads == NULL) - board_data->dsi_disable_pads = omap_dsi_disable_pads; - - pdata.board_data = board_data; - pdata.board_data->get_context_loss_count = - omap_pm_get_dev_context_loss_count; - - for (i = 0; i < oh_count; i++) { - oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name); - if (!oh) { - pr_err("Could not look up %s\n", - curr_dss_hwmod[i].oh_name); - return -ENODEV; + /* + * First create the pdev for dss_core, which is used as a parent device + * by the other dss pdevs. Note: dss_core has to be the first item in + * the hwmod list. + */ + dss_pdev = create_dss_pdev(curr_dss_hwmod[0].dev_name, + curr_dss_hwmod[0].id, + curr_dss_hwmod[0].oh_name, + board_data, sizeof(*board_data), + NULL); + + if (IS_ERR(dss_pdev)) { + pr_err("Could not build omap_device for %s\n", + curr_dss_hwmod[0].oh_name); + + return PTR_ERR(dss_pdev); + } + + for (i = 1; i < oh_count; i++) { + pdev = create_dss_pdev(curr_dss_hwmod[i].dev_name, + curr_dss_hwmod[i].id, + curr_dss_hwmod[i].oh_name, + board_data, sizeof(*board_data), + dss_pdev); + + if (IS_ERR(pdev)) { + pr_err("Could not build omap_device for %s\n", + curr_dss_hwmod[i].oh_name); + + return PTR_ERR(pdev); } + } - pdev = omap_device_build(curr_dss_hwmod[i].dev_name, - curr_dss_hwmod[i].id, oh, &pdata, - sizeof(struct omap_display_platform_data), - NULL, 0, 0); + /* Create devices for DPI and SDI */ - if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n", - curr_dss_hwmod[i].oh_name)) - return -ENODEV; + pdev = create_simple_dss_pdev("omapdss_dpi", -1, + board_data, sizeof(*board_data), dss_pdev); + if (IS_ERR(pdev)) { + pr_err("Could not build platform_device for omapdss_dpi\n"); + return PTR_ERR(pdev); } - omap_display_device.dev.platform_data = board_data; - r = platform_device_register(&omap_display_device); - if (r < 0) - printk(KERN_ERR "Unable to register OMAP-Display device\n"); + if (cpu_is_omap34xx()) { + pdev = create_simple_dss_pdev("omapdss_sdi", -1, + board_data, sizeof(*board_data), dss_pdev); + if (IS_ERR(pdev)) { + pr_err("Could not build platform_device for omapdss_sdi\n"); + return PTR_ERR(pdev); + } + } - return r; + return 0; } static void dispc_disable_outputs(void) diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 46b09dae770e..2286410671e7 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -49,6 +49,7 @@ #define 
GPMC_ECC_CONTROL 0x1f8 #define GPMC_ECC_SIZE_CONFIG 0x1fc #define GPMC_ECC1_RESULT 0x200 +#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */ /* GPMC ECC control settings */ #define GPMC_ECC_CTRL_ECCCLEAR 0x100 @@ -935,3 +936,186 @@ int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code) return 0; } EXPORT_SYMBOL_GPL(gpmc_calculate_ecc); + +#ifdef CONFIG_ARCH_OMAP3 + +/** + * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality + * @cs: chip select number + * @nsectors: how many 512-byte sectors to process + * @nerrors: how many errors to correct per sector (4 or 8) + * + * This function must be executed before any call to gpmc_enable_hwecc_bch. + */ +int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors) +{ + /* check if ecc module is in use */ + if (gpmc_ecc_used != -EINVAL) + return -EINVAL; + + /* support only OMAP3 class */ + if (!cpu_is_omap34xx()) { + printk(KERN_ERR "BCH ecc is not supported on this CPU\n"); + return -EINVAL; + } + + /* + * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1. + * Other chips may be added if confirmed to work. + */ + if ((nerrors == 4) && + (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) { + printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n"); + return -EINVAL; + } + + /* sanity check */ + if (nsectors > 8) { + printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n", + nsectors); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch); + +/** + * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality + * @cs: chip select number + * @mode: read/write mode + * @dev_width: device bus width(1 for x16, 0 for x8) + * @nsectors: how many 512-byte sectors to process + * @nerrors: how many errors to correct per sector (4 or 8) + */ +int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors, + int nerrors) +{ + unsigned int val; + + /* check if ecc module is in use */ + if (gpmc_ecc_used != -EINVAL) + return -EINVAL; + + gpmc_ecc_used = cs; + + /* clear ecc and enable bits */ + gpmc_write_reg(GPMC_ECC_CONTROL, 0x1); + + /* + * When using BCH, sector size is hardcoded to 512 bytes. + * Here we are using wrapping mode 6 both for reading and writing, with: + * size0 = 0 (no additional protected byte in spare area) + * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) + */ + gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12)); + + /* BCH configuration */ + val = ((1 << 16) | /* enable BCH */ + (((nerrors == 8) ? 
1 : 0) << 12) | /* 8 or 4 bits */ + (0x06 << 8) | /* wrap mode = 6 */ + (dev_width << 7) | /* bus width */ + (((nsectors-1) & 0x7) << 4) | /* number of sectors */ + (cs << 1) | /* ECC CS */ + (0x1)); /* enable ECC */ + + gpmc_write_reg(GPMC_ECC_CONFIG, val); + gpmc_write_reg(GPMC_ECC_CONTROL, 0x101); + return 0; +} +EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch); + +/** + * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes + * @cs: chip select number + * @dat: The pointer to data on which ecc is computed + * @ecc: The ecc output buffer + */ +int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc) +{ + int i; + unsigned long nsectors, reg, val1, val2; + + if (gpmc_ecc_used != cs) + return -EINVAL; + + nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1; + + for (i = 0; i < nsectors; i++) { + + reg = GPMC_ECC_BCH_RESULT_0 + 16*i; + + /* Read hw-computed remainder */ + val1 = gpmc_read_reg(reg + 0); + val2 = gpmc_read_reg(reg + 4); + + /* + * Add constant polynomial to remainder, in order to get an ecc + * sequence of 0xFFs for a buffer filled with 0xFFs; and + * left-justify the resulting polynomial. + */ + *ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF); + *ecc++ = 0x13 ^ ((val2 >> 4) & 0xFF); + *ecc++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF)); + *ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF); + *ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF); + *ecc++ = 0xac ^ ((val1 >> 4) & 0xFF); + *ecc++ = 0x7f ^ ((val1 & 0xF) << 4); + } + + gpmc_ecc_used = -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4); + +/** + * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes + * @cs: chip select number + * @dat: The pointer to data on which ecc is computed + * @ecc: The ecc output buffer + */ +int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc) +{ + int i; + unsigned long nsectors, reg, val1, val2, val3, val4; + + if (gpmc_ecc_used != cs) + return -EINVAL; + + nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1; + + for (i = 0; i < nsectors; i++) { + + reg = GPMC_ECC_BCH_RESULT_0 + 16*i; + + /* Read hw-computed remainder */ + val1 = gpmc_read_reg(reg + 0); + val2 = gpmc_read_reg(reg + 4); + val3 = gpmc_read_reg(reg + 8); + val4 = gpmc_read_reg(reg + 12); + + /* + * Add constant polynomial to remainder, in order to get an ecc + * sequence of 0xFFs for a buffer filled with 0xFFs. 
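As a rough illustration of how these BCH helpers are meant to be driven (the intended consumer is the OMAP2 NAND driver, which is not part of these hunks), the call order is: init once, enable before each page transfer, then collect the per-sector remainders. The example_* names below are hypothetical, and the mode argument is passed as 0 because, as the body above shows, the BCH enable path does not consult it.

#include <linux/mtd/mtd.h>
#include <plat/gpmc.h>

/* Hypothetical sketch: read one page worth of 512-byte sectors with
 * BCH8 protection. Error paths are abbreviated; at most 8 sectors can
 * be processed per enable, as enforced by gpmc_init_hwecc_bch().
 */
static int example_read_page_bch8(int cs, struct mtd_info *mtd,
				  u_char *buf, u_char *ecc_calc)
{
	int nsectors = mtd->writesize / 512;
	int ret;

	ret = gpmc_init_hwecc_bch(cs, nsectors, 8);
	if (ret)
		return ret;

	ret = gpmc_enable_hwecc_bch(cs, 0 /* mode: unused by BCH */,
				    0 /* x8 bus */, nsectors, 8);
	if (ret)
		return ret;

	/* ... NAND data transfer happens here; the engine accumulates
	 * one remainder per 512-byte sector as the data flows through ... */

	/* 13 ECC bytes per sector, already XORed with the constant
	 * polynomial so an all-0xFF buffer yields all-0xFF ECC. */
	return gpmc_calculate_ecc_bch8(cs, buf, ecc_calc);
}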
+ */ + *ecc++ = 0xef ^ (val4 & 0xFF); + *ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF); + *ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF); + *ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF); + *ecc++ = 0xed ^ (val3 & 0xFF); + *ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF); + *ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF); + *ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF); + *ecc++ = 0x97 ^ (val2 & 0xFF); + *ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF); + *ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF); + *ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF); + *ecc++ = 0xb5 ^ (val1 & 0xFF); + } + + gpmc_ecc_used = -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8); + +#endif /* CONFIG_ARCH_OMAP3 */ diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c index a74f3cf54cc5..b4203277f3cd 100644 --- a/arch/arm/mach-orion5x/ts78xx-setup.c +++ b/arch/arm/mach-orion5x/ts78xx-setup.c @@ -251,8 +251,6 @@ static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd, readsb(io_base, buf, len); } -const char *ts_nand_part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition ts78xx_ts_nand_parts[] = { { .name = "mbr", @@ -277,7 +275,6 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = { static struct platform_nand_data ts78xx_ts_nand_data = { .chip = { .nr_chips = 1, - .part_probe_types = ts_nand_part_probes, .partitions = ts78xx_ts_nand_parts, .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), .chip_delay = 15, diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index 56e8cebeb7d5..9244493dbcb7 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -679,8 +679,6 @@ static struct mtd_partition balloon3_partition_info[] = { }, }; -static const char *balloon3_part_probes[] = { "cmdlinepart", NULL }; - struct platform_nand_data balloon3_nand_pdata = { .chip = { .nr_chips = 4, @@ -688,7 +686,6 @@ struct platform_nand_data balloon3_nand_pdata = { .nr_partitions = ARRAY_SIZE(balloon3_partition_info), .partitions = balloon3_partition_info, .chip_delay = 50, - .part_probe_types = balloon3_part_probes, }, .ctrl = { .hwcontrol = 0, diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index a3a4a38d4972..97f82ad341bf 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -338,8 +338,6 @@ static struct mtd_partition em_x270_partition_info[] = { }, }; -static const char *em_x270_part_probes[] = { "cmdlinepart", NULL }; - struct platform_nand_data em_x270_nand_platdata = { .chip = { .nr_chips = 1, @@ -347,7 +345,6 @@ struct platform_nand_data em_x270_nand_platdata = { .nr_partitions = ARRAY_SIZE(em_x270_partition_info), .partitions = em_x270_partition_info, .chip_delay = 20, - .part_probe_types = em_x270_part_probes, }, .ctrl = { .hwcontrol = 0, diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index 9507605ed547..0da35dccfd89 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -268,8 +268,6 @@ static struct mtd_partition palmtx_partition_info[] = { }, }; -static const char *palmtx_part_probes[] = { "cmdlinepart", NULL }; - struct platform_nand_data palmtx_nand_platdata = { .chip = { .nr_chips = 1, @@ -277,7 +275,6 @@ struct platform_nand_data palmtx_nand_platdata = { .nr_partitions = ARRAY_SIZE(palmtx_partition_info), .partitions = palmtx_partition_info, .chip_delay = 20, - .part_probe_types = palmtx_part_probes, }, .ctrl = { .cmd_ctrl = palmtx_nand_cmd_ctl, diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c index 30a44f806e01..c3100a044fbe 100644 --- 
a/arch/arm/mach-s3c24xx/mach-smdk2416.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c @@ -148,23 +148,25 @@ static struct s3c24xx_hsudc_platdata smdk2416_hsudc_platdata = { static struct s3c_fb_pd_win smdk2416_fb_win[] = { [0] = { - /* think this is the same as the smdk6410 */ - .win_mode = { - .pixclock = 41094, - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .default_bpp = 16, .max_bpp = 32, + .xres = 800, + .yres = 480, }, }; +static struct fb_videomode smdk2416_lcd_timing = { + .pixclock = 41094, + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, +}; + static void s3c2416_fb_gpio_setup_24bpp(void) { unsigned int gpio; @@ -187,6 +189,7 @@ static void s3c2416_fb_gpio_setup_24bpp(void) static struct s3c_fb_platdata smdk2416_fb_platdata = { .win[0] = &smdk2416_fb_win[0], + .vtiming = &smdk2416_lcd_timing, .setup_gpio = s3c2416_fb_gpio_setup_24bpp, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c index 314df0518afd..ffa29ddfdfce 100644 --- a/arch/arm/mach-s3c64xx/mach-anw6410.c +++ b/arch/arm/mach-s3c64xx/mach-anw6410.c @@ -134,24 +134,27 @@ static struct platform_device anw6410_lcd_powerdev = { }; static struct s3c_fb_pd_win anw6410_fb_win0 = { - /* this is to ensure we use win0 */ - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode anw6410_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &anw6410_lcd_timing, .win[0] = &anw6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 6b20a71d7dbf..d0c352d861f8 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -151,26 +151,29 @@ static struct platform_device crag6410_lcd_powerdev = { /* 640x480 URT */ static struct s3c_fb_pd_win crag6410_fb_win0 = { - /* this is to ensure we use win0 */ - .win_mode = { - .left_margin = 150, - .right_margin = 80, - .upper_margin = 40, - .lower_margin = 5, - .hsync_len = 40, - .vsync_len = 5, - .xres = 640, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 640, + .yres = 480, .virtual_y = 480 * 2, .virtual_x = 640, }; +static struct fb_videomode crag6410_lcd_timing = { + .left_margin = 150, + .right_margin = 80, + .upper_margin = 40, + .lower_margin = 5, + .hsync_len = 40, + .vsync_len = 5, + .xres = 640, + .yres = 480, +}; + /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &crag6410_lcd_timing, .win[0] = &crag6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = 
VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index 1bf6b9da20fc..689088162f77 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c @@ -129,23 +129,27 @@ static struct platform_device hmt_backlight_device = { }; static struct s3c_fb_pd_win hmt_fb_win0 = { - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode hmt_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata hmt_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &hmt_lcd_timing, .win[0] = &hmt_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c index f8ea61ea3b33..5539a255a704 100644 --- a/arch/arm/mach-s3c64xx/mach-mini6410.c +++ b/arch/arm/mach-s3c64xx/mach-mini6410.c @@ -140,41 +140,59 @@ static struct s3c2410_platform_nand mini6410_nand_info = { .sets = mini6410_nand_sets, }; -static struct s3c_fb_pd_win mini6410_fb_win[] = { +static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = { + .max_bpp = 32, + .default_bpp = 16, + .xres = 480, + .yres = 272, +}; + +static struct fb_videomode mini6410_lcd_type0_timing = { + /* 4.3" 480x272 */ + .left_margin = 3, + .right_margin = 2, + .upper_margin = 1, + .lower_margin = 1, + .hsync_len = 40, + .vsync_len = 1, + .xres = 480, + .yres = 272, +}; + +static struct s3c_fb_pd_win mini6410_lcd_type1_fb_win = { + .max_bpp = 32, + .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode mini6410_lcd_type1_timing = { + /* 7.0" 800x480 */ + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, +}; + +static struct s3c_fb_platdata mini6410_lcd_pdata[] __initdata = { { - .win_mode = { /* 4.3" 480x272 */ - .left_margin = 3, - .right_margin = 2, - .upper_margin = 1, - .lower_margin = 1, - .hsync_len = 40, - .vsync_len = 1, - .xres = 480, - .yres = 272, - }, - .max_bpp = 32, - .default_bpp = 16, + .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &mini6410_lcd_type0_timing, + .win[0] = &mini6410_lcd_type0_fb_win, + .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, + .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }, { - .win_mode = { /* 7.0" 800x480 */ - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, - .max_bpp = 32, - .default_bpp = 16, + .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &mini6410_lcd_type1_timing, + .win[0] = &mini6410_lcd_type1_fb_win, + .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, + .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }, -}; - -static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = { - .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, - .win[0] = &mini6410_fb_win[0], - .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, - .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, + { }, }; static void 
mini6410_lcd_power_set(struct plat_lcd_data *pd, @@ -272,7 +290,7 @@ static void mini6410_parse_features( "screen type already set\n", f); } else { int li = f - '0'; - if (li >= ARRAY_SIZE(mini6410_fb_win)) + if (li >= ARRAY_SIZE(mini6410_lcd_pdata)) printk(KERN_INFO "MINI6410: '%c' out " "of range LCD mode\n", f); else { @@ -296,14 +314,12 @@ static void __init mini6410_machine_init(void) /* Parse the feature string */ mini6410_parse_features(&features, mini6410_features_str); - mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index]; - printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n", - mini6410_lcd_pdata.win[0]->win_mode.xres, - mini6410_lcd_pdata.win[0]->win_mode.yres); + mini6410_lcd_pdata[features.lcd_index].win[0]->xres, + mini6410_lcd_pdata[features.lcd_index].win[0]->yres); s3c_nand_set_platdata(&mini6410_nand_info); - s3c_fb_set_platdata(&mini6410_lcd_pdata); + s3c_fb_set_platdata(&mini6410_lcd_pdata[features.lcd_index]); s3c24xx_ts_set_platdata(NULL); /* configure nCS1 width to 16 bits */ diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c index b92d8e17d502..326b21604bc3 100644 --- a/arch/arm/mach-s3c64xx/mach-real6410.c +++ b/arch/arm/mach-s3c64xx/mach-real6410.c @@ -106,41 +106,57 @@ static struct platform_device real6410_device_eth = { }, }; -static struct s3c_fb_pd_win real6410_fb_win[] = { +static struct s3c_fb_pd_win real6410_lcd_type0_fb_win = { + .max_bpp = 32, + .default_bpp = 16, + .xres = 480, + .yres = 272, +}; + +static struct fb_videomode real6410_lcd_type0_timing = { + /* 4.3" 480x272 */ + .left_margin = 3, + .right_margin = 2, + .upper_margin = 1, + .lower_margin = 1, + .hsync_len = 40, + .vsync_len = 1, +}; + +static struct s3c_fb_pd_win real6410_lcd_type1_fb_win = { + .max_bpp = 32, + .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode real6410_lcd_type1_timing = { + /* 7.0" 800x480 */ + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, +}; + +static struct s3c_fb_platdata real6410_lcd_pdata[] __initdata = { { - .win_mode = { /* 4.3" 480x272 */ - .left_margin = 3, - .right_margin = 2, - .upper_margin = 1, - .lower_margin = 1, - .hsync_len = 40, - .vsync_len = 1, - .xres = 480, - .yres = 272, - }, - .max_bpp = 32, - .default_bpp = 16, + .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &real6410_lcd_type0_timing, + .win[0] = &real6410_lcd_type0_fb_win, + .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, + .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }, { - .win_mode = { /* 7.0" 800x480 */ - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, - .max_bpp = 32, - .default_bpp = 16, + .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &real6410_lcd_type1_timing, + .win[0] = &real6410_lcd_type1_fb_win, + .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, + .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }, -}; - -static struct s3c_fb_platdata real6410_lcd_pdata __initdata = { - .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, - .win[0] = &real6410_fb_win[0], - .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, - .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, + { }, }; static struct mtd_partition real6410_nand_part[] = { @@ -253,7 +269,7 @@ static void real6410_parse_features( "screen type already set\n", f); } else { int li = f - '0'; - if (li >= 
ARRAY_SIZE(real6410_fb_win)) + if (li >= ARRAY_SIZE(real6410_lcd_pdata)) printk(KERN_INFO "REAL6410: '%c' out " "of range LCD mode\n", f); else { @@ -277,13 +293,11 @@ static void __init real6410_machine_init(void) /* Parse the feature string */ real6410_parse_features(&features, real6410_features_str); - real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index]; - printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n", - real6410_lcd_pdata.win[0]->win_mode.xres, - real6410_lcd_pdata.win[0]->win_mode.yres); + real6410_lcd_pdata[features.lcd_index].win[0]->xres, + real6410_lcd_pdata[features.lcd_index].win[0]->yres); - s3c_fb_set_platdata(&real6410_lcd_pdata); + s3c_fb_set_platdata(&real6410_lcd_pdata[features.lcd_index]); s3c_nand_set_platdata(&real6410_nand_info); s3c24xx_ts_set_platdata(NULL); diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c index c5021d0335c6..d6266d8b43c9 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq5.c +++ b/arch/arm/mach-s3c64xx/mach-smartq5.c @@ -108,23 +108,27 @@ static struct platform_device smartq5_buttons_device = { }; static struct s3c_fb_pd_win smartq5_fb_win0 = { - .win_mode = { - .left_margin = 216, - .right_margin = 40, - .upper_margin = 35, - .lower_margin = 10, - .hsync_len = 1, - .vsync_len = 1, - .xres = 800, - .yres = 480, - .refresh = 80, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode smartq5_lcd_timing = { + .left_margin = 216, + .right_margin = 40, + .upper_margin = 35, + .lower_margin = 10, + .hsync_len = 1, + .vsync_len = 1, + .xres = 800, + .yres = 480, + .refresh = 80, }; static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &smartq5_lcd_timing, .win[0] = &smartq5_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c index aa9072a4cbef..0957d2a980e1 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq7.c +++ b/arch/arm/mach-s3c64xx/mach-smartq7.c @@ -124,23 +124,27 @@ static struct platform_device smartq7_buttons_device = { }; static struct s3c_fb_pd_win smartq7_fb_win0 = { - .win_mode = { - .left_margin = 3, - .right_margin = 5, - .upper_margin = 1, - .lower_margin = 20, - .hsync_len = 10, - .vsync_len = 3, - .xres = 800, - .yres = 480, - .refresh = 80, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode smartq7_lcd_timing = { + .left_margin = 3, + .right_margin = 5, + .upper_margin = 1, + .lower_margin = 20, + .hsync_len = 10, + .vsync_len = 3, + .xres = 800, + .yres = 480, + .refresh = 80, }; static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &smartq7_lcd_timing, .win[0] = &smartq7_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index d44319b09412..df3103d450e2 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -146,26 +146,29 @@ static struct platform_device smdk6410_lcd_powerdev = { }; static struct s3c_fb_pd_win smdk6410_fb_win0 = { - /* this is to ensure we use win0 */ - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 
800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, .virtual_y = 480 * 2, .virtual_x = 800, }; +static struct fb_videomode smdk6410_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, +}; + /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, + .vtiming = &smdk6410_lcd_timing, .win[0] = &smdk6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c index a40e325d62c8..92fefad505cc 100644 --- a/arch/arm/mach-s5p64x0/mach-smdk6440.c +++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c @@ -103,22 +103,26 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = { /* Frame Buffer */ static struct s3c_fb_pd_win smdk6440_fb_win0 = { - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 24, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode smdk6440_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = { .win[0] = &smdk6440_fb_win0, + .vtiming = &smdk6440_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c index efb69e2f2afe..e2335ecf6eae 100644 --- a/arch/arm/mach-s5p64x0/mach-smdk6450.c +++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c @@ -121,22 +121,26 @@ static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = { /* Frame Buffer */ static struct s3c_fb_pd_win smdk6450_fb_win0 = { - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 24, + .xres = 800, + .yres = 480, +}; + +static struct fb_videomode smdk6450_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = { .win[0] = &smdk6450_fb_win0, + .vtiming = &smdk6450_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c index 674d22992f3c..0c3ae38d27ca 100644 --- a/arch/arm/mach-s5pc100/mach-smdkc100.c +++ b/arch/arm/mach-s5pc100/mach-smdkc100.c @@ -136,24 +136,27 @@ static struct platform_device smdkc100_lcd_powerdev = { /* Frame Buffer */ static struct s3c_fb_pd_win smdkc100_fb_win0 = { - /* this is to ensure we use win0 */ - .win_mode = { - .left_margin = 8, - .right_margin = 13, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - .refresh = 80, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 800, + .yres = 480, +}; + +static struct 
fb_videomode smdkc100_lcd_timing = { + .left_margin = 8, + .right_margin = 13, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, + .refresh = 80, }; static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = { .win[0] = &smdkc100_fb_win0, + .vtiming = &smdkc100_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5pc100_fb_gpio_setup_24bpp, diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index 48d018f2332b..af528f9e97f9 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c @@ -96,38 +96,34 @@ static struct s3c2410_uartcfg aquila_uartcfgs[] __initdata = { /* Frame Buffer */ static struct s3c_fb_pd_win aquila_fb_win0 = { - .win_mode = { - .left_margin = 16, - .right_margin = 16, - .upper_margin = 3, - .lower_margin = 28, - .hsync_len = 2, - .vsync_len = 2, - .xres = 480, - .yres = 800, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 480, + .yres = 800, }; static struct s3c_fb_pd_win aquila_fb_win1 = { - .win_mode = { - .left_margin = 16, - .right_margin = 16, - .upper_margin = 3, - .lower_margin = 28, - .hsync_len = 2, - .vsync_len = 2, - .xres = 480, - .yres = 800, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 480, + .yres = 800, +}; + +static struct fb_videomode aquila_lcd_timing = { + .left_margin = 16, + .right_margin = 16, + .upper_margin = 3, + .lower_margin = 28, + .hsync_len = 2, + .vsync_len = 2, + .xres = 480, + .yres = 800, }; static struct s3c_fb_platdata aquila_lcd_pdata __initdata = { .win[0] = &aquila_fb_win0, .win[1] = &aquila_fb_win1, + .vtiming = &aquila_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | VIDCON1_INV_VCLK | VIDCON1_INV_VDEN, diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index f20a97c8e411..bf5087c2b7fe 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -107,25 +107,29 @@ static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = { /* Frame Buffer */ static struct s3c_fb_pd_win goni_fb_win0 = { - .win_mode = { - .left_margin = 16, - .right_margin = 16, - .upper_margin = 2, - .lower_margin = 28, - .hsync_len = 2, - .vsync_len = 1, - .xres = 480, - .yres = 800, - .refresh = 55, - }, .max_bpp = 32, .default_bpp = 16, + .xres = 480, + .yres = 800, .virtual_x = 480, .virtual_y = 2 * 800, }; +static struct fb_videomode goni_lcd_timing = { + .left_margin = 16, + .right_margin = 16, + .upper_margin = 2, + .lower_margin = 28, + .hsync_len = 2, + .vsync_len = 1, + .xres = 480, + .yres = 800, + .refresh = 55, +}; + static struct s3c_fb_platdata goni_lcd_pdata __initdata = { .win[0] = &goni_fb_win0, + .vtiming = &goni_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | VIDCON0_CLKSEL_LCD, .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c index fa1b61209fd9..0d7ddec88eb7 100644 --- a/arch/arm/mach-s5pv210/mach-smdkv210.c +++ b/arch/arm/mach-s5pv210/mach-smdkv210.c @@ -178,22 +178,26 @@ static struct platform_device smdkv210_lcd_lte480wv = { }; static struct s3c_fb_pd_win smdkv210_fb_win0 = { - .win_mode = { - .left_margin = 13, - .right_margin = 8, - .upper_margin = 7, - .lower_margin = 5, - .hsync_len = 3, - .vsync_len = 1, - .xres = 800, - .yres = 480, - }, .max_bpp = 32, .default_bpp = 24, + .xres = 800, + .yres = 
480, +}; + +static struct fb_videomode smdkv210_lcd_timing = { + .left_margin = 13, + .right_margin = 8, + .upper_margin = 7, + .lower_margin = 5, + .hsync_len = 3, + .vsync_len = 1, + .xres = 800, + .yres = 480, }; static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = { .win[0] = &smdkv210_fb_win0, + .vtiming = &smdkv210_lcd_timing, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5pv210_fb_gpio_setup_24bpp, diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index f31383c32f9c..df33909205e2 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -186,6 +186,12 @@ config SH_TIMER_TMU help This enables build of the TMU timer driver. +config EM_TIMER_STI + bool "STI timer driver" + default y + help + This enables build of the STI timer driver. + endmenu config SH_CLK_CPG diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c index b29a788f498c..1f47d962e3a1 100644 --- a/arch/arm/mach-ux500/board-mop500-uib.c +++ b/arch/arm/mach-ux500/board-mop500-uib.c @@ -96,7 +96,7 @@ static void __init __mop500_uib_init(struct uib *uib, const char *why) /* * Detect the UIB attached based on the presence or absence of i2c devices. */ -static int __init mop500_uib_init(void) +int __init mop500_uib_init(void) { struct uib *uib = mop500_uib; struct i2c_adapter *i2c0; @@ -131,5 +131,3 @@ static int __init mop500_uib_init(void) return 0; } - -module_init(mop500_uib_init); diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index fba8adea421e..9c74ac545849 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -673,9 +673,15 @@ static void __init u8500_cryp1_hash1_init(struct device *parent) static struct platform_device *snowball_platform_devs[] __initdata = { &snowball_led_dev, &snowball_key_dev, + &snowball_sbnet_dev, &ab8500_device, }; +static struct platform_device *snowball_of_platform_devs[] __initdata = { + &snowball_led_dev, + &snowball_key_dev, +}; + static void __init mop500_init_machine(void) { struct device *parent = NULL; @@ -710,6 +716,8 @@ static void __init mop500_init_machine(void) /* This board has full regulator constraints */ regulator_has_full_constraints(); + + mop500_uib_init(); } static void __init snowball_init_machine(void) @@ -774,6 +782,8 @@ static void __init hrefv60_init_machine(void) /* This board has full regulator constraints */ regulator_has_full_constraints(); + + mop500_uib_init(); } MACHINE_START(U8500, "ST-Ericsson MOP500 platform") @@ -834,6 +844,10 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { static const struct of_device_id u8500_local_bus_nodes[] = { /* only create devices below soc node */ { .compatible = "stericsson,db8500", }, + { .compatible = "stericsson,db8500-prcmu", }, + { .compatible = "stericsson,db8500-prcmu-regulator", }, + { .compatible = "stericsson,ab8500", }, + { .compatible = "stericsson,ab8500-regulator", }, { .compatible = "simple-bus"}, { }, }; @@ -852,7 +866,7 @@ static void __init u8500_init_machine(void) else if (of_machine_is_compatible("st-ericsson,hrefv60+")) hrefv60_pinmaps_init(); - parent = u8500_init_devices(); + parent = u8500_of_init_devices(); for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) mop500_platform_devs[i]->dev.parent = parent; @@ -869,15 +883,23 @@ static void __init u8500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); mop500_sdi_init(parent); - i2c0_devs = 
ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); + mop500_uib_init(); + } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { - platform_add_devices(snowball_platform_devs, - ARRAY_SIZE(snowball_platform_devs)); + /* + * Devices to be DT:ed: + * snowball_led_dev = todo + * snowball_key_dev = todo + * snowball_sbnet_dev = done + * ab8500_device = done + */ + platform_add_devices(snowball_of_platform_devs, + ARRAY_SIZE(snowball_of_platform_devs)); snowball_sdi_init(parent); } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { @@ -898,6 +920,8 @@ static void __init u8500_init_machine(void) i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, ARRAY_SIZE(mop500_i2c2_devices)); + + mop500_uib_init(); } mop500_i2c_init(parent); diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index bc44c07c71a9..2f87b25a908a 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -89,7 +89,11 @@ void __init mop500_pinmaps_init(void); void __init snowball_pinmaps_init(void); void __init hrefv60_pinmaps_init(void); +int __init mop500_uib_init(void); void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, unsigned n); +/* TODO: Once all pieces are DT:ed, remove completely. */ +struct device * __init u8500_of_init_devices(void); + #endif diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 16169c4bf6ca..33275eb4c689 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -140,7 +140,6 @@ static struct platform_device *platform_devs[] __initdata = { static struct platform_device *of_platform_devs[] __initdata = { &u8500_dma40_device, &db8500_pmu_device, - &db8500_prcmu_device, }; static resource_size_t __initdata db8500_gpio_base[] = { @@ -222,6 +221,28 @@ struct device * __init u8500_init_devices(void) platform_device_register_data(parent, "cpufreq-u8500", -1, NULL, 0); + for (i = 0; i < ARRAY_SIZE(platform_devs); i++) + platform_devs[i]->dev.parent = parent; + + platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); + + return parent; +} + +/* TODO: Once all pieces are DT:ed, remove completely. 
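The extended u8500_local_bus_nodes match table and the u8500_auxdata_lookup entries are presumably handed to of_platform_populate() inside u8500_init_machine(); that call sits outside the hunks shown, so the wiring below is only a hedged sketch with a made-up helper name.

#include <linux/of_platform.h>

/* Sketch only: create platform devices for DT nodes matching the
 * whitelist, keeping legacy device names via the auxdata table so the
 * existing platform drivers still bind.
 */
static void __init u8500_populate_dt_example(struct device *parent)
{
	if (of_platform_populate(NULL, u8500_local_bus_nodes,
				 u8500_auxdata_lookup, parent))
		pr_warn("of_platform_populate failed\n");
}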
*/ +struct device * __init u8500_of_init_devices(void) +{ + struct device *parent; + int i; + + parent = db8500_soc_device_init(); + + db8500_add_rtc(parent); + db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg); + + platform_device_register_data(parent, + "cpufreq-u8500", -1, NULL, 0); + for (i = 0; i < ARRAY_SIZE(of_platform_devs); i++) of_platform_devs[i]->dev.parent = parent; @@ -229,7 +250,7 @@ struct device * __init u8500_init_devices(void) * Devices to be DT:ed: * u8500_dma40_device = todo * db8500_pmu_device = todo - * db8500_prcmu_device = todo + * db8500_prcmu_device = done */ platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs)); diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h index 1527929b445a..f37764a36072 100644 --- a/arch/arm/plat-omap/include/plat/gpmc.h +++ b/arch/arm/plat-omap/include/plat/gpmc.h @@ -92,6 +92,8 @@ enum omap_ecc { OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */ /* 1-bit ecc: stored at beginning of spare area as romcode */ OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */ + OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */ + OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */ }; /* @@ -157,4 +159,13 @@ extern int gpmc_nand_write(int cs, int cmd, int wval); int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size); int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code); + +#ifdef CONFIG_ARCH_OMAP3 +int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors); +int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors, + int nerrors); +int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc); +int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc); +#endif /* CONFIG_ARCH_OMAP3 */ + #endif diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h index 0fedf47fa502..536002ff2ab8 100644 --- a/arch/arm/plat-samsung/include/plat/fb.h +++ b/arch/arm/plat-samsung/include/plat/fb.h @@ -24,15 +24,16 @@ /** * struct s3c_fb_pd_win - per window setup data - * @win_mode: The display parameters to initialise (not for window 0) + * @xres : The window X size. + * @yres : The window Y size. * @virtual_x: The virtual X size. * @virtual_y: The virtual Y size. */ struct s3c_fb_pd_win { - struct fb_videomode win_mode; - unsigned short default_bpp; unsigned short max_bpp; + unsigned short xres; + unsigned short yres; unsigned short virtual_x; unsigned short virtual_y; }; @@ -45,6 +46,7 @@ struct s3c_fb_pd_win { * @default_win: default window layer number to be used for UI layer. * @vidcon0: The base vidcon0 values to control the panel data format. * @vidcon1: The base vidcon1 values to control the panel data output. + * @vtiming: Video timing when connected to a RGB type panel. * @win: The setup data for each hardware window, or NULL for unused. * @display_mode: The LCD output display mode. 
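The board conversions earlier in this series all follow the pattern documented here: the window entry keeps only size and depth, and the panel timing moves into a single fb_videomode referenced by .vtiming. A minimal illustrative board fragment follows; the example_* names and timing values are made up, while the macros and the setup_gpio callback are the ones used by the S3C64xx boards above.

static struct s3c_fb_pd_win example_fb_win0 = {
	.max_bpp	= 32,
	.default_bpp	= 16,
	.xres		= 800,
	.yres		= 480,
};

static struct fb_videomode example_lcd_timing = {
	.left_margin	= 8,
	.right_margin	= 13,
	.upper_margin	= 7,
	.lower_margin	= 5,
	.hsync_len	= 3,
	.vsync_len	= 1,
	.xres		= 800,
	.yres		= 480,
};

static struct s3c_fb_platdata example_lcd_pdata __initdata = {
	.win[0]		= &example_fb_win0,
	.vtiming	= &example_lcd_timing,	/* shared by all windows */
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
	.setup_gpio	= s3c64xx_fb_gpio_setup_24bpp,	/* SoC-specific */
};

Such platform data would be registered with s3c_fb_set_platdata(&example_lcd_pdata), exactly as the real board files above do.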
* @@ -58,8 +60,7 @@ struct s3c_fb_platdata { void (*setup_gpio)(void); struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN]; - - u32 default_win; + struct fb_videomode *vtiming; u32 vidcon0; u32 vidcon1; diff --git a/arch/avr32/include/asm/posix_types.h b/arch/avr32/include/asm/posix_types.h index 74667bfc88cc..9ba9e749b3f3 100644 --- a/arch/avr32/include/asm/posix_types.h +++ b/arch/avr32/include/asm/posix_types.h @@ -17,9 +17,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S index 5e6beb2597a8..df2884181313 100644 --- a/arch/avr32/kernel/entry-avr32b.S +++ b/arch/avr32/kernel/entry-avr32b.S @@ -281,7 +281,7 @@ syscall_exit_work: ld.w r1, r0[TI_flags] rjmp 1b -2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME +2: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME tst r1, r2 breq 3f unmask_interrupts @@ -587,7 +587,7 @@ fault_exit_work: ld.w r1, r0[TI_flags] rjmp fault_exit_work -1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME +1: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME tst r1, r2 breq 2f unmask_interrupts diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index e7595ef74f51..c140f9b41dce 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c @@ -22,8 +22,6 @@ #include <asm/ucontext.h> #include <asm/syscalls.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) { @@ -89,7 +87,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) @@ -224,30 +221,27 @@ static inline void setup_syscall_restart(struct pt_regs *regs) static inline void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs, int syscall) + struct pt_regs *regs, int syscall) { int ret; /* * Set up the stack frame */ - ret = setup_rt_frame(sig, ka, info, oldset, regs); + ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs); /* * Check that the resulting registers are sane */ ret |= !valid_user_regs(regs); - if (ret != 0) { - force_sigsegv(sig, current); - return; - } - /* * Block the signal if we were successful. */ - block_sigmask(ka, sig); - clear_thread_flag(TIF_RESTORE_SIGMASK); + if (ret != 0) + force_sigsegv(sig, current); + else + signal_delivered(sig, info, ka, regs, 0); } /* @@ -255,7 +249,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, * doesn't want to handle. Thus you cannot kill init even with a * SIGKILL even by mistake. */ -int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) +static void do_signal(struct pt_regs *regs, int syscall) { siginfo_t info; int signr; @@ -267,12 +261,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) * without doing anything if so. 
*/ if (!user_mode(regs)) - return 0; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = &current->saved_sigmask; - else if (!oldset) - oldset = &current->blocked; + return; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (syscall) { @@ -297,15 +286,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) if (signr == 0) { /* No signal to deliver -- put the saved sigmask back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); - } - return 0; + restore_saved_sigmask(); + return; } - handle_signal(signr, &ka, &info, oldset, regs, syscall); - return 1; + handle_signal(signr, &ka, &info, regs, syscall); } asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) @@ -315,8 +300,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) syscall = 1; - if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) - do_signal(regs, &current->blocked, syscall); + if (ti->flags & _TIF_SIGPENDING) + do_signal(regs, syscall); if (ti->flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); diff --git a/arch/blackfin/include/asm/posix_types.h b/arch/blackfin/include/asm/posix_types.h index 41bc1875c4d7..1bd3436db6a7 100644 --- a/arch/blackfin/include/asm/posix_types.h +++ b/arch/blackfin/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned int __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 02560fd8a121..53ad10005ae3 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void) TIF_NEED_RESCHED */ #define TIF_MEMDIE 4 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ -#define TIF_FREEZE 6 /* is freezing for suspend */ #define TIF_IRQ_SYNC 7 /* sync pipeline stage */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SINGLESTEP 9 @@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE (1<<TIF_FREEZE) #define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index fc9ecce8b6ce..6682b73a8523 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -19,8 +19,6 @@ #include <asm/fixed_code.h> #include <asm/syscall.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* Location of the trace bit in SYSCFG. 
*/ #define TRACE_BITS 0x0001 @@ -98,7 +96,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) @@ -190,17 +187,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) - goto give_sigsegv; + return -EFAULT; /* Set up registers for signal handler */ - wrusp((unsigned long)frame); if (current->personality & FDPIC_FUNCPTRS) { struct fdpic_func_descriptor __user *funcptr = (struct fdpic_func_descriptor *) ka->sa.sa_handler; - __get_user(regs->pc, &funcptr->text); - __get_user(regs->p3, &funcptr->GOT); + u32 pc, p3; + err |= __get_user(pc, &funcptr->text); + err |= __get_user(p3, &funcptr->GOT); + if (err) + return -EFAULT; + regs->pc = pc; + regs->p3 = p3; } else regs->pc = (unsigned long)ka->sa.sa_handler; + wrusp((unsigned long)frame); regs->rets = SIGRETURN_STUB; regs->r0 = frame->sig; @@ -208,10 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, regs->r2 = (unsigned long)(&frame->uc); return 0; - - give_sigsegv: - force_sigsegv(sig, current); - return -EFAULT; } static inline void @@ -247,24 +245,21 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) /* * OK, we're invoking a handler */ -static int +static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { - int ret; - /* are we from a system call? to see pt_regs->orig_p0 */ if (regs->orig_p0 >= 0) /* If so, check system call restarting.. */ handle_restart(regs, ka, 1); /* set up the stack frame */ - ret = setup_rt_frame(sig, ka, info, oldset, regs); - - if (ret == 0) - block_sigmask(ka, sig); - - return ret; + if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) + force_sigsegv(sig, current); + else + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } /* @@ -281,37 +276,16 @@ asmlinkage void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; current->thread.esp0 = (unsigned long)regs; - if (try_to_freeze()) - goto no_signal; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = &current->saved_sigmask; - else - oldset = &current->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - } - + handle_signal(signr, &info, &ka, regs); return; } - no_signal: /* Did we come from a system call? 
*/ if (regs->orig_p0 >= 0) /* Restart the system call - no handlers present */ @@ -319,10 +293,7 @@ asmlinkage void do_signal(struct pt_regs *regs) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* @@ -330,7 +301,7 @@ */ asmlinkage void do_notify_resume(struct pt_regs *regs) { - if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK)) + if (test_thread_flag(TIF_SIGPENDING)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) { diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index f6ffd6f054c3..0b74218fdd3a 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c @@ -248,8 +248,6 @@ static struct platform_device bfin_uart0_device = { #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) -const char *part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition bfin_plat_nand_partitions[] = { { .name = "params(nand)", @@ -289,7 +287,6 @@ static struct platform_nand_data bfin_plat_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 30, - .part_probe_types = part_probes, .partitions = bfin_plat_nand_partitions, .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), }, diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 80aa2535e2c9..04c2fbe41a7f 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -711,8 +711,6 @@ ENTRY(_system_call) jump .Lresume_userspace_1; .Lsyscall_sigpending: - cc = BITTST(r7, TIF_RESTORE_SIGMASK); - if cc jump .Lsyscall_do_signals; cc = BITTST(r7, TIF_SIGPENDING); if cc jump .Lsyscall_do_signals; cc = BITTST(r7, TIF_NOTIFY_RESUME); diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index 9493f0bbf0a6..3d8f3c22a94f 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -20,8 +20,6 @@ #include <asm/cacheflush.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * Do a signal return, undo the signal stack. 
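The part_probe_types removals in the acvilon hunk above, like those in the ixdp425, omap1 and pxa hunks earlier in this diff, rely on the MTD core falling back to its default partition parsers (which include cmdlinepart), so a board now only describes the chip and any static partitions. An illustrative minimal description (the example_* names and sizes are made up):

#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>

static struct mtd_partition example_nand_parts[] = {
	{
		.name	= "boot",
		.offset	= 0,
		.size	= 0x100000,		/* 1 MiB */
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,	/* rest of the chip */
	},
};

static struct platform_nand_data example_nand_data = {
	.chip = {
		.nr_chips	= 1,
		.chip_delay	= 30,
		.partitions	= example_nand_parts,
		.nr_partitions	= ARRAY_SIZE(example_nand_parts),
		/* no .part_probe_types: cmdlinepart is tried by default */
	},
};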
*/ @@ -87,7 +85,6 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) @@ -248,10 +245,9 @@ do_restart: /* * handle the actual delivery of a signal to userspace */ -static int handle_signal(int sig, +static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs, - int syscall) + struct pt_regs *regs, int syscall) { int ret; @@ -278,11 +274,9 @@ static int handle_signal(int sig, } /* Set up the stack frame */ - ret = setup_rt_frame(sig, ka, info, oldset, regs); - if (ret == 0) - block_sigmask(ka, sig); - - return ret; + if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) + return; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -292,7 +286,6 @@ static void do_signal(struct pt_regs *regs, int syscall) { struct k_sigaction ka; siginfo_t info; - sigset_t *oldset; int signr; /* we want the common case to go fast, which is why we may in certain @@ -300,25 +293,9 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - if (handle_signal(signr, &info, &ka, oldset, - regs, syscall) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, regs, 0); - } - + handle_signal(signr, &info, &ka, regs, syscall); return; } @@ -343,10 +320,7 @@ static void do_signal(struct pt_regs *regs, int syscall) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* @@ -357,8 +331,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags, int syscall) { /* deal with pending signal delivery */ - if (thread_info_flags & ((1 << TIF_SIGPENDING) | - (1 << TIF_RESTORE_SIGMASK))) + if (thread_info_flags & (1 << TIF_SIGPENDING)) do_signal(regs, syscall); if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) { diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index e16f8f297f61..0bb477c13a4e 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c @@ -31,8 +31,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */ /* manipulate regs so that upon return, it will be re-executed */ @@ -176,7 +174,6 @@ asmlinkage int sys_sigreturn(long r10, long r11, long r12, long r13, long mof, sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc)) @@ -212,7 +209,6 @@ asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, 
&frame->uc.uc_mcontext)) @@ -415,10 +411,11 @@ give_sigsegv: * OK, we're invoking a handler */ -static inline int handle_signal(int canrestart, unsigned long sig, +static inline void handle_signal(int canrestart, unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ @@ -456,9 +453,7 @@ static inline int handle_signal(int canrestart, unsigned long sig, ret = setup_frame(sig, ka, oldset, regs); if (ret == 0) - block_sigmask(ka, sig); - - return ret; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -478,7 +473,6 @@ void do_signal(int canrestart, struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * We want the common case to go fast, which @@ -489,23 +483,10 @@ void do_signal(int canrestart, struct pt_regs *regs) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - if (handle_signal(canrestart, signr, &info, &ka, - oldset, regs)) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } + handle_signal(canrestart, signr, &info, &ka, regs); return; } @@ -525,8 +506,5 @@ void do_signal(int canrestart, struct pt_regs *regs) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index b338d8fc0c12..b60d1b65a426 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c @@ -24,9 +24,6 @@ extern unsigned long cris_signal_return_page; -/* Flag to check if a signal is blockable. */ -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * A syscall in CRIS is really a "break 13" instruction, which is 2 * bytes. The registers is manipulated so upon return the instruction @@ -167,7 +164,6 @@ sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp, sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc)) @@ -208,7 +204,6 @@ sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) @@ -434,11 +429,12 @@ give_sigsegv: } /* Invoke a signal handler to, well, handle the signal. */ -static inline int +static inline void handle_signal(int canrestart, unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs * regs) + struct pt_regs * regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Check if this got called from a system call. 
*/ @@ -489,9 +485,7 @@ handle_signal(int canrestart, unsigned long sig, ret = setup_frame(sig, ka, oldset, regs); if (ret == 0) - block_sigmask(ka, sig); - - return ret; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -511,7 +505,6 @@ do_signal(int canrestart, struct pt_regs *regs) int signr; siginfo_t info; struct k_sigaction ka; - sigset_t *oldset; /* * The common case should go fast, which is why this point is @@ -521,25 +514,11 @@ do_signal(int canrestart, struct pt_regs *regs) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - if (handle_signal(canrestart, signr, &info, &ka, - oldset, regs)) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } - + handle_signal(canrestart, signr, &info, &ka, regs); return; } @@ -560,10 +539,7 @@ do_signal(int canrestart, struct pt_regs *regs) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } asmlinkage void diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h index 234891c74e2b..ce4e51793151 100644 --- a/arch/cris/include/asm/posix_types.h +++ b/arch/cris/include/asm/posix_types.h @@ -15,9 +15,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h index 3f34cb45fbb3..fe512af74a5a 100644 --- a/arch/frv/include/asm/posix_types.h +++ b/arch/frv/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index 54ab13a0de41..0ff03a33c81e 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h @@ -94,8 +94,8 @@ register struct thread_info *__current_thread_info asm("gr15"); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ -#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#define TIF_POLLING_NRFLAG 6 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_MEMDIE 7 /* is terminating due to OOM killer */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) @@ -105,8 +105,16 @@ register struct thread_info *__current_thread_info asm("gr15"); #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_POLLING_NRFLAG (1 << 
TIF_POLLING_NRFLAG) -#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ -#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ +/* work to do on interrupt/exception return */ +#define _TIF_WORK_MASK \ + (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP) + +/* work to do on any return to u-space */ +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE) + +#if _TIF_ALLWORK_MASK >= 0x2000 +#error "_TIF_ALLWORK_MASK won't fit in an ANDI now (see entry.S)" +#endif /* * Thread-synchronous status. diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index 5ba23f715ea5..7d5e000fd32e 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -905,18 +905,19 @@ __syscall_call: __syscall_exit: LEDS 0x6300 - sti gr8,@(gr28,#REG_GR(8)) ; save return value + # keep current PSR in GR23 + movsg psr,gr23 - # rebuild saved psr - execve will change it for init/main.c ldi @(gr28,#REG_PSR),gr22 + + sti.p gr8,@(gr28,#REG_GR(8)) ; save return value + + # rebuild saved psr - execve will change it for init/main.c srli gr22,#1,gr5 andi.p gr22,#~PSR_PS,gr22 andi gr5,#PSR_PS,gr5 or gr5,gr22,gr22 - ori gr22,#PSR_S,gr22 - - # keep current PSR in GR23 - movsg psr,gr23 + ori.p gr22,#PSR_S,gr22 # make sure we don't miss an interrupt setting need_resched or sigpending between # sampling and the RETT @@ -924,9 +925,7 @@ __syscall_exit: movgs gr23,psr ldi @(gr15,#TI_FLAGS),gr4 - sethi.p %hi(_TIF_ALLWORK_MASK),gr5 - setlo %lo(_TIF_ALLWORK_MASK),gr5 - andcc gr4,gr5,gr0,icc0 + andicc gr4,#_TIF_ALLWORK_MASK,gr0,icc0 bne icc0,#0,__syscall_exit_work # restore all registers and return @@ -1111,9 +1110,7 @@ __entry_resume_userspace: __entry_return_from_user_interrupt: LEDS 0x6402 ldi @(gr15,#TI_FLAGS),gr4 - sethi.p %hi(_TIF_WORK_MASK),gr5 - setlo %lo(_TIF_WORK_MASK),gr5 - andcc gr4,gr5,gr0,icc0 + andicc gr4,#_TIF_WORK_MASK,gr0,icc0 beq icc0,#1,__entry_return_direct __entry_work_pending: @@ -1133,9 +1130,7 @@ __entry_work_resched: LEDS 0x6401 ldi @(gr15,#TI_FLAGS),gr4 - sethi.p %hi(_TIF_WORK_MASK),gr5 - setlo %lo(_TIF_WORK_MASK),gr5 - andcc gr4,gr5,gr0,icc0 + andicc gr4,#_TIF_WORK_MASK,gr0,icc0 beq icc0,#1,__entry_return_direct andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0 bne icc0,#1,__entry_work_resched @@ -1163,7 +1158,9 @@ __syscall_trace_entry: # perform syscall exit tracing __syscall_exit_work: LEDS 0x6340 - andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0 + andicc gr22,#PSR_PS,gr0,icc1 ; don't handle on return to kernel mode + andicc.p gr4,#_TIF_SYSCALL_TRACE,gr0,icc0 + bne icc1,#0,__entry_return_direct beq icc0,#1,__entry_work_pending movsg psr,gr23 diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index 595bf1e5a5dc..864c2f0d497b 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c @@ -28,8 +28,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - struct fdpic_func_descriptor { unsigned long text; unsigned long GOT; @@ -149,7 +147,6 @@ asmlinkage int sys_sigreturn(void) __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(&frame->sc, &gr8)) @@ -172,7 +169,6 @@ asmlinkage int sys_rt_sigreturn(void) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8)) @@ -426,9 +422,10 @@ give_sigsegv: /* * OK, we're 
invoking a handler */ -static int handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset) +static void handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ @@ -460,11 +457,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info, else ret = setup_frame(sig, ka, oldset); - if (ret == 0) - block_sigmask(ka, sig); - - return ret; + if (ret) + return; + signal_delivered(sig, info, ka, __frame, + test_thread_flag(TIF_SINGLESTEP)); } /* end handle_signal() */ /*****************************************************************************/ @@ -477,44 +474,14 @@ static void do_signal(void) { struct k_sigaction ka; siginfo_t info; - sigset_t *oldset; int signr; - /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. - */ - if (!user_mode(__frame)) - return; - - if (try_to_freeze()) - goto no_signal; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, __frame, NULL); if (signr > 0) { - if (handle_signal(signr, &info, &ka, oldset) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, __frame, - test_thread_flag(TIF_SINGLESTEP)); - } - + handle_signal(signr, &info, &ka); return; } -no_signal: /* Did we come from a system call? */ if (__frame->syscallno != -1) { /* Restart the system call - no handlers present */ @@ -536,11 +503,7 @@ no_signal: /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } - + restore_saved_sigmask(); } /* end do_signal() */ /*****************************************************************************/ @@ -555,7 +518,7 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags) clear_thread_flag(TIF_SINGLESTEP); /* deal with pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + if (thread_info_flags & _TIF_SIGPENDING) do_signal(); /* deal with notification on about to resume userspace execution */ diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h index bc4c34efb1ad..91e62ba4c7b0 100644 --- a/arch/h8300/include/asm/posix_types.h +++ b/arch/h8300/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c index e58992ad789e..fca10378701b 100644 --- a/arch/h8300/kernel/signal.c +++ b/arch/h8300/kernel/signal.c @@ -47,8 +47,6 @@ #include <asm/traps.h> #include <asm/ucontext.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * Atomically swap in the new signal mask, and wait for a signal. */ @@ -186,7 +184,6 @@ asmlinkage int do_sigreturn(unsigned long __unused,...) 
sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc, &er0)) @@ -211,7 +208,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused,...) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0)) @@ -412,8 +408,9 @@ give_sigsegv: */ static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs * regs) + struct pt_regs * regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* are we from a system call? */ if (regs->orig_er0 >= 0) { @@ -441,10 +438,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, else ret = setup_frame(sig, ka, oldset, regs); - if (!ret) { - block_sigmask(ka, sig); - clear_thread_flag(TIF_RESTORE_SIGMASK); - } + if (!ret) + signal_delivered(sig, info, ka, regs, 0); } /* @@ -457,7 +452,6 @@ statis void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * We want the common case to go fast, which @@ -468,23 +462,14 @@ statis void do_signal(struct pt_regs *regs) if ((regs->ccr & 0x10)) return; - if (try_to_freeze()) - goto no_signal; - current->thread.esp0 = (unsigned long) regs; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - handle_signal(signr, &info, &ka, oldset, regs); + handle_signal(signr, &info, &ka, regs); return; } - no_signal: /* Did we come from a system call? */ if (regs->orig_er0 >= 0) { /* Restart the system call - no handlers present */ @@ -501,8 +486,7 @@ statis void do_signal(struct pt_regs *regs) } /* If there's no signal to deliver, we just restore the saved mask. */ - if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) - set_current_blocked(¤t->saved_sigmask); + restore_saved_sigmask(); } asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index 21a3018cb9bf..304b0808d072 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -31,8 +31,6 @@ #include <asm/signal.h> #include <asm/vdso.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - struct rt_sigframe { unsigned long tramp[2]; struct siginfo info; @@ -149,11 +147,9 @@ sigsegv: /* * Setup invocation of signal handler */ -static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) +static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, + struct pt_regs *regs) { - int rc; - /* * If we're handling a signal that aborted a system call, * set up the error return value before adding the signal @@ -186,15 +182,12 @@ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, * Set up the stack frame; not doing the SA_SIGINFO thing. We * only set up the rt_frame flavor. */ - rc = setup_rt_frame(sig, ka, info, oldset, regs); - /* If there was an error on setup, no signal was delivered. 
*/ - if (rc) - return rc; - - block_sigmask(ka, sig); + if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0) + return; - return 0; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } /* @@ -209,34 +202,13 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (try_to_freeze()) - goto no_signal; - signo = get_signal_to_deliver(&info, &sigact, regs, NULL); if (signo > 0) { - sigset_t *oldset; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - - if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) { - /* - * Successful delivery case. The saved sigmask is - * stored in the signal frame, and will be restored - * by sigreturn. We can clear the TIF flag. - */ - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signo, &info, &sigact, regs, - test_thread_flag(TIF_SINGLESTEP)); - } + handle_signal(signo, &info, &sigact, regs); return; } -no_signal: /* * If we came from a system call, handle the restart. */ @@ -259,10 +231,7 @@ no_signal: no_restart: /* If there's no signal to deliver, put the saved sigmask back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) @@ -301,7 +270,6 @@ asmlinkage int sys_rt_sigreturn(void) if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked))) goto badframe; - sigdelsetmask(&blocked, ~_BLOCKABLE); set_current_blocked(&blocked); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h index 7323ab9467eb..99ee1d6510cf 100644 --- a/arch/ia64/include/asm/posix_types.h +++ b/arch/ia64/include/asm/posix_types.h @@ -1,9 +1,6 @@ #ifndef _ASM_IA64_POSIX_TYPES_H #define _ASM_IA64_POSIX_TYPES_H -typedef unsigned int __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ #include <asm-generic/posix_types.h> diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 310d9734f02d..f7ee85378311 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -141,7 +141,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, &ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f00ba025375d..d7f558c1e711 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) spin_unlock(&(x)->ctx_lock); } -static inline unsigned long -pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec) 
-{ - return get_unmapped_area(file, addr, len, pgoff, flags); -} - /* forward declaration */ static const struct dentry_operations pfmfs_dentry_operations; @@ -2333,8 +2327,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t down_write(&task->mm->mmap_sem); /* find some free area in address space, must have mmap sem held */ - vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); - if (vma->vm_start == 0UL) { + vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS); + if (IS_ERR_VALUE(vma->vm_start)) { DPRINT(("Cannot find unmapped area for size %ld\n", size)); up_write(&task->mm->mmap_sem); goto error; diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 7523501d3bc0..a199be1fe619 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -30,7 +30,6 @@ #define DEBUG_SIG 0 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */ -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #if _NSIG_WORDS > 1 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) @@ -200,7 +199,6 @@ ia64_rt_sigreturn (struct sigscratch *scr) if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(sc, scr)) @@ -415,18 +413,13 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, } static long -handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, +handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, struct sigscratch *scr) { - if (!setup_frame(sig, ka, info, oldset, scr)) + if (!setup_frame(sig, ka, info, sigmask_to_save(), scr)) return 0; - block_sigmask(ka, sig); - - /* - * Let tracing know that we've done the handler setup. - */ - tracehook_signal_handler(sig, info, ka, &scr->pt, + signal_delivered(sig, info, ka, &scr->pt, test_thread_flag(TIF_SINGLESTEP)); return 1; @@ -440,7 +433,6 @@ void ia64_do_signal (struct sigscratch *scr, long in_syscall) { struct k_sigaction ka; - sigset_t *oldset; siginfo_t info; long restart = in_syscall; long errno = scr->pt.r8; @@ -453,11 +445,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) if (!user_mode(&scr->pt)) return; - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - /* * This only loops in the rare cases of handle_signal() failing, in which case we * need to push through a forced SIGSEGV. @@ -507,16 +494,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) * Whee! Actually deliver the signal. If the delivery failed, we need to * continue to iterate in this loop so we can deliver the SIGSEGV... */ - if (handle_signal(signr, &ka, &info, oldset, scr)) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; + if (handle_signal(signr, &ka, &info, scr)) return; - } } /* Did we come from a system call? 
*/ @@ -538,8 +517,5 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); - } + restore_saved_sigmask(); } diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 609d50056a6c..d9439ef2f661 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -171,22 +171,9 @@ asmlinkage unsigned long ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { - extern unsigned long do_mremap (unsigned long addr, - unsigned long old_len, - unsigned long new_len, - unsigned long flags, - unsigned long new_addr); - - down_write(&current->mm->mmap_sem); - { - addr = do_mremap(addr, old_len, new_len, flags, new_addr); - } - up_write(&current->mm->mmap_sem); - - if (IS_ERR((void *) addr)) - return addr; - - force_successful_syscall_return(); + addr = sys_mremap(addr, old_len, new_len, flags, new_addr); + if (!IS_ERR((void *) addr)) + force_successful_syscall_return(); return addr; } diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/asm/posix_types.h index 0195850e1f88..236de26a409b 100644 --- a/arch/m32r/include/asm/posix_types.h +++ b/arch/m32r/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 64804f1f5141..f3fb2c029cfc 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c @@ -28,8 +28,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r2, unsigned long r3, unsigned long r4, @@ -111,7 +109,6 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result)) @@ -267,9 +264,9 @@ static int prev_insn(struct pt_regs *regs) * OK, we're invoking a handler */ -static int +static void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { /* Are we from a system call?
*/ if (regs->syscall_nr >= 0) { @@ -294,11 +291,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, } /* Set up the stack frame */ - if (setup_rt_frame(sig, ka, info, oldset, regs)) - return -EFAULT; + if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs)) + return; - block_sigmask(ka, sig); - return 0; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -311,7 +307,6 @@ static void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * We want the common case to go fast, which @@ -322,14 +317,6 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (try_to_freeze()) - goto no_signal; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Re-enable any watchpoints before delivering the @@ -339,13 +326,11 @@ static void do_signal(struct pt_regs *regs) */ /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) - clear_thread_flag(TIF_RESTORE_SIGMASK); + handle_signal(signr, &ka, &info, regs); return; } - no_signal: /* Did we come from a system call? */ if (regs->syscall_nr >= 0) { /* Restart the system call - no handlers present */ @@ -360,10 +345,7 @@ static void do_signal(struct pt_regs *regs) prev_insn(regs); } } - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h index 6373093be72b..cf4dbf70fdc7 100644 --- a/arch/m68k/include/asm/posix_types.h +++ b/arch/m68k/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 973eec60cad4..710a528b928b 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -51,8 +51,6 @@ #include <asm/traps.h> #include <asm/ucontext.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - #ifdef CONFIG_MMU /* @@ -795,7 +793,6 @@ asmlinkage int do_sigreturn(unsigned long __unused) sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc, frame + 1)) @@ -820,7 +817,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (rt_restore_ucontext(regs, sw, &frame->uc)) @@ -1123,8 +1119,9 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) */ static void handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int err; /* are we from a system call? 
*/ if (regs->orig_d0 >= 0) @@ -1140,14 +1137,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, if (err) return; - block_sigmask(ka, sig); + signal_delivered(sig, info, ka, regs, 0); if (test_thread_flag(TIF_DELAYED_TRACE)) { regs->sr &= ~0x8000; send_sig(SIGTRAP, current, 1); } - - clear_thread_flag(TIF_RESTORE_SIGMASK); } /* @@ -1160,19 +1155,13 @@ static void do_signal(struct pt_regs *regs) siginfo_t info; struct k_sigaction ka; int signr; - sigset_t *oldset; current->thread.esp0 = (unsigned long) regs; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - handle_signal(signr, &ka, &info, oldset, regs); + handle_signal(signr, &ka, &info, regs); return; } @@ -1182,10 +1171,7 @@ static void do_signal(struct pt_regs *regs) handle_restart(regs, NULL, 0); /* If there's no signal to deliver, we just restore the saved mask. */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs) diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 1a8ab6a5c03f..6c610234ffab 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; } #endif diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 5d796e32786e..76b9722557db 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -41,8 +41,6 @@ #include <asm/cacheflush.h> #include <asm/syscalls.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, struct pt_regs *regs) @@ -106,7 +104,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) @@ -310,10 +307,11 @@ do_restart: * OK, we're invoking a handler */ -static int +static void handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) + siginfo_t *info, struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Set up the stack frame */ @@ -323,11 +321,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, ret = setup_rt_frame(sig, ka, NULL, oldset, regs); if (ret) - return ret; - - block_sigmask(ka, sig); + return; - return 0; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -344,33 +340,18 @@ static void do_signal(struct 
pt_regs *regs, int in_syscall) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; #ifdef DEBUG_SIG printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall); printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, regs->r12, current_thread_info()->flags); #endif - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ if (in_syscall) handle_restart(regs, &ka, 1); - if (!handle_signal(signr, &ka, &info, oldset, regs)) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= - ~TS_RESTORE_SIGMASK; - } + handle_signal(signr, &ka, &info, regs); return; } @@ -381,10 +362,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall) * If there's no signal to deliver, we just put the saved sigmask * back. */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, int in_syscall) diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index 7dde01642d6b..bf2248474fa8 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -213,8 +213,6 @@ static int au1200_nand_device_ready(struct mtd_info *mtd) return __raw_readl((void __iomem *)MEM_STSTAT) & 1; } -static const char *db1200_part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition db1200_nand_parts[] = { { .name = "NAND FS 0", @@ -235,7 +233,6 @@ struct platform_nand_data db1200_nand_platdata = { .nr_partitions = ARRAY_SIZE(db1200_nand_parts), .partitions = db1200_nand_parts, .chip_delay = 20, - .part_probe_types = db1200_part_probes, }, .ctrl = { .dev_ready = au1200_nand_device_ready, diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c index 0893f2af0d01..c56e0246694e 100644 --- a/arch/mips/alchemy/devboards/db1300.c +++ b/arch/mips/alchemy/devboards/db1300.c @@ -145,8 +145,6 @@ static int au1300_nand_device_ready(struct mtd_info *mtd) return __raw_readl((void __iomem *)MEM_STSTAT) & 1; } -static const char *db1300_part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition db1300_nand_parts[] = { { .name = "NAND FS 0", @@ -167,7 +165,6 @@ struct platform_nand_data db1300_nand_platdata = { .nr_partitions = ARRAY_SIZE(db1300_nand_parts), .partitions = db1300_nand_parts, .chip_delay = 20, - .part_probe_types = db1300_part_probes, }, .ctrl = { .dev_ready = au1300_nand_device_ready, diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index 6815d0783cd8..9eb79062f46e 100644 --- a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -149,8 +149,6 @@ static int au1550_nand_device_ready(struct mtd_info *mtd) return __raw_readl((void __iomem *)MEM_STSTAT) & 1; } -static const char *db1550_part_probes[] = { "cmdlinepart", NULL }; - static struct mtd_partition db1550_nand_parts[] = { { .name = "NAND FS 0", @@ -171,7 +169,6 @@ struct platform_nand_data db1550_nand_platdata = { .nr_partitions = ARRAY_SIZE(db1550_nand_parts), .partitions = db1550_nand_parts, .chip_delay = 20, - 
.part_probe_types = db1550_part_probes, }, .ctrl = { .dev_ready = au1550_nand_device_ready, diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h index e0308dcca135..fa03ec3fbf89 100644 --- a/arch/mips/include/asm/posix_types.h +++ b/arch/mips/include/asm/posix_types.h @@ -17,11 +17,6 @@ * assume GCC is being used. */ -#if (_MIPS_SZLONG == 64) -typedef unsigned int __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t -#endif - typedef long __kernel_daddr_t; #define __kernel_daddr_t __kernel_daddr_t diff --git a/arch/mips/include/asm/stat.h b/arch/mips/include/asm/stat.h index 6e00f751ab6d..fe9a4c3ec5a1 100644 --- a/arch/mips/include/asm/stat.h +++ b/arch/mips/include/asm/stat.h @@ -20,7 +20,7 @@ struct stat { long st_pad1[3]; /* Reserved for network id */ ino_t st_ino; mode_t st_mode; - nlink_t st_nlink; + __u32 st_nlink; uid_t st_uid; gid_t st_gid; unsigned st_rdev; @@ -55,7 +55,7 @@ struct stat64 { unsigned long long st_ino; mode_t st_mode; - nlink_t st_nlink; + __u32 st_nlink; uid_t st_uid; gid_t st_gid; @@ -96,7 +96,7 @@ struct stat { unsigned long st_ino; mode_t st_mode; - nlink_t st_nlink; + __u32 st_nlink; uid_t st_uid; gid_t st_gid; diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index 10263b405981..9c60d09e62a7 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -19,8 +19,6 @@ # define DEBUGP(fmt, args...) #endif -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * Determine which stack to use.. */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 8a6e6d116ab0..f2c09cfc60ac 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -339,7 +339,6 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) goto badframe; - sigdelsetmask(&blocked, ~_BLOCKABLE); set_current_blocked(&blocked); sig = restore_sigcontext(®s, &frame->sf_sc); @@ -375,7 +374,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); @@ -514,9 +512,10 @@ struct mips_abi mips_abi = { .restart = __NR_restart_syscall }; -static int handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) +static void handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int ret; struct mips_abi *abi = current->thread.abi; void *vdso = current->mm->context.vdso; @@ -550,17 +549,14 @@ static int handle_signal(unsigned long sig, siginfo_t *info, ka, regs, sig, oldset); if (ret) - return ret; - - block_sigmask(ka, sig); + return; - return ret; + signal_delivered(sig, info, ka, regs, 0); } static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; - sigset_t *oldset; siginfo_t info; int signr; @@ -572,25 +568,10 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. 
*/ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } - + handle_signal(signr, &info, &ka, regs); return; } @@ -614,10 +595,7 @@ static void do_signal(struct pt_regs *regs) * If there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* @@ -630,7 +608,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, local_irq_enable(); /* deal with pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index b4fe2eacbd5d..da1b56a39ac7 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -465,7 +465,6 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) goto badframe; - sigdelsetmask(&blocked, ~_BLOCKABLE); set_current_blocked(&blocked); sig = restore_sigcontext32(®s, &frame->sf_sc); @@ -503,7 +502,6 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 63ffac9af7c5..3574c145511b 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -109,7 +109,6 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c index 87167dcc79fa..05a1d922cd60 100644 --- a/arch/mips/pnx833x/common/platform.c +++ b/arch/mips/pnx833x/common/platform.c @@ -244,11 +244,6 @@ static struct platform_device pnx833x_sata_device = { .resource = pnx833x_sata_resources, }; -static const char *part_probes[] = { - "cmdlinepart", - NULL -}; - static void pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { @@ -268,7 +263,6 @@ static struct platform_nand_data pnx833x_flash_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 25, - .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index ea774285e6c5..716e9a12f0e7 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -293,7 +293,6 @@ static void __init rb532_nand_setup(void) rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info); rb532_nand_data.chip.partitions = rb532_partition_info; rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY; - rb532_nand_data.chip.options = NAND_NO_AUTOINCR; } diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h 
index ab506181ec31..d31eeea480cf 100644 --- a/arch/mn10300/include/asm/posix_types.h +++ b/arch/mn10300/include/asm/posix_types.h @@ -20,9 +20,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index b8b6aa1a6837..6ab0bee2a54f 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c @@ -31,8 +31,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * atomically swap in the new signal mask, and wait for a signal. */ @@ -163,7 +161,6 @@ asmlinkage long sys_sigreturn(void) sizeof(frame->extramask))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(current_frame(), &frame->sc, &d0)) @@ -191,7 +188,6 @@ asmlinkage long sys_rt_sigreturn(void) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0)) @@ -430,8 +426,9 @@ static inline void stepback(struct pt_regs *regs) */ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ @@ -461,11 +458,11 @@ static int handle_signal(int sig, ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); + if (ret) + return; - if (ret == 0) - block_sigmask(ka, sig); - - return ret; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } /* @@ -475,7 +472,6 @@ static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; siginfo_t info; - sigset_t *oldset; int signr; /* we want the common case to go fast, which is why we may in certain @@ -483,23 +479,9 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLESTEP)); + if (handle_signal(signr, &info, &ka, regs) == 0) { } return; @@ -525,10 +507,7 @@ static void do_signal(struct pt_regs *regs) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* @@ -548,7 +527,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) } /* deal with pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/openrisc/kernel/signal.c 
b/arch/openrisc/kernel/signal.c index 9ae611522953..30110297f4f9 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -33,8 +33,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - asmlinkage long _sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs) { @@ -101,7 +99,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) @@ -251,20 +248,19 @@ give_sigsegv: return -EFAULT; } -static inline int +static inline void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs) + struct pt_regs *regs) { int ret; - ret = setup_rt_frame(sig, ka, info, oldset, regs); + ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs); if (ret) - return ret; - - block_sigmask(ka, sig); + return; - return 0; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } /* @@ -339,30 +335,10 @@ void do_signal(struct pt_regs *regs) if (signr <= 0) { /* no signal to deliver so we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } - + restore_saved_sigmask(); } else { /* signr > 0 */ - sigset_t *oldset; - - if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - /* Whee! Actually deliver the signal. */ - if (!handle_signal(signr, &info, &ka, oldset, regs)) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag */ - clear_thread_flag(TIF_RESTORE_SIGMASK); - } - - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLESTEP)); + handle_signal(signr, &info, &ka, regs); } return; diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h index 5212b0357daf..b9344256f76b 100644 --- a/arch/parisc/include/asm/posix_types.h +++ b/arch/parisc/include/asm/posix_types.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/parisc/include/asm/stat.h b/arch/parisc/include/asm/stat.h index 9d5fbbc5c31f..d76fbda5d62c 100644 --- a/arch/parisc/include/asm/stat.h +++ b/arch/parisc/include/asm/stat.h @@ -7,7 +7,7 @@ struct stat { unsigned int st_dev; /* dev_t is 32 bits on parisc */ ino_t st_ino; /* 32 bits */ mode_t st_mode; /* 16 bits */ - nlink_t st_nlink; /* 16 bits */ + unsigned short st_nlink; /* 16 bits */ unsigned short st_reserved1; /* old st_uid */ unsigned short st_reserved2; /* old st_gid */ unsigned int st_rdev; @@ -42,7 +42,7 @@ struct hpux_stat64 { unsigned int st_dev; /* dev_t is 32 bits on parisc */ ino_t st_ino; /* 32 bits */ mode_t st_mode; /* 16 bits */ - nlink_t st_nlink; /* 16 bits */ + unsigned short st_nlink; /* 16 bits */ unsigned short st_reserved1; /* old st_uid */ unsigned short st_reserved2; /* old st_gid */ unsigned int st_rdev; diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h 
index 83ae7dd4d99e..22b4726dee49 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -74,7 +74,7 @@ struct thread_info { #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ - _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) + _TIF_NEED_RESCHED) #endif /* __KERNEL__ */ diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index c7fbc96472f3..18670a078849 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -924,7 +924,7 @@ intr_check_sig: /* As above */ mfctl %cr30,%r1 LDREG TI_FLAGS(%r1),%r19 - ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 + ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20 and,COND(<>) %r19, %r20, %r0 b,n intr_restore /* skip past if we've nothing to do */ @@ -2032,7 +2032,7 @@ syscall_check_resched: .import do_signal,code syscall_check_sig: LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 - ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r26 + ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26 and,COND(<>) %r19, %r26, %r0 b,n syscall_restore /* skip past if we've nothing to do */ diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index e7a7cd3e1120..594459bde14e 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -48,9 +48,6 @@ #define DBG(LEVEL, ...) #endif - -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* gcc will complain if a pointer is cast to an integer of different * size. If you really need to do this (and we do for an ELF32 user * application in an ELF64 kernel) then you have to do a cast to an @@ -131,7 +128,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) goto give_sigsegv; } - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); /* Good thing we saved the old gr[30], eh? */ @@ -443,8 +439,9 @@ give_sigsegv: static long handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs *regs, int in_syscall) + struct pt_regs *regs, int in_syscall) { + sigset_t *oldset = sigmask_to_save(); DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", sig, ka, info, oldset, regs); @@ -452,12 +449,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall)) return 0; - block_sigmask(ka, sig); - - tracehook_signal_handler(sig, info, ka, regs, + signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP) || test_thread_flag(TIF_BLOCKSTEP)); + DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", + regs->gr[28]); + return 1; } @@ -568,28 +566,17 @@ do_signal(struct pt_regs *regs, long in_syscall) siginfo_t info; struct k_sigaction ka; int signr; - sigset_t *oldset; - DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n", - oldset, regs, regs->sr[7], in_syscall); + DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", + regs, regs->sr[7], in_syscall); /* Everyone else checks to see if they are in kernel mode at this point and exits if that's the case. I'm not sure why we would be called in that case, but for some reason we are. 
*/ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - - DBG(1,"do_signal: oldset %08lx / %08lx\n", - oldset->sig[0], oldset->sig[1]); - - /* May need to force signal if handle_signal failed to deliver */ while (1) { - signr = get_signal_to_deliver(&info, &ka, regs, NULL); DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); @@ -603,14 +590,8 @@ do_signal(struct pt_regs *regs, long in_syscall) /* Whee! Actually deliver the signal. If the delivery failed, we need to continue to iterate in this loop so we can deliver the SIGSEGV... */ - if (handle_signal(signr, &info, &ka, oldset, - regs, in_syscall)) { - DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", - regs->gr[28]); - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); + if (handle_signal(signr, &info, &ka, regs, in_syscall)) return; - } } /* end of while(1) looping forever if we can't force a signal */ @@ -621,18 +602,12 @@ do_signal(struct pt_regs *regs, long in_syscall) DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", regs->gr[28]); - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } - - return; + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, long in_syscall) { - if (test_thread_flag(TIF_SIGPENDING) || - test_thread_flag(TIF_RESTORE_SIGMASK)) + if (test_thread_flag(TIF_SIGPENDING)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) { diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index e14132430762..fd49aeda9eb8 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -47,8 +47,6 @@ #define DBG(LEVEL, ...) 
#endif -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - inline void sigset_32to64(sigset_t *s64, compat_sigset_t *s32) { diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h index f1393252bbda..2958c5b97b2d 100644 --- a/arch/powerpc/include/asm/posix_types.h +++ b/arch/powerpc/include/asm/posix_types.h @@ -16,9 +16,6 @@ typedef int __kernel_ssize_t; typedef long __kernel_ptrdiff_t; #define __kernel_size_t __kernel_size_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t #endif diff --git a/arch/powerpc/include/asm/stat.h b/arch/powerpc/include/asm/stat.h index e4edc510b530..84880b80cc1c 100644 --- a/arch/powerpc/include/asm/stat.h +++ b/arch/powerpc/include/asm/stat.h @@ -30,11 +30,11 @@ struct stat { unsigned long st_dev; ino_t st_ino; #ifdef __powerpc64__ - nlink_t st_nlink; + unsigned long st_nlink; mode_t st_mode; #else mode_t st_mode; - nlink_t st_nlink; + unsigned short st_nlink; #endif uid_t st_uid; gid_t st_gid; diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index a556ccc16b58..68831e9cf82f 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -140,7 +140,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->local_flags |= _TLF_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, &ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->local_flags & _TLF_RESTORE_SIGMASK)) + return false; + ti->local_flags &= ~_TLF_RESTORE_SIGMASK; + return true; } static inline bool test_thread_local_flags(unsigned int flags) diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index bfc3ec1382fb..5c023c9cf16e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -51,16 +51,6 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return (void __user *)newsp; } - -/* - * Restore the user process's signal mask - */ -void restore_sigmask(sigset_t *set) -{ - sigdelsetmask(set, ~_BLOCKABLE); - set_current_blocked(set); -} - static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) { @@ -114,30 +104,21 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, static int do_signal(struct pt_regs *regs) { - sigset_t *oldset; + sigset_t *oldset = sigmask_to_save(); siginfo_t info; int signr; struct k_sigaction ka; int ret; int is32 = is_32bit_task(); - if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); /* Is there any syscall restart business here ? 
*/ check_syscall_restart(regs, &ka, signr > 0); if (signr <= 0) { - struct thread_info *ti = current_thread_info(); /* No signal to deliver -- put the saved sigmask back */ - if (ti->local_flags & _TLF_RESTORE_SIGMASK) { - ti->local_flags &= ~_TLF_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); regs->trap = 0; return 0; /* no signals delivered */ } @@ -167,18 +148,7 @@ static int do_signal(struct pt_regs *regs) regs->trap = 0; if (ret) { - block_sigmask(&ka, signr); - - /* - * A signal was successfully delivered; the saved sigmask is in - * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. - */ - current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; - - /* - * Let tracing know that we've done the handler setup. - */ - tracehook_signal_handler(signr, &info, &ka, regs, + signal_delivered(signr, &info, &ka, regs, test_thread_flag(TIF_SINGLESTEP)); } diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 8dde973aaaf5..e00acb413934 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -10,13 +10,10 @@ #ifndef _POWERPC_ARCH_SIGNAL_H #define _POWERPC_ARCH_SIGNAL_H -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, int is_32); -extern void restore_sigmask(sigset_t *set); extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 61f6aff25edc..8b4c049aee20 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -919,7 +919,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) return -EFAULT; #endif - restore_sigmask(&set); + set_current_blocked(&set); if (restore_user_regs(regs, mcp, sig)) return -EFAULT; @@ -1273,7 +1273,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, set.sig[0] = sigctx.oldmask; set.sig[1] = sigctx._unused[3]; #endif - restore_sigmask(&set); + set_current_blocked(&set); sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); addr = sr; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2692efdb154e..d183f8719a50 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -335,7 +335,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx, if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) do_exit(SIGSEGV); - restore_sigmask(&set); + set_current_blocked(&set); if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext)) do_exit(SIGSEGV); @@ -364,7 +364,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) goto badframe; - restore_sigmask(&set); + set_current_blocked(&set); if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) goto badframe; diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h index edf8527ff08d..7be104c0f192 100644 --- a/arch/s390/include/asm/posix_types.h +++ b/arch/s390/include/asm/posix_types.h @@ -24,7 +24,6 @@ typedef unsigned short __kernel_old_dev_t; typedef unsigned long __kernel_ino_t; typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_nlink_t; typedef unsigned short 
__kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; @@ -35,7 +34,6 @@ typedef int __kernel_ptrdiff_t; typedef unsigned int __kernel_ino_t; typedef unsigned int __kernel_mode_t; -typedef unsigned int __kernel_nlink_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; @@ -47,7 +45,6 @@ typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ #define __kernel_ino_t __kernel_ino_t #define __kernel_mode_t __kernel_mode_t -#define __kernel_nlink_t __kernel_nlink_t #define __kernel_ipc_pid_t __kernel_ipc_pid_t #define __kernel_uid_t __kernel_uid_t #define __kernel_gid_t __kernel_gid_t diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 377c096ca4a7..3c0c19830c37 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -32,8 +32,6 @@ #include "compat_ptrace.h" #include "entry.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - typedef struct { __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; @@ -364,7 +362,6 @@ asmlinkage long sys32_sigreturn(void) goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; @@ -390,7 +387,6 @@ asmlinkage long sys32_rt_sigreturn(void) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -572,7 +568,7 @@ give_sigsegv: * OK, we're invoking a handler */ -int handle_signal32(unsigned long sig, struct k_sigaction *ka, +void handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { int ret; @@ -583,8 +579,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, else ret = setup_frame32(sig, ka, oldset, regs); if (ret) - return ret; - block_sigmask(ka, sig); - return 0; + return; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLE_STEP)); } diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 6cdddac93a2e..f66a229ab0b3 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -31,7 +31,7 @@ void do_per_trap(struct pt_regs *regs); void syscall_trace(struct pt_regs *regs, int entryexit); void kernel_stack_overflow(struct pt_regs * regs); void do_signal(struct pt_regs *regs); -int handle_signal32(unsigned long sig, struct k_sigaction *ka, +void handle_signal32(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); void do_notify_resume(struct pt_regs *regs); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 42a6e8b47f06..ac565b44aabb 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -33,9 +33,6 @@ #include <asm/switch_to.h> #include "entry.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - - typedef struct { __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; @@ -169,7 +166,6 @@ SYSCALL_DEFINE0(sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs(regs, &frame->sregs)) goto badframe; @@ -189,7 +185,6 @@ SYSCALL_DEFINE0(rt_sigreturn) goto badframe; if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, 
sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigregs(regs, &frame->uc.uc_mcontext)) goto badframe; @@ -367,7 +362,7 @@ give_sigsegv: return -EFAULT; } -static int handle_signal(unsigned long sig, struct k_sigaction *ka, +static void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { @@ -379,9 +374,9 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, else ret = setup_frame(sig, ka, oldset, regs); if (ret) - return ret; - block_sigmask(ka, sig); - return 0; + return; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLE_STEP)); } /* @@ -398,12 +393,7 @@ void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; + sigset_t *oldset = sigmask_to_save(); /* * Get signal to deliver. When running under ptrace, at this point @@ -441,24 +431,10 @@ void do_signal(struct pt_regs *regs) /* No longer in a system call */ clear_thread_flag(TIF_SYSCALL); - if ((is_compat_task() ? - handle_signal32(signr, &ka, &info, oldset, regs) : - handle_signal(signr, &ka, &info, oldset, regs)) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - - /* - * Let tracing know that we've done the handler setup. - */ - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLE_STEP)); - } + if (is_compat_task()) + handle_signal32(signr, &ka, &info, oldset, regs); + else + handle_signal(signr, &ka, &info, oldset, regs); return; } @@ -484,10 +460,7 @@ void do_signal(struct pt_regs *regs) /* * If there's no signal to deliver, we just put the saved sigmask back. 
*/ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs) diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index 302838d3acf6..e382c52ca0d9 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -34,8 +34,6 @@ #include <asm/syscalls.h> #include <asm/ucontext.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - struct rt_sigframe { u32 rs_ass[4]; /* argument save space */ u32 rs_code[2]; /* signal trampoline */ @@ -162,7 +160,6 @@ score_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); @@ -241,11 +238,9 @@ give_sigsegv: return -EFAULT; } -static int handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) +static void handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, struct pt_regs *regs) { - int ret; - if (regs->is_syscall) { switch (regs->regs[4]) { case ERESTART_RESTARTBLOCK: @@ -269,18 +264,15 @@ static int handle_signal(unsigned long sig, siginfo_t *info, /* * Set up the stack frame */ - ret = setup_rt_frame(ka, regs, sig, oldset, info); - - if (ret == 0) - block_sigmask(ka, sig); + if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0) + return; - return ret; + signal_delivered(sig, info, ka, regs, 0); } static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; - sigset_t *oldset; siginfo_t info; int signr; @@ -292,25 +284,10 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. 
- */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } - + handle_signal(signr, &info, &ka, regs); return; } @@ -337,10 +314,7 @@ static void do_signal(struct pt_regs *regs) * If there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } /* diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 34cd0c5ff2e1..a8a1ca741c85 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -188,7 +188,6 @@ static struct platform_nand_data migor_nand_flash_data = { .partitions = migor_nand_flash_partitions, .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions), .chip_delay = 20, - .part_probe_types = (const char *[]) { "cmdlinepart", NULL }, }, .ctrl = { .dev_ready = migor_nand_flash_ready, diff --git a/arch/sh/include/asm/posix_types_32.h b/arch/sh/include/asm/posix_types_32.h index abda58467ece..ba0bdc423b07 100644 --- a/arch/sh/include/asm/posix_types_32.h +++ b/arch/sh/include/asm/posix_types_32.h @@ -3,8 +3,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t typedef unsigned short __kernel_uid_t; diff --git a/arch/sh/include/asm/posix_types_64.h b/arch/sh/include/asm/posix_types_64.h index fcda07b4a616..244f7e950e17 100644 --- a/arch/sh/include/asm/posix_types_64.h +++ b/arch/sh/include/asm/posix_types_64.h @@ -3,8 +3,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t typedef unsigned short __kernel_uid_t; diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 0c04ffc4f12c..bc13b57cdc83 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -169,7 +169,7 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); } #define TI_FLAG_FAULT_CODE_SHIFT 24 @@ -189,6 +189,23 @@ static inline unsigned int get_thread_fault_code(void) struct thread_info *ti = current_thread_info(); return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT; } + +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; +} #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 9d7bfd66f189..d6b7b6154f87 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -32,8 +32,6 @@ #include <asm/syscalls.h> #include <asm/fpu.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - struct fdpic_func_descriptor { 
unsigned long text; unsigned long GOT; @@ -226,7 +224,6 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc, &r0)) @@ -256,7 +253,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) @@ -522,10 +518,11 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, /* * OK, we're invoking a handler */ -static int +static void handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0) + struct pt_regs *regs, unsigned int save_r0) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Set up the stack frame */ @@ -534,10 +531,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, else ret = setup_frame(sig, ka, oldset, regs); - if (ret == 0) - block_sigmask(ka, sig); - - return ret; + if (ret) + return; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } /* @@ -554,7 +551,6 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * We want the common case to go fast, which @@ -565,30 +561,12 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) if (!user_mode(regs)) return; - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { handle_syscall_restart(save_r0, regs, &ka.sa); /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &ka, &info, oldset, - regs, save_r0) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - } - + handle_signal(signr, &ka, &info, regs, save_r0); return; } @@ -610,10 +588,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) * If there's no signal to deliver, we just put the saved sigmask * back. 
*/ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index aa6428430842..6b5b3dfe886b 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -41,11 +41,9 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - -static int +static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs * regs); + struct pt_regs * regs); static inline void handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) @@ -88,7 +86,6 @@ static void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * We want the common case to go fast, which @@ -99,28 +96,13 @@ static void do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, 0); if (signr > 0) { handle_syscall_restart(regs, &ka.sa); /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * If a signal was successfully delivered, the - * saved sigmask is in its frame, and we can - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - - tracehook_signal_handler(signr, &info, &ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - return; - } + handle_signal(signr, &info, &ka, regs); + return; } /* Did we come from a system call? 
*/ @@ -143,12 +125,7 @@ static void do_signal(struct pt_regs *regs) } /* No signal to deliver -- put the saved sigmask back */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } - - return; + restore_saved_sigmask(); } /* @@ -351,7 +328,6 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3, sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc, &ret)) @@ -384,7 +360,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret)) @@ -659,10 +634,11 @@ give_sigsegv: /* * OK, we're invoking a handler */ -static int +static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - sigset_t *oldset, struct pt_regs * regs) + struct pt_regs * regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Set up the stack frame */ @@ -671,10 +647,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, else ret = setup_frame(sig, ka, oldset, regs); - if (ret == 0) - block_sigmask(ka, sig); + if (ret) + return; - return ret; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h index 3070f25ae90a..156220ed99eb 100644 --- a/arch/sparc/include/asm/posix_types.h +++ b/arch/sparc/include/asm/posix_types.h @@ -9,8 +9,6 @@ #if defined(__sparc__) && defined(__arch64__) /* sparc 64 bit */ -typedef unsigned int __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t typedef unsigned short __kernel_old_uid_t; typedef unsigned short __kernel_old_gid_t; @@ -38,9 +36,6 @@ typedef unsigned short __kernel_gid_t; typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef long __kernel_daddr_t; #define __kernel_daddr_t __kernel_daddr_t diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 5af664932452..e6cd224506a9 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -131,8 +131,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ - _TIF_SIGPENDING | \ - _TIF_RESTORE_SIGMASK) + _TIF_SIGPENDING) #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 7f0981b09451..cfa8c38fb9c8 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -238,7 +238,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, &ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline 
bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index bb1513e45f1a..a53e0a5fd3a3 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -32,8 +32,6 @@ #include "sigutil.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* This magic should be in g_upper[0] for all upper parts * to be valid. */ @@ -274,7 +272,6 @@ void do_sigreturn32(struct pt_regs *regs) case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32); case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); } - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; @@ -376,7 +373,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32); case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); } - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; segv: @@ -775,7 +771,7 @@ sigsegv: return -EFAULT; } -static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, +static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { @@ -787,12 +783,9 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, err = setup_frame32(ka, regs, signr, oldset); if (err) - return err; - - block_sigmask(ka, signr); - tracehook_signal_handler(signr, info, ka, regs, 0); + return; - return 0; + signal_delivered(signr, info, ka, regs, 0); } static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, @@ -841,14 +834,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs) if (signr > 0) { if (restart_syscall) syscall_restart32(orig_i0, regs, &ka.sa); - if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) { - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. 
- */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - } + handle_signal32(signr, &ka, &info, oldset, regs); return; } if (restart_syscall && @@ -872,10 +858,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs) /* If there's no signal to deliver, we just put the saved sigmask * back */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - set_current_blocked(¤t->saved_sigmask); - } + restore_saved_sigmask(); } struct sigstack32 { diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 6b42e8622d12..68f9c8650af4 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -29,8 +29,6 @@ #include "sigutil.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - extern void fpsave(unsigned long *fpregs, unsigned long *fsr, void *fpqueue, unsigned long *fpqdepth); extern void fpload(unsigned long *fpregs, unsigned long *fsr); @@ -130,7 +128,6 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) if (err) goto segv_and_exit; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; @@ -197,7 +194,6 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) goto segv; } - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; segv: @@ -449,10 +445,11 @@ sigsegv: return -EFAULT; } -static inline int +static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) + siginfo_t *info, struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int err; if (ka->sa.sa_flags & SA_SIGINFO) @@ -461,12 +458,9 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, err = setup_frame(ka, regs, signr, oldset); if (err) - return err; - - block_sigmask(ka, signr); - tracehook_signal_handler(signr, info, ka, regs, 0); + return; - return 0; + signal_delivered(signr, info, ka, regs, 0); } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, @@ -498,7 +492,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { struct k_sigaction ka; int restart_syscall; - sigset_t *oldset; siginfo_t info; int signr; @@ -523,11 +516,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) regs->u_regs[UREG_G6] = orig_i0; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); /* If the debugger messes with the program counter, it clears @@ -544,15 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) if (signr > 0) { if (restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. 
- */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } + handle_signal(signr, &ka, &info, regs); return; } if (restart_syscall && @@ -576,16 +556,13 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) /* if there's no signal to deliver, we just put the saved sigmask * back */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - set_current_blocked(¤t->saved_sigmask); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index c82cf1cc3965..867de2f8189c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -38,8 +38,6 @@ #include "systbls.h" #include "sigutil.h" -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* {set, get}context() needed for 64-bit SparcLinux userland. */ asmlinkage void sparc64_set_context(struct pt_regs *regs) { @@ -71,7 +69,6 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t))) goto do_sigsegv; } - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); } if (test_thread_flag(TIF_32BIT)) { @@ -315,7 +312,6 @@ void do_rt_sigreturn(struct pt_regs *regs) /* Prevent syscall restart. */ pt_regs_clear_syscall(regs); - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); return; segv: @@ -466,7 +462,7 @@ sigsegv: return -EFAULT; } -static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, +static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) { @@ -475,12 +471,9 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, err = setup_rt_frame(ka, regs, signr, oldset, (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); if (err) - return err; - - block_sigmask(ka, signr); - tracehook_signal_handler(signr, info, ka, regs, 0); + return; - return 0; + signal_delivered(signr, info, ka, regs, 0); } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, @@ -512,7 +505,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { struct k_sigaction ka; int restart_syscall; - sigset_t *oldset; + sigset_t *oldset = sigmask_to_save(); siginfo_t info; int signr; @@ -538,11 +531,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) regs->u_regs[UREG_G6] = orig_i0; - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { extern void do_signal32(sigset_t *, struct pt_regs *); @@ -563,14 +551,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) if (signr > 0) { if (restart_syscall) syscall_restart(orig_i0, regs, &ka.sa); - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. 
- */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - } + handle_signal(signr, &ka, &info, oldset, regs); return; } if (restart_syscall && @@ -594,10 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) /* If there's no signal to deliver, we just put the saved sigmask * back */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - set_current_blocked(¤t->saved_sigmask); - } + restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 3ee51f189a55..275f74fd6f6a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -580,16 +580,9 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { - unsigned long ret = -EINVAL; - if (test_thread_flag(TIF_32BIT)) - goto out; - - down_write(¤t->mm->mmap_sem); - ret = do_mremap(addr, old_len, new_len, flags, new_addr); - up_write(¤t->mm->mmap_sem); -out: - return ret; + return -EINVAL; + return sys_mremap(addr, old_len, new_len, flags, new_addr); } /* we come to here via sys_nis_syscall so it can setup the regs argument */ diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index 69adc08d36a5..6e74450ff0a1 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -44,7 +44,6 @@ typedef __kernel_uid32_t __compat_gid32_t; typedef __kernel_mode_t compat_mode_t; typedef __kernel_dev_t compat_dev_t; typedef __kernel_loff_t compat_loff_t; -typedef __kernel_nlink_t compat_nlink_t; typedef __kernel_ipc_pid_t compat_ipc_pid_t; typedef __kernel_daddr_t compat_daddr_t; typedef __kernel_fsid_t compat_fsid_t; diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 656c486e64fa..7e1fef36bde6 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, &ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index cdef6e5ec022..474571b84085 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -118,8 +118,6 @@ struct compat_rt_sigframe { struct compat_ucontext uc; }; -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, struct compat_sigaction __user *oact, size_t sigsetsize) @@ -302,7 +300,6 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) diff 
--git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index f79d4b88c747..e29b0553211d 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -37,8 +37,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss, struct pt_regs *, regs) { @@ -96,7 +94,6 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) @@ -242,10 +239,11 @@ give_sigsegv: * OK, we're invoking a handler */ -static int handle_signal(unsigned long sig, siginfo_t *info, - struct k_sigaction *ka, sigset_t *oldset, +static void handle_signal(unsigned long sig, siginfo_t *info, + struct k_sigaction *ka, struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ @@ -278,15 +276,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info, else #endif ret = setup_rt_frame(sig, ka, info, oldset, regs); - if (ret == 0) { - /* This code is only called from system calls or from - * the work_pending path in the return-to-user code, and - * either way we can re-enable interrupts unconditionally. - */ - block_sigmask(ka, sig); - } - - return ret; + if (ret) + return; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -299,7 +291,6 @@ void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t *oldset; /* * i386 will check if we're coming from kernel mode and bail out @@ -308,24 +299,10 @@ void do_signal(struct pt_regs *regs) * helpful, we can reinstate the check on "!user_mode(regs)". */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ - if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. - */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - } - + handle_signal(signr, &info, &ka, regs); goto done; } @@ -350,10 +327,7 @@ void do_signal(struct pt_regs *regs) } /* If there's no signal to deliver, just put the saved sigmask back. */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + restore_saved_sigmask(); done: /* Avoid double syscall restart if there are nested signals. 
*/ diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h index 76078490c258..e584e40ee832 100644 --- a/arch/um/include/shared/frame_kern.h +++ b/arch/um/include/shared/frame_kern.h @@ -6,9 +6,6 @@ #ifndef __FRAME_KERN_H_ #define __FRAME_KERN_H_ -#define _S(nr) (1<<((nr)-1)) -#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP))) - extern int setup_signal_stack_sc(unsigned long stack_top, int sig, struct k_sigaction *ka, struct pt_regs *regs, diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index 292e706016c5..7362d58efc29 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -15,17 +15,13 @@ EXPORT_SYMBOL(block_signals); EXPORT_SYMBOL(unblock_signals); -#define _S(nr) (1<<((nr)-1)) - -#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP))) - /* * OK, we're invoking a handler */ -static int handle_signal(struct pt_regs *regs, unsigned long signr, - struct k_sigaction *ka, siginfo_t *info, - sigset_t *oldset) +static void handle_signal(struct pt_regs *regs, unsigned long signr, + struct k_sigaction *ka, siginfo_t *info) { + sigset_t *oldset = sigmask_to_save(); unsigned long sp; int err; @@ -65,9 +61,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr, if (err) force_sigsegv(signr, current); else - block_sigmask(ka, signr); - - return err; + signal_delivered(signr, info, ka, regs, 0); } static int kern_do_signal(struct pt_regs *regs) @@ -77,24 +71,9 @@ static int kern_do_signal(struct pt_regs *regs) int sig, handled_sig = 0; while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { - sigset_t *oldset; - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; handled_sig = 1; /* Whee! Actually deliver the signal. */ - if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) { - /* - * a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - break; - } + handle_signal(regs, sig, &ka_copy, &info); } /* Did we come from a system call? */ @@ -130,10 +109,8 @@ static int kern_do_signal(struct pt_regs *regs) * if there's no signal to deliver, we just put the saved sigmask * back */ - if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); - } + if (!handled_sig) + restore_saved_sigmask(); return handled_sig; } diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c index 28782ad47b93..8adedb37720a 100644 --- a/arch/unicore32/kernel/signal.c +++ b/arch/unicore32/kernel/signal.c @@ -21,8 +21,6 @@ #include <asm/cacheflush.h> #include <asm/ucontext.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - /* * For UniCore syscalls, we encode the syscall number into the instruction. 
*/ @@ -61,10 +59,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); - if (err == 0) { - sigdelsetmask(&set, ~_BLOCKABLE); + if (err == 0) set_current_blocked(&set); - } err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00); err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01); @@ -312,13 +308,12 @@ static inline void setup_syscall_restart(struct pt_regs *regs) /* * OK, we're invoking a handler */ -static int handle_signal(unsigned long sig, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, - struct pt_regs *regs, int syscall) +static void handle_signal(unsigned long sig, struct k_sigaction *ka, + siginfo_t *info, struct pt_regs *regs, int syscall) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; - sigset_t blocked; + sigset_t *oldset = sigmask_to_save(); int usig = sig; int ret; @@ -364,15 +359,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, if (ret != 0) { force_sigsegv(sig, tsk); - return ret; + return; } - /* - * Block the signal if we were successful. - */ - block_sigmask(ka, sig); - - return 0; + signal_delivered(sig, info, ka, regs, 0); } /* @@ -399,32 +389,12 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; - if (try_to_freeze()) - goto no_signal; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - sigset_t *oldset; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; - if (handle_signal(signr, &ka, &info, oldset, regs, syscall) - == 0) { - /* - * A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); - } + handle_signal(signr, &ka, &info, regs, syscall); return; } - no_signal: /* * No signal to deliver to the process - restart the syscall. */ @@ -451,8 +421,7 @@ static void do_signal(struct pt_regs *regs, int syscall) /* If there's no signal to deliver, we just put the saved * sigmask back. */ - if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) - set_current_blocked(¤t->saved_sigmask); + restore_saved_sigmask(); } asmlinkage void do_notify_resume(struct pt_regs *regs, diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d700811785ea..c70684f859e1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1506,6 +1506,8 @@ config EFI_STUB This kernel feature allows a bzImage to be loaded directly by EFI firmware without the use of a bootloader. + See Documentation/x86/efi-stub.txt for more information. 
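Several of the thread_info.h hunks above (powerpc, sh, sparc64, tile) and the x86 one further below add the same four helpers -- set_restore_sigmask(), clear_restore_sigmask(), test_restore_sigmask() and test_and_clear_restore_sigmask() -- and change set_restore_sigmask() from setting TIF_SIGPENDING to merely WARNing if it is not already set. A hedged sketch of the generic-code contract those helpers are assumed to serve; the exact home of this function and the choice of set_current_blocked() variant are assumptions, not taken from these hunks.

#include <linux/sched.h>

/* Sketch only: how the generic "put the saved sigmask back" path is
 * assumed to sit on top of the per-arch bits added in this series. */
static inline void sketch_restore_saved_sigmask(void)
{
        /* A non-atomic test-and-clear of the RESTORE_SIGMASK status bit is
         * enough: only the owning task ever touches its own status word. */
        if (test_and_clear_restore_sigmask())
                set_current_blocked(&current->saved_sigmask);
}

The WARN_ON() in the new set_restore_sigmask() encodes the matching invariant: it is only called while a signal is already pending, so it no longer needs to raise TIF_SIGPENDING itself.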
+ config SECCOMP def_bool y prompt "Enable seccomp to safely compute untrusted bytecode" diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 2c14e76bb4c7..4e85f5f85837 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -16,6 +16,26 @@ static efi_system_table_t *sys_table; +static void efi_printk(char *str) +{ + char *s8; + + for (s8 = str; *s8; s8++) { + struct efi_simple_text_output_protocol *out; + efi_char16_t ch[2] = { 0 }; + + ch[0] = *s8; + out = (struct efi_simple_text_output_protocol *)sys_table->con_out; + + if (*s8 == '\n') { + efi_char16_t nl[2] = { '\r', 0 }; + efi_call_phys2(out->output_string, out, nl); + } + + efi_call_phys2(out->output_string, out, ch); + } +} + static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size, unsigned long *desc_size) { @@ -531,8 +551,10 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image, EFI_LOADER_DATA, nr_initrds * sizeof(*initrds), &initrds); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for initrds\n"); goto fail; + } str = (char *)(unsigned long)hdr->cmd_line_ptr; for (i = 0; i < nr_initrds; i++) { @@ -575,32 +597,42 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image, status = efi_call_phys3(boottime->handle_protocol, image->device_handle, &fs_proto, &io); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to handle fs_proto\n"); goto free_initrds; + } status = efi_call_phys2(io->open_volume, io, &fh); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to open volume\n"); goto free_initrds; + } } status = efi_call_phys5(fh->open, fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to open initrd file\n"); goto close_handles; + } initrd->handle = h; info_sz = 0; status = efi_call_phys4(h->get_info, h, &info_guid, &info_sz, NULL); - if (status != EFI_BUFFER_TOO_SMALL) + if (status != EFI_BUFFER_TOO_SMALL) { + efi_printk("Failed to get initrd info size\n"); goto close_handles; + } grow: status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, info_sz, &info); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for initrd info\n"); goto close_handles; + } status = efi_call_phys4(h->get_info, h, &info_guid, &info_sz, info); @@ -612,8 +644,10 @@ grow: file_sz = info->file_size; efi_call_phys1(sys_table->boottime->free_pool, info); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to get initrd info\n"); goto close_handles; + } initrd->size = file_sz; initrd_total += file_sz; @@ -629,11 +663,14 @@ grow: */ status = high_alloc(initrd_total, 0x1000, &initrd_addr, hdr->initrd_addr_max); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc highmem for initrds\n"); goto close_handles; + } /* We've run out of free low memory. 
*/ if (initrd_addr > hdr->initrd_addr_max) { + efi_printk("We've run out of free low memory\n"); status = EFI_INVALID_PARAMETER; goto free_initrd_total; } @@ -652,8 +689,10 @@ grow: status = efi_call_phys3(fh->read, initrds[j].handle, &chunksize, addr); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to read initrd\n"); goto free_initrd_total; + } addr += chunksize; size -= chunksize; } @@ -674,7 +713,7 @@ free_initrd_total: low_free(initrd_total, initrd_addr); close_handles: - for (k = j; k < nr_initrds; k++) + for (k = j; k < i; k++) efi_call_phys1(fh->close, initrds[k].handle); free_initrds: efi_call_phys1(sys_table->boottime->free_pool, initrds); @@ -732,8 +771,10 @@ static efi_status_t make_boot_params(struct boot_params *boot_params, options_size++; /* NUL termination */ status = low_alloc(options_size, 1, &cmdline); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for cmdline\n"); goto fail; + } s1 = (u8 *)(unsigned long)cmdline; s2 = (u16 *)options; @@ -895,12 +936,16 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table) status = efi_call_phys3(sys_table->boottime->handle_protocol, handle, &proto, (void *)&image); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n"); goto fail; + } status = low_alloc(0x4000, 1, (unsigned long *)&boot_params); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc lowmem for boot params\n"); goto fail; + } memset(boot_params, 0x0, 0x4000); @@ -933,8 +978,10 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table) if (status != EFI_SUCCESS) { status = low_alloc(hdr->init_size, hdr->kernel_alignment, &start); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for kernel\n"); goto fail; + } } hdr->code32_start = (__u32)start; @@ -945,19 +992,25 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table) status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, sizeof(*gdt), (void **)&gdt); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for gdt structure\n"); goto fail; + } gdt->size = 0x800; status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for gdt\n"); goto fail; + } status = efi_call_phys3(sys_table->boottime->allocate_pool, EFI_LOADER_DATA, sizeof(*idt), (void **)&idt); - if (status != EFI_SUCCESS) + if (status != EFI_SUCCESS) { + efi_printk("Failed to alloc mem for idt structure\n"); goto fail; + } idt->size = 0; idt->address = 0; diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index 39251663e65b..3b6e15627c55 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h @@ -58,4 +58,10 @@ struct efi_uga_draw_protocol { void *blt; }; +struct efi_simple_text_output_protocol { + void *reset; + void *output_string; + void *test_string; +}; + #endif /* BOOT_COMPRESSED_EBOOT_H */ diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 98bd70faccc5..daeca56211e3 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -273,7 +273,6 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs) sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if 
(ia32_restore_sigcontext(regs, &frame->sc, &ax)) @@ -299,7 +298,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 18d9005d9e4f..b0767bc08740 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -34,7 +34,7 @@ #ifndef __ASSEMBLY__ extern void mcount(void); -extern int modifying_ftrace_code; +extern atomic_t modifying_ftrace_code; static inline unsigned long ftrace_call_adjust(unsigned long addr) { diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h index 99f262e04b91..8e525059e7d8 100644 --- a/arch/x86/include/asm/posix_types_32.h +++ b/arch/x86/include/asm/posix_types_32.h @@ -10,9 +10,6 @@ typedef unsigned short __kernel_mode_t; #define __kernel_mode_t __kernel_mode_t -typedef unsigned short __kernel_nlink_t; -#define __kernel_nlink_t __kernel_nlink_t - typedef unsigned short __kernel_ipc_pid_t; #define __kernel_ipc_pid_t __kernel_ipc_pid_t diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h index ada93b3b8c66..beff97f7df37 100644 --- a/arch/x86/include/asm/sighandling.h +++ b/arch/x86/include/asm/sighandling.h @@ -7,8 +7,6 @@ #include <asm/processor-flags.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - #define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 5c25de07cba8..89f794f007ec 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -248,7 +248,23 @@ static inline void set_restore_sigmask(void) { struct thread_info *ti = current_thread_info(); ti->status |= TS_RESTORE_SIGMASK; - set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); + WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags)); +} +static inline void clear_restore_sigmask(void) +{ + current_thread_info()->status &= ~TS_RESTORE_SIGMASK; +} +static inline bool test_restore_sigmask(void) +{ + return current_thread_info()->status & TS_RESTORE_SIGMASK; +} +static inline bool test_and_clear_restore_sigmask(void) +{ + struct thread_info *ti = current_thread_info(); + if (!(ti->status & TS_RESTORE_SIGMASK)) + return false; + ti->status &= ~TS_RESTORE_SIGMASK; + return true; } static inline bool is_ia32_task(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 82f29e70d058..6b9333b429ba 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1101,14 +1101,20 @@ int is_debug_stack(unsigned long addr) addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); } +static DEFINE_PER_CPU(u32, debug_stack_use_ctr); + void debug_stack_set_zero(void) { + this_cpu_inc(debug_stack_use_ctr); load_idt((const struct desc_ptr *)&nmi_idt_descr); } void debug_stack_reset(void) { - load_idt((const struct desc_ptr *)&idt_descr); + if (WARN_ON(!this_cpu_read(debug_stack_use_ctr))) + return; + if (this_cpu_dec_return(debug_stack_use_ctr) == 0) + load_idt((const struct desc_ptr *)&idt_descr); } #else /* CONFIG_X86_64 */ diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 01ccf9b71473..623f28837476 100644 --- 
a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -316,7 +316,6 @@ ret_from_exception: preempt_stop(CLBR_ANY) ret_from_intr: GET_THREAD_INFO(%ebp) -resume_userspace_sig: #ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al @@ -615,9 +614,13 @@ work_notifysig: # deal with pending signals and # vm86-space TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) + movb PT_CS(%esp), %bl + andb $SEGMENT_RPL_MASK, %bl + cmpb $USER_RPL, %bl + jb resume_kernel xorl %edx, %edx call do_notify_resume - jmp resume_userspace_sig + jmp resume_userspace ALIGN work_notifysig_v86: @@ -630,9 +633,13 @@ work_notifysig_v86: #endif TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) + movb PT_CS(%esp), %bl + andb $SEGMENT_RPL_MASK, %bl + cmpb $USER_RPL, %bl + jb resume_kernel xorl %edx, %edx call do_notify_resume - jmp resume_userspace_sig + jmp resume_userspace END(work_pending) # perform syscall exit tracing diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 320852d02026..7d65133b51be 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -191,6 +191,44 @@ ENDPROC(native_usergs_sysret64) .endm /* + * When dynamic function tracer is enabled it will add a breakpoint + * to all locations that it is about to modify, sync CPUs, update + * all the code, sync CPUs, then remove the breakpoints. In this time + * if lockdep is enabled, it might jump back into the debug handler + * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF). + * + * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to + * make sure the stack pointer does not get reset back to the top + * of the debug stack, and instead just reuses the current stack. + */ +#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) + +.macro TRACE_IRQS_OFF_DEBUG + call debug_stack_set_zero + TRACE_IRQS_OFF + call debug_stack_reset +.endm + +.macro TRACE_IRQS_ON_DEBUG + call debug_stack_set_zero + TRACE_IRQS_ON + call debug_stack_reset +.endm + +.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET + bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ + jnc 1f + TRACE_IRQS_ON_DEBUG +1: +.endm + +#else +# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF +# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON +# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ +#endif + +/* * C code is not supposed to know about undefined top of stack. Every time * a C function with an pt_regs argument is called from the SYSCALL based * fast path FIXUP_TOP_OF_STACK is needed. @@ -1098,7 +1136,7 @@ ENTRY(\sym) subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid - TRACE_IRQS_OFF + TRACE_IRQS_OFF_DEBUG movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) @@ -1393,7 +1431,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip) ENTRY(paranoid_exit) DEFAULT_FRAME DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF + TRACE_IRQS_OFF_DEBUG testl %ebx,%ebx /* swapgs needed? 
*/ jnz paranoid_restore testl $3,CS(%rsp) @@ -1404,7 +1442,7 @@ paranoid_swapgs: RESTORE_ALL 8 jmp irq_return paranoid_restore: - TRACE_IRQS_IRETQ 0 + TRACE_IRQS_IRETQ_DEBUG 0 RESTORE_ALL 8 jmp irq_return paranoid_userspace: diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 32ff36596ab1..c3a7cb4bf6e6 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -100,7 +100,7 @@ static const unsigned char *ftrace_nop_replace(void) } static int -ftrace_modify_code(unsigned long ip, unsigned const char *old_code, +ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, unsigned const char *new_code) { unsigned char replaced[MCOUNT_INSN_SIZE]; @@ -141,7 +141,20 @@ int ftrace_make_nop(struct module *mod, old = ftrace_call_replace(ip, addr); new = ftrace_nop_replace(); - return ftrace_modify_code(rec->ip, old, new); + /* + * On boot up, and when modules are loaded, the MCOUNT_ADDR + * is converted to a nop, and will never become MCOUNT_ADDR + * again. This code is either running before SMP (on boot up) + * or before the code will ever be executed (module load). + * We do not want to use the breakpoint version in this case, + * just modify the code directly. + */ + if (addr == MCOUNT_ADDR) + return ftrace_modify_code_direct(rec->ip, old, new); + + /* Normal cases use add_brk_on_nop */ + WARN_ONCE(1, "invalid use of ftrace_make_nop"); + return -EINVAL; } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) @@ -152,9 +165,47 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) old = ftrace_nop_replace(); new = ftrace_call_replace(ip, addr); - return ftrace_modify_code(rec->ip, old, new); + /* Should only be called when module is loaded */ + return ftrace_modify_code_direct(rec->ip, old, new); } +/* + * The modifying_ftrace_code is used to tell the breakpoint + * handler to call ftrace_int3_handler(). If it fails to + * call this handler for a breakpoint added by ftrace, then + * the kernel may crash. + * + * As atomic_writes on x86 do not need a barrier, we do not + * need to add smp_mb()s for this to work. It is also considered + * that we can not read the modifying_ftrace_code before + * executing the breakpoint. That would be quite remarkable if + * it could do that. Here's the flow that is required: + * + * CPU-0 CPU-1 + * + * atomic_inc(mfc); + * write int3s + * <trap-int3> // implicit (r)mb + * if (atomic_read(mfc)) + * call ftrace_int3_handler() + * + * Then when we are finished: + * + * atomic_dec(mfc); + * + * If we hit a breakpoint that was not set by ftrace, it does not + * matter if ftrace_int3_handler() is called or not. It will + * simply be ignored. But it is crucial that a ftrace nop/caller + * breakpoint is handled. No other user should ever place a + * breakpoint on an ftrace nop/caller location. It must only + * be done by this code. 
+ */ +atomic_t modifying_ftrace_code __read_mostly; + +static int +ftrace_modify_code(unsigned long ip, unsigned const char *old_code, + unsigned const char *new_code); + int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); @@ -163,13 +214,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func) memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); + + /* See comment above by declaration of modifying_ftrace_code */ + atomic_inc(&modifying_ftrace_code); + ret = ftrace_modify_code(ip, old, new); + atomic_dec(&modifying_ftrace_code); + return ret; } -int modifying_ftrace_code __read_mostly; - /* * A breakpoint was added to the code address we are about to * modify, and this is the handle that will just skip over it. @@ -489,13 +544,46 @@ void ftrace_replace_code(int enable) } } +static int +ftrace_modify_code(unsigned long ip, unsigned const char *old_code, + unsigned const char *new_code) +{ + int ret; + + ret = add_break(ip, old_code); + if (ret) + goto out; + + run_sync(); + + ret = add_update_code(ip, new_code); + if (ret) + goto fail_update; + + run_sync(); + + ret = ftrace_write(ip, new_code, 1); + if (ret) { + ret = -EPERM; + goto out; + } + run_sync(); + out: + return ret; + + fail_update: + probe_kernel_write((void *)ip, &old_code[0], 1); + goto out; +} + void arch_ftrace_update_code(int command) { - modifying_ftrace_code++; + /* See comment above by declaration of modifying_ftrace_code */ + atomic_inc(&modifying_ftrace_code); ftrace_modify_all_code(command); - modifying_ftrace_code--; + atomic_dec(&modifying_ftrace_code); } int __init ftrace_dyn_arch_init(void *data) diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 90875279ef3d..a0b2f84457be 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -444,14 +444,16 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs) */ if (unlikely(is_debug_stack(regs->sp))) { debug_stack_set_zero(); - __get_cpu_var(update_debug_stack) = 1; + this_cpu_write(update_debug_stack, 1); } } static inline void nmi_nesting_postprocess(void) { - if (unlikely(__get_cpu_var(update_debug_stack))) + if (unlikely(this_cpu_read(update_debug_stack))) { debug_stack_reset(); + this_cpu_write(update_debug_stack, 0); + } } #endif diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 13b1990c7c58..c4c6a5c2bf0f 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1211,12 +1211,6 @@ static long x32_arch_ptrace(struct task_struct *child, 0, sizeof(struct user_i387_struct), datap); - /* normal 64bit interface to access TLS data. - Works just like arch_prctl, except that the arguments - are reversed. 
*/ - case PTRACE_ARCH_PRCTL: - return do_arch_prctl(child, data, addr); - default: return compat_ptrace_request(child, request, addr, data); } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 2e937a5ad531..21af737053aa 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -555,7 +555,6 @@ unsigned long sys_sigreturn(struct pt_regs *regs) sizeof(frame->extramask)))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->sc, &ax)) @@ -581,7 +580,6 @@ long sys_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) @@ -647,42 +645,28 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct pt_regs *regs) { int usig = signr_convert(sig); - sigset_t *set = &current->blocked; - int ret; - - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - set = &current->saved_sigmask; + sigset_t *set = sigmask_to_save(); /* Set up the stack frame */ if (is_ia32) { if (ka->sa.sa_flags & SA_SIGINFO) - ret = ia32_setup_rt_frame(usig, ka, info, set, regs); + return ia32_setup_rt_frame(usig, ka, info, set, regs); else - ret = ia32_setup_frame(usig, ka, set, regs); + return ia32_setup_frame(usig, ka, set, regs); #ifdef CONFIG_X86_X32_ABI } else if (is_x32) { - ret = x32_setup_rt_frame(usig, ka, info, + return x32_setup_rt_frame(usig, ka, info, (compat_sigset_t *)set, regs); #endif } else { - ret = __setup_rt_frame(sig, ka, info, set, regs); - } - - if (ret) { - force_sigsegv(sig, current); - return -EFAULT; + return __setup_rt_frame(sig, ka, info, set, regs); } - - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - return ret; } -static int +static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs) { - int ret; - /* Are we from a system call? */ if (syscall_get_nr(current, regs) >= 0) { /* If so, check system call restarting.. */ @@ -713,10 +697,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, likely(test_and_clear_thread_flag(TIF_FORCED_TF))) regs->flags &= ~X86_EFLAGS_TF; - ret = setup_rt_frame(sig, ka, info, regs); - - if (ret) - return ret; + if (setup_rt_frame(sig, ka, info, regs) < 0) { + force_sigsegv(sig, current); + return; + } /* * Clear the direction flag as per the ABI for function entry. @@ -731,12 +715,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, */ regs->flags &= ~X86_EFLAGS_TF; - block_sigmask(ka, sig); - - tracehook_signal_handler(sig, info, ka, regs, - test_thread_flag(TIF_SINGLESTEP)); - - return 0; + signal_delivered(sig, info, ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } #ifdef CONFIG_X86_32 @@ -757,16 +737,6 @@ static void do_signal(struct pt_regs *regs) siginfo_t info; int signr; - /* - * We want the common case to go fast, which is why we may in certain - * cases get here from kernel mode. Just return without doing anything - * if so. - * X86_32: vm86 regs switched out by assembly code before reaching - * here, so testing against kernel CS suffices. - */ - if (!user_mode(regs)) - return; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ @@ -796,10 +766,7 @@ static void do_signal(struct pt_regs *regs) * If there's no signal to deliver, we just put the saved sigmask * back.
*/ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - set_current_blocked(&current->saved_sigmask); - } + restore_saved_sigmask(); } /* @@ -934,7 +901,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ff08457a025d..05b31d92f69c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -303,8 +303,12 @@ gp_in_kernel: dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE - /* ftrace must be first, everything else may cause a recursive crash */ - if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs)) + /* + * ftrace must be first, everything else may cause a recursive crash. + * See note by declaration of modifying_ftrace_code in ftrace.c + */ + if (unlikely(atomic_read(&modifying_ftrace_code)) && + ftrace_int3_handler(regs)) return; #endif #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index bb0fb03b9f85..a508cea13503 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -486,7 +486,6 @@ long sys_sigreturn(struct pt_regs *regs) copy_from_user(&set.sig[1], extramask, sig_size)) goto segfault; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (copy_sc_from_user(&current->thread.regs, sc)) @@ -600,7 +599,6 @@ long sys_rt_sigreturn(struct pt_regs *regs) if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) goto segfault; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index ea7e17778a75..b9f8e5850d3a 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -30,8 +30,6 @@ #define DEBUG_SIG 0 -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) - extern struct task_struct *coproc_owners[]; struct rt_sigframe @@ -261,7 +259,6 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; - sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, frame)) @@ -452,15 +449,6 @@ static void do_signal(struct pt_regs *regs) siginfo_t info; int signr; struct k_sigaction ka; - sigset_t oldset; - - if (try_to_freeze()) - goto no_signal; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = &current->saved_sigmask; - else - oldset = &current->blocked; task_pt_regs(current)->icountlevel = 0; @@ -501,19 +489,17 @@ static void do_signal(struct pt_regs *regs) /* Whee! Actually deliver the signal. */ /* Set up the stack frame */ - ret = setup_frame(signr, &ka, &info, oldset, regs); + ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs); if (ret) return; - clear_thread_flag(TIF_RESTORE_SIGMASK); - block_sigmask(&ka, signr); + signal_delivered(signr, info, ka, regs, 0); if (current->ptrace & PT_SINGLESTEP) task_pt_regs(current)->icountlevel = 1; return; } -no_signal: /* Did we come from a system call? */ if ((signed) regs->syscall >= 0) { /* Restart the system call - no handlers present */ @@ -532,8 +518,7 @@ no_signal: } /* If there's no signal to deliver, we just restore the saved mask.
*/ - if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) - set_current_blocked(&current->saved_sigmask); + restore_saved_sigmask(); if (current->ptrace & PT_SINGLESTEP) task_pt_regs(current)->icountlevel = 1; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 3188da3df8da..adceafda9c17 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); Power Management -------------------------------------------------------------------------- */ +static const char *state_string(int state) +{ + switch (state) { + case ACPI_STATE_D0: + return "D0"; + case ACPI_STATE_D1: + return "D1"; + case ACPI_STATE_D2: + return "D2"; + case ACPI_STATE_D3_HOT: + return "D3hot"; + case ACPI_STATE_D3_COLD: + return "D3"; + default: + return "(unknown)"; + } +} + static int __acpi_bus_get_power(struct acpi_device *device, int *state) { - int result = 0; - acpi_status status = 0; - unsigned long long psc = 0; + int result = ACPI_STATE_UNKNOWN; if (!device || !state) return -EINVAL; - *state = ACPI_STATE_UNKNOWN; - - if (device->flags.power_manageable) { - /* - * Get the device's power state either directly (via _PSC) or - * indirectly (via power resources). - */ - if (device->power.flags.power_resources) { - result = acpi_power_get_inferred_state(device, state); - if (result) - return result; - } else if (device->power.flags.explicit_get) { - status = acpi_evaluate_integer(device->handle, "_PSC", - NULL, &psc); - if (ACPI_FAILURE(status)) - return -ENODEV; - *state = (int)psc; - } - } else { + if (!device->flags.power_manageable) { /* TBD: Non-recursive algorithm for walking up hierarchy. */ *state = device->parent ? device->parent->power.state : ACPI_STATE_D0; + goto out; + } + + /* + * Get the device's power state either directly (via _PSC) or + * indirectly (via power resources). + */ + if (device->power.flags.explicit_get) { + unsigned long long psc; + acpi_status status = acpi_evaluate_integer(device->handle, + "_PSC", NULL, &psc); + if (ACPI_FAILURE(status)) + return -ENODEV; + + result = psc; + } + /* The test below covers ACPI_STATE_UNKNOWN too. */ + if (result <= ACPI_STATE_D2) { + ; /* Do nothing.
*/ + } else if (device->power.flags.power_resources) { + int error = acpi_power_get_inferred_state(device, &result); + if (error) + return error; + } else if (result == ACPI_STATE_D3_HOT) { + result = ACPI_STATE_D3; } + *state = result; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", - device->pnp.bus_id, *state)); + out: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", + device->pnp.bus_id, state_string(*state))); return 0; } @@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) /* Make sure this is a valid target state */ if (state == device->power.state) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", - state)); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", + state_string(state))); return 0; } if (!device->power.states[state].flags.valid) { - printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); + printk(KERN_WARNING PREFIX "Device does not support %s\n", + state_string(state)); return -ENODEV; } if (device->parent && (state < device->parent->power.state)) { @@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) end: if (result) printk(KERN_WARNING PREFIX - "Device [%s] failed to transition to D%d\n", - device->pnp.bus_id, state); + "Device [%s] failed to transition to %s\n", + device->pnp.bus_id, state_string(state)); else { device->power.state = state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Device [%s] transitioned to D%d\n", - device->pnp.bus_id, state)); + "Device [%s] transitioned to %s\n", + device->pnp.bus_id, state_string(state))); } return result; diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 0500f719f63e..dd6d6a3c6780 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) * We know a device's inferred power state when all the resources * required for a given D-state are 'on'. */ - for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { + for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { list = &device->power.states[i].resources; if (list->count < 1) continue; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 85cbfdccc97c..c8a1f3b68110 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void) ACPI_BUS_TYPE_POWER_BUTTON, ACPI_STA_DEFAULT, &ops); + device_init_wakeup(&device->dev, true); } if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 74ee4ab577b6..88561029cca8 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); static u8 sleep_states[ACPI_S_STATE_COUNT]; +static bool pwr_btn_event_pending; static void acpi_sleep_tts_switch(u32 acpi_state) { @@ -184,6 +185,14 @@ static int acpi_pm_prepare(void) return error; } +static int find_powerf_dev(struct device *dev, void *data) +{ + struct acpi_device *device = to_acpi_device(dev); + const char *hid = acpi_device_hid(device); + + return !strcmp(hid, ACPI_BUTTON_HID_POWERF); +} + /** * acpi_pm_finish - Instruct the platform to leave a sleep state. 
* @@ -192,6 +201,7 @@ static int acpi_pm_prepare(void) */ static void acpi_pm_finish(void) { + struct device *pwr_btn_dev; u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); @@ -209,6 +219,23 @@ static void acpi_pm_finish(void) acpi_set_firmware_waking_vector((acpi_physical_address) 0); acpi_target_sleep_state = ACPI_STATE_S0; + + /* If we were woken with the fixed power button, provide a small + * hint to userspace in the form of a wakeup event on the fixed power + * button device (if it can be found). + * + * We delay the event generation til now, as the PM layer requires + * timekeeping to be running before we generate events. */ + if (!pwr_btn_event_pending) + return; + + pwr_btn_event_pending = false; + pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, + find_powerf_dev); + if (pwr_btn_dev) { + pm_wakeup_event(pwr_btn_dev, 0); + put_device(pwr_btn_dev); + } } /** @@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state) /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the * POWER_BUTTON event should not reach userspace ] + * + * However, we do generate a small hint for userspace in the form of + * a wakeup event. We flag this condition for now and generate the + * event later, as we're currently too early in resume to be able to + * generate wakeup events. */ - if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) - acpi_clear_event(ACPI_EVENT_POWER_BUTTON); + if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { + acpi_event_status pwr_btn_status; + + acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); + + if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { + acpi_clear_event(ACPI_EVENT_POWER_BUTTON); + /* Flag for later */ + pwr_btn_event_pending = true; + } + } /* * Disable and clear GPE status before interrupt is enabled. Some GPEs @@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) * can wake the system. _S0W may be valid, too. 
*/ if (acpi_target_sleep_state == ACPI_STATE_S0 || - (device_may_wakeup(dev) && - adev->wakeup.sleep_state <= acpi_target_sleep_state)) { + (device_may_wakeup(dev) && adev->wakeup.flags.valid && + adev->wakeup.sleep_state >= acpi_target_sleep_state)) { acpi_status status; acpi_method[3] = 'W'; diff --git a/drivers/base/soc.c b/drivers/base/soc.c index ba29b2e73d48..72b5e7280d14 100644 --- a/drivers/base/soc.c +++ b/drivers/base/soc.c @@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev) return &soc_dev->dev; } -static mode_t soc_attribute_mode(struct kobject *kobj, +static umode_t soc_attribute_mode(struct kobject *kobj, struct attribute *attr, int index) { diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d81a1d32653..dd3e661a124d 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o +obj-$(CONFIG_EM_TIMER_STI) += em_sti.o obj-$(CONFIG_CLKBLD_I8253) += i8253.o obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c new file mode 100644 index 000000000000..372051d1bba8 --- /dev/null +++ b/drivers/clocksource/em_sti.c @@ -0,0 +1,406 @@ +/* + * Emma Mobile Timer Support - STI + * + * Copyright (C) 2012 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/slab.h> +#include <linux/module.h> + +enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; + +struct em_sti_priv { + void __iomem *base; + struct clk *clk; + struct platform_device *pdev; + unsigned int active[USER_NR]; + unsigned long rate; + raw_spinlock_t lock; + struct clock_event_device ced; + struct clocksource cs; +}; + +#define STI_CONTROL 0x00 +#define STI_COMPA_H 0x10 +#define STI_COMPA_L 0x14 +#define STI_COMPB_H 0x18 +#define STI_COMPB_L 0x1c +#define STI_COUNT_H 0x20 +#define STI_COUNT_L 0x24 +#define STI_COUNT_RAW_H 0x28 +#define STI_COUNT_RAW_L 0x2c +#define STI_SET_H 0x30 +#define STI_SET_L 0x34 +#define STI_INTSTATUS 0x40 +#define STI_INTRAWSTATUS 0x44 +#define STI_INTENSET 0x48 +#define STI_INTENCLR 0x4c +#define STI_INTFFCLR 0x50 + +static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) +{ + return ioread32(p->base + offs); +} + +static inline void em_sti_write(struct em_sti_priv *p, int offs, + unsigned long value) +{ + iowrite32(value, p->base + offs); +} + +static int em_sti_enable(struct em_sti_priv *p) +{ + int ret; + + /* enable clock */ + ret = clk_enable(p->clk); + if (ret) { + dev_err(&p->pdev->dev, "cannot enable clock\n"); + return ret; + } + + /* configure channel, periodic mode and maximum timeout */ + p->rate = clk_get_rate(p->clk); + + /* reset the counter */ + em_sti_write(p, STI_SET_H, 0x40000000); + em_sti_write(p, STI_SET_L, 0x00000000); + + /* mask and clear pending interrupts */ + em_sti_write(p, STI_INTENCLR, 3); + em_sti_write(p, STI_INTFFCLR, 3); + + /* enable updates of counter registers */ + em_sti_write(p, STI_CONTROL, 1); + + return 0; +} + +static void em_sti_disable(struct em_sti_priv *p) +{ + /* mask interrupts */ + em_sti_write(p, STI_INTENCLR, 3); + + /* stop clock */ + clk_disable(p->clk); +} + +static cycle_t em_sti_count(struct em_sti_priv *p) +{ + cycle_t ticks; + unsigned long flags; + + /* the STI hardware buffers the 48-bit count, but to + * break it out into two 32-bit access the registers + * must be accessed in a certain order. + * Always read STI_COUNT_H before STI_COUNT_L. 
+ */ + raw_spin_lock_irqsave(&p->lock, flags); + ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; + ticks |= em_sti_read(p, STI_COUNT_L); + raw_spin_unlock_irqrestore(&p->lock, flags); + + return ticks; +} + +static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&p->lock, flags); + + /* mask compare A interrupt */ + em_sti_write(p, STI_INTENCLR, 1); + + /* update compare A value */ + em_sti_write(p, STI_COMPA_H, next >> 32); + em_sti_write(p, STI_COMPA_L, next & 0xffffffff); + + /* clear compare A interrupt source */ + em_sti_write(p, STI_INTFFCLR, 1); + + /* unmask compare A interrupt */ + em_sti_write(p, STI_INTENSET, 1); + + raw_spin_unlock_irqrestore(&p->lock, flags); + + return next; +} + +static irqreturn_t em_sti_interrupt(int irq, void *dev_id) +{ + struct em_sti_priv *p = dev_id; + + p->ced.event_handler(&p->ced); + return IRQ_HANDLED; +} + +static int em_sti_start(struct em_sti_priv *p, unsigned int user) +{ + unsigned long flags; + int used_before; + int ret = 0; + + raw_spin_lock_irqsave(&p->lock, flags); + used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + if (!used_before) + ret = em_sti_enable(p); + + if (!ret) + p->active[user] = 1; + raw_spin_unlock_irqrestore(&p->lock, flags); + + return ret; +} + +static void em_sti_stop(struct em_sti_priv *p, unsigned int user) +{ + unsigned long flags; + int used_before, used_after; + + raw_spin_lock_irqsave(&p->lock, flags); + used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + p->active[user] = 0; + used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + + if (used_before && !used_after) + em_sti_disable(p); + raw_spin_unlock_irqrestore(&p->lock, flags); +} + +static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) +{ + return container_of(cs, struct em_sti_priv, cs); +} + +static cycle_t em_sti_clocksource_read(struct clocksource *cs) +{ + return em_sti_count(cs_to_em_sti(cs)); +} + +static int em_sti_clocksource_enable(struct clocksource *cs) +{ + int ret; + struct em_sti_priv *p = cs_to_em_sti(cs); + + ret = em_sti_start(p, USER_CLOCKSOURCE); + if (!ret) + __clocksource_updatefreq_hz(cs, p->rate); + return ret; +} + +static void em_sti_clocksource_disable(struct clocksource *cs) +{ + em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); +} + +static void em_sti_clocksource_resume(struct clocksource *cs) +{ + em_sti_clocksource_enable(cs); +} + +static int em_sti_register_clocksource(struct em_sti_priv *p) +{ + struct clocksource *cs = &p->cs; + + memset(cs, 0, sizeof(*cs)); + cs->name = dev_name(&p->pdev->dev); + cs->rating = 200; + cs->read = em_sti_clocksource_read; + cs->enable = em_sti_clocksource_enable; + cs->disable = em_sti_clocksource_disable; + cs->suspend = em_sti_clocksource_disable; + cs->resume = em_sti_clocksource_resume; + cs->mask = CLOCKSOURCE_MASK(48); + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; + + dev_info(&p->pdev->dev, "used as clock source\n"); + + /* Register with dummy 1 Hz value, gets updated in ->enable() */ + clocksource_register_hz(cs, 1); + return 0; +} + +static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) +{ + return container_of(ced, struct em_sti_priv, ced); +} + +static void em_sti_clock_event_mode(enum clock_event_mode mode, + struct clock_event_device *ced) +{ + struct em_sti_priv *p = ced_to_em_sti(ced); + + /* deal with old setting first */ + switch (ced->mode) { + case CLOCK_EVT_MODE_ONESHOT: + em_sti_stop(p, USER_CLOCKEVENT); + 
break; + default: + break; + } + + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + dev_info(&p->pdev->dev, "used for oneshot clock events\n"); + em_sti_start(p, USER_CLOCKEVENT); + clockevents_config(&p->ced, p->rate); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + em_sti_stop(p, USER_CLOCKEVENT); + break; + default: + break; + } +} + +static int em_sti_clock_event_next(unsigned long delta, + struct clock_event_device *ced) +{ + struct em_sti_priv *p = ced_to_em_sti(ced); + cycle_t next; + int safe; + + next = em_sti_set_next(p, em_sti_count(p) + delta); + safe = em_sti_count(p) < (next - 1); + + return !safe; +} + +static void em_sti_register_clockevent(struct em_sti_priv *p) +{ + struct clock_event_device *ced = &p->ced; + + memset(ced, 0, sizeof(*ced)); + ced->name = dev_name(&p->pdev->dev); + ced->features = CLOCK_EVT_FEAT_ONESHOT; + ced->rating = 200; + ced->cpumask = cpumask_of(0); + ced->set_next_event = em_sti_clock_event_next; + ced->set_mode = em_sti_clock_event_mode; + + dev_info(&p->pdev->dev, "used for clock events\n"); + + /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ + clockevents_config_and_register(ced, 1, 2, 0xffffffff); +} + +static int __devinit em_sti_probe(struct platform_device *pdev) +{ + struct em_sti_priv *p; + struct resource *res; + int irq, ret; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + dev_err(&pdev->dev, "failed to allocate driver data\n"); + ret = -ENOMEM; + goto err0; + } + + p->pdev = pdev; + platform_set_drvdata(pdev, p); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get I/O memory\n"); + ret = -EINVAL; + goto err0; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + ret = -EINVAL; + goto err0; + } + + /* map memory, let base point to the STI instance */ + p->base = ioremap_nocache(res->start, resource_size(res)); + if (p->base == NULL) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + ret = -ENXIO; + goto err0; + } + + /* get hold of clock */ + p->clk = clk_get(&pdev->dev, "sclk"); + if (IS_ERR(p->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + ret = PTR_ERR(p->clk); + goto err1; + } + + if (request_irq(irq, em_sti_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&pdev->dev), p)) { + dev_err(&pdev->dev, "failed to request low IRQ\n"); + ret = -ENOENT; + goto err2; + } + + raw_spin_lock_init(&p->lock); + em_sti_register_clockevent(p); + em_sti_register_clocksource(p); + return 0; + +err2: + clk_put(p->clk); +err1: + iounmap(p->base); +err0: + kfree(p); + return ret; +} + +static int __devexit em_sti_remove(struct platform_device *pdev) +{ + return -EBUSY; /* cannot unregister clockevent and clocksource */ +} + +static const struct of_device_id em_sti_dt_ids[] __devinitconst = { + { .compatible = "renesas,em-sti", }, + {}, +}; +MODULE_DEVICE_TABLE(of, em_sti_dt_ids); + +static struct platform_driver em_sti_device_driver = { + .probe = em_sti_probe, + .remove = __devexit_p(em_sti_remove), + .driver = { + .name = "em_sti", + .of_match_table = em_sti_dt_ids, + } +}; + +module_platform_driver(em_sti_device_driver); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 7bb00448e13d..b6453d0e44ad 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -2833,7 +2833,7 @@ static __init 
void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index d7038230b71e..7053140c6596 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { {0,} }; + +static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); + +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); + kfree(ap); +} + static int __devinit cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + cirrus_kick_out_firmware_fb(pdev); + return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 21bdfa8836f7..64ea597cb6d3 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -145,7 +145,7 @@ struct cirrus_device { struct ttm_bo_device bdev; atomic_t validate_sequence; } ttm; - + bool mm_inited; }; diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 2ebcd11a5023..50e170f879de 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus) pci_resource_len(dev->pdev, 0), DRM_MTRR_WC); + cirrus->mm_inited = true; return 0; } void cirrus_mm_fini(struct cirrus_device *cirrus) { struct drm_device *dev = cirrus->dev; + + if (!cirrus->mm_inited) + return; + ttm_bo_device_release(&cirrus->ttm.bdev); cirrus_ttm_global_release(cirrus); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c3b5139eba7f..eb92fe257a39 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -30,7 +30,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> -#include <linux/export.h> +#include <linux/module.h> #include "drmP.h" #include "drm_edid.h" #include "drm_edid_modes.h" @@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid) } EXPORT_SYMBOL(drm_edid_header_is_valid); +static int edid_fixup __read_mostly = 6; +module_param_named(edid_fixup, edid_fixup, int, 0400); +MODULE_PARM_DESC(edid_fixup, + "Minimum number of valid EDID header bytes (0-8, default 6)"); /* * Sanity check the EDID block (base or extension). 
Return 0 if the block @@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block) u8 csum = 0; struct edid *edid = (struct edid *)raw_edid; + if (edid_fixup > 8 || edid_fixup < 0) + edid_fixup = 6; + if (block == 0) { int score = drm_edid_header_is_valid(raw_edid); if (score == 8) ; - else if (score >= 6) { + else if (score >= edid_fixup) { DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); memcpy(raw_edid, edid_header, sizeof(edid_header)); } else { diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index f920fb5e42b6..fa9439159ebd 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) return -EINVAL; /* This is all entirely broken */ - down_write(&current->mm->mmap_sem); old_fops = file_priv->filp->f_op; file_priv->filp->f_op = &i810_buffer_fops; dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, + buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE, MAP_SHARED, buf->bus_address); dev_priv->mmap_buffer = NULL; @@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) retcode = PTR_ERR(buf_priv->virtual); buf_priv->virtual = NULL; } - up_write(&current->mm->mmap_sem); return retcode; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 3c8e04f54713..93e832d6c328 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { MODULE_DEVICE_TABLE(pci, pciidlist); +static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); + +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; +#endif + remove_conflicting_framebuffers(ap, "mgag200drmfb", primary); + kfree(ap); +} + + static int __devinit mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + mgag200_kick_out_firmware_fb(pdev); + return drm_get_pci_dev(pdev, ent, &driver); } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 58991af90502..01550d05e273 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if ((rdev->family == CHIP_JUNIPER) || + (rdev->family == CHIP_CYPRESS) || + (rdev->family == CHIP_HEMLOCK) || + (rdev->family == CHIP_BARTS)) + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); } WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); @@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev) /* * Core functions */ -static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, - u32 num_tile_pipes, - u32 num_backends, - u32 backend_disable_mask) -{ - u32 backend_map = 0; - u32 enabled_backends_mask = 0; - u32 enabled_backends_count = 0; - u32 cur_pipe; - u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; - u32 cur_backend = 0; - u32 i; - bool force_no_swizzle; - - if (num_tile_pipes > EVERGREEN_MAX_PIPES) - num_tile_pipes = EVERGREEN_MAX_PIPES; - if (num_tile_pipes
< 1) - num_tile_pipes = 1; - if (num_backends > EVERGREEN_MAX_BACKENDS) - num_backends = EVERGREEN_MAX_BACKENDS; - if (num_backends < 1) - num_backends = 1; - - for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { - if (((backend_disable_mask >> i) & 1) == 0) { - enabled_backends_mask |= (1 << i); - ++enabled_backends_count; - } - if (enabled_backends_count == num_backends) - break; - } - - if (enabled_backends_count == 0) { - enabled_backends_mask = 1; - enabled_backends_count = 1; - } - - if (enabled_backends_count != num_backends) - num_backends = enabled_backends_count; - - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); - switch (rdev->family) { - case CHIP_CEDAR: - case CHIP_REDWOOD: - case CHIP_PALM: - case CHIP_SUMO: - case CHIP_SUMO2: - case CHIP_TURKS: - case CHIP_CAICOS: - force_no_swizzle = false; - break; - case CHIP_CYPRESS: - case CHIP_HEMLOCK: - case CHIP_JUNIPER: - case CHIP_BARTS: - default: - force_no_swizzle = true; - break; - } - if (force_no_swizzle) { - bool last_backend_enabled = false; - - force_no_swizzle = false; - for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { - if (((enabled_backends_mask >> i) & 1) == 1) { - if (last_backend_enabled) - force_no_swizzle = true; - last_backend_enabled = true; - } else - last_backend_enabled = false; - } - } - - switch (num_tile_pipes) { - case 1: - case 3: - case 5: - case 7: - DRM_ERROR("odd number of pipes!\n"); - break; - case 2: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - break; - case 4: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; - swizzle_pipe[3] = 3; - } - break; - case 6: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 5; - } - break; - case 8: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - swizzle_pipe[6] = 6; - swizzle_pipe[7] = 7; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - swizzle_pipe[6] = 5; - swizzle_pipe[7] = 7; - } - break; - } - - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { - while (((1 << cur_backend) & enabled_backends_mask) == 0) - cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; - - backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); - - cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; - } - - return backend_map; -} - static void evergreen_gpu_init(struct radeon_device *rdev) { - u32 cc_rb_backend_disable = 0; - u32 cc_gc_shader_pipe_config; - u32 gb_addr_config = 0; + u32 gb_addr_config; u32 mc_shared_chmap, mc_arb_ramcfg; - u32 gb_backend_map; - u32 grbm_gfx_index; u32 sx_debug_1; u32 smx_dc_ctl0; u32 sq_config; @@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) u32 sq_stack_resource_mgmt_3; u32 vgt_cache_invalidation; u32 hdp_host_path_cntl, tmp; + u32 disabled_rb_mask; int i, j, num_shader_engines, ps_thread_count; switch (rdev->family) { @@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 
0x100; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_JUNIPER: rdev->config.evergreen.num_ses = 1; @@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x100; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_REDWOOD: rdev->config.evergreen.num_ses = 1; @@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x100; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_CEDAR: default: @@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_PALM: rdev->config.evergreen.num_ses = 1; @@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_SUMO: rdev->config.evergreen.num_ses = 1; @@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_SUMO2: rdev->config.evergreen.num_ses = 1; @@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_BARTS: rdev->config.evergreen.num_ses = 2; @@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x100; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_TURKS: rdev->config.evergreen.num_ses = 1; @@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x100; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_CAICOS: rdev->config.evergreen.num_ses = 1; @@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN; break; } @@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) evergreen_fix_pci_max_read_req_size(rdev); - cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; - - cc_gc_shader_pipe_config |= - 
INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) - & EVERGREEN_MAX_PIPES_MASK); - cc_gc_shader_pipe_config |= - INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) - & EVERGREEN_MAX_SIMDS_MASK); - - cc_rb_backend_disable = - BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) - & EVERGREEN_MAX_BACKENDS_MASK); - - mc_shared_chmap = RREG32(MC_SHARED_CHMAP); if ((rdev->family == CHIP_PALM) || (rdev->family == CHIP_SUMO) || @@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) else mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); - switch (rdev->config.evergreen.max_tile_pipes) { - case 1: - default: - gb_addr_config |= NUM_PIPES(0); - break; - case 2: - gb_addr_config |= NUM_PIPES(1); - break; - case 4: - gb_addr_config |= NUM_PIPES(2); - break; - case 8: - gb_addr_config |= NUM_PIPES(3); - break; - } - - gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); - gb_addr_config |= BANK_INTERLEAVE_SIZE(0); - gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); - gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); - gb_addr_config |= NUM_GPUS(0); /* Hemlock? */ - gb_addr_config |= MULTI_GPU_TILE_SIZE(2); - - if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) - gb_addr_config |= ROW_SIZE(2); - else - gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); - - if (rdev->ddev->pdev->device == 0x689e) { - u32 efuse_straps_4; - u32 efuse_straps_3; - u8 efuse_box_bit_131_124; - - WREG32(RCU_IND_INDEX, 0x204); - efuse_straps_4 = RREG32(RCU_IND_DATA); - WREG32(RCU_IND_INDEX, 0x203); - efuse_straps_3 = RREG32(RCU_IND_DATA); - efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); - - switch(efuse_box_bit_131_124) { - case 0x00: - gb_backend_map = 0x76543210; - break; - case 0x55: - gb_backend_map = 0x77553311; - break; - case 0x56: - gb_backend_map = 0x77553300; - break; - case 0x59: - gb_backend_map = 0x77552211; - break; - case 0x66: - gb_backend_map = 0x77443300; - break; - case 0x99: - gb_backend_map = 0x66552211; - break; - case 0x5a: - gb_backend_map = 0x77552200; - break; - case 0xaa: - gb_backend_map = 0x66442200; - break; - case 0x95: - gb_backend_map = 0x66553311; - break; - default: - DRM_ERROR("bad backend map, using default\n"); - gb_backend_map = - evergreen_get_tile_pipe_to_backend_map(rdev, - rdev->config.evergreen.max_tile_pipes, - rdev->config.evergreen.max_backends, - ((EVERGREEN_MAX_BACKENDS_MASK << - rdev->config.evergreen.max_backends) & - EVERGREEN_MAX_BACKENDS_MASK)); - break; - } - } else if (rdev->ddev->pdev->device == 0x68b9) { - u32 efuse_straps_3; - u8 efuse_box_bit_127_124; - - WREG32(RCU_IND_INDEX, 0x203); - efuse_straps_3 = RREG32(RCU_IND_DATA); - efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); - - switch(efuse_box_bit_127_124) { - case 0x0: - gb_backend_map = 0x00003210; - break; - case 0x5: - case 0x6: - case 0x9: - case 0xa: - gb_backend_map = 0x00003311; - break; - default: - DRM_ERROR("bad backend map, using default\n"); - gb_backend_map = - evergreen_get_tile_pipe_to_backend_map(rdev, - rdev->config.evergreen.max_tile_pipes, - rdev->config.evergreen.max_backends, - ((EVERGREEN_MAX_BACKENDS_MASK << - rdev->config.evergreen.max_backends) & - EVERGREEN_MAX_BACKENDS_MASK)); - break; - } - } else { - switch (rdev->family) { - case CHIP_CYPRESS: - case CHIP_HEMLOCK: - case CHIP_BARTS: - gb_backend_map = 
0x66442200; - break; - case CHIP_JUNIPER: - gb_backend_map = 0x00002200; - break; - default: - gb_backend_map = - evergreen_get_tile_pipe_to_backend_map(rdev, - rdev->config.evergreen.max_tile_pipes, - rdev->config.evergreen.max_backends, - ((EVERGREEN_MAX_BACKENDS_MASK << - rdev->config.evergreen.max_backends) & - EVERGREEN_MAX_BACKENDS_MASK)); - } - } - /* setup tiling info dword. gb_addr_config is not adequate since it does * not have bank info, so create a custom tiling dword. * bits 3:0 num_pipes @@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev) /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ if (rdev->flags & RADEON_IS_IGP) rdev->config.evergreen.tile_config |= 1 << 4; - else - rdev->config.evergreen.tile_config |= - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; - rdev->config.evergreen.tile_config |= - ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; + else { + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + rdev->config.evergreen.tile_config |= 1 << 4; + else + rdev->config.evergreen.tile_config |= 0 << 4; + } + rdev->config.evergreen.tile_config |= 0 << 8; rdev->config.evergreen.tile_config |= ((gb_addr_config & 0x30000000) >> 28) << 12; - rdev->config.evergreen.backend_map = gb_backend_map; - WREG32(GB_BACKEND_MAP, gb_backend_map); - WREG32(GB_ADDR_CONFIG, gb_addr_config); - WREG32(DMIF_ADDR_CONFIG, gb_addr_config); - WREG32(HDP_ADDR_CONFIG, gb_addr_config); - - num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; - grbm_gfx_index = INSTANCE_BROADCAST_WRITES; + num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; - for (i = 0; i < rdev->config.evergreen.num_ses; i++) { - u32 rb = cc_rb_backend_disable | (0xf0 << 16); - u32 sp = cc_gc_shader_pipe_config; - u32 gfx = grbm_gfx_index | SE_INDEX(i); + if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { + u32 efuse_straps_4; + u32 efuse_straps_3; - if (i == num_shader_engines) { - rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); - sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); + WREG32(RCU_IND_INDEX, 0x204); + efuse_straps_4 = RREG32(RCU_IND_DATA); + WREG32(RCU_IND_INDEX, 0x203); + efuse_straps_3 = RREG32(RCU_IND_DATA); + tmp = (((efuse_straps_4 & 0xf) << 4) | + ((efuse_straps_3 & 0xf0000000) >> 28)); + } else { + tmp = 0; + for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) { + u32 rb_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; + tmp <<= 4; + tmp |= rb_disable_bitmap; } + } + /* enabled rb are just the one not disabled :) */ + disabled_rb_mask = tmp; - WREG32(GRBM_GFX_INDEX, gfx); - WREG32(RLC_GFX_INDEX, gfx); + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); - WREG32(CC_RB_BACKEND_DISABLE, rb); - WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); - WREG32(GC_USER_RB_BACKEND_DISABLE, rb); - WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); - } + WREG32(GB_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CONFIG, gb_addr_config); + WREG32(HDP_ADDR_CONFIG, gb_addr_config); - grbm_gfx_index |= SE_BROADCAST_WRITES; - WREG32(GRBM_GFX_INDEX, grbm_gfx_index); - WREG32(RLC_GFX_INDEX, grbm_gfx_index); + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, + 
EVERGREEN_MAX_BACKENDS, disabled_rb_mask); + WREG32(GB_BACKEND_MAP, tmp); WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 79130bfd1d6f..2773039b4902 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -37,6 +37,15 @@ #define EVERGREEN_MAX_PIPES_MASK 0xFF #define EVERGREEN_MAX_LDS_NUM 0xFFFF +#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003 +#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003 +#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 +#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002 +#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002 +#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002 +#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001 +#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001 + /* Registers */ #define RCU_IND_INDEX 0x100 @@ -54,6 +63,7 @@ #define BACKEND_DISABLE(x) ((x) << 16) #define GB_ADDR_CONFIG 0x98F8 #define NUM_PIPES(x) ((x) << 0) +#define NUM_PIPES_MASK 0x0000000f #define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) #define BANK_INTERLEAVE_SIZE(x) ((x) << 8) #define NUM_SHADER_ENGINES(x) ((x) << 12) @@ -452,6 +462,7 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C +#define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ce4e7cc6c905..3df4efa11942 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -417,215 +417,17 @@ out: /* * Core functions */ -static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, - u32 num_tile_pipes, - u32 num_backends_per_asic, - u32 *backend_disable_mask_per_asic, - u32 num_shader_engines) -{ - u32 backend_map = 0; - u32 enabled_backends_mask = 0; - u32 enabled_backends_count = 0; - u32 num_backends_per_se; - u32 cur_pipe; - u32 swizzle_pipe[CAYMAN_MAX_PIPES]; - u32 cur_backend = 0; - u32 i; - bool force_no_swizzle; - - /* force legal values */ - if (num_tile_pipes < 1) - num_tile_pipes = 1; - if (num_tile_pipes > rdev->config.cayman.max_tile_pipes) - num_tile_pipes = rdev->config.cayman.max_tile_pipes; - if (num_shader_engines < 1) - num_shader_engines = 1; - if (num_shader_engines > rdev->config.cayman.max_shader_engines) - num_shader_engines = rdev->config.cayman.max_shader_engines; - if (num_backends_per_asic < num_shader_engines) - num_backends_per_asic = num_shader_engines; - if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines)) - num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines; - - /* make sure we have the same number of backends per se */ - num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); - /* set up the number of backends per se */ - num_backends_per_se = num_backends_per_asic / num_shader_engines; - if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) { - num_backends_per_se = rdev->config.cayman.max_backends_per_se; - num_backends_per_asic = num_backends_per_se * num_shader_engines; - } - - /* create enable mask and count for enabled backends */ - for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { - if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { - enabled_backends_mask |= (1 << i); - ++enabled_backends_count; - } - if (enabled_backends_count == num_backends_per_asic) - break; - } - - /* force the backends mask to match the current number of backends */ 
- if (enabled_backends_count != num_backends_per_asic) { - u32 this_backend_enabled; - u32 shader_engine; - u32 backend_per_se; - - enabled_backends_mask = 0; - enabled_backends_count = 0; - *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK; - for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { - /* calc the current se */ - shader_engine = i / rdev->config.cayman.max_backends_per_se; - /* calc the backend per se */ - backend_per_se = i % rdev->config.cayman.max_backends_per_se; - /* default to not enabled */ - this_backend_enabled = 0; - if ((shader_engine < num_shader_engines) && - (backend_per_se < num_backends_per_se)) - this_backend_enabled = 1; - if (this_backend_enabled) { - enabled_backends_mask |= (1 << i); - *backend_disable_mask_per_asic &= ~(1 << i); - ++enabled_backends_count; - } - } - } - - - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES); - switch (rdev->family) { - case CHIP_CAYMAN: - case CHIP_ARUBA: - force_no_swizzle = true; - break; - default: - force_no_swizzle = false; - break; - } - if (force_no_swizzle) { - bool last_backend_enabled = false; - - force_no_swizzle = false; - for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { - if (((enabled_backends_mask >> i) & 1) == 1) { - if (last_backend_enabled) - force_no_swizzle = true; - last_backend_enabled = true; - } else - last_backend_enabled = false; - } - } - - switch (num_tile_pipes) { - case 1: - case 3: - case 5: - case 7: - DRM_ERROR("odd number of pipes!\n"); - break; - case 2: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - break; - case 4: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; - swizzle_pipe[3] = 3; - } - break; - case 6: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 5; - } - break; - case 8: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - swizzle_pipe[6] = 6; - swizzle_pipe[7] = 7; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - swizzle_pipe[6] = 5; - swizzle_pipe[7] = 7; - } - break; - } - - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { - while (((1 << cur_backend) & enabled_backends_mask) == 0) - cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS; - - backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); - - cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS; - } - - return backend_map; -} - -static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, - u32 disable_mask_per_se, - u32 max_disable_mask_per_se, - u32 num_shader_engines) -{ - u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); - u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; - - if (num_shader_engines == 1) - return disable_mask_per_asic; - else if (num_shader_engines == 2) - return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); - else - return 0xffffffff; -} - static void cayman_gpu_init(struct radeon_device *rdev) { - u32 cc_rb_backend_disable = 0; - u32 
cc_gc_shader_pipe_config; u32 gb_addr_config = 0; u32 mc_shared_chmap, mc_arb_ramcfg; - u32 gb_backend_map; u32 cgts_tcc_disable; u32 sx_debug_1; u32 smx_dc_ctl0; - u32 gc_user_shader_pipe_config; - u32 gc_user_rb_backend_disable; - u32 cgts_user_tcc_disable; u32 cgts_sm_ctrl_reg; u32 hdp_host_path_cntl; u32 tmp; + u32 disabled_rb_mask; int i, j; switch (rdev->family) { @@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.sc_prim_fifo_size = 0x100; rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_ARUBA: default: @@ -687,6 +490,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.sc_prim_fifo_size = 0x40; rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN; break; } @@ -706,39 +510,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); - cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); - cgts_tcc_disable = 0xffff0000; - for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) - cgts_tcc_disable &= ~(1 << (16 + i)); - gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); - gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); - cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); - - rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines; - tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT; - rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp); - rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes; - tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT; - rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp); - tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; - rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp); - tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; - rdev->config.cayman.backend_disable_mask_per_asic = - cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK, - rdev->config.cayman.num_shader_engines); - rdev->config.cayman.backend_map = - cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes, - rdev->config.cayman.num_backends_per_se * - rdev->config.cayman.num_shader_engines, - &rdev->config.cayman.backend_disable_mask_per_asic, - rdev->config.cayman.num_shader_engines); - tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; - rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp); - tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT; - rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256; - if (rdev->config.cayman.mem_max_burst_length_bytes > 512) - rdev->config.cayman.mem_max_burst_length_bytes = 512; tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; if (rdev->config.cayman.mem_row_size_in_kb > 4) @@ -748,73 +519,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.num_gpus = 1; rdev->config.cayman.multi_gpu_tile_size = 64; - //gb_addr_config = 0x02011003 
-#if 0 - gb_addr_config = RREG32(GB_ADDR_CONFIG); -#else - gb_addr_config = 0; - switch (rdev->config.cayman.num_tile_pipes) { - case 1: - default: - gb_addr_config |= NUM_PIPES(0); - break; - case 2: - gb_addr_config |= NUM_PIPES(1); - break; - case 4: - gb_addr_config |= NUM_PIPES(2); - break; - case 8: - gb_addr_config |= NUM_PIPES(3); - break; - } - - tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1; - gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); - gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1); - tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1; - gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); - switch (rdev->config.cayman.num_gpus) { - case 1: - default: - gb_addr_config |= NUM_GPUS(0); - break; - case 2: - gb_addr_config |= NUM_GPUS(1); - break; - case 4: - gb_addr_config |= NUM_GPUS(2); - break; - } - switch (rdev->config.cayman.multi_gpu_tile_size) { - case 16: - gb_addr_config |= MULTI_GPU_TILE_SIZE(0); - break; - case 32: - default: - gb_addr_config |= MULTI_GPU_TILE_SIZE(1); - break; - case 64: - gb_addr_config |= MULTI_GPU_TILE_SIZE(2); - break; - case 128: - gb_addr_config |= MULTI_GPU_TILE_SIZE(3); - break; - } - switch (rdev->config.cayman.mem_row_size_in_kb) { - case 1: - default: - gb_addr_config |= ROW_SIZE(0); - break; - case 2: - gb_addr_config |= ROW_SIZE(1); - break; - case 4: - gb_addr_config |= ROW_SIZE(2); - break; - } -#endif - tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; rdev->config.cayman.num_tile_pipes = (1 << tmp); tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; @@ -828,17 +532,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; - //gb_backend_map = 0x76541032; -#if 0 - gb_backend_map = RREG32(GB_BACKEND_MAP); -#else - gb_backend_map = - cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes, - rdev->config.cayman.num_backends_per_se * - rdev->config.cayman.num_shader_engines, - &rdev->config.cayman.backend_disable_mask_per_asic, - rdev->config.cayman.num_shader_engines); -#endif + /* setup tiling info dword. gb_addr_config is not adequate since it does * not have bank info, so create a custom tiling dword. * bits 3:0 num_pipes @@ -866,33 +560,49 @@ static void cayman_gpu_init(struct radeon_device *rdev) /* num banks is 8 on all fusion asics. 
0 = 4, 1 = 8, 2 = 16 */ if (rdev->flags & RADEON_IS_IGP) rdev->config.cayman.tile_config |= 1 << 4; - else - rdev->config.cayman.tile_config |= - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; + else { + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + rdev->config.cayman.tile_config |= 1 << 4; + else + rdev->config.cayman.tile_config |= 0 << 4; + } rdev->config.cayman.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.cayman.tile_config |= ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; - rdev->config.cayman.backend_map = gb_backend_map; - WREG32(GB_BACKEND_MAP, gb_backend_map); + tmp = 0; + for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) { + u32 rb_disable_bitmap; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); + rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; + tmp <<= 4; + tmp |= rb_disable_bitmap; + } + /* enabled rb are just the one not disabled :) */ + disabled_rb_mask = tmp; + + WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); + WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); + WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - /* primary versions */ - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, + rdev->config.cayman.max_backends_per_se * + rdev->config.cayman.max_shader_engines, + CAYMAN_MAX_BACKENDS, disabled_rb_mask); + WREG32(GB_BACKEND_MAP, tmp); + cgts_tcc_disable = 0xffff0000; + for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) + cgts_tcc_disable &= ~(1 << (16 + i)); WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); - - /* user versions */ - WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 2aa7046ada56..a0b98066e207 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -41,6 +41,9 @@ #define CAYMAN_MAX_TCC 16 #define CAYMAN_MAX_TCC_MASK 0xFF +#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 +#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 + #define DMIF_ADDR_CONFIG 0xBD4 #define SRBM_GFX_CNTL 0x0E44 #define RINGID(x) (((x) & 0x3) << 0) @@ -148,6 +151,8 @@ #define CGTS_SYS_TCC_DISABLE 0x3F90 #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 +#define RLC_GFX_INDEX 0x3FC4 + #define CONFIG_MEMSIZE 0x5428 #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 @@ -212,6 +217,12 @@ #define SOFT_RESET_VGT (1 << 14) #define SOFT_RESET_IA (1 << 15) +#define GRBM_GFX_INDEX 0x802C +#define INSTANCE_INDEX(x) ((x) << 0) +#define SE_INDEX(x) ((x) << 16) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) + #define SCRATCH_REG0 0x8500 #define SCRATCH_REG1 0x8504 #define SCRATCH_REG2 0x8508 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f388a1d73b63..45cfcea63507 100644 --- 
a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev) return r600_gpu_soft_reset(rdev); } -static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, - u32 num_backends, - u32 backend_disable_mask) -{ - u32 backend_map = 0; - u32 enabled_backends_mask; - u32 enabled_backends_count; - u32 cur_pipe; - u32 swizzle_pipe[R6XX_MAX_PIPES]; - u32 cur_backend; - u32 i; - - if (num_tile_pipes > R6XX_MAX_PIPES) - num_tile_pipes = R6XX_MAX_PIPES; - if (num_tile_pipes < 1) - num_tile_pipes = 1; - if (num_backends > R6XX_MAX_BACKENDS) - num_backends = R6XX_MAX_BACKENDS; - if (num_backends < 1) - num_backends = 1; - - enabled_backends_mask = 0; - enabled_backends_count = 0; - for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { - if (((backend_disable_mask >> i) & 1) == 0) { - enabled_backends_mask |= (1 << i); - ++enabled_backends_count; - } - if (enabled_backends_count == num_backends) - break; - } - - if (enabled_backends_count == 0) { - enabled_backends_mask = 1; - enabled_backends_count = 1; - } - - if (enabled_backends_count != num_backends) - num_backends = enabled_backends_count; - - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES); - switch (num_tile_pipes) { - case 1: - swizzle_pipe[0] = 0; - break; - case 2: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - break; - case 3: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - break; - case 4: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - break; - case 5: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - break; - case 6: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 5; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - break; - case 7: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - swizzle_pipe[6] = 5; - break; - case 8: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - swizzle_pipe[6] = 5; - swizzle_pipe[7] = 7; - break; +u32 r6xx_remap_render_backend(struct radeon_device *rdev, + u32 tiling_pipe_num, + u32 max_rb_num, + u32 total_max_rb_num, + u32 disabled_rb_mask) +{ + u32 rendering_pipe_num, rb_num_width, req_rb_num; + u32 pipe_rb_ratio, pipe_rb_remain; + u32 data = 0, mask = 1 << (max_rb_num - 1); + unsigned i, j; + + /* mask out the RBs that don't exist on that asic */ + disabled_rb_mask |= (0xff << max_rb_num) & 0xff; + + rendering_pipe_num = 1 << tiling_pipe_num; + req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); + BUG_ON(rendering_pipe_num < req_rb_num); + + pipe_rb_ratio = rendering_pipe_num / req_rb_num; + pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; + + if (rdev->family <= CHIP_RV740) { + /* r6xx/r7xx */ + rb_num_width = 2; + } else { + /* eg+ */ + rb_num_width = 4; } - cur_backend = 0; - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { - while (((1 << cur_backend) & enabled_backends_mask) == 0) - cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; - - backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); - - cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; + for (i = 0; i < max_rb_num; i++) { + if (!(mask & disabled_rb_mask)) { + for (j = 0; j < pipe_rb_ratio; j++) { + data <<= rb_num_width; + data |= 
max_rb_num - i - 1; + } + if (pipe_rb_remain) { + data <<= rb_num_width; + data |= max_rb_num - i - 1; + pipe_rb_remain--; + } + } + mask >>= 1; } - return backend_map; + return data; } int r600_count_pipe_bits(uint32_t val) @@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev) { u32 tiling_config; u32 ramcfg; - u32 backend_map; u32 cc_rb_backend_disable; u32 cc_gc_shader_pipe_config; u32 tmp; @@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev) u32 sq_thread_resource_mgmt = 0; u32 sq_stack_resource_mgmt_1 = 0; u32 sq_stack_resource_mgmt_2 = 0; + u32 disabled_rb_mask; - /* FIXME: implement */ + rdev->config.r600.tiling_group_size = 256; switch (rdev->family) { case CHIP_R600: rdev->config.r600.max_pipes = 4; @@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev) rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); - if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) - rdev->config.r600.tiling_group_size = 512; - else - rdev->config.r600.tiling_group_size = 256; + tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; if (tmp > 3) { tiling_config |= ROW_TILING(3); @@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev) tiling_config |= BANK_SWAPS(1); cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; - cc_rb_backend_disable |= - BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); - - cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; - cc_gc_shader_pipe_config |= - INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); - cc_gc_shader_pipe_config |= - INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); - - backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, - (R6XX_MAX_BACKENDS - - r600_count_pipe_bits((cc_rb_backend_disable & - R6XX_MAX_BACKENDS_MASK) >> 16)), - (cc_rb_backend_disable >> 16)); + tmp = R6XX_MAX_BACKENDS - + r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); + if (tmp < rdev->config.r600.max_backends) { + rdev->config.r600.max_backends = tmp; + } + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; + tmp = R6XX_MAX_PIPES - + r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); + if (tmp < rdev->config.r600.max_pipes) { + rdev->config.r600.max_pipes = tmp; + } + tmp = R6XX_MAX_SIMDS - + r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); + if (tmp < rdev->config.r600.max_simds) { + rdev->config.r600.max_simds = tmp; + } + + disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; + tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, + R6XX_MAX_BACKENDS, disabled_rb_mask); + tiling_config |= tmp << 16; + rdev->config.r600.backend_map = tmp; + rdev->config.r600.tile_config = tiling_config; - rdev->config.r600.backend_map = backend_map; - tiling_config |= BACKEND_MAP(backend_map); WREG32(GB_TILING_CONFIG, tiling_config); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); - /* Setup pipes */ - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_GC_SHADER_PIPE_CONFIG, 
cc_gc_shader_pipe_config); - WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 15bd3b216243..a0dbf1fe6a40 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -219,6 +219,8 @@ #define BACKEND_MAP(x) ((x) << 16) #define GB_TILING_CONFIG 0x98F0 +#define PIPE_TILING__SHIFT 1 +#define PIPE_TILING__MASK 0x0000000e #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2e24022b389a..85dac33e3cce 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1848,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); extern void r600_hdmi_enable(struct drm_encoder *encoder); extern void r600_hdmi_disable(struct drm_encoder *encoder); extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); +extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, + u32 tiling_pipe_num, + u32 max_rb_num, + u32 total_max_rb_num, + u32 enabled_rb_mask); /* * evergreen functions used by radeon_encoder.c diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 0137689ed461..142f89462aa4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p) sync_to_ring, p->ring); } +/* XXX: note that this is called from the legacy UMS CS ioctl as well */ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; @@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) } } - if ((p->cs_flags & RADEON_CS_USE_VM) && - !p->rdev->vm_manager.enabled) { - DRM_ERROR("VM not active on asic!\n"); - return -EINVAL; - } - - /* we only support VM on SI+ */ - if ((p->rdev->family >= CHIP_TAHITI) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("VM required on SI+!\n"); - return -EINVAL; - } + /* these are KMS only */ + if (p->rdev) { + if ((p->cs_flags & RADEON_CS_USE_VM) && + !p->rdev->vm_manager.enabled) { + DRM_ERROR("VM not active on asic!\n"); + return -EINVAL; + } - if (radeon_cs_get_ring(p, ring, priority)) - return -EINVAL; + /* we only support VM on SI+ */ + if ((p->rdev->family >= CHIP_TAHITI) && + ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { + DRM_ERROR("VM required on SI+!\n"); + return -EINVAL; + } + if (radeon_cs_get_ring(p, ring, priority)) + return -EINVAL; + } /* deal with non-vm */ if ((p->chunk_ib_idx != -1) && diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c2f473bc13b8..04ddc365a908 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if (rdev->family == CHIP_RV740) + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); @@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev) /* * Core functions */ 
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, - u32 num_tile_pipes, - u32 num_backends, - u32 backend_disable_mask) -{ - u32 backend_map = 0; - u32 enabled_backends_mask; - u32 enabled_backends_count; - u32 cur_pipe; - u32 swizzle_pipe[R7XX_MAX_PIPES]; - u32 cur_backend; - u32 i; - bool force_no_swizzle; - - if (num_tile_pipes > R7XX_MAX_PIPES) - num_tile_pipes = R7XX_MAX_PIPES; - if (num_tile_pipes < 1) - num_tile_pipes = 1; - if (num_backends > R7XX_MAX_BACKENDS) - num_backends = R7XX_MAX_BACKENDS; - if (num_backends < 1) - num_backends = 1; - - enabled_backends_mask = 0; - enabled_backends_count = 0; - for (i = 0; i < R7XX_MAX_BACKENDS; ++i) { - if (((backend_disable_mask >> i) & 1) == 0) { - enabled_backends_mask |= (1 << i); - ++enabled_backends_count; - } - if (enabled_backends_count == num_backends) - break; - } - - if (enabled_backends_count == 0) { - enabled_backends_mask = 1; - enabled_backends_count = 1; - } - - if (enabled_backends_count != num_backends) - num_backends = enabled_backends_count; - - switch (rdev->family) { - case CHIP_RV770: - case CHIP_RV730: - force_no_swizzle = false; - break; - case CHIP_RV710: - case CHIP_RV740: - default: - force_no_swizzle = true; - break; - } - - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); - switch (num_tile_pipes) { - case 1: - swizzle_pipe[0] = 0; - break; - case 2: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - break; - case 3: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; - } - break; - case 4: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 3; - swizzle_pipe[3] = 1; - } - break; - case 5: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; - } - break; - case 6: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 5; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - } - break; - case 7: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - swizzle_pipe[6] = 6; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 5; - } - break; - case 8: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - swizzle_pipe[6] = 6; - swizzle_pipe[7] = 7; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 7; - swizzle_pipe[7] = 5; - } - break; - } - - cur_backend = 0; - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { - while (((1 << cur_backend) & enabled_backends_mask) == 0) - cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; - - backend_map |= 
(u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); - - cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; - } - - return backend_map; -} - static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; @@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev) u32 sq_thread_resource_mgmt; u32 hdp_host_path_cntl; u32 sq_dyn_gpr_size_simd_ab_0; - u32 backend_map; u32 gb_tiling_config = 0; u32 cc_rb_backend_disable = 0; u32 cc_gc_shader_pipe_config = 0; u32 mc_arb_ramcfg; - u32 db_debug4; + u32 db_debug4, tmp; + u32 inactive_pipes, shader_pipe_config; + u32 disabled_rb_mask; + unsigned active_number; /* setup chip specs */ + rdev->config.rv770.tiling_group_size = 256; switch (rdev->family) { case CHIP_RV770: rdev->config.rv770.max_pipes = 4; @@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev) /* setup tiling, simd, pipe config */ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); + inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT; + for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) { + if (!(inactive_pipes & tmp)) { + active_number++; + } + tmp <<= 1; + } + if (active_number == 1) { + WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1); + } else { + WREG32(SPI_CONFIG_CNTL, 0); + } + + cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; + tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16); + if (tmp < rdev->config.rv770.max_backends) { + rdev->config.rv770.max_backends = tmp; + } + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; + tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK); + if (tmp < rdev->config.rv770.max_pipes) { + rdev->config.rv770.max_pipes = tmp; + } + tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); + if (tmp < rdev->config.rv770.max_simds) { + rdev->config.rv770.max_simds = tmp; + } + switch (rdev->config.rv770.max_tile_pipes) { case 1: default: - gb_tiling_config |= PIPE_TILING(0); + gb_tiling_config = PIPE_TILING(0); break; case 2: - gb_tiling_config |= PIPE_TILING(1); + gb_tiling_config = PIPE_TILING(1); break; case 4: - gb_tiling_config |= PIPE_TILING(2); + gb_tiling_config = PIPE_TILING(2); break; case 8: - gb_tiling_config |= PIPE_TILING(3); + gb_tiling_config = PIPE_TILING(3); break; } rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; + disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK; + tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends, + R7XX_MAX_BACKENDS, disabled_rb_mask); + gb_tiling_config |= tmp << 16; + rdev->config.rv770.backend_map = tmp; + if (rdev->family == CHIP_RV770) gb_tiling_config |= BANK_TILING(1); - else - gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); + else { + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + gb_tiling_config |= BANK_TILING(1); + else + gb_tiling_config |= BANK_TILING(0); + } rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); - if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) - rdev->config.rv770.tiling_group_size = 512; - else - rdev->config.rv770.tiling_group_size = 256; if (((mc_arb_ramcfg & 
NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { gb_tiling_config |= ROW_TILING(3); gb_tiling_config |= SAMPLE_SPLIT(3); @@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev) } gb_tiling_config |= BANK_SWAPS(1); - - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; - cc_rb_backend_disable |= - BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); - - cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; - cc_gc_shader_pipe_config |= - INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); - cc_gc_shader_pipe_config |= - INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); - - if (rdev->family == CHIP_RV740) - backend_map = 0x28; - else - backend_map = r700_get_tile_pipe_to_backend_map(rdev, - rdev->config.rv770.max_tile_pipes, - (R7XX_MAX_BACKENDS - - r600_count_pipe_bits((cc_rb_backend_disable & - R7XX_MAX_BACKENDS_MASK) >> 16)), - (cc_rb_backend_disable >> 16)); - rdev->config.rv770.tile_config = gb_tiling_config; - rdev->config.rv770.backend_map = backend_map; - gb_tiling_config |= BACKEND_MAP(backend_map); WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); WREG32(CGTS_USER_TCC_DISABLE, 0); - num_qd_pipes = - R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); + + num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); @@ -809,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(VGT_NUM_INSTANCES, 1); - WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); - WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); WREG32(CP_PERFMON_CNTL, 0); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9c549f702f2f..fdc089896011 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -106,10 +106,13 @@ #define BACKEND_MAP(x) ((x) << 16) #define GB_TILING_CONFIG 0x98F0 +#define PIPE_TILING__SHIFT 1 +#define PIPE_TILING__MASK 0x0000000e #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) #define INACTIVE_QD_PIPES_MASK 0x0000FF00 +#define INACTIVE_QD_PIPES_SHIFT 8 #define INACTIVE_SIMDS(x) ((x) << 16) #define INACTIVE_SIMDS_MASK 0x00FF0000 @@ -174,6 +177,7 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C +#define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 36792bd4da77..b67cfcaa661f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) spin_unlock(&glob->lru_lock); 
(void) ttm_bo_cleanup_refs(bo, false, false, false); kref_put(&bo->list_kref, ttm_bo_release_list); + spin_lock(&glob->lru_lock); continue; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 51c9ba5cd2fb..21ee78226560 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, cmd += sizeof(remap_cmd) / sizeof(uint32); for (i = 0; i < num_pages; ++i) { - if (VMW_PPN_SIZE > 4) + if (VMW_PPN_SIZE <= 4) *cmd = page_to_pfn(*pages++); else *((uint64_t *)cmd) = page_to_pfn(*pages++); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 754f38f8a692..638dae048b4f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/workqueue.h> +#include <linux/delay.h> #include <scsi/scsi_dh.h> #include <linux/atomic.h> @@ -61,11 +62,11 @@ struct multipath { struct list_head list; struct dm_target *ti; - spinlock_t lock; - const char *hw_handler_name; char *hw_handler_params; + spinlock_t lock; + unsigned nr_priority_groups; struct list_head priority_groups; @@ -81,16 +82,17 @@ struct multipath { struct priority_group *next_pg; /* Switch to this PG if set */ unsigned repeat_count; /* I/Os left before calling PS again */ - unsigned queue_io; /* Must we queue all I/O? */ - unsigned queue_if_no_path; /* Queue I/O if last path fails? */ - unsigned saved_queue_if_no_path;/* Saved state during suspension */ + unsigned queue_io:1; /* Must we queue all I/O? */ + unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */ + unsigned saved_queue_if_no_path:1; /* Saved state during suspension */ + unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ + unsigned queue_size; struct work_struct process_queued_ios; struct list_head queued_ios; - unsigned queue_size; struct work_struct trigger_event; @@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes) /* * Loop through priority groups until we find a valid path. * First time we skip PGs marked 'bypassed'. - * Second time we only try the ones we skipped. + * Second time we only try the ones we skipped, but set + * pg_init_delay_retry so we do not hammer controllers. 
*/ do { list_for_each_entry(pg, &m->priority_groups, list) { if (pg->bypassed == bypassed) continue; - if (!__choose_path_in_pg(m, pg, nr_bytes)) + if (!__choose_path_in_pg(m, pg, nr_bytes)) { + if (!bypassed) + m->pg_init_delay_retry = 1; return; + } } } while (bypassed--); @@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work) spin_lock_irqsave(&m->lock, flags); - if (!m->queue_size) - goto out; - if (!m->current_pgpath) __choose_pgpath(m, 0); @@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work) if (m->pg_init_required && !m->pg_init_in_progress && pgpath) __pg_init_all_paths(m); -out: spin_unlock_irqrestore(&m->lock, flags); if (!must_queue) dispatch_queued_ios(m); @@ -1517,11 +1519,16 @@ out: static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { - struct multipath *m = (struct multipath *) ti->private; - struct block_device *bdev = NULL; - fmode_t mode = 0; + struct multipath *m = ti->private; + struct block_device *bdev; + fmode_t mode; unsigned long flags; - int r = 0; + int r; + +again: + bdev = NULL; + mode = 0; + r = 0; spin_lock_irqsave(&m->lock, flags); @@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) r = scsi_verify_blk_ioctl(NULL, cmd); + if (r == -EAGAIN && !fatal_signal_pending(current)) { + queue_work(kmultipathd, &m->process_queued_ios); + msleep(10); + goto again; + } + return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); } @@ -1643,7 +1656,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 737d38865b69..3e2907f0bc46 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, return 0; } -static int __get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result) +static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r, inc; + struct thin_disk_superblock *disk_super; + struct dm_block *copy, *sblock; + dm_block_t held_root; + + /* + * Copy the superblock. + */ + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); + r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, ©, &inc); + if (r) + return r; + + BUG_ON(!inc); + + held_root = dm_block_location(copy); + disk_super = dm_block_data(copy); + + if (le64_to_cpu(disk_super->held_root)) { + DMWARN("Pool metadata snapshot already exists: release this before taking another."); + + dm_tm_dec(pmd->tm, held_root); + dm_tm_unlock(pmd->tm, copy); + pmd->need_commit = 1; + + return -EBUSY; + } + + /* + * Wipe the spacemap since we're not publishing this. + */ + memset(&disk_super->data_space_map_root, 0, + sizeof(disk_super->data_space_map_root)); + memset(&disk_super->metadata_space_map_root, 0, + sizeof(disk_super->metadata_space_map_root)); + + /* + * Increment the data structures that need to be preserved. + */ + dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root)); + dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root)); + dm_tm_unlock(pmd->tm, copy); + + /* + * Write the held root into the superblock. 
+ */ + r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &sblock); + if (r) { + dm_tm_dec(pmd->tm, held_root); + pmd->need_commit = 1; + return r; + } + + disk_super = dm_block_data(sblock); + disk_super->held_root = cpu_to_le64(held_root); + dm_bm_unlock(sblock); + + pmd->need_commit = 1; + + return 0; +} + +int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r; + + down_write(&pmd->root_lock); + r = __reserve_metadata_snap(pmd); + up_write(&pmd->root_lock); + + return r; +} + +static int __release_metadata_snap(struct dm_pool_metadata *pmd) { int r; struct thin_disk_superblock *disk_super; - struct dm_block *sblock; + struct dm_block *sblock, *copy; + dm_block_t held_root; r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, &sb_validator, &sblock); @@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd, return r; disk_super = dm_block_data(sblock); + held_root = le64_to_cpu(disk_super->held_root); + disk_super->held_root = cpu_to_le64(0); + pmd->need_commit = 1; + + dm_bm_unlock(sblock); + + if (!held_root) { + DMWARN("No pool metadata snapshot found: nothing to release."); + return -EINVAL; + } + + r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, ©); + if (r) + return r; + + disk_super = dm_block_data(copy); + dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); + dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_sm_dec_block(pmd->metadata_sm, held_root); + + return dm_tm_unlock(pmd->tm, copy); +} + +int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd) +{ + int r; + + down_write(&pmd->root_lock); + r = __release_metadata_snap(pmd); + up_write(&pmd->root_lock); + + return r; +} + +static int __get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result) +{ + int r; + struct thin_disk_superblock *disk_super; + struct dm_block *sblock; + + r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &sblock); + if (r) + return r; + + disk_super = dm_block_data(sblock); *result = le64_to_cpu(disk_super->held_root); return dm_bm_unlock(sblock); } -int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result) +int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result) { int r; down_read(&pmd->root_lock); - r = __get_held_metadata_root(pmd, result); + r = __get_metadata_snap(pmd, result); up_read(&pmd->root_lock); return r; diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index ed4725e67c96..b88918ccdaf6 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, /* * Hold/get root for userspace transaction. + * + * The metadata snapshot is a copy of the current superblock (minus the + * space maps). Userland can access the data structures for READ + * operations only. A small performance hit is incurred by providing this + * copy of the metadata to userland due to extra copy-on-write operations + * on the metadata nodes. Release this as soon as you finish with it. 
*/ -int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd); +int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd); +int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd); -int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, - dm_block_t *result); +int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, + dm_block_t *result); /* * Actions on a single virtual device. diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index eb3d138ff55a..37fdaf81bd1f 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -111,7 +111,7 @@ struct cell_key { dm_block_t block; }; -struct cell { +struct dm_bio_prison_cell { struct hlist_node list; struct bio_prison *prison; struct cell_key key; @@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells) return n; } +static struct kmem_cache *_cell_cache; + /* * @nr_cells should be the number of cells you want in use _concurrently_. * Don't confuse it with the number of distinct keys. @@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells) return NULL; spin_lock_init(&prison->lock); - prison->cell_pool = mempool_create_kmalloc_pool(nr_cells, - sizeof(struct cell)); + prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache); if (!prison->cell_pool) { kfree(prison); return NULL; @@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs) (lhs->block == rhs->block); } -static struct cell *__search_bucket(struct hlist_head *bucket, - struct cell_key *key) +static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket, + struct cell_key *key) { - struct cell *cell; + struct dm_bio_prison_cell *cell; struct hlist_node *tmp; hlist_for_each_entry(cell, tmp, bucket, list) @@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket, * Returns 1 if the cell was already held, 0 if @inmate is the new holder. */ static int bio_detain(struct bio_prison *prison, struct cell_key *key, - struct bio *inmate, struct cell **ref) + struct bio *inmate, struct dm_bio_prison_cell **ref) { int r = 1; unsigned long flags; uint32_t hash = hash_key(prison, key); - struct cell *cell, *cell2; + struct dm_bio_prison_cell *cell, *cell2; BUG_ON(hash > prison->nr_buckets); @@ -273,7 +274,7 @@ out: /* * @inmates must have been initialised prior to this call */ -static void __cell_release(struct cell *cell, struct bio_list *inmates) +static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates) { struct bio_prison *prison = cell->prison; @@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates) mempool_free(cell, prison->cell_pool); } -static void cell_release(struct cell *cell, struct bio_list *bios) +static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios) * bio may be in the cell. This function releases the cell, and also does * a sanity check. 
*/ -static void __cell_release_singleton(struct cell *cell, struct bio *bio) +static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) { BUG_ON(cell->holder != bio); BUG_ON(!bio_list_empty(&cell->bios)); @@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio) __cell_release(cell, NULL); } -static void cell_release_singleton(struct cell *cell, struct bio *bio) +static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio) /* * Sometimes we don't want the holder, just the additional bios. */ -static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates) +static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, + struct bio_list *inmates) { struct bio_prison *prison = cell->prison; @@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates mempool_free(cell, prison->cell_pool); } -static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) +static void cell_release_no_holder(struct dm_bio_prison_cell *cell, + struct bio_list *inmates) { unsigned long flags; struct bio_prison *prison = cell->prison; @@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) spin_unlock_irqrestore(&prison->lock, flags); } -static void cell_error(struct cell *cell) +static void cell_error(struct dm_bio_prison_cell *cell) { struct bio_prison *prison = cell->prison; struct bio_list bios; @@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, * also provides the interface for creating and destroying internal * devices. */ -struct new_mapping; +struct dm_thin_new_mapping; struct pool_features { unsigned zero_new_blocks:1; @@ -537,7 +540,7 @@ struct pool { struct deferred_set shared_read_ds; struct deferred_set all_io_ds; - struct new_mapping *next_mapping; + struct dm_thin_new_mapping *next_mapping; mempool_t *mapping_pool; mempool_t *endio_hook_pool; }; @@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev /*----------------------------------------------------------------*/ -struct endio_hook { +struct dm_thin_endio_hook { struct thin_c *tc; struct deferred_entry *shared_read_entry; struct deferred_entry *all_io_entry; - struct new_mapping *overwrite_mapping; + struct dm_thin_new_mapping *overwrite_mapping; }; static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) @@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) bio_list_init(master); while ((bio = bio_list_pop(&bios))) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + if (h->tc == tc) bio_endio(bio, DM_ENDIO_REQUEUE); else @@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool) /* * Bio endio functions. 
*/ -struct new_mapping { +struct dm_thin_new_mapping { struct list_head list; unsigned quiesced:1; @@ -746,7 +750,7 @@ struct new_mapping { struct thin_c *tc; dm_block_t virt_block; dm_block_t data_block; - struct cell *cell, *cell2; + struct dm_bio_prison_cell *cell, *cell2; int err; /* @@ -759,7 +763,7 @@ struct new_mapping { bio_end_io_t *saved_bi_end_io; }; -static void __maybe_add_mapping(struct new_mapping *m) +static void __maybe_add_mapping(struct dm_thin_new_mapping *m) { struct pool *pool = m->tc->pool; @@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m) static void copy_complete(int read_err, unsigned long write_err, void *context) { unsigned long flags; - struct new_mapping *m = context; + struct dm_thin_new_mapping *m = context; struct pool *pool = m->tc->pool; m->err = read_err || write_err ? -EIO : 0; @@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context) static void overwrite_endio(struct bio *bio, int err) { unsigned long flags; - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; - struct new_mapping *m = h->overwrite_mapping; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_new_mapping *m = h->overwrite_mapping; struct pool *pool = m->tc->pool; m->err = err; @@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err) /* * This sends the bios in the cell back to the deferred_bios list. */ -static void cell_defer(struct thin_c *tc, struct cell *cell, +static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell, dm_block_t data_block) { struct pool *pool = tc->pool; @@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell, * Same as cell_defer above, except it omits one particular detainee, * a write bio that covers the block and has already been processed. */ -static void cell_defer_except(struct thin_c *tc, struct cell *cell) +static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell) { struct bio_list bios; struct pool *pool = tc->pool; @@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell) wake_worker(pool); } -static void process_prepared_mapping(struct new_mapping *m) +static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; struct bio *bio; @@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m) mempool_free(m, tc->pool->mapping_pool); } -static void process_prepared_discard(struct new_mapping *m) +static void process_prepared_discard(struct dm_thin_new_mapping *m) { int r; struct thin_c *tc = m->tc; @@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m) } static void process_prepared(struct pool *pool, struct list_head *head, - void (*fn)(struct new_mapping *)) + void (*fn)(struct dm_thin_new_mapping *)) { unsigned long flags; struct list_head maps; - struct new_mapping *m, *tmp; + struct dm_thin_new_mapping *m, *tmp; INIT_LIST_HEAD(&maps); spin_lock_irqsave(&pool->lock, flags); @@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool) return pool->next_mapping ? 
0 : -ENOMEM; } -static struct new_mapping *get_next_mapping(struct pool *pool) +static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) { - struct new_mapping *r = pool->next_mapping; + struct dm_thin_new_mapping *r = pool->next_mapping; BUG_ON(!pool->next_mapping); @@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool) static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, struct dm_dev *origin, dm_block_t data_origin, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { int r; struct pool *pool = tc->pool; - struct new_mapping *m = get_next_mapping(pool); + struct dm_thin_new_mapping *m = get_next_mapping(pool); INIT_LIST_HEAD(&m->list); m->quiesced = 0; @@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, * bio immediately. Otherwise we use kcopyd to clone the data first. */ if (io_overwrites_block(pool, bio)) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + h->overwrite_mapping = m; m->bio = bio; save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); @@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_origin, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { schedule_copy(tc, virt_block, tc->pool_dev, data_origin, data_dest, cell, bio); @@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, dm_block_t data_dest, - struct cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio) { schedule_copy(tc, virt_block, tc->origin_dev, virt_block, data_dest, cell, bio); } static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, - dm_block_t data_block, struct cell *cell, + dm_block_t data_block, struct dm_bio_prison_cell *cell, struct bio *bio) { struct pool *pool = tc->pool; - struct new_mapping *m = get_next_mapping(pool); + struct dm_thin_new_mapping *m = get_next_mapping(pool); INIT_LIST_HEAD(&m->list); m->quiesced = 1; @@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, process_prepared_mapping(m); else if (io_overwrites_block(pool, bio)) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; + h->overwrite_mapping = m; m->bio = bio; save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); remap_and_issue(tc, bio, data_block); - } else { int r; struct dm_io_region to; @@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) */ static void retry_on_resume(struct bio *bio) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; struct thin_c *tc = h->tc; struct pool *pool = tc->pool; unsigned long flags; @@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio) spin_unlock_irqrestore(&pool->lock, flags); } -static void no_space(struct cell *cell) +static void no_space(struct dm_bio_prison_cell *cell) { struct bio *bio; struct bio_list bios; @@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio) int r; unsigned long flags; struct pool *pool = tc->pool; - struct cell *cell, *cell2; + struct 
dm_bio_prison_cell *cell, *cell2; struct cell_key key, key2; dm_block_t block = get_bio_block(tc, bio); struct dm_thin_lookup_result lookup_result; - struct new_mapping *m; + struct dm_thin_new_mapping *m; build_virtual_key(tc->td, block, &key); if (bio_detain(tc->pool->prison, &key, bio, &cell)) @@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, struct cell_key *key, struct dm_thin_lookup_result *lookup_result, - struct cell *cell) + struct dm_bio_prison_cell *cell) { int r; dm_block_t data_block; @@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, dm_block_t block, struct dm_thin_lookup_result *lookup_result) { - struct cell *cell; + struct dm_bio_prison_cell *cell; struct pool *pool = tc->pool; struct cell_key key; @@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_data_dir(bio) == WRITE) break_sharing(tc, bio, block, &key, lookup_result, cell); else { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; h->shared_read_entry = ds_inc(&pool->shared_read_ds); @@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, } static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, - struct cell *cell) + struct dm_bio_prison_cell *cell) { int r; dm_block_t data_block; @@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio) { int r; dm_block_t block = get_bio_block(tc, bio); - struct cell *cell; + struct dm_bio_prison_cell *cell; struct cell_key key; struct dm_thin_lookup_result lookup_result; @@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool) spin_unlock_irqrestore(&pool->lock, flags); while ((bio = bio_list_pop(&bios))) { - struct endio_hook *h = dm_get_mapinfo(bio)->ptr; + struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; struct thin_c *tc = h->tc; /* @@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio) wake_worker(pool); } -static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) +static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; - struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); + struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); h->tc = tc; h->shared_read_entry = NULL; @@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool) kfree(pool); } +static struct kmem_cache *_new_mapping_cache; +static struct kmem_cache *_endio_hook_cache; + static struct pool *pool_create(struct mapped_device *pool_md, struct block_device *metadata_dev, unsigned long block_size, char **error) @@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md, ds_init(&pool->all_io_ds); pool->next_mapping = NULL; - pool->mapping_pool = - mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping)); + pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, + _new_mapping_cache); if (!pool->mapping_pool) { *error = "Error creating pool's mapping mempool"; err_p = ERR_PTR(-ENOMEM); goto bad_mapping_pool; } - pool->endio_hook_pool = - mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook)); + pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE, + _endio_hook_cache); if (!pool->endio_hook_pool) { *error 
= "Error creating pool's endio_hook mempool"; err_p = ERR_PTR(-ENOMEM); @@ -2276,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po return 0; } +static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) +{ + int r; + + r = check_arg_count(argc, 1); + if (r) + return r; + + r = dm_pool_reserve_metadata_snap(pool->pmd); + if (r) + DMWARN("reserve_metadata_snap message failed."); + + return r; +} + +static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) +{ + int r; + + r = check_arg_count(argc, 1); + if (r) + return r; + + r = dm_pool_release_metadata_snap(pool->pmd); + if (r) + DMWARN("release_metadata_snap message failed."); + + return r; +} + /* * Messages supported: * create_thin <dev_id> @@ -2283,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po * delete <dev_id> * trim <dev_id> <new_size_in_sectors> * set_transaction_id <current_trans_id> <new_trans_id> + * reserve_metadata_snap + * release_metadata_snap */ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) { @@ -2302,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) else if (!strcasecmp(argv[0], "set_transaction_id")) r = process_set_transaction_id_mesg(argc, argv, pool); + else if (!strcasecmp(argv[0], "reserve_metadata_snap")) + r = process_reserve_metadata_snap_mesg(argc, argv, pool); + + else if (!strcasecmp(argv[0], "release_metadata_snap")) + r = process_release_metadata_snap_mesg(argc, argv, pool); + else DMWARN("Unrecognised thin pool target message received: %s", argv[0]); @@ -2361,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type, if (r) return r; - r = dm_pool_get_held_metadata_root(pool->pmd, &held_root); + r = dm_pool_get_metadata_snap(pool->pmd, &held_root); if (r) return r; @@ -2457,7 +2503,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 1, 0}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2613,9 +2659,9 @@ static int thin_endio(struct dm_target *ti, union map_info *map_context) { unsigned long flags; - struct endio_hook *h = map_context->ptr; + struct dm_thin_endio_hook *h = map_context->ptr; struct list_head work; - struct new_mapping *m, *tmp; + struct dm_thin_new_mapping *m, *tmp; struct pool *pool = h->tc->pool; if (h->shared_read_entry) { @@ -2755,7 +2801,32 @@ static int __init dm_thin_init(void) r = dm_register_target(&pool_target); if (r) - dm_unregister_target(&thin_target); + goto bad_pool_target; + + r = -ENOMEM; + + _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0); + if (!_cell_cache) + goto bad_cell_cache; + + _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); + if (!_new_mapping_cache) + goto bad_new_mapping_cache; + + _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0); + if (!_endio_hook_cache) + goto bad_endio_hook_cache; + + return 0; + +bad_endio_hook_cache: + kmem_cache_destroy(_new_mapping_cache); +bad_new_mapping_cache: + kmem_cache_destroy(_cell_cache); +bad_cell_cache: + dm_unregister_target(&pool_target); +bad_pool_target: + dm_unregister_target(&thin_target); return r; } @@ -2764,6 +2835,10 @@ static void dm_thin_exit(void) { dm_unregister_target(&thin_target); dm_unregister_target(&pool_target); + + kmem_cache_destroy(_cell_cache); + kmem_cache_destroy(_new_mapping_cache); + 
kmem_cache_destroy(_endio_hook_cache); } module_init(dm_thin_init); diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 6f8d38747d7f..400fe144c0cd 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -249,6 +249,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, return r; } +EXPORT_SYMBOL_GPL(dm_tm_shadow_block); int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, struct dm_block_validator *v, @@ -259,6 +260,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, return dm_bm_read_lock(tm->bm, b, v, blk); } +EXPORT_SYMBOL_GPL(dm_tm_read_lock); int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b) { diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 671c8bc14bbc..50e83dc5dc49 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2735,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = { REGULATOR_SUPPLY("vcore", "uart2"), REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"), + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), }; static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 5760c1a4b3f6..27143e042af5 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -128,7 +128,7 @@ config MTD_AFS_PARTS config MTD_OF_PARTS tristate "OpenFirmware partitioning information support" - default Y + default y depends on OF help This provides a partition parsing function which derives diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c index 608321ee056e..63d2a64331f7 100644 --- a/drivers/mtd/bcm63xxpart.c +++ b/drivers/mtd/bcm63xxpart.c @@ -4,7 +4,7 @@ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> * Mike Albon <malbon@openwrt.org> * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> - * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com> + * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, int namelen = 0; int i; u32 computed_crc; + bool rootfs_first = false; if (bcm63xx_detect_cfe(master)) return -EINVAL; @@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, char *boardid = &(buf->board_id[0]); char *tagversion = &(buf->tag_version[0]); + sscanf(buf->flash_image_start, "%u", &rootfsaddr); sscanf(buf->kernel_address, "%u", &kerneladdr); sscanf(buf->kernel_length, "%u", &kernellen); sscanf(buf->total_length, "%u", &totallen); @@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, tagversion, boardid); kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; - rootfsaddr = kerneladdr + kernellen; + rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE; spareaddr = roundup(totallen, master->erasesize) + cfelen; sparelen = master->size - spareaddr - nvramlen; - rootfslen = spareaddr - rootfsaddr; + + if (rootfsaddr < kerneladdr) { + /* default Broadcom layout */ + rootfslen = kerneladdr - rootfsaddr; + rootfs_first = true; + } else { + /* OpenWrt layout */ + rootfsaddr = kerneladdr + kernellen; + rootfslen = spareaddr - rootfsaddr; + } } else { pr_warn("CFE boot 
tag CRC invalid (expected %08x, actual %08x)\n", buf->header_crc, computed_crc); @@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, curpart++; if (kernellen > 0) { - parts[curpart].name = "kernel"; - parts[curpart].offset = kerneladdr; - parts[curpart].size = kernellen; + int kernelpart = curpart; + + if (rootfslen > 0 && rootfs_first) + kernelpart++; + parts[kernelpart].name = "kernel"; + parts[kernelpart].offset = kerneladdr; + parts[kernelpart].size = kernellen; curpart++; } if (rootfslen > 0) { - parts[curpart].name = "rootfs"; - parts[curpart].offset = rootfsaddr; - parts[curpart].size = rootfslen; - if (sparelen > 0) - parts[curpart].size += sparelen; + int rootfspart = curpart; + + if (kernellen > 0 && rootfs_first) + rootfspart--; + parts[rootfspart].name = "rootfs"; + parts[rootfspart].offset = rootfsaddr; + parts[rootfspart].size = rootfslen; + if (sparelen > 0 && !rootfs_first) + parts[rootfspart].size += sparelen; curpart++; } diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index d02592e6a0f0..22d0493a026f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd) if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { cfi->cfiq->EraseRegionInfo[0] |= 0x0040; - pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name); + pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); } } @@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd) if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; - pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name); + pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); } } +static void fixup_s29ns512p_sectors(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + /* + * S29NS512P flash uses more than 8bits to report number of sectors, + * which is not permitted by CFI. 
+ */ + cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; + pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); +} + /* Used to fix CFI-Tables of chips without Extended Query Tables */ static struct cfi_fixup cfi_nopri_fixup_table[] = { { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ @@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = { { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, + { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index ddf9ec6d9168..4558e0f4d07f 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -70,7 +70,7 @@ struct cmdline_mtd_partition { /* mtdpart_setup() parses into here */ static struct cmdline_mtd_partition *partitions; -/* the command line passed to mtdpart_setupd() */ +/* the command line passed to mtdpart_setup() */ static char *cmdline; static int cmdline_parsed = 0; diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index a4a80b742e65..681e2ee0f2d6 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) while (pages) { page = page_read(mapping, index); - if (!page) - return -ENOMEM; if (IS_ERR(page)) return PTR_ERR(page); @@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, len = len - cpylen; page = page_read(dev->blkdev->bd_inode->i_mapping, index); - if (!page) - return -ENOMEM; if (IS_ERR(page)) return PTR_ERR(page); @@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, len = len - cpylen; page = page_read(mapping, index); - if (!page) - return -ENOMEM; if (IS_ERR(page)) return PTR_ERR(page); @@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) dev->mtd.flags = MTD_CAP_RAM; dev->mtd._erase = block2mtd_erase; dev->mtd._write = block2mtd_write; - dev->mtd._writev = mtd_writev; dev->mtd._sync = block2mtd_sync; dev->mtd._read = block2mtd_read; dev->mtd.priv = dev; diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 50aa90aa7a7f..f70854d728fe 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len, u8 data8, *dst8; doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len); - cdr = len & 0x3; + cdr = len & 0x1; len4 = len - cdr; if (first) @@ -732,12 +732,24 @@ err: * @len: the number of bytes to be read (must be a multiple of 4) * @buf: the buffer to be filled in (or NULL is forget bytes) * @first: 1 if first time read, DOC_READADDRESS should be set + * @last_odd: 1 if last read ended up on an odd byte + * + * Reads bytes from a prepared page. There is a trickery here : if the last read + * ended up on an odd offset in the 1024 bytes double page, ie. between the 2 + * planes, the first byte must be read apart. If a word (16bit) read was used, + * the read would return the byte of plane 2 as low *and* high endian, which + * will mess the read. 
* */ static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf, - int first) + int first, int last_odd) { - doc_read_data_area(docg3, buf, len, first); + if (last_odd && len > 0) { + doc_read_data_area(docg3, buf, 1, first); + doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0); + } else { + doc_read_data_area(docg3, buf, len, first); + } doc_delay(docg3, 2); return len; } @@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, u8 *buf = ops->datbuf; size_t len, ooblen, nbdata, nboob; u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; + int max_bitflips = 0; if (buf) len = ops->len; @@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, ret = 0; skip = from % DOC_LAYOUT_PAGE_SIZE; mutex_lock(&docg3->cascade->lock); - while (!ret && (len > 0 || ooblen > 0)) { + while (ret >= 0 && (len > 0 || ooblen > 0)) { calc_block_sector(from - skip, &block0, &block1, &page, &ofs, docg3->reliable); nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip); @@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); if (ret < 0) goto err_in_read; - ret = doc_read_page_getbytes(docg3, skip, NULL, 1); + ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0); if (ret < skip) goto err_in_read; - ret = doc_read_page_getbytes(docg3, nbdata, buf, 0); + ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2); if (ret < nbdata) goto err_in_read; doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata - skip, - NULL, 0); - ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); + NULL, 0, (skip + nbdata) % 2); + ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0); if (ret < nboob) goto err_in_read; doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob, - NULL, 0); + NULL, 0, nboob % 2); doc_get_bch_hw_ecc(docg3, hwecc); eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); @@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, } if (ret > 0) { mtd->ecc_stats.corrected += ret; - ret = -EUCLEAN; + max_bitflips = max(max_bitflips, ret); + ret = max_bitflips; } } @@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3) DOC_LAYOUT_PAGE_SIZE); if (!ret) doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE, - buf, 1); + buf, 1, 0); buf += DOC_LAYOUT_PAGE_SIZE; } doc_read_page_finish(docg3); @@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from) ret = doc_reset_seq(docg3); if (!ret) ret = doc_read_page_prepare(docg3, block0, block1, page, - ofs + DOC_LAYOUT_WEAR_OFFSET); + ofs + DOC_LAYOUT_WEAR_OFFSET, 0); if (!ret) ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE, - buf, 1); + buf, 1, 0); doc_read_page_finish(docg3); if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK)) diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 1924d247c1cb..5d0d68c3fe27 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, + /* Everspin */ + { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, + /* Intel/Numonyx -- xxxs33b */ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, /* Macronix */ + { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, { "mx25l4005a", 
INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, @@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = { { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, /* Catalyst / On Semiconductor -- non-JEDEC */ { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c index 797d43cd3550..67960362681e 100644 --- a/drivers/mtd/devices/spear_smi.c +++ b/drivers/mtd/devices/spear_smi.c @@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev) goto err_clk; } - ret = clk_enable(dev->clk); + ret = clk_prepare_enable(dev->clk); if (ret) - goto err_clk_enable; + goto err_clk_prepare_enable; ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); if (ret) { @@ -1020,8 +1020,8 @@ err_bank_setup: free_irq(irq, dev); platform_set_drvdata(pdev, NULL); err_irq: - clk_disable(dev->clk); -err_clk_enable: + clk_disable_unprepare(dev->clk); +err_clk_prepare_enable: clk_put(dev->clk); err_clk: iounmap(dev->io_base); @@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); free_irq(irq, dev); - clk_disable(dev->clk); + clk_disable_unprepare(dev->clk); clk_put(dev->clk); iounmap(dev->io_base); kfree(dev); @@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state) struct spear_smi *dev = platform_get_drvdata(pdev); if (dev && dev->clk) - clk_disable(dev->clk); + clk_disable_unprepare(dev->clk); return 0; } @@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev) int ret = -EPERM; if (dev && dev->clk) - ret = clk_enable(dev->clk); + ret = clk_prepare_enable(dev->clk); if (!ret) spear_smi_hw_init(dev); diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index dbfe17baf046..45abed67f1ef 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c @@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = { static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) { - int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info); + int qinfo_lines = ARRAY_SIZE(qinfo_array); int i; int bankwidth = map_bankwidth(map) * 8; int major, minor; diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 8af67cfd671a..5ba2458e799a 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -224,7 +224,7 @@ config MTD_CK804XROM config MTD_SCB2_FLASH tristate "BIOS flash chip on Intel SCB2 boards" - depends on X86 && MTD_JEDECPROBE + depends on X86 && MTD_JEDECPROBE && PCI help Support for treating the BIOS flash chip on Intel SCB2 boards as an MTD device - with this you can reprogram your BIOS. 
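Aside (editorial, not part of the pull): the spear_smi.c hunk just above, and the fsmc_nand.c hunk further down, replace clk_enable()/clk_disable() with clk_prepare_enable()/clk_disable_unprepare(). With the common clock framework the (possibly sleeping) prepare step is separate from the atomic enable step, so callers that may sleep are expected to use the combined helpers. The following is a minimal, illustrative probe/remove sketch of that pairing under those assumptions; it is not taken from the patches, and the foo_* names are placeholders.

/*
 * Illustrative sketch only -- shows the clk_prepare_enable() /
 * clk_disable_unprepare() pairing used by the conversions in this pull.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	struct clk *clk;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	/* prepare (may sleep) and enable in a single call */
	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		clk_put(priv->clk);
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* reverse order: disable, unprepare, then drop the clock reference */
	clk_disable_unprepare(priv->clk);
	clk_put(priv->clk);
	return 0;
}

The same reasoning applies to the suspend/resume paths touched in spear_smi.c: suspend uses clk_disable_unprepare() and resume uses clk_prepare_enable(), keeping prepare/unprepare balanced across power transitions.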
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index 92e1f41634c7..93f03175c82d 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c @@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = { .id_table = vr_nor_pci_ids, }; -static int __init vr_nor_mtd_init(void) -{ - return pci_register_driver(&vr_nor_pci_driver); -} - -static void __exit vr_nor_mtd_exit(void) -{ - pci_unregister_driver(&vr_nor_pci_driver); -} - -module_init(vr_nor_mtd_init); -module_exit(vr_nor_mtd_exit); +module_pci_driver(vr_nor_pci_driver); MODULE_AUTHOR("Andy Lowe"); MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range"); diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c index 1d005a3e9b41..f14ce0af763f 100644 --- a/drivers/mtd/maps/pci.c +++ b/drivers/mtd/maps/pci.c @@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = { .id_table = mtd_pci_ids, }; -static int __init mtd_pci_maps_init(void) -{ - return pci_register_driver(&mtd_pci_driver); -} - -static void __exit mtd_pci_maps_exit(void) -{ - pci_unregister_driver(&mtd_pci_driver); -} - -module_init(mtd_pci_maps_init); -module_exit(mtd_pci_maps_exit); +module_pci_driver(mtd_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c index 934a72c80078..9dcbc684abdb 100644 --- a/drivers/mtd/maps/scb2_flash.c +++ b/drivers/mtd/maps/scb2_flash.c @@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = { .remove = __devexit_p(scb2_flash_remove), }; -static int __init -scb2_flash_init(void) -{ - return pci_register_driver(&scb2_flash_driver); -} - -static void __exit -scb2_flash_exit(void) -{ - pci_unregister_driver(&scb2_flash_driver); -} - -module_init(scb2_flash_init); -module_exit(scb2_flash_exit); +module_pci_driver(scb2_flash_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tim Hockin <thockin@sun.com>"); diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c index 71b0ba797912..e7534c82f93a 100644 --- a/drivers/mtd/maps/wr_sbc82xx_flash.c +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c @@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = { } }; -static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; +static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL}; #define init_sbc82xx_one_flash(map, br, or) \ do { \ diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index c837507dfb1c..575730744fdb 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev, } static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); +static ssize_t mtd_ecc_strength_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength); +} +static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL); + +static ssize_t mtd_bitflip_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold); +} + +static ssize_t mtd_bitflip_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + unsigned int bitflip_threshold; + int retval; + + retval = kstrtouint(buf, 0, 
&bitflip_threshold); + if (retval) + return retval; + + mtd->bitflip_threshold = bitflip_threshold; + return count; +} +static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR, + mtd_bitflip_threshold_show, + mtd_bitflip_threshold_store); + static struct attribute *mtd_attrs[] = { &dev_attr_type.attr, &dev_attr_flags.attr, @@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = { &dev_attr_oobsize.attr, &dev_attr_numeraseregions.attr, &dev_attr_name.attr, + &dev_attr_ecc_strength.attr, + &dev_attr_bitflip_threshold.attr, NULL, }; @@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd) mtd->index = i; mtd->usecount = 0; + /* default value if not set by driver */ + if (mtd->bitflip_threshold == 0) + mtd->bitflip_threshold = mtd->ecc_strength; + if (is_power_of_2(mtd->erasesize)) mtd->erasesize_shift = ffs(mtd->erasesize) - 1; else @@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { + int ret_code; *retlen = 0; if (from < 0 || from > mtd->size || len > mtd->size - from) return -EINVAL; if (!len) return 0; - return mtd->_read(mtd, from, len, retlen, buf); + + /* + * In the absence of an error, drivers return a non-negative integer + * representing the maximum number of bitflips that were corrected on + * any one ecc region (if applicable; zero otherwise). + */ + ret_code = mtd->_read(mtd, from, len, retlen, buf); + if (unlikely(ret_code < 0)) + return ret_code; + if (mtd->ecc_strength == 0) + return 0; /* device lacks ecc */ + return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; } EXPORT_SYMBOL_GPL(mtd_read); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 9651c06de0a9..d518e4db8a0b 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len, stats = part->master->ecc_stats; res = part->master->_read(part->master, from + part->offset, len, retlen, buf); - if (unlikely(res)) { - if (mtd_is_bitflip(res)) - mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; - if (mtd_is_eccerr(res)) - mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; - } + if (unlikely(mtd_is_eccerr(res))) + mtd->ecc_stats.failed += + part->master->ecc_stats.failed - stats.failed; + else + mtd->ecc_stats.corrected += + part->master->ecc_stats.corrected - stats.corrected; return res; } @@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, slave->mtd.ecclayout = master->ecclayout; slave->mtd.ecc_strength = master->ecc_strength; + slave->mtd.bitflip_threshold = master->bitflip_threshold; + if (master->_block_isbad) { uint64_t offs = 0; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 7d17cecad69d..31bb7e5b504a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -115,6 +115,46 @@ config MTD_NAND_OMAP2 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 platforms. +config MTD_NAND_OMAP_BCH + depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3 + bool "Enable support for hardware BCH error correction" + default n + select BCH + select BCH_CONST_PARAMS + help + Support for hardware BCH error correction. + +choice + prompt "BCH error correction capability" + depends on MTD_NAND_OMAP_BCH + +config MTD_NAND_OMAP_BCH8 + bool "8 bits / 512 bytes (recommended)" + help + Support correcting up to 8 bitflips per 512-byte block. 
+ This will use 13 bytes of spare area per 512 bytes of page data. + This is the recommended mode, as 4-bit mode does not work + on some OMAP3 revisions, due to a hardware bug. + +config MTD_NAND_OMAP_BCH4 + bool "4 bits / 512 bytes" + help + Support correcting up to 4 bitflips per 512-byte block. + This will use 7 bytes of spare area per 512 bytes of page data. + Note that this mode does not work on some OMAP3 revisions, due to a + hardware bug. Please check your OMAP datasheet before selecting this + mode. + +endchoice + +if MTD_NAND_OMAP_BCH +config BCH_CONST_M + default 13 +config BCH_CONST_T + default 4 if MTD_NAND_OMAP_BCH4 + default 8 if MTD_NAND_OMAP_BCH8 +endif + config MTD_NAND_IDS tristate @@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) + depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) help Enables NAND Flash support for IMX23 or IMX28. The GPMI controller is very powerful, with the help of BCH diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 4f20e1d8bef1..60a0dfdb0808 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c @@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len, } err = 0; if (corrected) - err = -EUCLEAN; + err = 1; /* return max_bitflips per ecc step */ if (uncorrected) err = -EBADMSG; out: @@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len, } err = 0; if (corrected) - err = -EUCLEAN; + err = 1; /* return max_bitflips per ecc step */ if (uncorrected) err = -EBADMSG; return err; diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 2165576a1c67..97ac6712bb19 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd, * mtd: mtd info structure * chip: nand chip info structure * buf: buffer to store read data + * oob_required: caller expects OOB data read to chip->oob_poi */ -static int atmel_nand_read_page(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf, int page) +static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd, uint8_t *oob = chip->oob_poi; uint8_t *ecc_pos; int stat; + unsigned int max_bitflips = 0; /* * Errata: ALE is incorrectly wired up to the ECC controller @@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd, /* check if there's an error */ stat = chip->ecc.correct(mtd, p, oob, NULL); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } /* get back to oob start (end of page) */ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); @@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd, /* read the oob */ chip->read_buf(mtd, oob, mtd->oobsize); - return 0; + return max_bitflips; } /* diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 73abbc3e093e..9f609d2dcf62 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev) this->chip_delay = 30; this->ecc.mode = NAND_ECC_SOFT; - this->options = 
NAND_NO_AUTOINCR; - if (pd->devwidth) this->options |= NAND_BUSWIDTH_16; diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c index a930666d0687..5914bb32e001 100644 --- a/drivers/mtd/nand/bcm_umi_bch.c +++ b/drivers/mtd/nand/bcm_umi_bch.c @@ -22,9 +22,9 @@ /* ---- Private Function Prototypes -------------------------------------- */ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf, int page); + struct nand_chip *chip, uint8_t *buf, int oob_required, int page); static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf); + struct nand_chip *chip, const uint8_t *buf, int oob_required); /* ---- Private Variables ------------------------------------------------ */ @@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = { * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data +* @oob_required: caller expects OOB data read to chip->oob_poi * ***************************************************************************/ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, - int page) + int oob_required, int page) { int sectorIdx = 0; int eccsize = chip->ecc.size; @@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, uint8_t eccCalc[NAND_ECC_NUM_BYTES]; int sectorOobSize = mtd->oobsize / eccsteps; int stat; + unsigned int max_bitflips = 0; for (sectorIdx = 0; sectorIdx < eccsteps; sectorIdx++, datap += eccsize) { @@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, } #endif mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); } } - return 0; + return max_bitflips; } /**************************************************************************** @@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer +* @oob_required: must write chip->oob_poi to OOB * ***************************************************************************/ static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf) + struct nand_chip *chip, const uint8_t *buf, int oob_required) { int sectorIdx = 0; int eccsize = chip->ecc.size; diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c index 6908cdde3065..c855e7cd337b 100644 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ b/drivers/mtd/nand/bcm_umi_nand.c @@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf, * for MLC parts which may have permanently stuck bits. */ struct nand_chip *chip = mtd->priv; - int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0); + int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0); if (ret < 0) return -EFAULT; else { @@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) this->badblock_pattern = &largepage_bbt; } - /* - * FIXME: ecc strength value of 6 bits per 512 bytes of data is a - * conservative guess, given 13 ecc bytes and using bch alg. - * (Assume Galois field order m=15 to allow a margin of error.) 
- */ - this->ecc.strength = 6; + this->ecc.strength = 8; #endif diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index d7b86b925de5..3f1c18599cbd 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd, } static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { bf5xx_nand_read_buf(mtd, buf, mtd->writesize); bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip } static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { bf5xx_nand_write_buf(mtd, buf, mtd->writesize); bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 2a96e1a12062..41371ba1a811 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd, /* Don't use -- use nand_read_oob_std for now */ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); - return 1; + return 0; } /** * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller expects OOB data read to chip->oob_poi * * The hw generator calculates the error syndrome automatically. Therefor * we need a special oob layout and handling. 
*/ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { struct cafe_priv *cafe = mtd->priv; + unsigned int max_bitflips = 0; cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n", cafe_readl(cafe, NAND_ECC_RESULT), @@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, } else { dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n); mtd->ecc_stats.corrected += n; + max_bitflips = max_t(unsigned int, max_bitflips, n); } } - return 0; + return max_bitflips; } static struct nand_ecclayout cafe_oobinfo_2048 = { @@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = { static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf) + struct nand_chip *chip, + const uint8_t *buf, int oob_required) { struct cafe_priv *cafe = mtd->priv; @@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, } static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int page, int cached, int raw) + const uint8_t *buf, int oob_required, int page, + int cached, int raw) { int status; chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); if (unlikely(raw)) - chip->ecc.write_page_raw(mtd, chip, buf); + chip->ecc.write_page_raw(mtd, chip, buf, oob_required); else - chip->ecc.write_page(mtd, chip, buf); + chip->ecc.write_page(mtd, chip, buf, oob_required); /* * Cached progamming disabled for now, Not sure if its worth the @@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, /* Enable the following for a flash based bad block table */ cafe->nand.bbt_options = NAND_BBT_USE_FLASH; - cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; + cafe->nand.options = NAND_OWN_BUFFERS; if (skipbbt) { cafe->nand.options |= NAND_SKIP_BBTSCAN; @@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = { .resume = cafe_nand_resume, }; -static int __init cafe_nand_init(void) -{ - return pci_register_driver(&cafe_nand_pci_driver); -} - -static void __exit cafe_nand_exit(void) -{ - pci_unregister_driver(&cafe_nand_pci_driver); -} -module_init(cafe_nand_init); -module_exit(cafe_nand_exit); +module_pci_driver(cafe_nand_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index 821c34c62500..adb6c3ef37fb 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c @@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) /* Enable the following for a flash based bad block table */ this->bbt_options = NAND_BBT_USE_FLASH; - this->options = NAND_NO_AUTOINCR; /* Scan to find existence of the device */ if (nand_scan(new_mtd, 1)) { diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index a9e57d686297..0650aafa0dd2 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len) #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, - uint32_t irq_status) + uint32_t irq_status, unsigned int *max_bitflips) { bool check_erased_page = false; + unsigned int bitflips = 0; if (irq_status & INTR_STATUS__ECC_ERR) { /* read the ECC errors. 
we'll ignore them for now */ @@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, /* correct the ECC error */ buf[offset] ^= err_correction_value; denali->mtd.ecc_stats.corrected++; + bitflips++; } } else { /* if the error is not correctable, need to @@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, clear_interrupts(denali); denali_set_intr_modes(denali, true); } + *max_bitflips = bitflips; return check_erased_page; } @@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip, * by write_page above. * */ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { /* for regular page writes, we let HW handle all the ECC * data written to the device. */ @@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, * write_page() function above. */ static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { /* for raw page writes, we want to disable ECC and simply write whatever data is in the buffer. */ @@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, } static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { read_oob_data(mtd, chip->oob_poi, page); - return 0; /* notify NAND core to send command to - NAND device. */ + return 0; } static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { + unsigned int max_bitflips; struct denali_nand_info *denali = mtd_to_denali(mtd); dma_addr_t addr = denali->buf.dma_buf; @@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, memcpy(buf, denali->buf.buf, mtd->writesize); - check_erased_page = handle_ecc(denali, buf, irq_status); + check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips); denali_enable_dma(denali, false); if (check_erased_page) { @@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, denali->mtd.ecc_stats.failed++; } } - return 0; + return max_bitflips; } static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); @@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = { .remove = denali_pci_remove, }; -static int __devinit denali_init(void) -{ - printk(KERN_INFO "Spectra MTD driver\n"); - return pci_register_driver(&denali_pci_driver); -} - -/* Free memory */ -static void __devexit denali_exit(void) -{ - pci_unregister_driver(&denali_pci_driver); -} - -module_init(denali_init); -module_exit(denali_exit); +module_pci_driver(denali_pci_driver); diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index b08202664543..a225e49a5623 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand, struct docg4_priv *doc = nand->priv; void __iomem *docptr = doc->virtadr; uint16_t status, edc_err, *buf16; + int bits_corrected = 0; dev_dbg(doc->dev, "%s: page %08x\n", __func__, page); @@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand, /* If bitflips are reported, attempt to 
correct with ecc */ if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) { - int bits_corrected = correct_data(mtd, buf, page); + bits_corrected = correct_data(mtd, buf, page); if (bits_corrected == -EBADMSG) mtd->ecc_stats.failed++; else @@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand, } writew(0, docptr + DOC_DATAEND); - return 0; + return bits_corrected; } static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { return read_page(mtd, nand, buf, page, false); } static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { return read_page(mtd, nand, buf, page, true); } static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, - int page, int sndcmd) + int page) { struct docg4_priv *doc = nand->priv; void __iomem *docptr = doc->virtadr; @@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand, } static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { return write_page(mtd, nand, buf, false); } static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { return write_page(mtd, nand, buf, true); } @@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd) return -ENOMEM; read_page_prologue(mtd, g4_addr); - status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE); + status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE); if (status) goto exit; @@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs) /* write first page of block */ write_page_prologue(mtd, g4_addr); - docg4_write_page(mtd, nand, buf); + docg4_write_page(mtd, nand, buf, 1); ret = pageprog(mtd); if (!ret) mtd->ecc_stats.badblocks++; @@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) nand->ecc.prepad = 8; nand->ecc.bytes = 8; nand->ecc.strength = DOCG4_T; - nand->options = - NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR; + nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE; nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA; nand->controller = &nand->hwcontrol; spin_lock_init(&nand->controller->lock); diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 80b5264f0a32..784293806110 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl { unsigned int use_mdr; /* Non zero if the MDR is to be set */ unsigned int oob; /* Non zero if operating on OOB data */ unsigned int counter; /* counter for the initializations */ + unsigned int max_bitflips; /* Saved during READ0 cmd */ }; /* These map to the positions used by the FCM hardware ECC generator */ @@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd) if (chip->ecc.mode != NAND_ECC_HW) return 0; + elbc_fcm_ctrl->max_bitflips = 0; + if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { uint32_t lteccr = in_be32(&lbc->lteccr); /* @@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd) * bits 28-31 are uncorrectable errors, marked elsewhere. * for small page nand only 1 bit is used. 
* if the ELBC doesn't have the lteccr register it reads 0 + * FIXME: 4 bits can be corrected on NANDs with 2k pages, so + * count the number of sub-pages with bitflips and update + * ecc_stats.corrected accordingly. */ if (lteccr & 0x000F000F) out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ - if (lteccr & 0x000F0000) + if (lteccr & 0x000F0000) { mtd->ecc_stats.corrected++; + elbc_fcm_ctrl->max_bitflips = 1; + } } return 0; @@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) return 0; } -static int fsl_elbc_read_page(struct mtd_info *mtd, - struct nand_chip *chip, - uint8_t *buf, - int page) +static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { + struct fsl_elbc_mtd *priv = chip->priv; + struct fsl_lbc_ctrl *ctrl = priv->ctrl; + struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand; + fsl_elbc_read_buf(mtd, buf, mtd->writesize); - fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); + if (oob_required) + fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) mtd->ecc_stats.failed++; - return 0; + return elbc_fcm_ctrl->max_bitflips; } /* ECC will be calculated automatically, and errors will be detected in * waitfunc. */ -static void fsl_elbc_write_page(struct mtd_info *mtd, - struct nand_chip *chip, - const uint8_t *buf) +static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) { fsl_elbc_write_buf(mtd, buf, mtd->writesize); fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) chip->bbt_md = &bbt_mirror_descr; /* set up nand options */ - chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; + chip->options = NAND_NO_READRDY; chip->bbt_options = NAND_BBT_USE_FLASH; chip->controller = &elbc_fcm_ctrl->controller; @@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.strength = 1; - /* - * FIXME: can hardware ecc correct 4 bitflips if page size is - * 2k? Then does hardware report number of corrections for this - * case? If so, ecc_stats reporting needs to be fixed as well. 
- */ } else { /* otherwise fall back to default software ECC */ chip->ecc.mode = NAND_ECC_SOFT; diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index c30ac7b83d28..9602c1b7e27e 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl { unsigned int oob; /* Non zero if operating on OOB data */ unsigned int eccread; /* Non zero for a full-page ECC read */ unsigned int counter; /* counter for the initializations */ + unsigned int max_bitflips; /* Saved during READ0 cmd */ }; static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl; @@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER) dev_err(priv->dev, "NAND Flash Write Protect Error\n"); + nctrl->max_bitflips = 0; + if (nctrl->eccread) { int errors; int bufnum = nctrl->page & priv->bufnum_mask; @@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) } mtd->ecc_stats.corrected += errors; + nctrl->max_bitflips = max_t(unsigned int, + nctrl->max_bitflips, + errors); } nctrl->eccread = 0; @@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, return; - /* READID must read all 8 possible bytes */ case NAND_CMD_READID: + case NAND_CMD_PARAM: { + int timing = IFC_FIR_OP_RB; + if (command == NAND_CMD_PARAM) + timing = IFC_FIR_OP_RBCD; + out_be32(&ifc->ifc_nand.nand_fir0, (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); + (timing << IFC_NAND_FIR0_OP2_SHIFT)); out_be32(&ifc->ifc_nand.nand_fcr0, - NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); - /* 8 bytes for manuf, device and exts */ - out_be32(&ifc->ifc_nand.nand_fbcr, 8); - ifc_nand_ctrl->read_bytes = 8; + command << IFC_NAND_FCR0_CMD0_SHIFT); + out_be32(&ifc->ifc_nand.row3, column); + + /* + * although currently it's 8 bytes for READID, we always read + * the maximum 256 bytes(for PARAM) + */ + out_be32(&ifc->ifc_nand.nand_fbcr, 256); + ifc_nand_ctrl->read_bytes = 256; set_addr(mtd, 0, 0, 0); fsl_ifc_run_command(mtd); return; + } /* ERASE1 stores the block and page address */ case NAND_CMD_ERASE1: @@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) return nand_fsr | NAND_STATUS_WP; } -static int fsl_ifc_read_page(struct mtd_info *mtd, - struct nand_chip *chip, - uint8_t *buf, int page) +static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { struct fsl_ifc_mtd *priv = chip->priv; struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; fsl_ifc_read_buf(mtd, buf, mtd->writesize); - fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); + if (oob_required) + fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n"); @@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) mtd->ecc_stats.failed++; - return 0; + return nctrl->max_bitflips; } /* ECC will be calculated automatically, and errors will be detected in * waitfunc. 
*/ -static void fsl_ifc_write_page(struct mtd_info *mtd, - struct nand_chip *chip, - const uint8_t *buf) +static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) { fsl_ifc_write_buf(mtd, buf, mtd->writesize); fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); @@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) out_be32(&ifc->ifc_nand.ncfgr, 0x0); /* set up nand options */ - chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; + chip->options = NAND_NO_READRDY; chip->bbt_options = NAND_BBT_USE_FLASH; @@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) /* Hardware generates ECC per 512 Bytes */ chip->ecc.size = 512; chip->ecc.bytes = 8; + chip->ecc.strength = 4; switch (csor & CSOR_NAND_PGS_MASK) { case CSOR_NAND_PGS_512: diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 1b8330e1155a..38d26240d8b1 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller expects OOB data read to chip->oob_poi * @page: page number to read * * This routine is needed for fsmc version 8 as reading from NAND chip has to be @@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf, * max of 8 bits) */ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { struct fsmc_nand_data *host = container_of(mtd, struct fsmc_nand_data, mtd); @@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, */ uint16_t ecc_oob[7]; uint8_t *oob = (uint8_t *)&ecc_oob[0]; + unsigned int max_bitflips = 0; for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); @@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.calculate(mtd, p, &ecc_calc[i]); stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } - return 0; + return max_bitflips; } /* @@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) return PTR_ERR(host->clk); } - ret = clk_enable(host->clk); + ret = clk_prepare_enable(host->clk); if (ret) - goto err_clk_enable; + goto err_clk_prepare_enable; /* * This device ID is actually a common AMBA ID as used on the @@ -1176,8 +1180,8 @@ err_req_write_chnl: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->read_dma_chan); err_req_read_chnl: - clk_disable(host->clk); -err_clk_enable: + clk_disable_unprepare(host->clk); +err_clk_prepare_enable: clk_put(host->clk); return ret; } @@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) dma_release_channel(host->write_dma_chan); dma_release_channel(host->read_dma_chan); } - clk_disable(host->clk); + clk_disable_unprepare(host->clk); clk_put(host->clk); } @@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev) { struct fsmc_nand_data *host = dev_get_drvdata(dev); if (host) - clk_disable(host->clk); + clk_disable_unprepare(host->clk); return 0; } @@ -1218,7 +1222,7 @@ static int 
fsmc_nand_resume(struct device *dev) { struct fsmc_nand_data *host = dev_get_drvdata(dev); if (host) { - clk_enable(host->clk); + clk_prepare_enable(host->clk); fsmc_nand_setup(host->regs_va, host->bank, host->nand.options & NAND_BUSWIDTH_16, host->dev_timings); diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h index 4effb8c579db..a0924515c396 100644 --- a/drivers/mtd/nand/gpmi-nand/bch-regs.h +++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h @@ -51,15 +51,26 @@ #define BP_BCH_FLASH0LAYOUT0_ECC0 12 #define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0) -#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \ - (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0) +#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11 +#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) +#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \ + (GPMI_IS_MX6Q(x) \ + ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \ + & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \ + : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \ + & BM_BCH_FLASH0LAYOUT0_ECC0) \ + ) #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) -#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \ - (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\ - & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) +#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ + (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) +#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \ + (GPMI_IS_MX6Q(x) \ + ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \ + : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \ + ) #define HW_BCH_FLASH0LAYOUT1 0x00000090 @@ -72,13 +83,24 @@ #define BP_BCH_FLASH0LAYOUT1_ECCN 12 #define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN) -#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \ - (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN) +#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11 +#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) +#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \ + (GPMI_IS_MX6Q(x) \ + ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \ + & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \ + : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \ + & BM_BCH_FLASH0LAYOUT1_ECCN) \ + ) #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) -#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \ - (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ - & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) +#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ + (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) +#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \ + (GPMI_IS_MX6Q(x) \ + ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ + : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ + ) #endif diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index e8ea7107932e..a1f43329ad43 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -21,7 +21,6 @@ #include <linux/mtd/gpmi-nand.h> #include <linux/delay.h> #include <linux/clk.h> -#include <mach/mxs.h> #include "gpmi-nand.h" #include "gpmi-regs.h" @@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = { .max_dll_delay_in_ns = 16, }; +#define MXS_SET_ADDR 0x4 +#define MXS_CLR_ADDR 0x8 /* * Clear the bit and poll it cleared. 
This is usually called with * a reset address and mask being either SFTRST(bit 31) or CLKGATE @@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask) int timeout = 0x400; /* clear the bit */ - __mxs_clrl(mask, addr); + writel(mask, addr + MXS_CLR_ADDR); /* * SFTRST needs 3 GPMI clocks to settle, the reference manual @@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) goto error; /* clear CLKGATE */ - __mxs_clrl(MODULE_CLKGATE, reset_addr); + writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); if (!just_enable) { /* set SFTRST to reset the block */ - __mxs_setl(MODULE_SFTRST, reset_addr); + writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR); udelay(1); /* poll CLKGATE becoming set */ @@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this) /* Configure layout 0. */ writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) - | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength) - | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size), + | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) + | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), r->bch_regs + HW_BCH_FLASH0LAYOUT0); writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) - | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength) - | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size), + | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) + | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), r->bch_regs + HW_BCH_FLASH0LAYOUT1); /* Set *all* chip selects to use layout 0. */ @@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time, return max(k, min); } +#define DEF_MIN_PROP_DELAY 5 +#define DEF_MAX_PROP_DELAY 9 /* Apply timing to current hardware conditions. */ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, struct gpmi_nfc_hardware_timing *hw) { - struct gpmi_nand_platform_data *pdata = this->pdata; struct timing_threshod *nfc = &timing_default_threshold; struct nand_chip *nand = &this->nand; struct nand_timing target = this->timing; @@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, int ideal_sample_delay_in_ns; unsigned int sample_delay_factor; int tEYE; - unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns; - unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns; + unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY; + unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY; /* * If there are multiple chips, we need to relax the timings to allow @@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) if (GPMI_IS_MX23(this)) { mask = MX23_BM_GPMI_DEBUG_READY0 << chip; reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); - } else if (GPMI_IS_MX28(this)) { + } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) { + /* MX28 shares the same R/B register as MX6Q. 
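The bch-regs.h changes above make the FLASH0LAYOUT fields depend on the SoC: on i.MX6Q the ECC level is a five-bit field starting at bit 11 rather than a four-bit field at bit 12, and the DATA0/DATAn size macros shift the byte count right by two before masking, which suggests the i.MX6Q field counts the block size in four-byte units. An illustration only (example values, not taken from the patch) of what the i.MX6Q branch of the macros evaluates to for 512-byte chunks at ECC level 8:

    u32 ecc0  = (8 << 11) & (0x1f << 11);  /* MX6Q ECC0: 5-bit level field at bit 11 */
    u32 dsize = (512 >> 2) & 0x3ff;        /* MX6Q DATA0_SIZE: 512 bytes -> 0x80     */
    u32 layout0 = ecc0 | dsize;            /* still ORed with the NBLOCKS and        */
                                           /* META_SIZE fields in bch_set_geometry() */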
*/ mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); reg = readl(r->gpmi_regs + HW_GPMI_STAT); } else diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index b68e04310bd8..a05b7b444d4f 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -25,6 +25,8 @@ #include <linux/mtd/gpmi-nand.h> #include <linux/mtd/partitions.h> #include <linux/pinctrl/consumer.h> +#include <linux/of.h> +#include <linux/of_device.h> #include "gpmi-nand.h" /* add our owner bbt descriptor */ @@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this) static bool gpmi_dma_filter(struct dma_chan *chan, void *param) { struct gpmi_nand_data *this = param; - struct resource *r = this->private; + int dma_channel = (int)this->private; if (!mxs_dma_is_apbh(chan)) return false; @@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param) * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7 * (These eight channels share the same IRQ!) */ - if (r->start <= chan->chan_id && chan->chan_id <= r->end) { + if (dma_channel == chan->chan_id) { chan->private = &this->dma_data; return true; } @@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this) static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) { struct platform_device *pdev = this->pdev; - struct gpmi_nand_platform_data *pdata = this->pdata; - struct resources *res = &this->resources; - struct resource *r, *r_dma; - unsigned int i; + struct resource *r_dma; + struct device_node *dn; + int dma_channel; + unsigned int ret; + struct dma_chan *dma_chan; + dma_cap_mask_t mask; + + /* dma channel, we only use the first one. */ + dn = pdev->dev.of_node; + ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel); + if (ret) { + pr_err("unable to get DMA channel from dt.\n"); + goto acquire_err; + } + this->private = (void *)dma_channel; - r = platform_get_resource_byname(pdev, IORESOURCE_DMA, - GPMI_NAND_DMA_CHANNELS_RES_NAME); + /* gpmi dma interrupt */ r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, GPMI_NAND_DMA_INTERRUPT_RES_NAME); - if (!r || !r_dma) { + if (!r_dma) { pr_err("Can't get resource for DMA\n"); - return -ENXIO; + goto acquire_err; } + this->dma_data.chan_irq = r_dma->start; - /* used in gpmi_dma_filter() */ - this->private = r; - - for (i = r->start; i <= r->end; i++) { - struct dma_chan *dma_chan; - dma_cap_mask_t mask; + /* request dma channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); - if (i - r->start >= pdata->max_chip_count) - break; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - /* get the DMA interrupt */ - if (r_dma->start == r_dma->end) { - /* only register the first. 
*/ - if (i == r->start) - this->dma_data.chan_irq = r_dma->start; - else - this->dma_data.chan_irq = NO_IRQ; - } else - this->dma_data.chan_irq = r_dma->start + (i - r->start); - - dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); - if (!dma_chan) - goto acquire_err; - - /* fill the first empty item */ - this->dma_chans[i - r->start] = dma_chan; + dma_chan = dma_request_channel(mask, gpmi_dma_filter, this); + if (!dma_chan) { + pr_err("dma_request_channel failed.\n"); + goto acquire_err; } - res->dma_low_channel = r->start; - res->dma_high_channel = i; + this->dma_chans[0] = dma_chan; return 0; acquire_err: - pr_err("Can't acquire DMA channel %u\n", i); release_dma_channels(this); return -EINVAL; } @@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this, } static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; @@ -917,28 +907,31 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, mtd->ecc_stats.corrected += corrected; } - /* - * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for - * details about our policy for delivering the OOB. - * - * We fill the caller's buffer with set bits, and then copy the block - * mark to th caller's buffer. Note that, if block mark swapping was - * necessary, it has already been done, so we can rely on the first - * byte of the auxiliary buffer to contain the block mark. - */ - memset(chip->oob_poi, ~0, mtd->oobsize); - chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; + if (oob_required) { + /* + * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() + * for details about our policy for delivering the OOB. + * + * We fill the caller's buffer with set bits, and then copy the + * block mark to th caller's buffer. Note that, if block mark + * swapping was necessary, it has already been done, so we can + * rely on the first byte of the auxiliary buffer to contain + * the block mark. + */ + memset(chip->oob_poi, ~0, mtd->oobsize); + chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; - read_page_swap_end(this, buf, mtd->writesize, - this->payload_virt, this->payload_phys, - nfc_geo->payload_size, - payload_virt, payload_phys); + read_page_swap_end(this, buf, mtd->writesize, + this->payload_virt, this->payload_phys, + nfc_geo->payload_size, + payload_virt, payload_phys); + } exit_nfc: return ret; } -static void gpmi_ecc_write_page(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf) +static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; @@ -1077,7 +1070,7 @@ exit_auxiliary: * this driver. */ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { struct gpmi_nand_data *this = chip->priv; @@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, chip->oob_poi[0] = chip->read_byte(mtd); } - /* - * Return true, indicating that the next call to this function must send - * a command. - */ - return true; + return 0; } static int @@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) /* Write the first page of the current stride. 
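Stepping back to the acquire_dma_channels() rewrite above: with the platform-data path gone, the GPMI driver reads its single DMA channel number from the device tree property "fsl,gpmi-dma-channel" and requests one slave channel through the generic dmaengine filter, instead of walking a platform resource range. A condensed sketch of that pattern, assuming the gpmi_nand_data and gpmi_dma_filter definitions from this file; the helper name gpmi_get_dma_channel() is illustrative and the IRQ resource handling is trimmed:

    #include <linux/of.h>
    #include <linux/dmaengine.h>

    static int gpmi_get_dma_channel(struct platform_device *pdev,
                                    struct gpmi_nand_data *this)
    {
            struct device_node *dn = pdev->dev.of_node;
            dma_cap_mask_t mask;
            u32 dma_channel;

            /* Which APBH channel to use; gpmi_dma_filter() compares chan_id. */
            if (of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel))
                    return -EINVAL;
            this->private = (void *)(unsigned long)dma_channel;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            this->dma_chans[0] = dma_request_channel(mask, gpmi_dma_filter, this);
            return this->dma_chans[0] ? 0 : -ENODEV;
    }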
*/ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); - chip->ecc.write_page_raw(mtd, chip, buffer); + chip->ecc.write_page_raw(mtd, chip, buffer, 0); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); /* Wait for the write to finish. */ @@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) if (ret) return ret; + /* Adjust the ECC strength according to the chip. */ + this->nand.ecc.strength = this->bch_geometry.ecc_strength; + this->mtd.ecc_strength = this->bch_geometry.ecc_strength; + /* NAND boot init, depends on the gpmi_set_geometry(). */ return nand_boot_init(this); } @@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this) static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) { - struct gpmi_nand_platform_data *pdata = this->pdata; struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; + struct mtd_part_parser_data ppdata = {}; int ret; /* init current chip */ @@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) chip->options |= NAND_NO_SUBPAGE_WRITE; chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = 1; + chip->ecc.strength = 8; chip->ecc.layout = &gpmi_hw_ecclayout; /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ @@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) if (ret) goto err_out; - ret = nand_scan(mtd, pdata->max_chip_count); + ret = nand_scan(mtd, 1); if (ret) { pr_err("Chip scan failed\n"); goto err_out; } - ret = mtd_device_parse_register(mtd, NULL, NULL, - pdata->partitions, pdata->partition_count); + ppdata.of_node = this->pdev->dev.of_node; + ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (ret) goto err_out; return 0; @@ -1528,12 +1522,41 @@ err_out: return ret; } +static const struct platform_device_id gpmi_ids[] = { + { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, }, + { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, }, + { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, }, + {}, +}; + +static const struct of_device_id gpmi_nand_id_table[] = { + { + .compatible = "fsl,imx23-gpmi-nand", + .data = (void *)&gpmi_ids[IS_MX23] + }, { + .compatible = "fsl,imx28-gpmi-nand", + .data = (void *)&gpmi_ids[IS_MX28] + }, { + .compatible = "fsl,imx6q-gpmi-nand", + .data = (void *)&gpmi_ids[IS_MX6Q] + }, {} +}; +MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); + static int __devinit gpmi_nand_probe(struct platform_device *pdev) { - struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data; struct gpmi_nand_data *this; + const struct of_device_id *of_id; int ret; + of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); + if (of_id) { + pdev->id_entry = of_id->data; + } else { + pr_err("Failed to find the right device id.\n"); + return -ENOMEM; + } + this = kzalloc(sizeof(*this), GFP_KERNEL); if (!this) { pr_err("Failed to allocate per-device memory\n"); @@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev) platform_set_drvdata(pdev, this); this->pdev = pdev; this->dev = &pdev->dev; - this->pdata = pdata; - - if (pdata->platform_init) { - ret = pdata->platform_init(); - if (ret) - goto platform_init_error; - } ret = acquire_resources(this); if (ret) @@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev) exit_nfc_init: release_resources(this); -platform_init_error: exit_acquire_resources: platform_set_drvdata(pdev, NULL); kfree(this); @@ -1585,19 +1600,10 @@ static int 
__exit gpmi_nand_remove(struct platform_device *pdev) return 0; } -static const struct platform_device_id gpmi_ids[] = { - { - .name = "imx23-gpmi-nand", - .driver_data = IS_MX23, - }, { - .name = "imx28-gpmi-nand", - .driver_data = IS_MX28, - }, {}, -}; - static struct platform_driver gpmi_nand_driver = { .driver = { .name = "gpmi-nand", + .of_match_table = gpmi_nand_id_table, }, .probe = gpmi_nand_probe, .remove = __exit_p(gpmi_nand_remove), diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index ec6180d4ff8f..ce5daa160920 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h @@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *, #define STATUS_UNCORRECTABLE 0xfe /* Use the platform_id to distinguish different Archs. */ -#define IS_MX23 0x1 -#define IS_MX28 0x2 +#define IS_MX23 0x0 +#define IS_MX28 0x1 +#define IS_MX6Q 0x2 #define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23) #define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28) +#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q) #endif diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c index 9bf5ce5fa22d..50166e93ba96 100644 --- a/drivers/mtd/nand/h1910.c +++ b/drivers/mtd/nand/h1910.c @@ -124,7 +124,6 @@ static int __init h1910_init(void) /* 15 us command delay time */ this->chip_delay = 50; this->ecc.mode = NAND_ECC_SOFT; - this->options = NAND_NO_AUTOINCR; /* Scan to find existence of the device */ if (nand_scan(h1910_nand_mtd, 1)) { diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index e4147e8acb7c..a6fa884ae49b 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; chip->ecc.size = 512; chip->ecc.bytes = 9; - chip->ecc.strength = 2; - /* - * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a - * conservative guess, given 9 ecc bytes and reed-solomon alg. 
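A recurring theme in these driver hunks is that each controller now declares chip->ecc.strength explicitly: fsl_ifc reports 4 correctable bits per 512-byte step, gpmi-nand seeds 8 at init time and later overwrites it from the computed BCH geometry, and jz4740 replaces its conservative guess of 2 with 4. The pattern is simply to fill in the ECC geometry before nand_scan_tail() runs; a minimal sketch, mirroring the fsl_ifc numbers above:

    /* Advertise the hardware ECC geometry before nand_scan_tail(). */
    chip->ecc.mode     = NAND_ECC_HW;
    chip->ecc.size     = 512;  /* data bytes covered by one ECC step    */
    chip->ecc.bytes    = 8;    /* ECC bytes stored per step             */
    chip->ecc.strength = 4;    /* max correctable bitflips per ECC step */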
- */ + chip->ecc.strength = 4; if (pdata) chip->ecc.layout = pdata->ecc_layout; diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index c240cf1af961..c259c24d7986 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) chip->write_buf = mpc5121_nfc_write_buf; chip->verify_buf = mpc5121_nfc_verify_buf; chip->select_chip = mpc5121_nfc_select_chip; - chip->options = NAND_NO_AUTOINCR; chip->bbt_options = NAND_BBT_USE_FLASH; chip->ecc.mode = NAND_ECC_SOFT; diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 9e374e9bd296..c58e6a93f445 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -32,6 +32,8 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/completion.h> +#include <linux/of_device.h> +#include <linux/of_mtd.h> #include <asm/mach/flash.h> #include <mach/mxc_nand.h> @@ -140,13 +142,47 @@ #define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) +struct mxc_nand_host; + +struct mxc_nand_devtype_data { + void (*preset)(struct mtd_info *); + void (*send_cmd)(struct mxc_nand_host *, uint16_t, int); + void (*send_addr)(struct mxc_nand_host *, uint16_t, int); + void (*send_page)(struct mtd_info *, unsigned int); + void (*send_read_id)(struct mxc_nand_host *); + uint16_t (*get_dev_status)(struct mxc_nand_host *); + int (*check_int)(struct mxc_nand_host *); + void (*irq_control)(struct mxc_nand_host *, int); + u32 (*get_ecc_status)(struct mxc_nand_host *); + struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k; + void (*select_chip)(struct mtd_info *mtd, int chip); + int (*correct_data)(struct mtd_info *mtd, u_char *dat, + u_char *read_ecc, u_char *calc_ecc); + + /* + * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked + * (CONFIG1:INT_MSK is set). 
To handle this the driver uses + * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK + */ + int irqpending_quirk; + int needs_ip; + + size_t regs_offset; + size_t spare0_offset; + size_t axi_offset; + + int spare_len; + int eccbytes; + int eccsize; +}; + struct mxc_nand_host { struct mtd_info mtd; struct nand_chip nand; struct device *dev; - void *spare0; - void *main_area0; + void __iomem *spare0; + void __iomem *main_area0; void __iomem *base; void __iomem *regs; @@ -163,16 +199,9 @@ struct mxc_nand_host { uint8_t *data_buf; unsigned int buf_start; - int spare_len; - - void (*preset)(struct mtd_info *); - void (*send_cmd)(struct mxc_nand_host *, uint16_t, int); - void (*send_addr)(struct mxc_nand_host *, uint16_t, int); - void (*send_page)(struct mtd_info *, unsigned int); - void (*send_read_id)(struct mxc_nand_host *); - uint16_t (*get_dev_status)(struct mxc_nand_host *); - int (*check_int)(struct mxc_nand_host *); - void (*irq_control)(struct mxc_nand_host *, int); + + const struct mxc_nand_devtype_data *devtype_data; + struct mxc_nand_platform_data pdata; }; /* OOB placement block for use with hardware ecc generation */ @@ -242,21 +271,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = { } }; -static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; - -static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) -{ - struct mxc_nand_host *host = dev_id; - - if (!host->check_int(host)) - return IRQ_NONE; - - host->irq_control(host, 0); - - complete(&host->op_completion); - - return IRQ_HANDLED; -} +static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL }; static int check_int_v3(struct mxc_nand_host *host) { @@ -280,26 +295,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host) if (!(tmp & NFC_V1_V2_CONFIG2_INT)) return 0; - if (!cpu_is_mx21()) + if (!host->devtype_data->irqpending_quirk) writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); return 1; } -/* - * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit - * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the - * driver can enable/disable the irq line rather than simply masking the - * interrupts. - */ -static void irq_control_mx21(struct mxc_nand_host *host, int activate) -{ - if (activate) - enable_irq(host->irq); - else - disable_irq_nosync(host->irq); -} - static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) { uint16_t tmp; @@ -328,6 +329,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate) writel(tmp, NFC_V3_CONFIG2); } +static void irq_control(struct mxc_nand_host *host, int activate) +{ + if (host->devtype_data->irqpending_quirk) { + if (activate) + enable_irq(host->irq); + else + disable_irq_nosync(host->irq); + } else { + host->devtype_data->irq_control(host, activate); + } +} + +static u32 get_ecc_status_v1(struct mxc_nand_host *host) +{ + return readw(NFC_V1_V2_ECC_STATUS_RESULT); +} + +static u32 get_ecc_status_v2(struct mxc_nand_host *host) +{ + return readl(NFC_V1_V2_ECC_STATUS_RESULT); +} + +static u32 get_ecc_status_v3(struct mxc_nand_host *host) +{ + return readl(NFC_V3_ECC_STATUS_RESULT); +} + +static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) +{ + struct mxc_nand_host *host = dev_id; + + if (!host->devtype_data->check_int(host)) + return IRQ_NONE; + + irq_control(host, 0); + + complete(&host->op_completion); + + return IRQ_HANDLED; +} + /* This function polls the NANDFC to wait for the basic operation to * complete by checking the INT bit of config2 register. 
*/ @@ -336,14 +378,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq) int max_retries = 8000; if (useirq) { - if (!host->check_int(host)) { + if (!host->devtype_data->check_int(host)) { INIT_COMPLETION(host->op_completion); - host->irq_control(host, 1); + irq_control(host, 1); wait_for_completion(&host->op_completion); } } else { while (max_retries-- > 0) { - if (host->check_int(host)) + if (host->devtype_data->check_int(host)) break; udelay(1); @@ -374,7 +416,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) writew(cmd, NFC_V1_V2_FLASH_CMD); writew(NFC_CMD, NFC_V1_V2_CONFIG2); - if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { + if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) { int max_retries = 100; /* Reset completion is indicated by NFC_CONFIG2 */ /* being set to 0 */ @@ -433,13 +475,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops) wait_op_done(host, false); } -static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops) +static void send_page_v2(struct mtd_info *mtd, unsigned int ops) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + + /* NANDFC buffer 0 is used for page read/write */ + writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); + + writew(ops, NFC_V1_V2_CONFIG2); + + /* Wait for operation to complete */ + wait_op_done(host, true); +} + +static void send_page_v1(struct mtd_info *mtd, unsigned int ops) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; int bufs, i; - if (nfc_is_v1() && mtd->writesize > 512) + if (mtd->writesize > 512) bufs = 4; else bufs = 1; @@ -463,7 +519,7 @@ static void send_read_id_v3(struct mxc_nand_host *host) wait_op_done(host, true); - memcpy(host->data_buf, host->main_area0, 16); + memcpy_fromio(host->data_buf, host->main_area0, 16); } /* Request the NANDFC to perform a read of the NAND device ID. */ @@ -479,7 +535,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host) /* Wait for operation to complete */ wait_op_done(host, true); - memcpy(host->data_buf, host->main_area0, 16); + memcpy_fromio(host->data_buf, host->main_area0, 16); if (this->options & NAND_BUSWIDTH_16) { /* compress the ID info */ @@ -555,7 +611,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, * additional correction. 
2-Bit errors cannot be corrected by * HW ECC, so we need to return failure */ - uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); + uint16_t ecc_status = get_ecc_status_v1(host); if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); @@ -580,10 +636,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, no_subpages = mtd->writesize >> 9; - if (nfc_is_v21()) - ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT); - else - ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT); + ecc_stat = host->devtype_data->get_ecc_status(host); do { err = ecc_stat & ecc_bit_mask; @@ -616,7 +669,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd) /* Check for status request */ if (host->status_request) - return host->get_dev_status(host) & 0xFF; + return host->devtype_data->get_dev_status(host) & 0xFF; ret = *(uint8_t *)(host->data_buf + host->buf_start); host->buf_start++; @@ -682,7 +735,7 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd, /* This function is used by upper layer for select and * deselect of the NAND chip */ -static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) +static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; @@ -701,11 +754,30 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) clk_prepare_enable(host->clk); host->clk_act = 1; } +} - if (nfc_is_v21()) { - host->active_cs = chip; - writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); +static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + + if (chip == -1) { + /* Disable the NFC clock */ + if (host->clk_act) { + clk_disable(host->clk); + host->clk_act = 0; + } + return; + } + + if (!host->clk_act) { + /* Enable the NFC clock */ + clk_enable(host->clk); + host->clk_act = 1; } + + host->active_cs = chip; + writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); } /* @@ -718,23 +790,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) u16 i, j; u16 n = mtd->writesize >> 9; u8 *d = host->data_buf + mtd->writesize; - u8 *s = host->spare0; - u16 t = host->spare_len; + u8 __iomem *s = host->spare0; + u16 t = host->devtype_data->spare_len; j = (mtd->oobsize / n >> 1) << 1; if (bfrom) { for (i = 0; i < n - 1; i++) - memcpy(d + i * j, s + i * t, j); + memcpy_fromio(d + i * j, s + i * t, j); /* the last section */ - memcpy(d + i * j, s + i * t, mtd->oobsize - i * j); + memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j); } else { for (i = 0; i < n - 1; i++) - memcpy(&s[i * t], &d[i * j], j); + memcpy_toio(&s[i * t], &d[i * j], j); /* the last section */ - memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j); + memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j); } } @@ -751,34 +823,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) * perform a read/write buf operation, the saved column * address is used to index into the full page. 
*/ - host->send_addr(host, 0, page_addr == -1); + host->devtype_data->send_addr(host, 0, page_addr == -1); if (mtd->writesize > 512) /* another col addr cycle for 2k page */ - host->send_addr(host, 0, false); + host->devtype_data->send_addr(host, 0, false); } /* Write out page address, if necessary */ if (page_addr != -1) { /* paddr_0 - p_addr_7 */ - host->send_addr(host, (page_addr & 0xff), false); + host->devtype_data->send_addr(host, (page_addr & 0xff), false); if (mtd->writesize > 512) { if (mtd->size >= 0x10000000) { /* paddr_8 - paddr_15 */ - host->send_addr(host, (page_addr >> 8) & 0xff, false); - host->send_addr(host, (page_addr >> 16) & 0xff, true); + host->devtype_data->send_addr(host, + (page_addr >> 8) & 0xff, + false); + host->devtype_data->send_addr(host, + (page_addr >> 16) & 0xff, + true); } else /* paddr_8 - paddr_15 */ - host->send_addr(host, (page_addr >> 8) & 0xff, true); + host->devtype_data->send_addr(host, + (page_addr >> 8) & 0xff, true); } else { /* One more address cycle for higher density devices */ if (mtd->size >= 0x4000000) { /* paddr_8 - paddr_15 */ - host->send_addr(host, (page_addr >> 8) & 0xff, false); - host->send_addr(host, (page_addr >> 16) & 0xff, true); + host->devtype_data->send_addr(host, + (page_addr >> 8) & 0xff, + false); + host->devtype_data->send_addr(host, + (page_addr >> 16) & 0xff, + true); } else /* paddr_8 - paddr_15 */ - host->send_addr(host, (page_addr >> 8) & 0xff, true); + host->devtype_data->send_addr(host, + (page_addr >> 8) & 0xff, true); } } } @@ -800,7 +882,35 @@ static int get_eccsize(struct mtd_info *mtd) return 8; } -static void preset_v1_v2(struct mtd_info *mtd) +static void preset_v1(struct mtd_info *mtd) +{ + struct nand_chip *nand_chip = mtd->priv; + struct mxc_nand_host *host = nand_chip->priv; + uint16_t config1 = 0; + + if (nand_chip->ecc.mode == NAND_ECC_HW) + config1 |= NFC_V1_V2_CONFIG1_ECC_EN; + + if (!host->devtype_data->irqpending_quirk) + config1 |= NFC_V1_V2_CONFIG1_INT_MSK; + + host->eccsize = 1; + + writew(config1, NFC_V1_V2_CONFIG1); + /* preset operation */ + + /* Unlock the internal RAM Buffer */ + writew(0x2, NFC_V1_V2_CONFIG); + + /* Blocks to be unlocked */ + writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); + writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR); + + /* Unlock Block Command for given address range */ + writew(0x4, NFC_V1_V2_WRPROT); +} + +static void preset_v2(struct mtd_info *mtd) { struct nand_chip *nand_chip = mtd->priv; struct mxc_nand_host *host = nand_chip->priv; @@ -809,13 +919,12 @@ static void preset_v1_v2(struct mtd_info *mtd) if (nand_chip->ecc.mode == NAND_ECC_HW) config1 |= NFC_V1_V2_CONFIG1_ECC_EN; - if (nfc_is_v21()) - config1 |= NFC_V2_CONFIG1_FP_INT; + config1 |= NFC_V2_CONFIG1_FP_INT; - if (!cpu_is_mx21()) + if (!host->devtype_data->irqpending_quirk) config1 |= NFC_V1_V2_CONFIG1_INT_MSK; - if (nfc_is_v21() && mtd->writesize) { + if (mtd->writesize) { uint16_t pages_per_block = mtd->erasesize / mtd->writesize; host->eccsize = get_eccsize(mtd); @@ -834,20 +943,14 @@ static void preset_v1_v2(struct mtd_info *mtd) writew(0x2, NFC_V1_V2_CONFIG); /* Blocks to be unlocked */ - if (nfc_is_v21()) { - writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0); - writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1); - writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2); - writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3); - writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0); - writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1); - writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2); - writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); - } else if (nfc_is_v1()) { - writew(0x0, 
NFC_V1_UNLOCKSTART_BLKADDR); - writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR); - } else - BUG(); + writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0); + writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1); + writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2); + writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3); + writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0); + writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1); + writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2); + writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); /* Unlock Block Command for given address range */ writew(0x4, NFC_V1_V2_WRPROT); @@ -937,15 +1040,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, /* Command pre-processing step */ switch (command) { case NAND_CMD_RESET: - host->preset(mtd); - host->send_cmd(host, command, false); + host->devtype_data->preset(mtd); + host->devtype_data->send_cmd(host, command, false); break; case NAND_CMD_STATUS: host->buf_start = 0; host->status_request = true; - host->send_cmd(host, command, true); + host->devtype_data->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); break; @@ -958,15 +1061,16 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, command = NAND_CMD_READ0; /* only READ0 is valid */ - host->send_cmd(host, command, false); + host->devtype_data->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, page_addr); if (mtd->writesize > 512) - host->send_cmd(host, NAND_CMD_READSTART, true); + host->devtype_data->send_cmd(host, + NAND_CMD_READSTART, true); - host->send_page(mtd, NFC_OUTPUT); + host->devtype_data->send_page(mtd, NFC_OUTPUT); - memcpy(host->data_buf, host->main_area0, mtd->writesize); + memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize); copy_spare(mtd, true); break; @@ -977,28 +1081,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, host->buf_start = column; - host->send_cmd(host, command, false); + host->devtype_data->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, page_addr); break; case NAND_CMD_PAGEPROG: - memcpy(host->main_area0, host->data_buf, mtd->writesize); + memcpy_toio(host->main_area0, host->data_buf, mtd->writesize); copy_spare(mtd, false); - host->send_page(mtd, NFC_INPUT); - host->send_cmd(host, command, true); + host->devtype_data->send_page(mtd, NFC_INPUT); + host->devtype_data->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); break; case NAND_CMD_READID: - host->send_cmd(host, command, true); + host->devtype_data->send_cmd(host, command, true); mxc_do_addr_cycle(mtd, column, page_addr); - host->send_read_id(host); + host->devtype_data->send_read_id(host); host->buf_start = column; break; case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: - host->send_cmd(host, command, false); + host->devtype_data->send_cmd(host, command, false); mxc_do_addr_cycle(mtd, column, page_addr); break; @@ -1032,15 +1136,191 @@ static struct nand_bbt_descr bbt_mirror_descr = { .pattern = mirror_pattern, }; +/* v1 + irqpending_quirk: i.MX21 */ +static const struct mxc_nand_devtype_data imx21_nand_devtype_data = { + .preset = preset_v1, + .send_cmd = send_cmd_v1_v2, + .send_addr = send_addr_v1_v2, + .send_page = send_page_v1, + .send_read_id = send_read_id_v1_v2, + .get_dev_status = get_dev_status_v1_v2, + .check_int = check_int_v1_v2, + .irq_control = irq_control_v1_v2, + .get_ecc_status = get_ecc_status_v1, + .ecclayout_512 = &nandv1_hw_eccoob_smallpage, + .ecclayout_2k = &nandv1_hw_eccoob_largepage, + .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ + .select_chip = 
mxc_nand_select_chip_v1_v3, + .correct_data = mxc_nand_correct_data_v1, + .irqpending_quirk = 1, + .needs_ip = 0, + .regs_offset = 0xe00, + .spare0_offset = 0x800, + .spare_len = 16, + .eccbytes = 3, + .eccsize = 1, +}; + +/* v1 + !irqpending_quirk: i.MX27, i.MX31 */ +static const struct mxc_nand_devtype_data imx27_nand_devtype_data = { + .preset = preset_v1, + .send_cmd = send_cmd_v1_v2, + .send_addr = send_addr_v1_v2, + .send_page = send_page_v1, + .send_read_id = send_read_id_v1_v2, + .get_dev_status = get_dev_status_v1_v2, + .check_int = check_int_v1_v2, + .irq_control = irq_control_v1_v2, + .get_ecc_status = get_ecc_status_v1, + .ecclayout_512 = &nandv1_hw_eccoob_smallpage, + .ecclayout_2k = &nandv1_hw_eccoob_largepage, + .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */ + .select_chip = mxc_nand_select_chip_v1_v3, + .correct_data = mxc_nand_correct_data_v1, + .irqpending_quirk = 0, + .needs_ip = 0, + .regs_offset = 0xe00, + .spare0_offset = 0x800, + .axi_offset = 0, + .spare_len = 16, + .eccbytes = 3, + .eccsize = 1, +}; + +/* v21: i.MX25, i.MX35 */ +static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { + .preset = preset_v2, + .send_cmd = send_cmd_v1_v2, + .send_addr = send_addr_v1_v2, + .send_page = send_page_v2, + .send_read_id = send_read_id_v1_v2, + .get_dev_status = get_dev_status_v1_v2, + .check_int = check_int_v1_v2, + .irq_control = irq_control_v1_v2, + .get_ecc_status = get_ecc_status_v2, + .ecclayout_512 = &nandv2_hw_eccoob_smallpage, + .ecclayout_2k = &nandv2_hw_eccoob_largepage, + .ecclayout_4k = &nandv2_hw_eccoob_4k, + .select_chip = mxc_nand_select_chip_v2, + .correct_data = mxc_nand_correct_data_v2_v3, + .irqpending_quirk = 0, + .needs_ip = 0, + .regs_offset = 0x1e00, + .spare0_offset = 0x1000, + .axi_offset = 0, + .spare_len = 64, + .eccbytes = 9, + .eccsize = 0, +}; + +/* v3: i.MX51, i.MX53 */ +static const struct mxc_nand_devtype_data imx51_nand_devtype_data = { + .preset = preset_v3, + .send_cmd = send_cmd_v3, + .send_addr = send_addr_v3, + .send_page = send_page_v3, + .send_read_id = send_read_id_v3, + .get_dev_status = get_dev_status_v3, + .check_int = check_int_v3, + .irq_control = irq_control_v3, + .get_ecc_status = get_ecc_status_v3, + .ecclayout_512 = &nandv2_hw_eccoob_smallpage, + .ecclayout_2k = &nandv2_hw_eccoob_largepage, + .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */ + .select_chip = mxc_nand_select_chip_v1_v3, + .correct_data = mxc_nand_correct_data_v2_v3, + .irqpending_quirk = 0, + .needs_ip = 1, + .regs_offset = 0, + .spare0_offset = 0x1000, + .axi_offset = 0x1e00, + .spare_len = 64, + .eccbytes = 0, + .eccsize = 0, +}; + +#ifdef CONFIG_OF_MTD +static const struct of_device_id mxcnd_dt_ids[] = { + { + .compatible = "fsl,imx21-nand", + .data = &imx21_nand_devtype_data, + }, { + .compatible = "fsl,imx27-nand", + .data = &imx27_nand_devtype_data, + }, { + .compatible = "fsl,imx25-nand", + .data = &imx25_nand_devtype_data, + }, { + .compatible = "fsl,imx51-nand", + .data = &imx51_nand_devtype_data, + }, + { /* sentinel */ } +}; + +static int __init mxcnd_probe_dt(struct mxc_nand_host *host) +{ + struct device_node *np = host->dev->of_node; + struct mxc_nand_platform_data *pdata = &host->pdata; + const struct of_device_id *of_id = + of_match_device(mxcnd_dt_ids, host->dev); + int buswidth; + + if (!np) + return 1; + + if (of_get_nand_ecc_mode(np) >= 0) + pdata->hw_ecc = 1; + + pdata->flash_bbt = of_get_nand_on_flash_bbt(np); + + buswidth = of_get_nand_bus_width(np); + if (buswidth < 0) + return 
buswidth; + + pdata->width = buswidth / 8; + + host->devtype_data = of_id->data; + + return 0; +} +#else +static int __init mxcnd_probe_dt(struct mxc_nand_host *host) +{ + return 1; +} +#endif + +static int __init mxcnd_probe_pdata(struct mxc_nand_host *host) +{ + struct mxc_nand_platform_data *pdata = host->dev->platform_data; + + if (!pdata) + return -ENODEV; + + host->pdata = *pdata; + + if (nfc_is_v1()) { + if (cpu_is_mx21()) + host->devtype_data = &imx21_nand_devtype_data; + else + host->devtype_data = &imx27_nand_devtype_data; + } else if (nfc_is_v21()) { + host->devtype_data = &imx25_nand_devtype_data; + } else if (nfc_is_v3_2()) { + host->devtype_data = &imx51_nand_devtype_data; + } else + BUG(); + + return 0; +} + static int __init mxcnd_probe(struct platform_device *pdev) { struct nand_chip *this; struct mtd_info *mtd; - struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; struct mxc_nand_host *host; struct resource *res; int err = 0; - struct nand_ecclayout *oob_smallpage, *oob_largepage; /* Allocate memory for MTD device structure and private data */ host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE + @@ -1065,7 +1345,6 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->priv = host; this->dev_ready = mxc_nand_dev_ready; this->cmdfunc = mxc_nand_command; - this->select_chip = mxc_nand_select_chip; this->read_byte = mxc_nand_read_byte; this->read_word = mxc_nand_read_word; this->write_buf = mxc_nand_write_buf; @@ -1095,36 +1374,26 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->main_area0 = host->base; - if (nfc_is_v1() || nfc_is_v21()) { - host->preset = preset_v1_v2; - host->send_cmd = send_cmd_v1_v2; - host->send_addr = send_addr_v1_v2; - host->send_page = send_page_v1_v2; - host->send_read_id = send_read_id_v1_v2; - host->get_dev_status = get_dev_status_v1_v2; - host->check_int = check_int_v1_v2; - if (cpu_is_mx21()) - host->irq_control = irq_control_mx21; - else - host->irq_control = irq_control_v1_v2; - } + err = mxcnd_probe_dt(host); + if (err > 0) + err = mxcnd_probe_pdata(host); + if (err < 0) + goto eirq; - if (nfc_is_v21()) { - host->regs = host->base + 0x1e00; - host->spare0 = host->base + 0x1000; - host->spare_len = 64; - oob_smallpage = &nandv2_hw_eccoob_smallpage; - oob_largepage = &nandv2_hw_eccoob_largepage; - this->ecc.bytes = 9; - } else if (nfc_is_v1()) { - host->regs = host->base + 0xe00; - host->spare0 = host->base + 0x800; - host->spare_len = 16; - oob_smallpage = &nandv1_hw_eccoob_smallpage; - oob_largepage = &nandv1_hw_eccoob_largepage; - this->ecc.bytes = 3; - host->eccsize = 1; - } else if (nfc_is_v3_2()) { + if (host->devtype_data->regs_offset) + host->regs = host->base + host->devtype_data->regs_offset; + host->spare0 = host->base + host->devtype_data->spare0_offset; + if (host->devtype_data->axi_offset) + host->regs_axi = host->base + host->devtype_data->axi_offset; + + this->ecc.bytes = host->devtype_data->eccbytes; + host->eccsize = host->devtype_data->eccsize; + + this->select_chip = host->devtype_data->select_chip; + this->ecc.size = 512; + this->ecc.layout = host->devtype_data->ecclayout_512; + + if (host->devtype_data->needs_ip) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { err = -ENODEV; @@ -1135,42 +1404,22 @@ static int __init mxcnd_probe(struct platform_device *pdev) err = -ENOMEM; goto eirq; } - host->regs_axi = host->base + 0x1e00; - host->spare0 = host->base + 0x1000; - host->spare_len = 64; - host->preset = preset_v3; - host->send_cmd = send_cmd_v3; - 
host->send_addr = send_addr_v3; - host->send_page = send_page_v3; - host->send_read_id = send_read_id_v3; - host->check_int = check_int_v3; - host->get_dev_status = get_dev_status_v3; - host->irq_control = irq_control_v3; - oob_smallpage = &nandv2_hw_eccoob_smallpage; - oob_largepage = &nandv2_hw_eccoob_largepage; - } else - BUG(); - - this->ecc.size = 512; - this->ecc.layout = oob_smallpage; + } - if (pdata->hw_ecc) { + if (host->pdata.hw_ecc) { this->ecc.calculate = mxc_nand_calculate_ecc; this->ecc.hwctl = mxc_nand_enable_hwecc; - if (nfc_is_v1()) - this->ecc.correct = mxc_nand_correct_data_v1; - else - this->ecc.correct = mxc_nand_correct_data_v2_v3; + this->ecc.correct = host->devtype_data->correct_data; this->ecc.mode = NAND_ECC_HW; } else { this->ecc.mode = NAND_ECC_SOFT; } - /* NAND bus width determines access funtions used by upper layer */ - if (pdata->width == 2) + /* NAND bus width determines access functions used by upper layer */ + if (host->pdata.width == 2) this->options |= NAND_BUSWIDTH_16; - if (pdata->flash_bbt) { + if (host->pdata.flash_bbt) { this->bbt_td = &bbt_main_descr; this->bbt_md = &bbt_mirror_descr; /* update flash based bbt */ @@ -1182,28 +1431,25 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); /* - * mask the interrupt. For i.MX21 explicitely call - * irq_control_v1_v2 to use the mask bit. We can't call - * disable_irq_nosync() for an interrupt we do not own yet. + * Use host->devtype_data->irq_control() here instead of irq_control() + * because we must not disable_irq_nosync without having requested the + * irq. */ - if (cpu_is_mx21()) - irq_control_v1_v2(host, 0); - else - host->irq_control(host, 0); + host->devtype_data->irq_control(host, 0); err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); if (err) goto eirq; - host->irq_control(host, 0); - /* - * Now that the interrupt is disabled make sure the interrupt - * mask bit is cleared on i.MX21. Otherwise we can't read - * the interrupt status bit on this machine. + * Now that we "own" the interrupt make sure the interrupt mask bit is + * cleared on i.MX21. Otherwise we can't read the interrupt status bit + * on this machine. */ - if (cpu_is_mx21()) - irq_control_v1_v2(host, 1); + if (host->devtype_data->irqpending_quirk) { + disable_irq_nosync(host->irq); + host->devtype_data->irq_control(host, 1); + } /* first scan to find the device and get the page size */ if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) { @@ -1212,18 +1458,12 @@ static int __init mxcnd_probe(struct platform_device *pdev) } /* Call preset again, with correct writesize this time */ - host->preset(mtd); + host->devtype_data->preset(mtd); if (mtd->writesize == 2048) - this->ecc.layout = oob_largepage; - if (nfc_is_v21() && mtd->writesize == 4096) - this->ecc.layout = &nandv2_hw_eccoob_4k; - - /* second phase scan */ - if (nand_scan_tail(mtd)) { - err = -ENXIO; - goto escan; - } + this->ecc.layout = host->devtype_data->ecclayout_2k; + else if (mtd->writesize == 4096) + this->ecc.layout = host->devtype_data->ecclayout_4k; if (this->ecc.mode == NAND_ECC_HW) { if (nfc_is_v1()) @@ -1232,9 +1472,19 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->ecc.strength = (host->eccsize == 4) ? 
4 : 8; } + /* second phase scan */ + if (nand_scan_tail(mtd)) { + err = -ENXIO; + goto escan; + } + /* Register the partitions */ - mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts, - pdata->nr_parts); + mtd_device_parse_register(mtd, part_probes, + &(struct mtd_part_parser_data){ + .of_node = pdev->dev.of_node, + }, + host->pdata.parts, + host->pdata.nr_parts); platform_set_drvdata(pdev, host); @@ -1275,6 +1525,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev) static struct platform_driver mxcnd_driver = { .driver = { .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mxcnd_dt_ids), }, .remove = __devexit_p(mxcnd_remove), }; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 47b19c0bb070..d47586cf64ce 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock); * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * Not for syndrome calculating ECC controllers, which use a special oob layout. */ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { chip->read_buf(mtd, buf, mtd->writesize); - chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + if (oob_required) + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } @@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * We need a special oob layout and handling even when OOB isn't used. 
*/ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, - struct nand_chip *chip, - uint8_t *buf, int page) + struct nand_chip *chip, uint8_t *buf, + int oob_required, int page) { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read */ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; uint32_t *eccpos = chip->ecc.layout->eccpos; + unsigned int max_bitflips = 0; - chip->ecc.read_page_raw(mtd, chip, buf, page); + chip->ecc.read_page_raw(mtd, chip, buf, 1, page); for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) chip->ecc.calculate(mtd, p, &ecc_calc[i]); @@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, int stat; stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } - return 0; + return max_bitflips; } /** @@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, int datafrag_len, eccfrag_len, aligned_len, aligned_pos; int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; int index = 0; + unsigned int max_bitflips = 0; /* Column address within the page aligned to ECC size (256bytes) */ start_step = data_offs / chip->ecc.size; @@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } - return 0; + return max_bitflips; } /** @@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * Not for syndrome calculating ECC controllers which need a special oob layout. 
*/ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *ecc_calc = chip->buffers->ecccalc; uint8_t *ecc_code = chip->buffers->ecccode; uint32_t *eccpos = chip->ecc.layout->eccpos; + unsigned int max_bitflips = 0; for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { chip->ecc.hwctl(mtd, NAND_ECC_READ); @@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, int stat; stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } - return 0; + return max_bitflips; } /** @@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * Hardware ECC for large page chips, require OOB to be read first. For this @@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, * the data area, by overwriting the NAND manufacturer bad block markings. */ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf, int page) + struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, uint8_t *ecc_code = chip->buffers->ecccode; uint32_t *eccpos = chip->ecc.layout->eccpos; uint8_t *ecc_calc = chip->buffers->ecccalc; + unsigned int max_bitflips = 0; /* Read the OOB area first */ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); @@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, chip->ecc.calculate(mtd, p, &ecc_calc[i]); stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } - return 0; + return max_bitflips; } /** @@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * * The hw generator calculates the error syndrome automatically. Therefore we * need a special oob layout and handling. 
*/ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *oob = chip->oob_poi; + unsigned int max_bitflips = 0; for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { int stat; @@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, chip->read_buf(mtd, oob, eccbytes); stat = chip->ecc.correct(mtd, p, oob, NULL); - if (stat < 0) + if (stat < 0) { mtd->ecc_stats.failed++; - else + } else { mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } oob += eccbytes; @@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, if (i) chip->read_buf(mtd, oob, i); - return 0; + return max_bitflips; } /** @@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { - int chipnr, page, realpage, col, bytes, aligned; + int chipnr, page, realpage, col, bytes, aligned, oob_required; struct nand_chip *chip = mtd->priv; struct mtd_ecc_stats stats; - int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; - int sndcmd = 1; int ret = 0; uint32_t readlen = ops->len; uint32_t oobreadlen = ops->ooblen; @@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, mtd->oobavail : mtd->oobsize; uint8_t *bufpoi, *oob, *buf; + unsigned int max_bitflips = 0; stats = mtd->ecc_stats; @@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, buf = ops->datbuf; oob = ops->oobbuf; + oob_required = oob ? 1 : 0; while (1) { bytes = min(mtd->writesize - col, readlen); @@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, if (realpage != chip->pagebuf || oob) { bufpoi = aligned ? buf : chip->buffers->databuf; - if (likely(sndcmd)) { - chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); - sndcmd = 0; - } + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); - /* Now read the page into the buffer */ + /* + * Now read the page into the buffer. Absent an error, + * the read methods return max bitflips per ecc step. 
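All of the ecc.read_page implementations in this file now follow the same convention: rather than returning 0 unconditionally, they track the largest correction count seen on any single ECC step and return it, while uncorrectable steps still bump ecc_stats.failed; nand_do_read_ops() further down propagates the largest such value and remembers it in chip->pagebuf_bitflips so that reads served from the page cache report the same worst case. The recurring loop, distilled from the hunks above rather than quoted from any one of them:

    unsigned int max_bitflips = 0;
    int stat;

    for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
            stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
            if (stat < 0) {
                    mtd->ecc_stats.failed++;          /* uncorrectable step */
            } else {
                    mtd->ecc_stats.corrected += stat; /* bitflips fixed     */
                    max_bitflips = max_t(unsigned int, max_bitflips, stat);
            }
    }
    return max_bitflips; /* worst single ECC step on this page */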
+ */ if (unlikely(ops->mode == MTD_OPS_RAW)) - ret = chip->ecc.read_page_raw(mtd, chip, - bufpoi, page); + ret = chip->ecc.read_page_raw(mtd, chip, bufpoi, + oob_required, + page); else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi); else ret = chip->ecc.read_page(mtd, chip, bufpoi, - page); + oob_required, page); if (ret < 0) { if (!aligned) /* Invalidate page cache */ @@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, break; } + max_bitflips = max_t(unsigned int, max_bitflips, ret); + /* Transfer not aligned data */ if (!aligned) { if (!NAND_SUBPAGE_READ(chip) && !oob && !(mtd->ecc_stats.failed - stats.failed) && - (ops->mode != MTD_OPS_RAW)) + (ops->mode != MTD_OPS_RAW)) { chip->pagebuf = realpage; - else + chip->pagebuf_bitflips = ret; + } else { /* Invalidate page cache */ chip->pagebuf = -1; + } memcpy(buf, chip->buffers->databuf + col, bytes); } buf += bytes; if (unlikely(oob)) { - int toread = min(oobreadlen, max_oobsize); if (toread) { @@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, } if (!(chip->options & NAND_NO_READRDY)) { - /* - * Apply delay or wait for ready/busy pin. Do - * this before the AUTOINCR check, so no - * problems arise if a chip which does auto - * increment is marked as NOAUTOINCR by the - * board driver. - */ + /* Apply delay or wait for ready/busy pin */ if (!chip->dev_ready) udelay(chip->chip_delay); else @@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, } else { memcpy(buf, chip->buffers->databuf + col, bytes); buf += bytes; + max_bitflips = max_t(unsigned int, max_bitflips, + chip->pagebuf_bitflips); } readlen -= bytes; @@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, chip->select_chip(mtd, -1); chip->select_chip(mtd, chipnr); } - - /* - * Check, if the chip supports auto page increment or if we - * have hit a block boundary. - */ - if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) - sndcmd = 1; } ops->retlen = ops->len - (size_t) readlen; if (oob) ops->oobretlen = ops->ooblen - oobreadlen; - if (ret) + if (ret < 0) return ret; if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? 
-EUCLEAN : 0; + return max_bitflips; } /** @@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, * @mtd: mtd info structure * @chip: nand chip info structure * @page: page number to read - * @sndcmd: flag whether to issue read command or not */ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { - if (sndcmd) { - chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); - sndcmd = 0; - } + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); - return sndcmd; + return 0; } /** @@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @page: page number to read - * @sndcmd: flag whether to issue read command or not */ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { uint8_t *buf = chip->oob_poi; int length = mtd->oobsize; @@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, if (length > 0) chip->read_buf(mtd, bufpoi, length); - return 1; + return 0; } /** @@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd, static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { - int page, realpage, chipnr, sndcmd = 1; + int page, realpage, chipnr; struct nand_chip *chip = mtd->priv; struct mtd_ecc_stats stats; - int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; int readlen = ops->ooblen; int len; uint8_t *buf = ops->oobbuf; + int ret = 0; pr_debug("%s: from = 0x%08Lx, len = %i\n", __func__, (unsigned long long)from, readlen); @@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, while (1) { if (ops->mode == MTD_OPS_RAW) - sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd); + ret = chip->ecc.read_oob_raw(mtd, chip, page); else - sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); + ret = chip->ecc.read_oob(mtd, chip, page); + + if (ret < 0) + break; len = min(len, readlen); buf = nand_transfer_oob(chip, buf, ops, len); if (!(chip->options & NAND_NO_READRDY)) { - /* - * Apply delay or wait for ready/busy pin. Do this - * before the AUTOINCR check, so no problems arise if a - * chip which does auto increment is marked as - * NOAUTOINCR by the board driver. - */ + /* Apply delay or wait for ready/busy pin */ if (!chip->dev_ready) udelay(chip->chip_delay); else @@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, chip->select_chip(mtd, -1); chip->select_chip(mtd, chipnr); } - - /* - * Check, if the chip supports auto page increment or if we - * have hit a block boundary. - */ - if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) - sndcmd = 1; } - ops->oobretlen = ops->ooblen; + ops->oobretlen = ops->ooblen - readlen; + + if (ret < 0) + return ret; if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; @@ -1919,14 +1923,16 @@ out: * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB * * Not for syndrome calculating ECC controllers, which use a special oob layout. 
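The write path mirrors the read path: ecc.write_page and write_page_raw gain an oob_required flag, the raw writer below only pushes chip->oob_poi to the device when that flag is set, and nand_do_write_ops() derives the flag from whether the caller supplied an OOB buffer. The caller side, condensed from the hunks that follow:

    int oob_required = oob ? 1 : 0; /* did the caller pass ops->oobbuf? */

    ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
                           cached, (ops->mode == MTD_OPS_RAW));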
*/ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { chip->write_buf(mtd, buf, mtd->writesize); - chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + if (oob_required) + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); } /** @@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB * * We need a special oob layout and handling even when ECC isn't checked. */ static void nand_write_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { int eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB */ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, for (i = 0; i < chip->ecc.total; i++) chip->oob_poi[eccpos[i]] = ecc_calc[i]; - chip->ecc.write_page_raw(mtd, chip, buf); + chip->ecc.write_page_raw(mtd, chip, buf, 1); } /** @@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB */ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, * @mtd: mtd info structure * @chip: nand chip info structure * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB * * The hw generator calculates the error syndrome automatically. Therefore we * need a special oob layout and handling. */ static void nand_write_page_syndrome(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf) + struct nand_chip *chip, + const uint8_t *buf, int oob_required) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd, * @mtd: MTD device structure * @chip: NAND chip descriptor * @buf: the data to write + * @oob_required: must write chip->oob_poi to OOB * @page: page number to write * @cached: cached programming * @raw: use _raw version of write_page */ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int page, int cached, int raw) + const uint8_t *buf, int oob_required, int page, + int cached, int raw) { int status; chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); if (unlikely(raw)) - chip->ecc.write_page_raw(mtd, chip, buf); + chip->ecc.write_page_raw(mtd, chip, buf, oob_required); else - chip->ecc.write_page(mtd, chip, buf); + chip->ecc.write_page(mtd, chip, buf, oob_required); /* * Cached progamming disabled for now. 
Not sure if it's worth the @@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, if (chip->verify_buf(mtd, buf, mtd->writesize)) return -EIO; + + /* Make sure the next page prog is preceded by a status read */ + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); #endif return 0; } @@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, uint8_t *oob = ops->oobbuf; uint8_t *buf = ops->datbuf; int ret, subpage; + int oob_required = oob ? 1 : 0; ops->retlen = 0; if (!writelen) @@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, memset(chip->oob_poi, 0xff, mtd->oobsize); } - ret = chip->write_page(mtd, chip, wbuf, page, cached, - (ops->mode == MTD_OPS_RAW)); + ret = chip->write_page(mtd, chip, wbuf, oob_required, page, + cached, (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, *busw = NAND_BUSWIDTH_16; chip->options &= ~NAND_CHIPOPTIONS_MSK; - chip->options |= (NAND_NO_READRDY | - NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK; + chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK; pr_info("ONFI flash detected\n"); return 1; @@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; ident_done: - /* - * Set chip as a default. Board drivers can override it, if necessary. - */ - chip->options |= NAND_NO_AUTOINCR; - /* Try to identify manufacturer */ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { if (nand_manuf_ids[maf_idx].id == *maf_id) @@ -3154,10 +3165,11 @@ ident_done: if (mtd->writesize > 512 && chip->cmdfunc == nand_command) chip->cmdfunc = nand_command_lp; - pr_info("NAND device: Manufacturer ID:" - " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, - nand_manuf_ids[maf_idx].name, - chip->onfi_version ? chip->onfi_params.model : type->name); + pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," + " page size: %d, OOB size: %d\n", + *maf_id, *dev_id, nand_manuf_ids[maf_idx].name, + chip->onfi_version ? 
chip->onfi_params.model : type->name, + mtd->writesize, mtd->oobsize); return type; } @@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd) if (!chip->ecc.write_oob) chip->ecc.write_oob = nand_write_oob_syndrome; - if (mtd->writesize >= chip->ecc.size) + if (mtd->writesize >= chip->ecc.size) { + if (!chip->ecc.strength) { + pr_warn("Driver must set ecc.strength when using hardware ECC\n"); + BUG(); + } break; + } pr_warn("%d byte HW ECC not possible on " "%d byte page size, fallback to SW ECC\n", chip->ecc.size, mtd->writesize); @@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd) BUG(); } chip->ecc.strength = - chip->ecc.bytes*8 / fls(8*chip->ecc.size); + chip->ecc.bytes * 8 / fls(8 * chip->ecc.size); break; case NAND_ECC_NONE: @@ -3483,7 +3500,7 @@ int nand_scan_tail(struct mtd_info *mtd) /* propagate ecc info to mtd_info */ mtd->ecclayout = chip->ecc.layout; - mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps; + mtd->ecc_strength = chip->ecc.strength; /* Check, if we should skip the bad block table scan */ if (chip->options & NAND_SKIP_BBTSCAN) diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 20a112f591fe..30d1319ff065 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, buf += mtd->oobsize + mtd->writesize; len -= mtd->writesize; + offs += mtd->writesize; } return 0; } diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index af4fe8ca7b5e..621b70b7a159 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = { * These are the new chips with large page size. The pagesize and the * erasesize is determined from the extended id bytes */ -#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR) +#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY) #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ @@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = { * writes possible, but not implemented now */ {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, - NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY | - BBT_AUTO_REFRESH - }, + NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, {NULL,} }; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 261f478f8cc3..6cc8fbfabb8e 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define OPT_PAGE512 0x00000002 /* 512-byte page chips */ #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ #define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ -#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */ #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ #define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ @@ -594,7 +593,7 @@ static int init_nandsim(struct mtd_info *mtd) ns->options |= OPT_PAGE256; } else if (ns->geom.pgsz == 512) { - ns->options |= (OPT_PAGE512 | OPT_AUTOINCR); + ns->options |= OPT_PAGE512; if (ns->busw == 8) ns->options |= OPT_PAGE512_8BIT; } else if (ns->geom.pgsz == 2048) { @@ -663,8 +662,6 @@ static int init_nandsim(struct mtd_info *mtd) for (i = 0; nand_flash_ids[i].name != NULL; 
i++) { if (second_id_byte != nand_flash_ids[i].id) continue; - if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR)) - ns->options |= OPT_AUTOINCR; } if (ns->busw == 16) @@ -1936,20 +1933,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd) if (ns->regs.count == ns->regs.num) { NS_DBG("read_byte: all bytes were read\n"); - /* - * The OPT_AUTOINCR allows to read next consecutive pages without - * new read operation cycle. - */ - if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) { - ns->regs.count = 0; - if (ns->regs.row + 1 < ns->geom.pgnum) - ns->regs.row += 1; - NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row); - do_state_action(ns, ACTION_CPY); - } - else if (NS_STATE(ns->nxstate) == STATE_READY) + if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); - } return outb; @@ -2203,14 +2188,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) ns->regs.count += len; if (ns->regs.count == ns->regs.num) { - if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) { - ns->regs.count = 0; - if (ns->regs.row + 1 < ns->geom.pgnum) - ns->regs.row += 1; - NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row); - do_state_action(ns, ACTION_CPY); - } - else if (NS_STATE(ns->nxstate) == STATE_READY) + if (NS_STATE(ns->nxstate) == STATE_READY) switch_state(ns); } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index c2b0bba9d8b3..d7f681d0c9b9 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -21,6 +21,10 @@ #include <linux/io.h> #include <linux/slab.h> +#ifdef CONFIG_MTD_NAND_OMAP_BCH +#include <linux/bch.h> +#endif + #include <plat/dma.h> #include <plat/gpmc.h> #include <plat/nand.h> @@ -127,6 +131,11 @@ struct omap_nand_info { } iomode; u_char *buf; int buf_len; + +#ifdef CONFIG_MTD_NAND_OMAP_BCH + struct bch_control *bch; + struct nand_ecclayout ecclayout; +#endif }; /** @@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ - goto out_copy; + goto out_copy_unmap; init_completion(&info->comp); @@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); return 0; +out_copy_unmap: + dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? 
omap_read_buf16(mtd, (u_char *) addr, len) @@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); unsigned long timeo = jiffies; - int status = NAND_STATUS_FAIL, state = this->state; + int status, state = this->state; if (state == FL_ERASING) timeo += (HZ * 400) / 1000; @@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) break; cond_resched(); } + + status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA); return status; } @@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd) return 1; } +#ifdef CONFIG_MTD_NAND_OMAP_BCH + +/** + * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction + * @mtd: MTD device structure + * @mode: Read/Write mode + */ +static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode) +{ + int nerrors; + unsigned int dev_width; + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + struct nand_chip *chip = mtd->priv; + + nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4; + dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; + /* + * Program GPMC to perform correction on one 512-byte sector at a time. + * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and + * gives a slight (5%) performance gain (but requires additional code). + */ + (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors); +} + +/** + * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + */ +static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code); +} + +/** + * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + */ +static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code); +} + +/** + * omap3_correct_data_bch - Decode received data and correct errors + * @mtd: MTD device structure + * @data: page data + * @read_ecc: ecc read from nand flash + * @calc_ecc: ecc read from HW ECC registers + */ +static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data, + u_char *read_ecc, u_char *calc_ecc) +{ + int i, count; + /* cannot correct more than 8 errors */ + unsigned int errloc[8]; + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + + count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL, + errloc); + if (count > 0) { + /* correct errors */ + for (i = 0; i < count; i++) { + /* correct data only, not ecc bytes */ + if (errloc[i] < 8*512) + data[errloc[i]/8] ^= 1 << (errloc[i] & 7); + pr_debug("corrected bitflip %u\n", errloc[i]); + } + } else if (count < 0) { + pr_err("ecc unrecoverable error\n"); + } + return count; +} + +/** + * omap3_free_bch - Release BCH ecc resources + * @mtd: MTD device structure + */ +static void omap3_free_bch(struct mtd_info *mtd) +{ + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + if (info->bch) { + free_bch(info->bch); + info->bch = 
NULL; + } +} + +/** + * omap3_init_bch - Initialize BCH ECC + * @mtd: MTD device structure + * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW) + */ +static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) +{ + int ret, max_errors; + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); +#ifdef CONFIG_MTD_NAND_OMAP_BCH8 + const int hw_errors = 8; +#else + const int hw_errors = 4; +#endif + info->bch = NULL; + + max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4; + if (max_errors != hw_errors) { + pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported", + max_errors, hw_errors); + goto fail; + } + + /* initialize GPMC BCH engine */ + ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors); + if (ret) + goto fail; + + /* software bch library is only used to detect and locate errors */ + info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */); + if (!info->bch) + goto fail; + + info->nand.ecc.size = 512; + info->nand.ecc.hwctl = omap3_enable_hwecc_bch; + info->nand.ecc.correct = omap3_correct_data_bch; + info->nand.ecc.mode = NAND_ECC_HW; + + /* + * The number of corrected errors in an ecc block that will trigger + * block scrubbing defaults to the ecc strength (4 or 8). + * Set mtd->bitflip_threshold here to define a custom threshold. + */ + + if (max_errors == 8) { + info->nand.ecc.strength = 8; + info->nand.ecc.bytes = 13; + info->nand.ecc.calculate = omap3_calculate_ecc_bch8; + } else { + info->nand.ecc.strength = 4; + info->nand.ecc.bytes = 7; + info->nand.ecc.calculate = omap3_calculate_ecc_bch4; + } + + pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors); + return 0; +fail: + omap3_free_bch(mtd); + return -1; +} + +/** + * omap3_init_bch_tail - Build an oob layout for BCH ECC correction. 
+ * @mtd: MTD device structure + */ +static int omap3_init_bch_tail(struct mtd_info *mtd) +{ + int i, steps; + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, + mtd); + struct nand_ecclayout *layout = &info->ecclayout; + + /* build oob layout */ + steps = mtd->writesize/info->nand.ecc.size; + layout->eccbytes = steps*info->nand.ecc.bytes; + + /* do not bother creating special oob layouts for small page devices */ + if (mtd->oobsize < 64) { + pr_err("BCH ecc is not supported on small page devices\n"); + goto fail; + } + + /* reserve 2 bytes for bad block marker */ + if (layout->eccbytes+2 > mtd->oobsize) { + pr_err("no oob layout available for oobsize %d eccbytes %u\n", + mtd->oobsize, layout->eccbytes); + goto fail; + } + + /* put ecc bytes at oob tail */ + for (i = 0; i < layout->eccbytes; i++) + layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; + + layout->oobfree[0].offset = 2; + layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; + info->nand.ecc.layout = layout; + + if (!(info->nand.options & NAND_BUSWIDTH_16)) + info->nand.badblock_pattern = &bb_descrip_flashbased; + return 0; +fail: + omap3_free_bch(mtd); + return -1; +} + +#else +static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) +{ + pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n"); + return -1; +} +static int omap3_init_bch_tail(struct mtd_info *mtd) +{ + return -1; +} +static void omap3_free_bch(struct mtd_info *mtd) +{ +} +#endif /* CONFIG_MTD_NAND_OMAP_BCH */ + static int __devinit omap_nand_probe(struct platform_device *pdev) { struct omap_nand_info *info; @@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->nand.ecc.hwctl = omap_enable_hwecc; info->nand.ecc.correct = omap_correct_data; info->nand.ecc.mode = NAND_ECC_HW; + } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) || + (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) { + err = omap3_init_bch(&info->mtd, pdata->ecc_opt); + if (err) { + err = -EINVAL; + goto out_release_mem_region; + } } /* DIP switches on some boards change between 8 and 16 bit @@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) (offset + omap_oobinfo.eccbytes); info->nand.ecc.layout = &omap_oobinfo; + } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) || + (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) { + /* build OOB layout for BCH ECC correction */ + err = omap3_init_bch_tail(&info->mtd); + if (err) { + err = -EINVAL; + goto out_release_mem_region; + } } /* second phase scan */ @@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev) struct mtd_info *mtd = platform_get_drvdata(pdev); struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); if (info->dma_ch != -1) diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 974dbf8251c9..1440e51cedcc 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev) chip->ecc.mode = NAND_ECC_SOFT; /* Enable the following for a flash based bad block table */ - chip->options = NAND_NO_AUTOINCR; chip->bbt_options = NAND_BBT_USE_FLASH; /* Scan to find existence of the device */ diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 6404e6e81b10..1bcb52040422 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -23,14 +23,18 @@ struct plat_nand_data { 
void __iomem *io_base; }; +static const char *part_probe_types[] = { "cmdlinepart", NULL }; + /* * Probe for the NAND device. */ static int __devinit plat_nand_probe(struct platform_device *pdev) { struct platform_nand_data *pdata = pdev->dev.platform_data; + struct mtd_part_parser_data ppdata; struct plat_nand_data *data; struct resource *res; + const char **part_types; int err = 0; if (pdata->chip.nr_chips < 1) { @@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) data->chip.select_chip = pdata->ctrl.select_chip; data->chip.write_buf = pdata->ctrl.write_buf; data->chip.read_buf = pdata->ctrl.read_buf; + data->chip.read_byte = pdata->ctrl.read_byte; data->chip.chip_delay = pdata->chip.chip_delay; data->chip.options |= pdata->chip.options; data->chip.bbt_options |= pdata->chip.bbt_options; @@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) goto out; } - err = mtd_device_parse_register(&data->mtd, - pdata->chip.part_probe_types, NULL, + part_types = pdata->chip.part_probe_types ? : part_probe_types; + + ppdata.of_node = pdev->dev.of_node; + err = mtd_device_parse_register(&data->mtd, part_types, &ppdata, pdata->chip.partitions, pdata->chip.nr_partitions); @@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id plat_nand_match[] = { + { .compatible = "gen_nand" }, + {}, +}; +MODULE_DEVICE_TABLE(of, plat_nand_match); + static struct platform_driver plat_nand_driver = { - .probe = plat_nand_probe, - .remove = __devexit_p(plat_nand_remove), - .driver = { - .name = "gen_nand", - .owner = THIS_MODULE, + .probe = plat_nand_probe, + .remove = __devexit_p(plat_nand_remove), + .driver = { + .name = "gen_nand", + .owner = THIS_MODULE, + .of_match_table = plat_nand_match, }, }; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index def50caa6f84..252aaefcacfa 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, } static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, - struct nand_chip *chip, const uint8_t *buf) + struct nand_chip *chip, const uint8_t *buf, int oob_required) { chip->write_buf(mtd, buf, mtd->writesize); chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); } static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, - struct nand_chip *chip, uint8_t *buf, int page) + struct nand_chip *chip, uint8_t *buf, int oob_required, + int page) { struct pxa3xx_nand_host *host = mtd->priv; struct pxa3xx_nand_info *info = host->info_data; @@ -1004,7 +1005,6 @@ KEEP_CONFIG: chip->ecc.size = host->page_size; chip->ecc.strength = 1; - chip->options = NAND_NO_AUTOINCR; chip->options |= NAND_NO_READRDY; if (host->reg_ndcr & NDCR_DWIDTH_M) chip->options |= NAND_BUSWIDTH_16; diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index c2040187c813..8cb627751c9c 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c @@ -539,14 +539,11 @@ exit: * nand_read_oob_syndrome assumes we can send column address - we can't */ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd) + int page) { - if (sndcmd) { - chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); - sndcmd = 0; - } + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); - return sndcmd; + return 0; } /* @@ -1104,18 +1101,7 @@ static struct pci_driver 
r852_pci_driver = { .driver.pm = &r852_pm_ops, }; -static __init int r852_module_init(void) -{ - return pci_register_driver(&r852_pci_driver); -} - -static void __exit r852_module_exit(void) -{ - pci_unregister_driver(&r852_pci_driver); -} - -module_init(r852_module_init); -module_exit(r852_module_exit); +module_pci_driver(r852_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index e9b2b260de3a..aa9b8a5e0b8f 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va } static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page) + uint8_t *buf, int oob_required, int page) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, if (flctl->hwecc_cant_correct[i]) mtd->ecc_stats.failed++; else - mtd->ecc_stats.corrected += 0; + mtd->ecc_stats.corrected += 0; /* FIXME */ } return 0; } static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf) + const uint8_t *buf, int oob_required) { int i, eccsize = chip->ecc.size; int eccbytes = chip->ecc.bytes; @@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev) flctl->hwecc = pdata->has_hwecc; flctl->holden = pdata->use_holden; - nand->options = NAND_NO_AUTOINCR; - /* Set address of hardware control function */ /* 20 us command delay time */ nand->chip_delay = 20; diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c index 774c3c266713..082bcdcd6bcf 100644 --- a/drivers/mtd/nand/sm_common.c +++ b/drivers/mtd/nand/sm_common.c @@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = { {NULL,} }; -#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD) static struct nand_flash_dev nand_xd_flash_ids[] = { {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, - {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM}, - {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM}, - {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM}, - {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM}, + {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD}, + {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD}, + {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD}, + {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD}, {NULL,} }; diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index b3ce12ef359e..7153e0d27101 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; + /* return max bitflips per ecc step; ONENANDs correct 1 bit only */ + return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0; } /** @@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? 
-EUCLEAN : 0; + /* return max bitflips per ecc step; ONENANDs correct 1 bit only */ + return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0; } /** diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 97f947b3d94a..2933d08b036e 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) length = status & BCOM_FEC_RX_BD_LEN_MASK; skb_put(rskb, length - 4); /* length without CRC32 */ rskb->protocol = eth_type_trans(rskb, dev); - if (!skb_defer_rx_timestamp(skb)) + if (!skb_defer_rx_timestamp(rskb)) netif_rx(rskb); spin_lock(&priv->lock); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 95731c841044..7483ca0a6282 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, spin_lock_irqsave(&adapter->stats_lock, irq_flags); e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); + length, mapped); spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); length--; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index bbf70ba367da..238ab2f8a5e7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -165,14 +165,14 @@ #define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ /* Intel Rapid Start Technology Support */ -#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) #define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 #define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) -#define I217_SxCTRL_MASK 0x1000 +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 #define I217_CGFREG PHY_REG(772, 29) -#define I217_CGFREG_MASK 0x0002 +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) -#define I217_MEMPWR_MASK 0x0010 +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C @@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * power good. */ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); - phy_reg |= I217_SxCTRL_MASK; + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); /* Disable the SMB release on LCD reset. 
*/ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); - phy_reg &= ~I217_MEMPWR; + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); } @@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * Support */ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); - phy_reg |= I217_CGFREG_MASK; + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: @@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); if (ret_val) goto release; - phy_reg |= I217_MEMPWR_MASK; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); /* Disable Proxy */ @@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); if (ret_val) goto release; - phy_reg &= ~I217_CGFREG_MASK; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: if (ret_val) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 1bcead1fa2f6..842c8ce9494e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = NULL + .wrapper = mlx4_QUERY_FW_wrapper }, { .opcode = MLX4_CMD_QUERY_HCA, @@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = NULL + .wrapper = mlx4_QUERY_DEV_CAP_wrapper }, { .opcode = MLX4_CMD_QUERY_FUNC_CAP, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 988b2424e1c6..69ba57270481 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; struct mlx4_en_priv *priv; - if (!mdev->pndev[port]) - return; - - priv = netdev_priv(mdev->pndev[port]); switch (event) { case MLX4_DEV_EVENT_PORT_UP: case MLX4_DEV_EVENT_PORT_DOWN: + if (!mdev->pndev[port]) + return; + priv = netdev_priv(mdev->pndev[port]); /* To prevent races, we poll the link state in a separate task rather than changing it here */ priv->link_state = event; @@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, break; default: - mlx4_warn(mdev, "Unhandled event: %d\n", event); + if (port < 1 || port > dev->caps.num_ports || + !mdev->pndev[port]) + return; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 3b6f8efbf141..bce98d9c0039 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); - if (flr_slave > dev->num_slaves) { + if (flr_slave >= dev->num_slaves) { mlx4_warn(dev, "Got FLR for unknown function: %d\n", flr_slave); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 68f5cd6cb3c7..9c83bb8151ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -412,7 +412,7 @@ int 
mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, - MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; @@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev_cap->num_ports; ++i) { err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, - MLX4_CMD_TIME_CLASS_B, - !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; @@ -669,6 +668,28 @@ out: return err; } +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + u8 field; + + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* For guests, report Blueflame disabled */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); + + return 0; +} + int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); + if (mlx4_is_slave(dev)) + goto out; + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); dev->caps.function = lg; @@ -927,6 +951,27 @@ out: return err; } +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u8 *outbuf; + int err; + + outbuf = outbox->buf; + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* for slaves, zero out everything except FW version */ + outbuf[0] = outbuf[1] = 0; + memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); + return 0; +} + static void get_board_id(void *vsd, char *board_id) { int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2e024a68fa81..ee6f4fe00837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -142,12 +142,6 @@ struct mlx4_port_config { struct pci_dev *pdev; }; -static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) -{ - return dev->caps.reserved_eqs + - MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); -} - int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } dev->caps.num_ports = dev_cap->num_ports; + dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; @@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; memset(&dev_cap, 0, sizeof(dev_cap)); + dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); return err; } + err = mlx4_QUERY_FW(dev); + if (err) + mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); + page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA 
minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { @@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.num_mgms = 0; dev->caps.num_amgms = 0; - for (i = 1; i <= dev->caps.num_ports; ++i) - dev->caps.port_mask[i] = dev->caps.port_type[i]; - if (dev->caps.num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, " "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; } + for (i = 1; i <= dev->caps.num_ports; ++i) + dev->caps.port_mask[i] = dev->caps.port_type[i]; + if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { @@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } -#if 0 - mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); - mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n", - dev->caps.num_uars, dev->caps.reserved_uars, - dev->caps.uar_page_size * dev->caps.num_uars, - pci_resource_len(dev->pdev, 2)); - mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, - dev->caps.reserved_eqs); - mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", - dev->caps.num_pds, dev->caps.reserved_pds, - dev->caps.slave_pd_shift, dev->caps.pd_base); -#endif return 0; } @@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; + num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * @@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, } - num_eqs = (mlx4_is_master(dev)) ? - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; + num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 86b6e5a2fabf..e5d20220762c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev); void mlx4_free_resource_tracker(struct mlx4_dev *dev, enum mlx4_res_tracker_free_type type); +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 06e5adeb76f7..b83bc928d52a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_AUXC].num = request->num_qp; profile[MLX4_RES_SRQ].num = request->num_srq; profile[MLX4_RES_CQ].num = request->num_cq; - profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 
+ dev->phys_caps.num_phys_eqs : + min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); @@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->log_num_cqs = profile[i].log_num; break; case MLX4_RES_EQ: - dev->caps.num_eqs = profile[i].num; + dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, + MAX_MSIX)); init_hca->eqc_base = profile[i].start; - init_hca->log_num_eqs = profile[i].log_num; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); break; case MLX4_RES_DMPT: dev->caps.num_mpts = profile[i].num; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 5eef290997f9..995d0cfc4c06 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp) cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cpw16(MultiIntr, 0); cpw8_f(Cfg9346, Cfg9346_Lock); @@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len) static void eeprom_cmd_end(void __iomem *ee_addr) { - writeb (~EE_CS, ee_addr); + writeb(0, ee_addr); eeprom_delay (); } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 03df076ed596..1d83565cc6af 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l } /* Terminate the EEPROM access. 
*/ - RTL_W8 (Cfg9346, ~EE_CS); + RTL_W8(Cfg9346, 0); eeprom_delay (); return retval; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 00b4f56a671c..9757ce3543a0 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6345,6 +6345,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev) cancel_work_sync(&tp->wk.work); + netif_napi_del(&tp->napi); + unregister_netdev(dev); rtl_release_firmware(tp); @@ -6668,6 +6670,7 @@ out: return rc; err_out_msi_4: + netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); iounmap(ioaddr); err_out_free_res_3: diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index add1064f755d..03c2d8d653df 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return skb->len > 0; } +static void mcs7830_status(struct usbnet *dev, struct urb *urb) +{ + u8 *buf = urb->transfer_buffer; + bool link; + + if (urb->actual_length < 16) + return; + + link = !(buf[1] & 0x20); + if (netif_carrier_ok(dev->net) != link) { + if (link) { + netif_carrier_on(dev->net); + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + } else + netif_carrier_off(dev->net); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } +} + static const struct driver_info moschip_info = { .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_LINK_INTR, + .status = mcs7830_status, .in = 1, .out = 2, }; @@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = { .description = "Sitecom LN-30 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_LINK_INTR, + .status = mcs7830_status, .in = 1, .out = 2, }; diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index b8e01c3eaa95..b26395d16347 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/slab.h> +#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/pinconf.h> @@ -1688,18 +1689,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = { .owner = THIS_MODULE, }; +static const struct of_device_id nmk_pinctrl_match[] = { + { + .compatible = "stericsson,nmk_pinctrl", + .data = (void *)PINCTRL_NMK_DB8500, + }, + {}, +}; + static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) { const struct platform_device_id *platid = platform_get_device_id(pdev); + struct device_node *np = pdev->dev.of_node; struct nmk_pinctrl *npct; + unsigned int version = 0; int i; npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL); if (!npct) return -ENOMEM; + if (platid) + version = platid->driver_data; + else if (np) + version = (unsigned int) + of_match_device(nmk_pinctrl_match, &pdev->dev)->data; + /* Poke in other ASIC variants here */ - if (platid->driver_data == PINCTRL_NMK_DB8500) + if (version == PINCTRL_NMK_DB8500) nmk_pinctrl_db8500_init(&npct->soc); /* @@ -1758,6 +1775,7 @@ static struct platform_driver nmk_pinctrl_driver = { .driver = { .owner = THIS_MODULE, .name = "pinctrl-nomadik", + .of_match_table = nmk_pinctrl_match, }, .probe = nmk_pinctrl_probe, .id_table = nmk_pinctrl_id, diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c 
index c1a3fd8e1243..ce875dc365e5 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), }, }, + { + .callback = video_set_backlight_video_vendor, + .ident = "Acer Extensa 5235", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"), + }, + }, + { + .callback = video_set_backlight_video_vendor, + .ident = "Acer TravelMate 5760", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"), + }, + }, + { + .callback = video_set_backlight_video_vendor, + .ident = "Acer Aspire 5750", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), + }, + }, {} }; diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 8a582bdfdc76..694a15a56230 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd) struct apple_gmux_data *gmux_data = bl_get_data(bd); u32 brightness = bd->props.brightness; + if (bd->props.state & BL_CORE_SUSPENDED) + return 0; + /* * Older gmux versions require writing out lower bytes first then * setting the upper byte to 0 to flush the values. Newer versions @@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd) } static const struct backlight_ops gmux_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, .get_brightness = gmux_get_brightness, .update_status = gmux_update_status, }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index e6c08ee8d46c..5f78aac9b163 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -21,7 +21,6 @@ #include <linux/err.h> #include <linux/dmi.h> #include <linux/io.h> -#include <linux/rfkill.h> #include <linux/power_supply.h> #include <linux/acpi.h> #include <linux/mm.h> @@ -90,11 +89,8 @@ static struct platform_driver platform_driver = { static struct platform_device *platform_device; static struct backlight_device *dell_backlight_device; -static struct rfkill *wifi_rfkill; -static struct rfkill *bluetooth_rfkill; -static struct rfkill *wwan_rfkill; -static const struct dmi_system_id __initdata dell_device_table[] = { +static const struct dmi_system_id dell_device_table[] __initconst = { { .ident = "Dell laptop", .matches = { @@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = { }; MODULE_DEVICE_TABLE(dmi, dell_device_table); -static struct dmi_system_id __devinitdata dell_blacklist[] = { - /* Supported by compal-laptop */ - { - .ident = "Dell Mini 9", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"), - }, - }, +static struct dmi_system_id __devinitdata dell_quirks[] = { { - .ident = "Dell Mini 10", + .callback = dmi_matched, + .ident = "Dell Vostro V130", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"), }, + .driver_data = &quirk_dell_vostro_v130, }, { - .ident = "Dell Mini 10v", + .callback = dmi_matched, + .ident = "Dell Vostro V131", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), }, + .driver_data = &quirk_dell_vostro_v130, }, { - .ident = "Dell Mini 
1012", + .callback = dmi_matched, + .ident = "Dell Vostro 3350", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"), }, + .driver_data = &quirk_dell_vostro_v130, }, { - .ident = "Dell Inspiron 11z", + .callback = dmi_matched, + .ident = "Dell Vostro 3555", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"), }, + .driver_data = &quirk_dell_vostro_v130, }, { - .ident = "Dell Mini 12", + .callback = dmi_matched, + .ident = "Dell Inspiron N311z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"), }, + .driver_data = &quirk_dell_vostro_v130, }, - {} -}; - -static struct dmi_system_id __devinitdata dell_quirks[] = { { .callback = dmi_matched, - .ident = "Dell Vostro V130", + .ident = "Dell Inspiron M5110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, - .ident = "Dell Vostro V131", + .ident = "Dell Vostro 3360", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, - .ident = "Dell Vostro 3555", + .ident = "Dell Vostro 3460", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, - .ident = "Dell Inspiron N311z", + .ident = "Dell Vostro 3560", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"), }, .driver_data = &quirk_dell_vostro_v130, }, { .callback = dmi_matched, - .ident = "Dell Inspiron M5110", + .ident = "Dell Vostro 3450", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, return buffer; } -/* Derived from information in DellWirelessCtl.cpp: - Class 17, select 11 is radio control. It returns an array of 32-bit values. 
- - Input byte 0 = 0: Wireless information - - result[0]: return code - result[1]: - Bit 0: Hardware switch supported - Bit 1: Wifi locator supported - Bit 2: Wifi is supported - Bit 3: Bluetooth is supported - Bit 4: WWAN is supported - Bit 5: Wireless keyboard supported - Bits 6-7: Reserved - Bit 8: Wifi is installed - Bit 9: Bluetooth is installed - Bit 10: WWAN is installed - Bits 11-15: Reserved - Bit 16: Hardware switch is on - Bit 17: Wifi is blocked - Bit 18: Bluetooth is blocked - Bit 19: WWAN is blocked - Bits 20-31: Reserved - result[2]: NVRAM size in bytes - result[3]: NVRAM format version number - - Input byte 0 = 2: Wireless switch configuration - result[0]: return code - result[1]: - Bit 0: Wifi controlled by switch - Bit 1: Bluetooth controlled by switch - Bit 2: WWAN controlled by switch - Bits 3-6: Reserved - Bit 7: Wireless switch config locked - Bit 8: Wifi locator enabled - Bits 9-14: Reserved - Bit 15: Wifi locator setting locked - Bits 16-31: Reserved -*/ - -static int dell_rfkill_set(void *data, bool blocked) -{ - int disable = blocked ? 1 : 0; - unsigned long radio = (unsigned long)data; - int hwswitch_bit = (unsigned long)data - 1; - int ret = 0; - - get_buffer(); - dell_send_request(buffer, 17, 11); - - /* If the hardware switch controls this radio, and the hardware - switch is disabled, don't allow changing the software state */ - if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) { - ret = -EINVAL; - goto out; - } - - buffer->input[0] = (1 | (radio<<8) | (disable << 16)); - dell_send_request(buffer, 17, 11); - -out: - release_buffer(); - return ret; -} - -static void dell_rfkill_query(struct rfkill *rfkill, void *data) -{ - int status; - int bit = (unsigned long)data + 16; - int hwswitch_bit = (unsigned long)data - 1; - - get_buffer(); - dell_send_request(buffer, 17, 11); - status = buffer->output[1]; - release_buffer(); - - rfkill_set_sw_state(rfkill, !!(status & BIT(bit))); - - if (hwswitch_state & (BIT(hwswitch_bit))) - rfkill_set_hw_state(rfkill, !(status & BIT(16))); -} - -static const struct rfkill_ops dell_rfkill_ops = { - .set_block = dell_rfkill_set, - .query = dell_rfkill_query, -}; - static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) @@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = { .release = single_release, }; -static void dell_update_rfkill(struct work_struct *ignored) -{ - if (wifi_rfkill) - dell_rfkill_query(wifi_rfkill, (void *)1); - if (bluetooth_rfkill) - dell_rfkill_query(bluetooth_rfkill, (void *)2); - if (wwan_rfkill) - dell_rfkill_query(wwan_rfkill, (void *)3); -} -static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); - - -static int __init dell_setup_rfkill(void) -{ - int status; - int ret; - - if (dmi_check_system(dell_blacklist)) { - pr_info("Blacklisted hardware detected - not enabling rfkill\n"); - return 0; - } - - get_buffer(); - dell_send_request(buffer, 17, 11); - status = buffer->output[1]; - buffer->input[0] = 0x2; - dell_send_request(buffer, 17, 11); - hwswitch_state = buffer->output[1]; - release_buffer(); - - if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { - wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, - RFKILL_TYPE_WLAN, - &dell_rfkill_ops, (void *) 1); - if (!wifi_rfkill) { - ret = -ENOMEM; - goto err_wifi; - } - ret = rfkill_register(wifi_rfkill); - if (ret) - goto err_wifi; - } - - if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) { - bluetooth_rfkill = rfkill_alloc("dell-bluetooth", - 
&platform_device->dev, - RFKILL_TYPE_BLUETOOTH, - &dell_rfkill_ops, (void *) 2); - if (!bluetooth_rfkill) { - ret = -ENOMEM; - goto err_bluetooth; - } - ret = rfkill_register(bluetooth_rfkill); - if (ret) - goto err_bluetooth; - } - - if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) { - wwan_rfkill = rfkill_alloc("dell-wwan", - &platform_device->dev, - RFKILL_TYPE_WWAN, - &dell_rfkill_ops, (void *) 3); - if (!wwan_rfkill) { - ret = -ENOMEM; - goto err_wwan; - } - ret = rfkill_register(wwan_rfkill); - if (ret) - goto err_wwan; - } - - return 0; -err_wwan: - rfkill_destroy(wwan_rfkill); - if (bluetooth_rfkill) - rfkill_unregister(bluetooth_rfkill); -err_bluetooth: - rfkill_destroy(bluetooth_rfkill); - if (wifi_rfkill) - rfkill_unregister(wifi_rfkill); -err_wifi: - rfkill_destroy(wifi_rfkill); - - return ret; -} - -static void dell_cleanup_rfkill(void) -{ - if (wifi_rfkill) { - rfkill_unregister(wifi_rfkill); - rfkill_destroy(wifi_rfkill); - } - if (bluetooth_rfkill) { - rfkill_unregister(bluetooth_rfkill); - rfkill_destroy(bluetooth_rfkill); - } - if (wwan_rfkill) { - rfkill_unregister(wwan_rfkill); - rfkill_destroy(wwan_rfkill); - } -} - static int dell_send_intensity(struct backlight_device *bd) { int ret = 0; @@ -655,30 +459,6 @@ static void touchpad_led_exit(void) led_classdev_unregister(&touchpad_led); } -static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, - struct serio *port) -{ - static bool extended; - - if (str & 0x20) - return false; - - if (unlikely(data == 0xe0)) { - extended = true; - return false; - } else if (unlikely(extended)) { - switch (data) { - case 0x8: - schedule_delayed_work(&dell_rfkill_work, - round_jiffies_relative(HZ)); - break; - } - extended = false; - } - - return false; -} - static int __init dell_init(void) { int max_intensity = 0; @@ -720,26 +500,10 @@ static int __init dell_init(void) goto fail_buffer; buffer = page_address(bufferpage); - ret = dell_setup_rfkill(); - - if (ret) { - pr_warn("Unable to setup rfkill\n"); - goto fail_rfkill; - } - - ret = i8042_install_filter(dell_laptop_i8042_filter); - if (ret) { - pr_warn("Unable to install key filter\n"); - goto fail_filter; - } - if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); - if (dell_laptop_dir != NULL) - debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, - &dell_debugfs_fops); #ifdef CONFIG_ACPI /* In the event of an ACPI backlight being available, don't @@ -782,11 +546,6 @@ static int __init dell_init(void) return 0; fail_backlight: - i8042_remove_filter(dell_laptop_i8042_filter); - cancel_delayed_work_sync(&dell_rfkill_work); -fail_filter: - dell_cleanup_rfkill(); -fail_rfkill: free_page((unsigned long)bufferpage); fail_buffer: platform_device_del(platform_device); @@ -804,10 +563,7 @@ static void __exit dell_exit(void) debugfs_remove_recursive(dell_laptop_dir); if (quirks && quirks->touchpad_led) touchpad_led_exit(); - i8042_remove_filter(dell_laptop_i8042_filter); - cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); - dell_cleanup_rfkill(); if (platform_device) { platform_device_unregister(platform_device); platform_driver_unregister(&platform_driver); diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c index 580d80a73c3a..da267eae8ba8 100644 --- a/drivers/platform/x86/fujitsu-tablet.c +++ b/drivers/platform/x86/fujitsu-tablet.c @@ -16,6 +16,8 @@ * 59 Temple Place Suite 330, Boston, MA 02111-1307, 
USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -34,7 +36,8 @@ #define ACPI_FUJITSU_CLASS "fujitsu" #define INVERT_TABLET_MODE_BIT 0x01 -#define FORCE_TABLET_MODE_IF_UNDOCK 0x02 +#define INVERT_DOCK_STATE_BIT 0x02 +#define FORCE_TABLET_MODE_IF_UNDOCK 0x04 #define KEYMAP_LEN 16 @@ -161,6 +164,8 @@ static void fujitsu_send_state(void) state = fujitsu_read_register(0xdd); dock = state & 0x02; + if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT) + dock = !dock; if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) { tablet_mode = 1; @@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent, input_set_capability(idev, EV_SW, SW_DOCK); input_set_capability(idev, EV_SW, SW_TABLET_MODE); - input_set_capability(idev, EV_SW, SW_DOCK); - input_set_capability(idev, EV_SW, SW_TABLET_MODE); - error = input_register_device(idev); if (error) { input_free_device(idev); @@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi) +static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi) { - printk(KERN_INFO MODULENAME ": %s\n", dmi->ident); + pr_info("%s\n", dmi->ident); memcpy(fujitsu.config.keymap, dmi->driver_data, sizeof(fujitsu.config.keymap)); +} + +static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi) +{ + fujitsu_dmi_common(dmi); + fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT; return 1; } static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi) { - fujitsu_dmi_default(dmi); + fujitsu_dmi_common(dmi); fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK; - fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT; + fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT; return 1; } static struct dmi_system_id dmi_ids[] __initconst = { { - .callback = fujitsu_dmi_default, + .callback = fujitsu_dmi_lifebook, .ident = "Fujitsu Siemens P/T Series", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), @@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = { .driver_data = keymap_Lifebook_Tseries }, { - .callback = fujitsu_dmi_default, + .callback = fujitsu_dmi_lifebook, .ident = "Fujitsu Lifebook T Series", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), @@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = { .driver_data = keymap_Stylistic_Tseries }, { - .callback = fujitsu_dmi_default, + .callback = fujitsu_dmi_lifebook, .ident = "Fujitsu LifeBook U810", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), @@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = { .driver_data = keymap_Stylistic_ST5xxx }, { - .callback = fujitsu_dmi_default, + .callback = fujitsu_dmi_lifebook, .ident = "Unknown (using defaults)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, ""), @@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit); MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>"); MODULE_DESCRIPTION("Fujitsu tablet pc extras driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION("2.4"); +MODULE_VERSION("2.5"); MODULE_DEVICE_TABLE(acpi, fujitsu_ids); diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 7387f97a2941..24a3ae065f1b 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -2,7 +2,7 @@ * hdaps.c - driver for IBM's Hard Drive Active Protection System * * Copyright (C) 2005 Robert Love <rml@novell.com> - * Copyright (C) 2005 Jesper Juhl 
<jesper.juhl@gmail.com> + * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net> * * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads * starting with the R40, T41, and X40. It provides a basic two-axis diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index e2faa3cbb792..387183a2d6dd 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device) RFKILL_TYPE_WLAN, &hp_wmi_rfkill_ops, (void *) HPWMI_WIFI); + if (!wifi_rfkill) + return -ENOMEM; rfkill_init_sw_state(wifi_rfkill, hp_wmi_get_sw_state(HPWMI_WIFI)); rfkill_set_hw_state(wifi_rfkill, @@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device) RFKILL_TYPE_BLUETOOTH, &hp_wmi_rfkill_ops, (void *) HPWMI_BLUETOOTH); + if (!bluetooth_rfkill) { + err = -ENOMEM; + goto register_wifi_error; + } rfkill_init_sw_state(bluetooth_rfkill, hp_wmi_get_sw_state(HPWMI_BLUETOOTH)); rfkill_set_hw_state(bluetooth_rfkill, @@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device) RFKILL_TYPE_WWAN, &hp_wmi_rfkill_ops, (void *) HPWMI_WWAN); + if (!wwan_rfkill) { + err = -ENOMEM; + goto register_bluetooth_error; + } rfkill_init_sw_state(wwan_rfkill, hp_wmi_get_sw_state(HPWMI_WWAN)); rfkill_set_hw_state(wwan_rfkill, diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index ac902f7a9baa..4f20f8dd3d7c 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data) /* * debugfs */ -#define DEBUGFS_EVENT_LEN (4096) static int debugfs_status_show(struct seq_file *s, void *data) { unsigned long value; @@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv) node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL, &debugfs_status_fops); if (!node) { - pr_err("failed to create event in debugfs"); + pr_err("failed to create status in debugfs"); goto errout; } @@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) case 9: ideapad_sync_rfk_state(priv); break; + case 13: + case 6: + ideapad_input_report(priv, vpc_bit); + break; case 4: ideapad_backlight_notify_brightness(priv); break; @@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) ideapad_backlight_notify_power(priv); break; default: - ideapad_input_report(priv, vpc_bit); + pr_info("Unknown event: %lu\n", vpc_bit); } } } diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 8a51795aa02a..210d4ae547c2 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "(default: 0)"); static void sony_nc_kbd_backlight_resume(void); +static int sony_nc_kbd_backlight_setup(struct platform_device *pd, + unsigned int handle); +static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); + +static int sony_nc_battery_care_setup(struct platform_device *pd, + unsigned int handle); +static void sony_nc_battery_care_cleanup(struct platform_device *pd); + +static int sony_nc_thermal_setup(struct platform_device *pd); +static void sony_nc_thermal_cleanup(struct platform_device *pd); +static void sony_nc_thermal_resume(void); + +static int sony_nc_lid_resume_setup(struct platform_device *pd); +static 
void sony_nc_lid_resume_cleanup(struct platform_device *pd); + +static int sony_nc_highspeed_charging_setup(struct platform_device *pd); +static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd); + +static int sony_nc_touchpad_setup(struct platform_device *pd, + unsigned int handle); +static void sony_nc_touchpad_cleanup(struct platform_device *pd); enum sony_nc_rfkill { SONY_WIFI, @@ -153,6 +174,9 @@ enum sony_nc_rfkill { static int sony_rfkill_handle; static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL]; static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900}; +static int sony_nc_rfkill_setup(struct acpi_device *device, + unsigned int handle); +static void sony_nc_rfkill_cleanup(void); static void sony_nc_rfkill_update(void); /*********** Input Devices ***********/ @@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL; /* * acpi_evaluate_object wrappers + * all useful calls into SNC methods take one or zero parameters and return + * integers or arrays. */ -static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) +static union acpi_object *__call_snc_method(acpi_handle handle, char *method, + u64 *value) { - struct acpi_buffer output; - union acpi_object out_obj; + union acpi_object *result = NULL; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_status status; - output.length = sizeof(out_obj); - output.pointer = &out_obj; + if (value) { + struct acpi_object_list params; + union acpi_object in; + in.type = ACPI_TYPE_INTEGER; + in.integer.value = *value; + params.count = 1; + params.pointer = ∈ + status = acpi_evaluate_object(handle, method, ¶ms, &output); + dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method, + (unsigned int)(*value >> 32), + (unsigned int)*value & 0xffffffff); + } else { + status = acpi_evaluate_object(handle, method, NULL, &output); + dprintk("__call_snc_method: [%s]\n", method); + } - status = acpi_evaluate_object(handle, name, NULL, &output); - if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) { - *result = out_obj.integer.value; - return 0; + if (ACPI_FAILURE(status)) { + pr_err("Failed to evaluate [%s]\n", method); + return NULL; } - pr_warn("acpi_callreadfunc failed\n"); + result = (union acpi_object *) output.pointer; + if (!result) + dprintk("No return object [%s]\n", method); - return -1; + return result; } -static int acpi_callsetfunc(acpi_handle handle, char *name, int value, - int *result) +static int sony_nc_int_call(acpi_handle handle, char *name, int *value, + int *result) { - struct acpi_object_list params; - union acpi_object in_obj; - struct acpi_buffer output; - union acpi_object out_obj; - acpi_status status; - - params.count = 1; - params.pointer = &in_obj; - in_obj.type = ACPI_TYPE_INTEGER; - in_obj.integer.value = value; + union acpi_object *object = NULL; + if (value) { + u64 v = *value; + object = __call_snc_method(handle, name, &v); + } else + object = __call_snc_method(handle, name, NULL); - output.length = sizeof(out_obj); - output.pointer = &out_obj; + if (!object) + return -EINVAL; - status = acpi_evaluate_object(handle, name, ¶ms, &output); - if (status == AE_OK) { - if (result != NULL) { - if (out_obj.type != ACPI_TYPE_INTEGER) { - pr_warn("acpi_evaluate_object bad return type\n"); - return -1; - } - *result = out_obj.integer.value; - } - return 0; + if (object->type != ACPI_TYPE_INTEGER) { + pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n", + ACPI_TYPE_INTEGER, object->type); + kfree(object); + return -EINVAL; } - 
pr_warn("acpi_evaluate_object failed\n"); + if (result) + *result = object->integer.value; + + kfree(object); + return 0; +} + +#define MIN(a, b) (a > b ? b : a) +static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value, + void *buffer, size_t buflen) +{ + size_t len = len; + union acpi_object *object = __call_snc_method(handle, name, value); + + if (!object) + return -EINVAL; + + if (object->type == ACPI_TYPE_BUFFER) + len = MIN(buflen, object->buffer.length); + + else if (object->type == ACPI_TYPE_INTEGER) + len = MIN(buflen, sizeof(object->integer.value)); + + else { + pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n", + ACPI_TYPE_BUFFER, object->type); + kfree(object); + return -EINVAL; + } - return -1; + memcpy(buffer, object->buffer.pointer, len); + kfree(object); + return 0; } struct sony_nc_handles { @@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev, static int sony_nc_handles_setup(struct platform_device *pd) { - int i; - int result; + int i, r, result, arg; handles = kzalloc(sizeof(*handles), GFP_KERNEL); if (!handles) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { - if (!acpi_callsetfunc(sony_nc_acpi_handle, - "SN00", i + 0x20, &result)) { + arg = i + 0x20; + r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, + &result); + if (!r) { dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", result, i); handles->cap[i] = result; @@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle) int i; /* not initialized yet, return early */ - if (!handles) - return -1; + if (!handles || !handle) + return -EINVAL; for (i = 0; i < 0x10; i++) { if (handles->cap[i] == handle) { @@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle) } } dprintk("handle 0x%.4x not found\n", handle); - return -1; + return -EINVAL; } static int sony_call_snc_handle(int handle, int argument, int *result) { - int ret = 0; + int arg, ret = 0; int offset = sony_find_snc_handle(handle); if (offset < 0) - return -1; + return offset; - ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, - result); - dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument, - *result); + arg = offset | argument; + ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result); + dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result); return ret; } @@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value) static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr, char *buffer) { - int value; + int value, ret = 0; struct sony_nc_value *item = container_of(attr, struct sony_nc_value, devattr); if (!*item->acpiget) return -EIO; - if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0) + ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL, + &value); + if (ret < 0) return -EIO; if (item->validate) @@ -909,7 +973,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, struct device_attribute *attr, const char *buffer, size_t count) { - int value; + unsigned long value = 0; + int ret = 0; struct sony_nc_value *item = container_of(attr, struct sony_nc_value, devattr); @@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, if (count > 31) return -EINVAL; - value = simple_strtoul(buffer, NULL, 10); + if (kstrtoul(buffer, 10, &value)) + return -EINVAL; if (item->validate) value = item->validate(SNC_VALIDATE_IN, value); @@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev, if (value < 0) 
return value; - if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0) + ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset, + (int *)&value, NULL); + if (ret < 0) return -EIO; + item->value = value; item->valid = 1; return count; @@ -948,15 +1017,15 @@ struct sony_backlight_props sony_bl_props; static int sony_backlight_update_status(struct backlight_device *bd) { - return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", - bd->props.brightness + 1, NULL); + int arg = bd->props.brightness + 1; + return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL); } static int sony_backlight_get_brightness(struct backlight_device *bd) { int value; - if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) + if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value)) return 0; /* brightness levels are 1-based, while backlight ones are 0-based */ return value - 1; @@ -1024,10 +1093,14 @@ static struct sony_nc_event sony_100_events[] = { { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, { 0x87, SONYPI_EVENT_FNKEY_F7 }, { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x88, SONYPI_EVENT_FNKEY_F8 }, + { 0x08, SONYPI_EVENT_FNKEY_RELEASED }, { 0x89, SONYPI_EVENT_FNKEY_F9 }, { 0x09, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8A, SONYPI_EVENT_FNKEY_F10 }, { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, + { 0x8B, SONYPI_EVENT_FNKEY_F11 }, + { 0x0B, SONYPI_EVENT_FNKEY_RELEASED }, { 0x8C, SONYPI_EVENT_FNKEY_F12 }, { 0x0C, SONYPI_EVENT_FNKEY_RELEASED }, { 0x9d, SONYPI_EVENT_ZOOM_PRESSED }, @@ -1063,63 +1136,116 @@ static struct sony_nc_event sony_127_events[] = { { 0, 0 }, }; +static int sony_nc_hotkeys_decode(u32 event, unsigned int handle) +{ + int ret = -EINVAL; + unsigned int result = 0; + struct sony_nc_event *key_event; + + if (sony_call_snc_handle(handle, 0x200, &result)) { + dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle, + event); + return -EINVAL; + } + + result &= 0xFF; + + if (handle == 0x0100) + key_event = sony_100_events; + else + key_event = sony_127_events; + + for (; key_event->data; key_event++) { + if (key_event->data == result) { + ret = key_event->event; + break; + } + } + + if (!key_event->data) + pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n", + event, result, handle); + + return ret; +} + /* * ACPI callbacks */ static void sony_nc_notify(struct acpi_device *device, u32 event) { - u32 ev = event; + u32 real_ev = event; + u8 ev_type = 0; + dprintk("sony_nc_notify, event: 0x%.2x\n", event); + + if (event >= 0x90) { + unsigned int result = 0; + unsigned int arg = 0; + unsigned int handle = 0; + unsigned int offset = event - 0x90; + + if (offset >= ARRAY_SIZE(handles->cap)) { + pr_err("Event 0x%x outside of capabilities list\n", + event); + return; + } + handle = handles->cap[offset]; + + /* list of handles known for generating events */ + switch (handle) { + /* hotkey event */ + case 0x0100: + case 0x0127: + ev_type = 1; + real_ev = sony_nc_hotkeys_decode(event, handle); + + if (real_ev > 0) + sony_laptop_report_input_event(real_ev); + else + /* restore the original event for reporting */ + real_ev = event; - if (ev >= 0x90) { - /* New-style event */ - int result; - int key_handle = 0; - ev -= 0x90; - - if (sony_find_snc_handle(0x100) == ev) - key_handle = 0x100; - if (sony_find_snc_handle(0x127) == ev) - key_handle = 0x127; - - if (key_handle) { - struct sony_nc_event *key_event; - - if (sony_call_snc_handle(key_handle, 0x200, &result)) { - dprintk("sony_nc_notify, unable to decode" - " event 0x%.2x 0x%.2x\n", key_handle, - ev); - /* restore the original event */ - 
ev = event; - } else { - ev = result & 0xFF; - - if (key_handle == 0x100) - key_event = sony_100_events; - else - key_event = sony_127_events; - - for (; key_event->data; key_event++) { - if (key_event->data == ev) { - ev = key_event->event; - break; - } - } + break; - if (!key_event->data) - pr_info("Unknown event: 0x%x 0x%x\n", - key_handle, ev); - else - sony_laptop_report_input_event(ev); - } - } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) { - sony_nc_rfkill_update(); - return; + /* wlan switch */ + case 0x0124: + case 0x0135: + /* events on this handle are reported when the + * switch changes position or for battery + * events. We'll notify both of them but only + * update the rfkill device status when the + * switch is moved. + */ + ev_type = 2; + sony_call_snc_handle(handle, 0x0100, &result); + real_ev = result & 0x03; + + /* hw switch event */ + if (real_ev == 1) + sony_nc_rfkill_update(); + + break; + + default: + dprintk("Unknown event 0x%x for handle 0x%x\n", + event, handle); + break; } - } else - sony_laptop_report_input_event(ev); - dprintk("sony_nc_notify, event: 0x%.2x\n", ev); - acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); + /* clear the event (and the event reason when present) */ + arg = 1 << offset; + sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result); + + } else { + /* old style event */ + ev_type = 1; + sony_laptop_report_input_event(real_ev); + } + + acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev); + + acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class, + dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev); } static acpi_status sony_walk_callback(acpi_handle handle, u32 level, @@ -1140,20 +1266,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level, /* * ACPI device */ -static int sony_nc_function_setup(struct acpi_device *device) +static void sony_nc_function_setup(struct acpi_device *device, + struct platform_device *pf_device) { - int result; + unsigned int i, result, bitmask, arg; + + if (!handles) + return; + + /* setup found handles here */ + for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { + unsigned int handle = handles->cap[i]; + + if (!handle) + continue; + + dprintk("setting up handle 0x%.4x\n", handle); + + switch (handle) { + case 0x0100: + case 0x0101: + case 0x0127: + /* setup hotkeys */ + sony_call_snc_handle(handle, 0, &result); + break; + case 0x0102: + /* setup hotkeys */ + sony_call_snc_handle(handle, 0x100, &result); + break; + case 0x0105: + case 0x0148: + /* touchpad enable/disable */ + result = sony_nc_touchpad_setup(pf_device, handle); + if (result) + pr_err("couldn't set up touchpad control function (%d)\n", + result); + break; + case 0x0115: + case 0x0136: + case 0x013f: + result = sony_nc_battery_care_setup(pf_device, handle); + if (result) + pr_err("couldn't set up battery care function (%d)\n", + result); + break; + case 0x0119: + result = sony_nc_lid_resume_setup(pf_device); + if (result) + pr_err("couldn't set up lid resume function (%d)\n", + result); + break; + case 0x0122: + result = sony_nc_thermal_setup(pf_device); + if (result) + pr_err("couldn't set up thermal profile function (%d)\n", + result); + break; + case 0x0131: + result = sony_nc_highspeed_charging_setup(pf_device); + if (result) + pr_err("couldn't set up high speed charging function (%d)\n", + result); + break; + case 0x0124: + case 0x0135: + result = sony_nc_rfkill_setup(device, handle); + if (result) + pr_err("couldn't set up rfkill support (%d)\n", + result); + break; + 
case 0x0137: + case 0x0143: + result = sony_nc_kbd_backlight_setup(pf_device, handle); + if (result) + pr_err("couldn't set up keyboard backlight function (%d)\n", + result); + break; + default: + continue; + } + } /* Enable all events */ - acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result); + arg = 0x10; + if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask)) + sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask, + &result); +} + +static void sony_nc_function_cleanup(struct platform_device *pd) +{ + unsigned int i, result, bitmask, handle; - /* Setup hotkeys */ - sony_call_snc_handle(0x0100, 0, &result); - sony_call_snc_handle(0x0101, 0, &result); - sony_call_snc_handle(0x0102, 0x100, &result); - sony_call_snc_handle(0x0127, 0, &result); + /* get enabled events and disable them */ + sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask); + sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result); - return 0; + /* cleanup handles here */ + for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { + + handle = handles->cap[i]; + + if (!handle) + continue; + + switch (handle) { + case 0x0105: + case 0x0148: + sony_nc_touchpad_cleanup(pd); + break; + case 0x0115: + case 0x0136: + case 0x013f: + sony_nc_battery_care_cleanup(pd); + break; + case 0x0119: + sony_nc_lid_resume_cleanup(pd); + break; + case 0x0122: + sony_nc_thermal_cleanup(pd); + break; + case 0x0131: + sony_nc_highspeed_charging_cleanup(pd); + break; + case 0x0124: + case 0x0135: + sony_nc_rfkill_cleanup(); + break; + case 0x0137: + case 0x0143: + sony_nc_kbd_backlight_cleanup(pd); + break; + default: + continue; + } + } + + /* finally cleanup the handles list */ + sony_nc_handles_cleanup(pd); +} + +static void sony_nc_function_resume(void) +{ + unsigned int i, result, bitmask, arg; + + dprintk("Resuming SNC device\n"); + + for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { + unsigned int handle = handles->cap[i]; + + if (!handle) + continue; + + switch (handle) { + case 0x0100: + case 0x0101: + case 0x0127: + /* re-enable hotkeys */ + sony_call_snc_handle(handle, 0, &result); + break; + case 0x0102: + /* re-enable hotkeys */ + sony_call_snc_handle(handle, 0x100, &result); + break; + case 0x0122: + sony_nc_thermal_resume(); + break; + case 0x0124: + case 0x0135: + sony_nc_rfkill_update(); + break; + case 0x0137: + case 0x0143: + sony_nc_kbd_backlight_resume(); + break; + default: + continue; + } + } + + /* Enable all events */ + arg = 0x10; + if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask)) + sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask, + &result); } static int sony_nc_resume(struct acpi_device *device) @@ -1166,8 +1462,8 @@ static int sony_nc_resume(struct acpi_device *device) if (!item->valid) continue; - ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, - item->value, NULL); + ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset, + &item->value, NULL); if (ret < 0) { pr_err("%s: %d\n", __func__, ret); break; @@ -1176,21 +1472,14 @@ static int sony_nc_resume(struct acpi_device *device) if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { - if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) + int arg = 1; + if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) dprintk("ECON Method failed\n"); } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", - &handle))) { - dprintk("Doing SNC setup\n"); - sony_nc_function_setup(device); - } - - /* re-read rfkill state */ - sony_nc_rfkill_update(); - - /* restore 
kbd backlight states */ - sony_nc_kbd_backlight_resume(); + &handle))) + sony_nc_function_resume(); return 0; } @@ -1213,7 +1502,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked) int argument = sony_rfkill_address[(long) data] + 0x100; if (!blocked) - argument |= 0xff0000; + argument |= 0x030000; return sony_call_snc_handle(sony_rfkill_handle, argument, &result); } @@ -1230,7 +1519,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device, enum rfkill_type type; const char *name; int result; - bool hwblock; + bool hwblock, swblock; switch (nc_type) { case SONY_WIFI: @@ -1258,8 +1547,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device, if (!rfk) return -ENOMEM; - sony_call_snc_handle(sony_rfkill_handle, 0x200, &result); + if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) { + rfkill_destroy(rfk); + return -1; + } hwblock = !(result & 0x1); + + if (sony_call_snc_handle(sony_rfkill_handle, + sony_rfkill_address[nc_type], + &result) < 0) { + rfkill_destroy(rfk); + return -1; + } + swblock = !(result & 0x2); + + rfkill_init_sw_state(rfk, swblock); rfkill_set_hw_state(rfk, hwblock); err = rfkill_register(rfk); @@ -1295,101 +1597,79 @@ static void sony_nc_rfkill_update(void) sony_call_snc_handle(sony_rfkill_handle, argument, &result); rfkill_set_states(sony_rfkill_devices[i], - !(result & 0xf), false); + !(result & 0x2), false); } } -static void sony_nc_rfkill_setup(struct acpi_device *device) +static int sony_nc_rfkill_setup(struct acpi_device *device, + unsigned int handle) { - int offset; - u8 dev_code, i; - acpi_status status; - struct acpi_object_list params; - union acpi_object in_obj; - union acpi_object *device_enum; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - - offset = sony_find_snc_handle(0x124); - if (offset == -1) { - offset = sony_find_snc_handle(0x135); - if (offset == -1) - return; - else - sony_rfkill_handle = 0x135; - } else - sony_rfkill_handle = 0x124; - dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle); - - /* need to read the whole buffer returned by the acpi call to SN06 - * here otherwise we may miss some features - */ - params.count = 1; - params.pointer = &in_obj; - in_obj.type = ACPI_TYPE_INTEGER; - in_obj.integer.value = offset; - status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", ¶ms, - &buffer); - if (ACPI_FAILURE(status)) { - dprintk("Radio device enumeration failed\n"); - return; - } - - device_enum = (union acpi_object *) buffer.pointer; - if (!device_enum) { - pr_err("No SN06 return object\n"); - goto out_no_enum; - } - if (device_enum->type != ACPI_TYPE_BUFFER) { - pr_err("Invalid SN06 return object 0x%.2x\n", - device_enum->type); - goto out_no_enum; - } + u64 offset; + int i; + unsigned char buffer[32] = { 0 }; - /* the buffer is filled with magic numbers describing the devices - * available, 0xff terminates the enumeration + offset = sony_find_snc_handle(handle); + sony_rfkill_handle = handle; + + i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer, + 32); + if (i < 0) + return i; + + /* The buffer is filled with magic numbers describing the devices + * available, 0xff terminates the enumeration. 
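The sony-laptop rework above replaces the open-coded SN06 evaluation with sony_nc_buffer_call(); the buffer that call returns is a list of device codes terminated by 0xff (the known codes are enumerated in the comment continued below), and the new sony_nc_rfkill_setup() walks it to register one rfkill device per radio found. A minimal, self-contained sketch of that walk, assuming a plain byte buffer (the function name and sample data are illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative walk over a 0xff-terminated SN06 device-code buffer,
 * mirroring the code-to-radio mapping used by sony_nc_rfkill_setup(). */
static void enumerate_radios(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len && buf[i] != 0xff; i++) {
		uint8_t code = buf[i];

		if (code == 0x00)
			printf("0x%.2x: WLAN\n", code);
		else if (code == 0x10)
			printf("0x%.2x: Bluetooth\n", code);
		else if ((code & 0xf0) == 0x20 || (code & 0xf0) == 0x50)
			printf("0x%.2x: WWAN\n", code);
		else if (code == 0x30)
			printf("0x%.2x: WiMAX\n", code);
		else
			printf("0x%.2x: unhandled\n", code);
	}
}

int main(void)
{
	const uint8_t sample[] = { 0x00, 0x10, 0x25, 0xff };

	enumerate_radios(sample, sizeof(sample));
	return 0;
}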
+ * Known codes: + * 0x00 WLAN + * 0x10 BLUETOOTH + * 0x20 WWAN GPRS-EDGE + * 0x21 WWAN HSDPA + * 0x22 WWAN EV-DO + * 0x23 WWAN GPS + * 0x25 Gobi WWAN no GPS + * 0x26 Gobi WWAN + GPS + * 0x28 Gobi WWAN no GPS + * 0x29 Gobi WWAN + GPS + * 0x30 WIMAX + * 0x50 Gobi WWAN no GPS + * 0x51 Gobi WWAN + GPS + * 0x70 no SIM card slot + * 0x71 SIM card slot */ - for (i = 0; i < device_enum->buffer.length; i++) { + for (i = 0; i < ARRAY_SIZE(buffer); i++) { - dev_code = *(device_enum->buffer.pointer + i); - if (dev_code == 0xff) + if (buffer[i] == 0xff) break; - dprintk("Radio devices, looking at 0x%.2x\n", dev_code); + dprintk("Radio devices, found 0x%.2x\n", buffer[i]); - if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI]) + if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI]) sony_nc_setup_rfkill(device, SONY_WIFI); - if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH]) + if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH]) sony_nc_setup_rfkill(device, SONY_BLUETOOTH); - if ((0xf0 & dev_code) == 0x20 && + if (((0xf0 & buffer[i]) == 0x20 || + (0xf0 & buffer[i]) == 0x50) && !sony_rfkill_devices[SONY_WWAN]) sony_nc_setup_rfkill(device, SONY_WWAN); - if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX]) + if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX]) sony_nc_setup_rfkill(device, SONY_WIMAX); } - -out_no_enum: - kfree(buffer.pointer); - return; + return 0; } /* Keyboard backlight feature */ -#define KBDBL_HANDLER 0x137 -#define KBDBL_PRESENT 0xB00 -#define SET_MODE 0xC00 -#define SET_STATE 0xD00 -#define SET_TIMEOUT 0xE00 - struct kbd_backlight { - int mode; - int timeout; + unsigned int handle; + unsigned int base; + unsigned int mode; + unsigned int timeout; struct device_attribute mode_attr; struct device_attribute timeout_attr; }; -static struct kbd_backlight *kbdbl_handle; +static struct kbd_backlight *kbdbl_ctl; static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) { @@ -1398,15 +1678,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) if (value > 1) return -EINVAL; - if (sony_call_snc_handle(KBDBL_HANDLER, - (value << 0x10) | SET_MODE, &result)) + if (sony_call_snc_handle(kbdbl_ctl->handle, + (value << 0x10) | (kbdbl_ctl->base), &result)) return -EIO; /* Try to turn the light on/off immediately */ - sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, - &result); + sony_call_snc_handle(kbdbl_ctl->handle, + (value << 0x10) | (kbdbl_ctl->base + 0x100), &result); - kbdbl_handle->mode = value; + kbdbl_ctl->mode = value; return 0; } @@ -1421,7 +1701,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev, if (count > 31) return -EINVAL; - if (strict_strtoul(buffer, 10, &value)) + if (kstrtoul(buffer, 10, &value)) return -EINVAL; ret = __sony_nc_kbd_backlight_mode_set(value); @@ -1435,7 +1715,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev, struct device_attribute *attr, char *buffer) { ssize_t count = 0; - count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode); + count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode); return count; } @@ -1446,11 +1726,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value) if (value > 3) return -EINVAL; - if (sony_call_snc_handle(KBDBL_HANDLER, - (value << 0x10) | SET_TIMEOUT, &result)) + if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) | + (kbdbl_ctl->base + 0x200), &result)) return -EIO; - kbdbl_handle->timeout = value; + kbdbl_ctl->timeout = value; return 0; } @@ -1465,7 +1745,7 @@ static ssize_t 
sony_nc_kbd_backlight_timeout_store(struct device *dev, if (count > 31) return -EINVAL; - if (strict_strtoul(buffer, 10, &value)) + if (kstrtoul(buffer, 10, &value)) return -EINVAL; ret = __sony_nc_kbd_backlight_timeout_set(value); @@ -1479,39 +1759,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev, struct device_attribute *attr, char *buffer) { ssize_t count = 0; - count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout); + count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout); return count; } -static int sony_nc_kbd_backlight_setup(struct platform_device *pd) +static int sony_nc_kbd_backlight_setup(struct platform_device *pd, + unsigned int handle) { int result; + int ret = 0; - if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) - return 0; - if (!(result & 0x02)) + /* verify the kbd backlight presence, these handles are not used for + * keyboard backlight only + */ + ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100, + &result); + if (ret) + return ret; + + if ((handle == 0x0137 && !(result & 0x02)) || + !(result & 0x01)) { + dprintk("no backlight keyboard found\n"); return 0; + } - kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL); - if (!kbdbl_handle) + kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL); + if (!kbdbl_ctl) return -ENOMEM; - sysfs_attr_init(&kbdbl_handle->mode_attr.attr); - kbdbl_handle->mode_attr.attr.name = "kbd_backlight"; - kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; - kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show; - kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store; + kbdbl_ctl->handle = handle; + if (handle == 0x0137) + kbdbl_ctl->base = 0x0C00; + else + kbdbl_ctl->base = 0x4000; + + sysfs_attr_init(&kbdbl_ctl->mode_attr.attr); + kbdbl_ctl->mode_attr.attr.name = "kbd_backlight"; + kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR; + kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show; + kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store; - sysfs_attr_init(&kbdbl_handle->timeout_attr.attr); - kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout"; - kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; - kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; - kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; + sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr); + kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout"; + kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; + kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; + kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; - if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr)) + ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr); + if (ret) goto outkzalloc; - if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr)) + ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr); + if (ret) goto outmode; __sony_nc_kbd_backlight_mode_set(kbd_backlight); @@ -1520,57 +1819,661 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd) return 0; outmode: - device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); + device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); outkzalloc: - kfree(kbdbl_handle); - kbdbl_handle = NULL; - return -1; + kfree(kbdbl_ctl); + kbdbl_ctl = NULL; + return ret; } -static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) +static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) { - if (kbdbl_handle) { + 
if (kbdbl_ctl) { int result; - device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); - device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); + device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr); + device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr); /* restore the default hw behaviour */ - sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); - sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); + sony_call_snc_handle(kbdbl_ctl->handle, + kbdbl_ctl->base | 0x10000, &result); + sony_call_snc_handle(kbdbl_ctl->handle, + kbdbl_ctl->base + 0x200, &result); - kfree(kbdbl_handle); + kfree(kbdbl_ctl); + kbdbl_ctl = NULL; } - return 0; } static void sony_nc_kbd_backlight_resume(void) { int ignore = 0; - if (!kbdbl_handle) + if (!kbdbl_ctl) return; - if (kbdbl_handle->mode == 0) - sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); - - if (kbdbl_handle->timeout != 0) - sony_call_snc_handle(KBDBL_HANDLER, - (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT, + if (kbdbl_ctl->mode == 0) + sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base, &ignore); + + if (kbdbl_ctl->timeout != 0) + sony_call_snc_handle(kbdbl_ctl->handle, + (kbdbl_ctl->base + 0x200) | + (kbdbl_ctl->timeout << 0x10), &ignore); +} + +struct battery_care_control { + struct device_attribute attrs[2]; + unsigned int handle; +}; +static struct battery_care_control *bcare_ctl; + +static ssize_t sony_nc_battery_care_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned int result, cmd; + unsigned long value; + + if (count > 31) + return -EINVAL; + + if (kstrtoul(buffer, 10, &value)) + return -EINVAL; + + /* limit values (2 bits): + * 00 - none + * 01 - 80% + * 10 - 50% + * 11 - 100% + * + * bit 0: 0 disable BCL, 1 enable BCL + * bit 1: 1 tell to store the battery limit (see bits 6,7) too + * bits 2,3: reserved + * bits 4,5: store the limit into the EC + * bits 6,7: store the limit into the battery + */ + + /* + * handle 0x0115 should allow storing on battery too; + * handle 0x0136 same as 0x0115 + health status; + * handle 0x013f, same as 0x0136 but no storing on the battery + * + * Store only inside the EC for now, regardless the handle number + */ + if (value == 0) + /* disable limits */ + cmd = 0x0; + + else if (value <= 50) + cmd = 0x21; + + else if (value <= 80) + cmd = 0x11; + + else if (value <= 100) + cmd = 0x31; + + else + return -EINVAL; + + if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100, + &result)) + return -EIO; + + return count; +} + +static ssize_t sony_nc_battery_care_limit_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int result, status; + + if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result)) + return -EIO; + + status = (result & 0x01) ? 
((result & 0x30) >> 0x04) : 0; + switch (status) { + case 1: + status = 80; + break; + case 2: + status = 50; + break; + case 3: + status = 100; + break; + default: + status = 0; + break; + } + + return snprintf(buffer, PAGE_SIZE, "%d\n", status); +} + +static ssize_t sony_nc_battery_care_health_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + ssize_t count = 0; + unsigned int health; + + if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health)) + return -EIO; + + count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff); + + return count; +} + +static int sony_nc_battery_care_setup(struct platform_device *pd, + unsigned int handle) +{ + int ret = 0; + + bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL); + if (!bcare_ctl) + return -ENOMEM; + + bcare_ctl->handle = handle; + + sysfs_attr_init(&bcare_ctl->attrs[0].attr); + bcare_ctl->attrs[0].attr.name = "battery_care_limiter"; + bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR; + bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show; + bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store; + + ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]); + if (ret) + goto outkzalloc; + + /* 0x0115 is for models with no health reporting capability */ + if (handle == 0x0115) + return 0; + + sysfs_attr_init(&bcare_ctl->attrs[1].attr); + bcare_ctl->attrs[1].attr.name = "battery_care_health"; + bcare_ctl->attrs[1].attr.mode = S_IRUGO; + bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show; + + ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]); + if (ret) + goto outlimiter; + + return 0; + +outlimiter: + device_remove_file(&pd->dev, &bcare_ctl->attrs[0]); + +outkzalloc: + kfree(bcare_ctl); + bcare_ctl = NULL; + + return ret; +} + +static void sony_nc_battery_care_cleanup(struct platform_device *pd) +{ + if (bcare_ctl) { + device_remove_file(&pd->dev, &bcare_ctl->attrs[0]); + if (bcare_ctl->handle != 0x0115) + device_remove_file(&pd->dev, &bcare_ctl->attrs[1]); + + kfree(bcare_ctl); + bcare_ctl = NULL; + } +} + +struct snc_thermal_ctrl { + unsigned int mode; + unsigned int profiles; + struct device_attribute mode_attr; + struct device_attribute profiles_attr; +}; +static struct snc_thermal_ctrl *th_handle; + +#define THM_PROFILE_MAX 3 +static const char * const snc_thermal_profiles[] = { + "balanced", + "silent", + "performance" +}; + +static int sony_nc_thermal_mode_set(unsigned short mode) +{ + unsigned int result; + + /* the thermal profile seems to be a two bit bitmask: + * lsb -> silent + * msb -> performance + * no bit set is the normal operation and is always valid + * Some vaio models only have "balanced" and "performance" + */ + if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX) + return -EINVAL; + + if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result)) + return -EIO; + + th_handle->mode = mode; + + return 0; +} + +static int sony_nc_thermal_mode_get(void) +{ + unsigned int result; + + if (sony_call_snc_handle(0x0122, 0x0100, &result)) + return -EIO; + + return result & 0xff; +} + +static ssize_t sony_nc_thermal_profiles_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + short cnt; + size_t idx = 0; + + for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) { + if (!cnt || (th_handle->profiles & cnt)) + idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ", + snc_thermal_profiles[cnt]); + } + idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n"); + + return idx; +} + +static ssize_t sony_nc_thermal_mode_store(struct 
device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned short cmd; + size_t len = count; + + if (count == 0) + return -EINVAL; + + /* skip the newline if present */ + if (buffer[len - 1] == '\n') + len--; + + for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++) + if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0) + break; + + if (sony_nc_thermal_mode_set(cmd)) + return -EIO; + + return count; +} + +static ssize_t sony_nc_thermal_mode_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + ssize_t count = 0; + unsigned int mode = sony_nc_thermal_mode_get(); + + if (mode < 0) + return mode; + + count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]); + + return count; +} + +static int sony_nc_thermal_setup(struct platform_device *pd) +{ + int ret = 0; + th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL); + if (!th_handle) + return -ENOMEM; + + ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles); + if (ret) { + pr_warn("couldn't to read the thermal profiles\n"); + goto outkzalloc; + } + + ret = sony_nc_thermal_mode_get(); + if (ret < 0) { + pr_warn("couldn't to read the current thermal profile"); + goto outkzalloc; + } + th_handle->mode = ret; + + sysfs_attr_init(&th_handle->profiles_attr.attr); + th_handle->profiles_attr.attr.name = "thermal_profiles"; + th_handle->profiles_attr.attr.mode = S_IRUGO; + th_handle->profiles_attr.show = sony_nc_thermal_profiles_show; + + sysfs_attr_init(&th_handle->mode_attr.attr); + th_handle->mode_attr.attr.name = "thermal_control"; + th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; + th_handle->mode_attr.show = sony_nc_thermal_mode_show; + th_handle->mode_attr.store = sony_nc_thermal_mode_store; + + ret = device_create_file(&pd->dev, &th_handle->profiles_attr); + if (ret) + goto outkzalloc; + + ret = device_create_file(&pd->dev, &th_handle->mode_attr); + if (ret) + goto outprofiles; + + return 0; + +outprofiles: + device_remove_file(&pd->dev, &th_handle->profiles_attr); +outkzalloc: + kfree(th_handle); + th_handle = NULL; + return ret; +} + +static void sony_nc_thermal_cleanup(struct platform_device *pd) +{ + if (th_handle) { + device_remove_file(&pd->dev, &th_handle->profiles_attr); + device_remove_file(&pd->dev, &th_handle->mode_attr); + kfree(th_handle); + th_handle = NULL; + } +} + +static void sony_nc_thermal_resume(void) +{ + unsigned int status = sony_nc_thermal_mode_get(); + + if (status != th_handle->mode) + sony_nc_thermal_mode_set(th_handle->mode); +} + +/* resume on LID open */ +struct snc_lid_resume_control { + struct device_attribute attrs[3]; + unsigned int status; +}; +static struct snc_lid_resume_control *lid_ctl; + +static ssize_t sony_nc_lid_resume_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned int result, pos; + unsigned long value; + if (count > 31) + return -EINVAL; + + if (kstrtoul(buffer, 10, &value) || value > 1) + return -EINVAL; + + /* the value we have to write to SNC is a bitmask: + * +--------------+ + * | S3 | S4 | S5 | + * +--------------+ + * 2 1 0 + */ + if (strcmp(attr->attr.name, "lid_resume_S3") == 0) + pos = 2; + else if (strcmp(attr->attr.name, "lid_resume_S4") == 0) + pos = 1; + else if (strcmp(attr->attr.name, "lid_resume_S5") == 0) + pos = 0; + else + return -EINVAL; + + if (value) + value = lid_ctl->status | (1 << pos); + else + value = lid_ctl->status & ~(1 << pos); + + if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result)) + return -EIO; + 
+ lid_ctl->status = value; + + return count; +} + +static ssize_t sony_nc_lid_resume_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int pos; + + if (strcmp(attr->attr.name, "lid_resume_S3") == 0) + pos = 2; + else if (strcmp(attr->attr.name, "lid_resume_S4") == 0) + pos = 1; + else if (strcmp(attr->attr.name, "lid_resume_S5") == 0) + pos = 0; + else + return -EINVAL; + + return snprintf(buffer, PAGE_SIZE, "%d\n", + (lid_ctl->status >> pos) & 0x01); +} + +static int sony_nc_lid_resume_setup(struct platform_device *pd) +{ + unsigned int result; + int i; + + if (sony_call_snc_handle(0x0119, 0x0000, &result)) + return -EIO; + + lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL); + if (!lid_ctl) + return -ENOMEM; + + lid_ctl->status = result & 0x7; + + sysfs_attr_init(&lid_ctl->attrs[0].attr); + lid_ctl->attrs[0].attr.name = "lid_resume_S3"; + lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR; + lid_ctl->attrs[0].show = sony_nc_lid_resume_show; + lid_ctl->attrs[0].store = sony_nc_lid_resume_store; + + sysfs_attr_init(&lid_ctl->attrs[1].attr); + lid_ctl->attrs[1].attr.name = "lid_resume_S4"; + lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR; + lid_ctl->attrs[1].show = sony_nc_lid_resume_show; + lid_ctl->attrs[1].store = sony_nc_lid_resume_store; + + sysfs_attr_init(&lid_ctl->attrs[2].attr); + lid_ctl->attrs[2].attr.name = "lid_resume_S5"; + lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR; + lid_ctl->attrs[2].show = sony_nc_lid_resume_show; + lid_ctl->attrs[2].store = sony_nc_lid_resume_store; + + for (i = 0; i < 3; i++) { + result = device_create_file(&pd->dev, &lid_ctl->attrs[i]); + if (result) + goto liderror; + } + + return 0; + +liderror: + for (; i > 0; i--) + device_remove_file(&pd->dev, &lid_ctl->attrs[i]); + + kfree(lid_ctl); + lid_ctl = NULL; + + return result; +} + +static void sony_nc_lid_resume_cleanup(struct platform_device *pd) +{ + int i; + + if (lid_ctl) { + for (i = 0; i < 3; i++) + device_remove_file(&pd->dev, &lid_ctl->attrs[i]); + + kfree(lid_ctl); + lid_ctl = NULL; + } +} + +/* High speed charging function */ +static struct device_attribute *hsc_handle; + +static ssize_t sony_nc_highspeed_charging_store(struct device *dev, + struct device_attribute *attr, + const char *buffer, size_t count) +{ + unsigned int result; + unsigned long value; + + if (count > 31) + return -EINVAL; + + if (kstrtoul(buffer, 10, &value) || value > 1) + return -EINVAL; + + if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result)) + return -EIO; + + return count; +} + +static ssize_t sony_nc_highspeed_charging_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int result; + + if (sony_call_snc_handle(0x0131, 0x0100, &result)) + return -EIO; + + return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01); +} + +static int sony_nc_highspeed_charging_setup(struct platform_device *pd) +{ + unsigned int result; + + if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) { + /* some models advertise the handle but have no implementation + * for it + */ + pr_info("No High Speed Charging capability found\n"); + return 0; + } + + hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL); + if (!hsc_handle) + return -ENOMEM; + + sysfs_attr_init(&hsc_handle->attr); + hsc_handle->attr.name = "battery_highspeed_charging"; + hsc_handle->attr.mode = S_IRUGO | S_IWUSR; + hsc_handle->show = sony_nc_highspeed_charging_show; + hsc_handle->store = sony_nc_highspeed_charging_store; + + result = 
device_create_file(&pd->dev, hsc_handle); + if (result) { + kfree(hsc_handle); + hsc_handle = NULL; + return result; + } + + return 0; +} + +static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd) +{ + if (hsc_handle) { + device_remove_file(&pd->dev, hsc_handle); + kfree(hsc_handle); + hsc_handle = NULL; + } +} + +/* Touchpad enable/disable */ +struct touchpad_control { + struct device_attribute attr; + int handle; +}; +static struct touchpad_control *tp_ctl; + +static ssize_t sony_nc_touchpad_store(struct device *dev, + struct device_attribute *attr, const char *buffer, size_t count) +{ + unsigned int result; + unsigned long value; + + if (count > 31) + return -EINVAL; + + if (kstrtoul(buffer, 10, &value) || value > 1) + return -EINVAL; + + /* sysfs: 0 disabled, 1 enabled + * EC: 0 enabled, 1 disabled + */ + if (sony_call_snc_handle(tp_ctl->handle, + (!value << 0x10) | 0x100, &result)) + return -EIO; + + return count; +} + +static ssize_t sony_nc_touchpad_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + unsigned int result; + + if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result)) + return -EINVAL; + + return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01)); +} + +static int sony_nc_touchpad_setup(struct platform_device *pd, + unsigned int handle) +{ + int ret = 0; + + tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL); + if (!tp_ctl) + return -ENOMEM; + + tp_ctl->handle = handle; + + sysfs_attr_init(&tp_ctl->attr.attr); + tp_ctl->attr.attr.name = "touchpad"; + tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR; + tp_ctl->attr.show = sony_nc_touchpad_show; + tp_ctl->attr.store = sony_nc_touchpad_store; + + ret = device_create_file(&pd->dev, &tp_ctl->attr); + if (ret) { + kfree(tp_ctl); + tp_ctl = NULL; + } + + return ret; +} + +static void sony_nc_touchpad_cleanup(struct platform_device *pd) +{ + if (tp_ctl) { + device_remove_file(&pd->dev, &tp_ctl->attr); + kfree(tp_ctl); + tp_ctl = NULL; + } } static void sony_nc_backlight_ng_read_limits(int handle, struct sony_backlight_props *props) { - int offset; - acpi_status status; - u8 brlvl, i; + u64 offset; + int i; u8 min = 0xff, max = 0x00; - struct acpi_object_list params; - union acpi_object in_obj; - union acpi_object *lvl_enum; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + unsigned char buffer[32] = { 0 }; props->handle = handle; props->offset = 0; @@ -1583,50 +2486,31 @@ static void sony_nc_backlight_ng_read_limits(int handle, /* try to read the boundaries from ACPI tables, if we fail the above * defaults should be reasonable */ - params.count = 1; - params.pointer = &in_obj; - in_obj.type = ACPI_TYPE_INTEGER; - in_obj.integer.value = offset; - status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", ¶ms, - &buffer); - if (ACPI_FAILURE(status)) + i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer, + 32); + if (i < 0) return; - lvl_enum = (union acpi_object *) buffer.pointer; - if (!lvl_enum) { - pr_err("No SN06 return object."); - return; - } - if (lvl_enum->type != ACPI_TYPE_BUFFER) { - pr_err("Invalid SN06 return object 0x%.2x\n", - lvl_enum->type); - goto out_invalid; - } - /* the buffer lists brightness levels available, brightness levels are - * from 0 to 8 in the array, other values are used by ALS control. + * from position 0 to 8 in the array, other values are used by ALS + * control. 
*/ - for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { + for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) { - brlvl = *(lvl_enum->buffer.pointer + i); - dprintk("Brightness level: %d\n", brlvl); + dprintk("Brightness level: %d\n", buffer[i]); - if (!brlvl) + if (!buffer[i]) break; - if (brlvl > max) - max = brlvl; - if (brlvl < min) - min = brlvl; + if (buffer[i] > max) + max = buffer[i]; + if (buffer[i] < min) + min = buffer[i]; } props->offset = min; props->maxlvl = max; dprintk("Brightness levels: min=%d max=%d\n", props->offset, props->maxlvl); - -out_invalid: - kfree(buffer.pointer); - return; } static void sony_nc_backlight_setup(void) @@ -1715,28 +2599,25 @@ static int sony_nc_add(struct acpi_device *device) if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { - if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) + int arg = 1; + if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL)) dprintk("ECON Method failed\n"); } if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", &handle))) { dprintk("Doing SNC setup\n"); + /* retrieve the available handles */ result = sony_nc_handles_setup(sony_pf_device); - if (result) - goto outpresent; - result = sony_nc_kbd_backlight_setup(sony_pf_device); - if (result) - goto outsnc; - sony_nc_function_setup(device); - sony_nc_rfkill_setup(device); + if (!result) + sony_nc_function_setup(device, sony_pf_device); } /* setup input devices and helper fifo */ result = sony_laptop_setup_input(device); if (result) { pr_err("Unable to create input devices\n"); - goto outkbdbacklight; + goto outsnc; } if (acpi_video_backlight_support()) { @@ -1794,10 +2675,8 @@ static int sony_nc_add(struct acpi_device *device) sony_laptop_remove_input(); - outkbdbacklight: - sony_nc_kbd_backlight_cleanup(sony_pf_device); - outsnc: + sony_nc_function_cleanup(sony_pf_device); sony_nc_handles_cleanup(sony_pf_device); outpresent: @@ -1820,11 +2699,10 @@ static int sony_nc_remove(struct acpi_device *device, int type) device_remove_file(&sony_pf_device->dev, &item->devattr); } - sony_nc_kbd_backlight_cleanup(sony_pf_device); + sony_nc_function_cleanup(sony_pf_device); sony_nc_handles_cleanup(sony_pf_device); sony_pf_remove(); sony_laptop_remove_input(); - sony_nc_rfkill_cleanup(); dprintk(SONY_NC_DRIVER_NAME " removed.\n"); return 0; @@ -2437,7 +3315,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev, if (count > 31) return -EINVAL; - value = simple_strtoul(buffer, NULL, 10); + if (kstrtoul(buffer, 10, &value)) + return -EINVAL; + mutex_lock(&spic_dev.lock); __sony_pic_set_wwanpower(value); mutex_unlock(&spic_dev.lock); @@ -2474,7 +3354,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev, if (count > 31) return -EINVAL; - value = simple_strtoul(buffer, NULL, 10); + if (kstrtoul(buffer, 10, &value)) + return -EINVAL; + mutex_lock(&spic_dev.lock); __sony_pic_set_bluetoothpower(value); mutex_unlock(&spic_dev.lock); @@ -2513,7 +3395,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev, if (count > 31) return -EINVAL; - value = simple_strtoul(buffer, NULL, 10); + if (kstrtoul(buffer, 10, &value)) + return -EINVAL; + if (sony_pic_set_fanspeed(value)) return -EIO; @@ -2671,7 +3555,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd, ret = -EIO; break; } - if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) { + if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, + &value)) { ret = -EIO; break; } @@ -2688,8 +3573,9 @@ static long sonypi_misc_ioctl(struct file *fp, 
unsigned int cmd, ret = -EFAULT; break; } - if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", - (val8 >> 5) + 1, NULL)) { + value = (val8 >> 5) + 1; + if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value, + NULL)) { ret = -EIO; break; } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index d68c0002f4a2..8b5610d88418 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* Do not issue duplicate brightness change events to * userspace. tpacpi_detect_brightness_capabilities() must have * been called before this point */ - if (tp_features.bright_acpimode && acpi_video_backlight_support()) { + if (acpi_video_backlight_support()) { pr_info("This ThinkPad has standard ACPI backlight " "brightness control, supported by the ACPI " "video driver\n"); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 57787d87d9a4..dab10f6edcd4 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -95,6 +95,7 @@ MODULE_LICENSE("GPL"); /* registers */ #define HCI_FAN 0x0004 +#define HCI_TR_BACKLIGHT 0x0005 #define HCI_SYSTEM_EVENT 0x0016 #define HCI_VIDEO_OUT 0x001c #define HCI_HOTKEY_EVENT 0x001e @@ -134,6 +135,7 @@ struct toshiba_acpi_dev { unsigned int system_event_supported:1; unsigned int ntfy_supported:1; unsigned int info_supported:1; + unsigned int tr_backlight_supported:1; struct mutex mutex; }; @@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = { .poll = bt_rfkill_poll, }; +static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled) +{ + u32 hci_result; + u32 status; + + hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result); + *enabled = !status; + return hci_result == HCI_SUCCESS ? 0 : -EIO; +} + +static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable) +{ + u32 hci_result; + u32 value = !enable; + + hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result); + return hci_result == HCI_SUCCESS ? 
0 : -EIO; +} + static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; -static int get_lcd(struct backlight_device *bd) +static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) { - struct toshiba_acpi_dev *dev = bl_get_data(bd); u32 hci_result; u32 value; + int brightness = 0; + + if (dev->tr_backlight_supported) { + bool enabled; + int ret = get_tr_backlight_status(dev, &enabled); + if (ret) + return ret; + if (enabled) + return 0; + brightness++; + } hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result); if (hci_result == HCI_SUCCESS) - return (value >> HCI_LCD_BRIGHTNESS_SHIFT); + return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT); return -EIO; } +static int get_lcd_brightness(struct backlight_device *bd) +{ + struct toshiba_acpi_dev *dev = bl_get_data(bd); + return __get_lcd_brightness(dev); +} + static int lcd_proc_show(struct seq_file *m, void *v) { struct toshiba_acpi_dev *dev = m->private; int value; + int levels; if (!dev->backlight_dev) return -ENODEV; - value = get_lcd(dev->backlight_dev); + levels = dev->backlight_dev->props.max_brightness + 1; + value = get_lcd_brightness(dev->backlight_dev); if (value >= 0) { seq_printf(m, "brightness: %d\n", value); - seq_printf(m, "brightness_levels: %d\n", - HCI_LCD_BRIGHTNESS_LEVELS); + seq_printf(m, "brightness_levels: %d\n", levels); return 0; } @@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file) return single_open(file, lcd_proc_show, PDE(inode)->data); } -static int set_lcd(struct toshiba_acpi_dev *dev, int value) +static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) { u32 hci_result; + if (dev->tr_backlight_supported) { + bool enable = !value; + int ret = set_tr_backlight_status(dev, enable); + if (ret) + return ret; + if (value) + value--; + } + value = value << HCI_LCD_BRIGHTNESS_SHIFT; hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result); return hci_result == HCI_SUCCESS ? 
0 : -EIO; @@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value) static int set_lcd_status(struct backlight_device *bd) { struct toshiba_acpi_dev *dev = bl_get_data(bd); - return set_lcd(dev, bd->props.brightness); + return set_lcd_brightness(dev, bd->props.brightness); } static ssize_t lcd_proc_write(struct file *file, const char __user *buf, @@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf, size_t len; int value; int ret; + int levels = dev->backlight_dev->props.max_brightness + 1; len = min(count, sizeof(cmd) - 1); if (copy_from_user(cmd, buf, len)) @@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf, cmd[len] = '\0'; if (sscanf(cmd, " brightness : %i", &value) == 1 && - value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) { - ret = set_lcd(dev, value); + value >= 0 && value < levels) { + ret = set_lcd_brightness(dev, value); if (ret == 0) ret = count; } else { @@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev) } static const struct backlight_ops toshiba_backlight_data = { - .get_brightness = get_lcd, - .update_status = set_lcd_status, + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = get_lcd_brightness, + .update_status = set_lcd_status, }; static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str, @@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) return error; } +static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev) +{ + struct backlight_properties props; + int brightness; + int ret; + bool enabled; + + /* + * Some machines don't support the backlight methods at all, and + * others support it read-only. Either of these is pretty useless, + * so only register the backlight device if the backlight method + * supports both reads and writes. 
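A minimal sketch of the level mapping implied by __get_lcd_brightness() and set_lcd_brightness() above when tr_backlight_supported is set; the helper name is hypothetical. User level 0 selects transflective mode, and user level N (N >= 1) disables it and programs HCI brightness N - 1.

        /* Illustrative only; restates the logic above. */
        static void map_user_brightness(int level, bool *tr_enabled, int *hci_level)
        {
                *tr_enabled = (level == 0);
                *hci_level = level ? level - 1 : 0;
        }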
+ */ + brightness = __get_lcd_brightness(dev); + if (brightness < 0) + return 0; + ret = set_lcd_brightness(dev, brightness); + if (ret) { + pr_debug("Backlight method is read-only, disabling backlight support\n"); + return 0; + } + + /* Determine whether or not BIOS supports transflective backlight */ + ret = get_tr_backlight_status(dev, &enabled); + dev->tr_backlight_supported = !ret; + + memset(&props, 0, sizeof(props)); + props.type = BACKLIGHT_PLATFORM; + props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; + + /* adding an extra level and having 0 change to transflective mode */ + if (dev->tr_backlight_supported) + props.max_brightness++; + + dev->backlight_dev = backlight_device_register("toshiba", + &dev->acpi_dev->dev, + dev, + &toshiba_backlight_data, + &props); + if (IS_ERR(dev->backlight_dev)) { + ret = PTR_ERR(dev->backlight_dev); + pr_err("Could not register toshiba backlight device\n"); + dev->backlight_dev = NULL; + return ret; + } + + dev->backlight_dev->props.brightness = brightness; + return 0; +} + static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type) { struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); @@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev) u32 dummy; bool bt_present; int ret = 0; - struct backlight_properties props; if (toshiba_acpi) return -EBUSY; @@ -1104,22 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev) mutex_init(&dev->mutex); - memset(&props, 0, sizeof(props)); - props.type = BACKLIGHT_PLATFORM; - props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; - dev->backlight_dev = backlight_device_register("toshiba", - &acpi_dev->dev, - dev, - &toshiba_backlight_data, - &props); - if (IS_ERR(dev->backlight_dev)) { - ret = PTR_ERR(dev->backlight_dev); - - pr_err("Could not register toshiba backlight device\n"); - dev->backlight_dev = NULL; + ret = toshiba_acpi_setup_backlight(dev); + if (ret) goto error; - } - dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev); /* Register rfkill switch for Bluetooth */ if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) { diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c index 41781ed8301c..b57ad8641480 100644 --- a/drivers/platform/x86/xo1-rfkill.c +++ b/drivers/platform/x86/xo1-rfkill.c @@ -15,15 +15,26 @@ #include <asm/olpc.h> +static bool card_blocked; + static int rfkill_set_block(void *data, bool blocked) { unsigned char cmd; + int r; + + if (blocked == card_blocked) + return 0; + if (blocked) cmd = EC_WLAN_ENTER_RESET; else cmd = EC_WLAN_LEAVE_RESET; - return olpc_ec_cmd(cmd, NULL, 0, NULL, 0); + r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0); + if (r == 0) + card_blocked = blocked; + + return r; } static const struct rfkill_ops rfkill_ops = { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d5f56edb8ef..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev) static u32 rtc_handler(void *context) { + struct device *dev = context; + + pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); acpi_disable_event(ACPI_EVENT_RTC, 0); return ACPI_INTERRUPT_HANDLED; } -static inline void rtc_wake_setup(void) +static inline void rtc_wake_setup(struct device *dev) { - acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); /* * After the RTC handler is installed, the Fixed_RTC 
event should * be disabled. Only when the RTC alarm is set will it be enabled. @@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev) if (acpi_disabled) return; - rtc_wake_setup(); + rtc_wake_setup(dev); acpi_rtc_info.wake_on = rtc_wake_on; acpi_rtc_info.wake_off = rtc_wake_off; diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c index 4e7ef0e6b79c..d46764b5aaba 100644 --- a/drivers/staging/ramster/zcache-main.c +++ b/drivers/staging/ramster/zcache-main.c @@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) return oid; } -static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, /* returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) */ -static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored) } static struct frontswap_ops zcache_frontswap_ops = { - .put_page = zcache_frontswap_put_page, - .get_page = zcache_frontswap_get_page, + .store = zcache_frontswap_store, + .load = zcache_frontswap_load, .invalidate_page = zcache_frontswap_flush_page, .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2734dacacbaf..784c796b9848 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1; * Swizzling increases objects per swaptype, increasing tmem concurrency * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from - * frontswap_get_page(), but has side-effects. Hence using 8. + * frontswap_load(), but has side-effects. Hence using 8. */ #define SWIZ_BITS 8 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) @@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) return oid; } -static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, /* returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) 
*/ -static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored) } static struct frontswap_ops zcache_frontswap_ops = { - .put_page = zcache_frontswap_put_page, - .get_page = zcache_frontswap_get_page, + .store = zcache_frontswap_store, + .load = zcache_frontswap_load, .invalidate_page = zcache_frontswap_flush_page, .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 35819e312624..6cc4358f68c1 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state, if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); - tty_lock(tty); + tty_lock(); tmp.line = tty->index; tmp.port = state->port; tmp.flags = state->tport.flags; @@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state, tmp.close_delay = state->tport.close_delay; tmp.closing_wait = state->tport.closing_wait; tmp.custom_divisor = state->custom_divisor; - tty_unlock(tty); + tty_unlock(); if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) return -EFAULT; return 0; @@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state, if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; - tty_lock(tty); + tty_lock(); change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) || new_serial.custom_divisor != state->custom_divisor; if (new_serial.irq || new_serial.port != state->port || new_serial.xmit_fifo_size != state->xmit_fifo_size) { - tty_unlock(tty); + tty_unlock(); return -EINVAL; } @@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state, (new_serial.xmit_fifo_size != state->xmit_fifo_size) || ((new_serial.flags & ~ASYNC_USR_MASK) != (port->flags & ~ASYNC_USR_MASK))) { - tty_unlock(tty); + tty_unlock(); return -EPERM; } port->flags = ((port->flags & ~ASYNC_USR_MASK) | @@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state, } if (new_serial.baud_base < 9600) { - tty_unlock(tty); + tty_unlock(); return -EINVAL; } @@ -1116,7 +1116,7 @@ check_and_exit: } } else retval = startup(tty, state); - tty_unlock(tty); + tty_unlock(); return retval; } diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index 6984e1a2686a..e61cabdd69df 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp) * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) { - wait_event_interruptible_tty(tty, info->port.close_wait, + wait_event_interruptible_tty(info->port.close_wait, !(info->port.flags & ASYNC_CLOSING)); return (info->port.flags & ASYNC_HUP_NOTIFY) ? 
-EAGAIN: -ERESTARTSYS; } diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index 656ad93bbc96..5c6c31459a2f 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, TRACE_L("read()"); - /* FIXME: should use a private lock */ - tty_lock(tty); + tty_lock(); pClient = findClient(pInfo, task_pid(current)); if (pClient) { @@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, goto unlock; } /* block until there is a message: */ - wait_event_interruptible_tty(tty, pInfo->read_wait, + wait_event_interruptible_tty(pInfo->read_wait, (pMsg = remove_msg(pInfo, pClient))); } @@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, } ret = -EPERM; unlock: - tty_unlock(tty); + tty_unlock(); return ret; } @@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file, pHeader->locks = 0; pHeader->owner = NULL; - tty_lock(tty); + tty_lock(); pClient = findClient(pInfo, task_pid(current)); if (pClient) { @@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file, add_tx_queue(pInfo, pHeader); trigger_transmit(pInfo); - tty_unlock(tty); + tty_unlock(); return 0; } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 65c7c62c7aae..5505ffc91da4 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp) wake_up_interruptible(&tty->read_wait); wake_up_interruptible(&tty->write_wait); tty->packet = 0; - /* Review - krefs on tty_link ?? */ if (!tty->link) return; tty->link->packet = 0; @@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&devpts_mutex); } #endif - tty_unlock(tty); + tty_unlock(); tty_vhangup(tty->link); - tty_lock(tty); + tty_lock(); } } @@ -623,27 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp) return retval; /* find a device that is not in use. 
*/ - mutex_lock(&devpts_mutex); + tty_lock(); index = devpts_new_index(inode); + tty_unlock(); if (index < 0) { retval = index; goto err_file; } - mutex_unlock(&devpts_mutex); - mutex_lock(&tty_mutex); + mutex_lock(&devpts_mutex); tty = tty_init_dev(ptm_driver, index); + mutex_unlock(&devpts_mutex); + tty_lock(); + mutex_unlock(&tty_mutex); if (IS_ERR(tty)) { retval = PTR_ERR(tty); goto out; } - /* The tty returned here is locked so we can safely - drop the mutex */ - mutex_unlock(&tty_mutex); - set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ tty_add_file(tty, filp); @@ -656,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp) if (retval) goto err_release; - tty_unlock(tty); + tty_unlock(); return 0; err_release: - tty_unlock(tty); + tty_unlock(); tty_release(inode, filp); return retval; out: - mutex_unlock(&tty_mutex); devpts_kill_index(inode, index); + tty_unlock(); err_file: - mutex_unlock(&devpts_mutex); tty_free_file(filp); return retval; } diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index 7264d4d26717..80b6b1b1f725 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp, */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - wait_event_interruptible_tty(tty, info->close_wait, + wait_event_interruptible_tty(info->close_wait, !(info->flags & ASYNC_CLOSING)); #ifdef SERIAL_DO_RESTART if (info->flags & ASYNC_HUP_NOTIFY) @@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp, printk("block_til_ready blocking: ttyS%d, count = %d\n", info->line, info->count); #endif - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } set_current_state(TASK_RUNNING); remove_wait_queue(&info->open_wait, &wait); @@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp) */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - wait_event_interruptible_tty(tty, info->close_wait, + wait_event_interruptible_tty(info->close_wait, !(info->flags & ASYNC_CLOSING)); #ifdef SERIAL_DO_RESTART return ((info->flags & ASYNC_HUP_NOTIFY) ? 
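The serial and pty hunks above all move to the argument-less tty_lock()/tty_unlock() and must still drop the lock explicitly around schedule(), since the big tty mutex is never released implicitly while sleeping. A minimal sketch of that block_til_ready-style wait, with a placeholder wait queue and condition:

        /* Illustrative pattern only, not part of the patch. */
        static void wait_until_ready(wait_queue_head_t *wq, bool (*ready)(void))
        {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue(wq, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (ready() || signal_pending(current))
                                break;
                        tty_unlock();   /* drop the big tty mutex before sleeping */
                        schedule();
                        tty_lock();
                }
                set_current_state(TASK_RUNNING);
                remove_wait_queue(wq, &wait);
        }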
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c index 5ed0daae6564..593d40ad0a6b 100644 --- a/drivers/tty/synclink.c +++ b/drivers/tty/synclink.c @@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, printk("%s(%d):block_til_ready blocking on %s count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } set_current_state(TASK_RUNNING); diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index 45b43f11ca39..aa1debf97cc7 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, } DBGINFO(("%s block_til_ready wait\n", tty->driver->name)); - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } set_current_state(TASK_RUNNING); diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c index 4a1e4f07765b..a3dddc12d2fe 100644 --- a/drivers/tty/synclinkmp.c +++ b/drivers/tty/synclinkmp.c @@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, printk("%s(%d):%s block_til_ready() count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count ); - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } set_current_state(TASK_RUNNING); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 9e930c009bf2..b425c79675ad 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty) put_device(tty->dev); kfree(tty->write_buf); tty_buffer_free_all(tty); - tty->magic = 0xDEADDEAD; kfree(tty); } @@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty) } spin_unlock(&redirect_lock); - tty_lock(tty); + tty_lock(); /* some functions below drop BTM, so we need this bit */ set_bit(TTY_HUPPING, &tty->flags); @@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty) clear_bit(TTY_HUPPING, &tty->flags); tty_ldisc_enable(tty); - tty_unlock(tty); + tty_unlock(); if (f) fput(f); @@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg) { if (tty) { mutex_lock(&tty->atomic_write_lock); - tty_lock(tty); + tty_lock(); if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) { - tty_unlock(tty); + tty_unlock(); tty->ops->write(tty, msg, strlen(msg)); } else - tty_unlock(tty); + tty_unlock(); tty_write_unlock(tty); } return; @@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) } initialize_tty_struct(tty, driver, idx); - tty_lock(tty); retval = tty_driver_install_tty(driver, tty); if (retval < 0) goto err_deinit_tty; @@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; - /* Return the tty locked so that it cannot vanish under the caller */ return tty; err_deinit_tty: - tty_unlock(tty); deinitialize_tty_struct(tty); free_tty_struct(tty); err_module_put: @@ -1430,7 +1426,6 @@ err_module_put: /* call the tty release_tty routine to clean out this slot */ err_release_tty: - tty_unlock(tty); printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, " "clearing slot %d\n", idx); release_tty(tty, idx); @@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp) if (tty_paranoia_check(tty, inode, __func__)) return 0; - tty_lock(tty); + tty_lock(); check_tty_count(tty, __func__); __tty_fasync(-1, filp, 0); @@ -1642,11 
+1637,10 @@ int tty_release(struct inode *inode, struct file *filp) pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER); devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0; - /* Review: parallel close */ o_tty = tty->link; if (tty_release_checks(tty, o_tty, idx)) { - tty_unlock(tty); + tty_unlock(); return 0; } @@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp) if (tty->ops->close) tty->ops->close(tty, filp); - tty_unlock(tty); + tty_unlock(); /* * Sanity check: if tty->count is going to zero, there shouldn't be * any waiters on tty->read_wait or tty->write_wait. We test the @@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp) opens on /dev/tty */ mutex_lock(&tty_mutex); - tty_lock_pair(tty, o_tty); + tty_lock(); tty_closing = tty->count <= 1; o_tty_closing = o_tty && (o_tty->count <= (pty_master ? 1 : 0)); @@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp) printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", __func__, tty_name(tty, buf)); - tty_unlock_pair(tty, o_tty); + tty_unlock(); mutex_unlock(&tty_mutex); schedule(); } @@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp) /* check whether both sides are closing ... */ if (!tty_closing || (o_tty && !o_tty_closing)) { - tty_unlock_pair(tty, o_tty); + tty_unlock(); return 0; } @@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp) tty_ldisc_release(tty, o_tty); /* * The release_tty function takes care of the details of clearing - * the slots and preserving the termios structure. The tty_unlock_pair - * should be safe as we keep a kref while the tty is locked (so the - * unlock never unlocks a freed tty). + * the slots and preserving the termios structure. */ release_tty(tty, idx); - tty_unlock_pair(tty, o_tty); /* Make this pty number available for reallocation */ if (devpts) devpts_kill_index(inode, idx); + tty_unlock(); return 0; } @@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp, * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev. * tty->count should protect the rest. * ->siglock protects ->signal/->sighand - * - * Note: the tty_unlock/lock cases without a ref are only safe due to - * tty_mutex */ static int tty_open(struct inode *inode, struct file *filp) @@ -1927,7 +1916,8 @@ retry_open: retval = 0; mutex_lock(&tty_mutex); - /* This is protected by the tty_mutex */ + tty_lock(); + tty = tty_open_current_tty(device, filp); if (IS_ERR(tty)) { retval = PTR_ERR(tty); @@ -1948,19 +1938,17 @@ retry_open: } if (tty) { - tty_lock(tty); retval = tty_reopen(tty); - if (retval < 0) { - tty_unlock(tty); + if (retval) tty = ERR_PTR(retval); - } - } else /* Returns with the tty_lock held for now */ + } else tty = tty_init_dev(driver, index); mutex_unlock(&tty_mutex); if (driver) tty_driver_kref_put(driver); if (IS_ERR(tty)) { + tty_unlock(); retval = PTR_ERR(tty); goto err_file; } @@ -1989,7 +1977,7 @@ retry_open: printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__, retval, tty->name); #endif - tty_unlock(tty); /* need to call tty_release without BTM */ + tty_unlock(); /* need to call tty_release without BTM */ tty_release(inode, filp); if (retval != -ERESTARTSYS) return retval; @@ -2001,15 +1989,17 @@ retry_open: /* * Need to reset f_op in case a hangup happened. 
*/ + tty_lock(); if (filp->f_op == &hung_up_tty_fops) filp->f_op = &tty_fops; + tty_unlock(); goto retry_open; } - tty_unlock(tty); + tty_unlock(); mutex_lock(&tty_mutex); - tty_lock(tty); + tty_lock(); spin_lock_irq(&current->sighand->siglock); if (!noctty && current->signal->leader && @@ -2017,10 +2007,11 @@ retry_open: tty->session == NULL) __proc_set_tty(current, tty); spin_unlock_irq(&current->sighand->siglock); - tty_unlock(tty); + tty_unlock(); mutex_unlock(&tty_mutex); return 0; err_unlock: + tty_unlock(); mutex_unlock(&tty_mutex); /* after locks to avoid deadlock */ if (!IS_ERR_OR_NULL(driver)) @@ -2103,13 +2094,10 @@ out: static int tty_fasync(int fd, struct file *filp, int on) { - struct tty_struct *tty = file_tty(filp); int retval; - - tty_lock(tty); + tty_lock(); retval = __tty_fasync(fd, filp, on); - tty_unlock(tty); - + tty_unlock(); return retval; } @@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty, tty->pgrp = NULL; tty->overrun_time = jiffies; tty_buffer_init(tty); - mutex_init(&tty->legacy_mutex); mutex_init(&tty->termios_mutex); mutex_init(&tty->ldisc_mutex); init_waitqueue_head(&tty->write_wait); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index ba8be396a621..9911eb6b34cd 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) if (IS_ERR(new_ldisc)) return PTR_ERR(new_ldisc); - tty_lock(tty); + tty_lock(); /* * We need to look at the tty locking here for pty/tty pairs * when both sides try to change in parallel. @@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) */ if (tty->ldisc->ops->num == ldisc) { - tty_unlock(tty); + tty_unlock(); tty_ldisc_put(new_ldisc); return 0; } - tty_unlock(tty); + tty_unlock(); /* * Problem: What do we do if this blocks ? * We could deadlock here @@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_wait_until_sent(tty, 0); - tty_lock(tty); + tty_lock(); mutex_lock(&tty->ldisc_mutex); /* @@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) { mutex_unlock(&tty->ldisc_mutex); - tty_unlock(tty); + tty_unlock(); wait_event(tty_ldisc_wait, test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0); - tty_lock(tty); + tty_lock(); mutex_lock(&tty->ldisc_mutex); } @@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) o_ldisc = tty->ldisc; - tty_unlock(tty); + tty_unlock(); /* * Make sure we don't change while someone holds a * reference to the line discipline. 
The TTY_LDISC bit @@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) retval = tty_ldisc_wait_idle(tty, 5 * HZ); - tty_lock(tty); + tty_lock(); mutex_lock(&tty->ldisc_mutex); /* handle wait idle failure locked */ @@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) clear_bit(TTY_LDISC_CHANGING, &tty->flags); mutex_unlock(&tty->ldisc_mutex); tty_ldisc_put(new_ldisc); - tty_unlock(tty); + tty_unlock(); return -EIO; } @@ -708,7 +708,7 @@ enable: if (o_work) schedule_work(&o_tty->buf.work); mutex_unlock(&tty->ldisc_mutex); - tty_unlock(tty); + tty_unlock(); return retval; } @@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty) * need to wait for another function taking the BTM */ clear_bit(TTY_LDISC, &tty->flags); - tty_unlock(tty); + tty_unlock(); cancel_work_sync(&tty->buf.work); mutex_unlock(&tty->ldisc_mutex); retry: - tty_lock(tty); + tty_lock(); mutex_lock(&tty->ldisc_mutex); /* At this point we have a closed ldisc and we want to @@ -831,7 +831,7 @@ retry: if (atomic_read(&tty->ldisc->users) != 1) { char cur_n[TASK_COMM_LEN], tty_n[64]; long timeout = 3 * HZ; - tty_unlock(tty); + tty_unlock(); while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) { timeout = MAX_SCHEDULE_TIMEOUT; @@ -894,23 +894,6 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty) tty_ldisc_enable(tty); return 0; } - -static void tty_ldisc_kill(struct tty_struct *tty) -{ - mutex_lock(&tty->ldisc_mutex); - /* - * Now kill off the ldisc - */ - tty_ldisc_close(tty, tty->ldisc); - tty_ldisc_put(tty->ldisc); - /* Force an oops if we mess this up */ - tty->ldisc = NULL; - - /* Ensure the next open requests the N_TTY ldisc */ - tty_set_termios_ldisc(tty, N_TTY); - mutex_unlock(&tty->ldisc_mutex); -} - /** * tty_ldisc_release - release line discipline * @tty: tty being shut down @@ -929,19 +912,27 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty) * race with the set_ldisc code path. */ - tty_unlock_pair(tty, o_tty); + tty_unlock(); tty_ldisc_halt(tty); tty_ldisc_flush_works(tty); - if (o_tty) { - tty_ldisc_halt(o_tty); - tty_ldisc_flush_works(o_tty); - } - tty_lock_pair(tty, o_tty); + tty_lock(); + mutex_lock(&tty->ldisc_mutex); + /* + * Now kill off the ldisc + */ + tty_ldisc_close(tty, tty->ldisc); + tty_ldisc_put(tty->ldisc); + /* Force an oops if we mess this up */ + tty->ldisc = NULL; + + /* Ensure the next open requests the N_TTY ldisc */ + tty_set_termios_ldisc(tty, N_TTY); + mutex_unlock(&tty->ldisc_mutex); - tty_ldisc_kill(tty); + /* This will need doing differently if we need to lock */ if (o_tty) - tty_ldisc_kill(o_tty); + tty_ldisc_release(o_tty, NULL); /* And the memory resources remaining (buffers, termios) will be disposed of when the kref hits zero */ diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c index 67feac9e6ebb..9ff986c32a21 100644 --- a/drivers/tty/tty_mutex.c +++ b/drivers/tty/tty_mutex.c @@ -4,70 +4,29 @@ #include <linux/semaphore.h> #include <linux/sched.h> -/* Legacy tty mutex glue */ - -enum { - TTY_MUTEX_NORMAL, - TTY_MUTEX_NESTED, -}; +/* + * The 'big tty mutex' + * + * This mutex is taken and released by tty_lock() and tty_unlock(), + * replacing the older big kernel lock. + * It can no longer be taken recursively, and does not get + * released implicitly while sleeping. + * + * Don't use in new code. + */ +static DEFINE_MUTEX(big_tty_mutex); /* * Getting the big tty mutex. 
*/ - -static void __lockfunc tty_lock_nested(struct tty_struct *tty, - unsigned int subclass) +void __lockfunc tty_lock(void) { - if (tty->magic != TTY_MAGIC) { - printk(KERN_ERR "L Bad %p\n", tty); - WARN_ON(1); - return; - } - tty_kref_get(tty); - mutex_lock_nested(&tty->legacy_mutex, subclass); -} - -void __lockfunc tty_lock(struct tty_struct *tty) -{ - return tty_lock_nested(tty, TTY_MUTEX_NORMAL); + mutex_lock(&big_tty_mutex); } EXPORT_SYMBOL(tty_lock); -void __lockfunc tty_unlock(struct tty_struct *tty) +void __lockfunc tty_unlock(void) { - if (tty->magic != TTY_MAGIC) { - printk(KERN_ERR "U Bad %p\n", tty); - WARN_ON(1); - return; - } - mutex_unlock(&tty->legacy_mutex); - tty_kref_put(tty); + mutex_unlock(&big_tty_mutex); } EXPORT_SYMBOL(tty_unlock); - -/* - * Getting the big tty mutex for a pair of ttys with lock ordering - * On a non pty/tty pair tty2 can be NULL which is just fine. - */ -void __lockfunc tty_lock_pair(struct tty_struct *tty, - struct tty_struct *tty2) -{ - if (tty < tty2) { - tty_lock(tty); - tty_lock_nested(tty2, TTY_MUTEX_NESTED); - } else { - if (tty2 && tty2 != tty) - tty_lock(tty2); - tty_lock_nested(tty, TTY_MUTEX_NESTED); - } -} -EXPORT_SYMBOL(tty_lock_pair); - -void __lockfunc tty_unlock_pair(struct tty_struct *tty, - struct tty_struct *tty2) -{ - tty_unlock(tty); - if (tty2 && tty2 != tty) - tty_unlock(tty2); -} -EXPORT_SYMBOL(tty_unlock_pair); diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index d9cca95a5452..bf6e238146ae 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port, /* block if port is in the process of being closed */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { - wait_event_interruptible_tty(tty, port->close_wait, + wait_event_interruptible_tty(port->close_wait, !(port->flags & ASYNC_CLOSING)); if (port->flags & ASYNC_HUP_NOTIFY) return -EAGAIN; @@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port, retval = -ERESTARTSYS; break; } - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } finish_wait(&port->open_wait, &wait); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index a290be51a1f4..0217f7415ef5 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -2210,7 +2210,7 @@ config FB_XILINX config FB_COBALT tristate "Cobalt server LCD frame buffer support" - depends on FB && MIPS_COBALT + depends on FB && (MIPS_COBALT || MIPS_SEAD3) config FB_SH7760 bool "SH7760/SH7763/SH7720/SH7721 LCDC support" @@ -2382,6 +2382,39 @@ config FB_BROADSHEET and could also have been called by other names when coupled with a bridge adapter. +config FB_AUO_K190X + tristate "AUO-K190X EPD controller support" + depends on FB + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select FB_SYS_FOPS + select FB_DEFERRED_IO + help + Provides support for epaper controllers from the K190X series + of AUO. These controllers can be used to drive epaper displays + from Sipix. + + This option enables the common support, shared by the individual + controller drivers. You will also have to enable the driver + for the controller type used in your device. + +config FB_AUO_K1900 + tristate "AUO-K1900 EPD controller support" + depends on FB && FB_AUO_K190X + help + This driver implements support for the AUO K1900 epd-controller. + This controller can drive Sipix epaper displays but can only do + serial updates, reducing the number of possible frames per second. 
+ +config FB_AUO_K1901 + tristate "AUO-K1901 EPD controller support" + depends on FB && FB_AUO_K190X + help + This driver implements support for the AUO K1901 epd-controller. + This controller can drive Sipix epaper displays and supports + concurrent updates, making higher frames per second possible. + config FB_JZ4740 tristate "JZ4740 LCD framebuffer support" depends on FB && MACH_JZ4740 diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 9356add945b3..ee8dafb69e36 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o obj-$(CONFIG_FB_MAXINE) += maxinefb.o obj-$(CONFIG_FB_METRONOME) += metronomefb.o obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o +obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o +obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o +obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o obj-$(CONFIG_FB_SH7760) += sh7760fb.o obj-$(CONFIG_FB_IMX) += imxfb.o diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c new file mode 100644 index 000000000000..c36cf961dcb2 --- /dev/null +++ b/drivers/video/auo_k1900fb.c @@ -0,0 +1,198 @@ +/* + * auok190xfb.c -- FB driver for AUO-K1900 controllers + * + * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de> + * + * based on broadsheetfb.c + * + * Copyright (C) 2008, Jaya Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. + * + * This driver is written to be used with the AUO-K1900 display controller. + * + * It is intended to be architecture independent. A board specific driver + * must be used to perform all the physical IO interactions. 
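As the comment above says, the controller driver only drives an abstract I80 interface; the machine-specific glue arrives as platform_data of the "auo_k1900fb" (or "auo_k1901fb") platform device. A hedged sketch of such a board structure, using only the callbacks and GPIO fields visible in this series (helper names and GPIO macros are hypothetical; the prototypes come from <video/auo_k190xfb.h>):

        /* Hypothetical board file fragment, for illustration only. */
        static struct auok190x_board myboard_epd_board = {
                .set_ctl        = myboard_set_ctl,      /* toggle the I80 WR/DC/CS/OE lines */
                .set_hdb        = myboard_set_hdb,      /* drive the 16-bit data bus */
                .get_hdb        = myboard_get_hdb,      /* sample the 16-bit data bus */
                .wait_for_rdy   = myboard_wait_for_rdy, /* block until the controller is idle */
                .gpio_nrst      = MYBOARD_GPIO_EPD_NRST,
                .gpio_nsleep    = MYBOARD_GPIO_EPD_NSLEEP,
        };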
+ * + * The controller supports different update modes: + * mode0+1 16 step gray (4bit) + * mode2 4 step gray (2bit) - FIXME: add strange refresh + * mode3 2 step gray (1bit) - FIXME: add strange refresh + * mode4 handwriting mode (strange behaviour) + * mode5 automatic selection of update mode + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/list.h> +#include <linux/firmware.h> +#include <linux/gpio.h> +#include <linux/pm_runtime.h> + +#include <video/auo_k190xfb.h> + +#include "auo_k190x.h" + +/* + * AUO-K1900 specific commands + */ + +#define AUOK1900_CMD_PARTIALDISP 0x1001 +#define AUOK1900_CMD_ROTATION 0x1006 +#define AUOK1900_CMD_LUT_STOP 0x1009 + +#define AUOK1900_INIT_TEMP_AVERAGE (1 << 13) +#define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10) +#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2) + +static void auok1900_init(struct auok190xfb_par *par) +{ + struct auok190x_board *board = par->board; + u16 init_param = 0; + + init_param |= AUOK1900_INIT_TEMP_AVERAGE; + init_param |= AUOK1900_INIT_ROTATE(par->rotation); + init_param |= AUOK190X_INIT_INVERSE_WHITE; + init_param |= AUOK190X_INIT_FORMAT0; + init_param |= AUOK1900_INIT_RESOLUTION(par->resolution); + init_param |= AUOK190X_INIT_SHIFT_RIGHT; + + auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param); + + /* let the controller finish */ + board->wait_for_rdy(par); +} + +static void auok1900_update_region(struct auok190xfb_par *par, int mode, + u16 y1, u16 y2) +{ + struct device *dev = par->info->device; + unsigned char *buf = (unsigned char *)par->info->screen_base; + int xres = par->info->var.xres; + u16 args[4]; + + pm_runtime_get_sync(dev); + + mutex_lock(&(par->io_lock)); + + /* y1 and y2 must be a multiple of 2 so drop the lowest bit */ + y1 &= 0xfffe; + y2 &= 0xfffe; + + dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n", + 1, y1+1, xres, y2-y1, mode); + + /* to FIX handle different partial update modes */ + args[0] = mode | 1; + args[1] = y1 + 1; + args[2] = xres; + args[3] = y2 - y1; + buf += y1 * xres; + auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args, + ((y2 - y1) * xres)/2, (u16 *) buf); + auok190x_send_command(par, AUOK190X_CMD_DATA_STOP); + + par->update_cnt++; + + mutex_unlock(&(par->io_lock)); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); +} + +static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par, + u16 y1, u16 y2) +{ + int mode; + + if (par->update_mode < 0) { + mode = AUOK190X_UPDATE_MODE(1); + par->last_mode = -1; + } else { + mode = AUOK190X_UPDATE_MODE(par->update_mode); + par->last_mode = par->update_mode; + } + + if (par->flash) + mode |= AUOK190X_UPDATE_NONFLASH; + + auok1900_update_region(par, mode, y1, y2); +} + +static void auok1900fb_dpy_update(struct auok190xfb_par *par) +{ + int mode; + + if (par->update_mode < 0) { + mode = AUOK190X_UPDATE_MODE(0); + par->last_mode = -1; + } else { + mode = AUOK190X_UPDATE_MODE(par->update_mode); + par->last_mode = par->update_mode; + } + + if (par->flash) + mode |= AUOK190X_UPDATE_NONFLASH; + + auok1900_update_region(par, mode, 0, par->info->var.yres); + par->update_cnt = 0; +} + +static bool auok1900fb_need_refresh(struct auok190xfb_par *par) +{ + return (par->update_cnt > 10); +} + +static int __devinit 
auok1900fb_probe(struct platform_device *pdev) +{ + struct auok190x_init_data init; + struct auok190x_board *board; + + /* pick up board specific routines */ + board = pdev->dev.platform_data; + if (!board) + return -EINVAL; + + /* fill temporary init struct for common init */ + init.id = "auo_k1900fb"; + init.board = board; + init.update_partial = auok1900fb_dpy_update_pages; + init.update_all = auok1900fb_dpy_update; + init.need_refresh = auok1900fb_need_refresh; + init.init = auok1900_init; + + return auok190x_common_probe(pdev, &init); +} + +static int __devexit auok1900fb_remove(struct platform_device *pdev) +{ + return auok190x_common_remove(pdev); +} + +static struct platform_driver auok1900fb_driver = { + .probe = auok1900fb_probe, + .remove = __devexit_p(auok1900fb_remove), + .driver = { + .owner = THIS_MODULE, + .name = "auo_k1900fb", + .pm = &auok190x_pm, + }, +}; +module_platform_driver(auok1900fb_driver); + +MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller"); +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c new file mode 100644 index 000000000000..1c054c18616e --- /dev/null +++ b/drivers/video/auo_k1901fb.c @@ -0,0 +1,251 @@ +/* + * auok190xfb.c -- FB driver for AUO-K1901 controllers + * + * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de> + * + * based on broadsheetfb.c + * + * Copyright (C) 2008, Jaya Kumar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. + * + * This driver is written to be used with the AUO-K1901 display controller. + * + * It is intended to be architecture independent. A board specific driver + * must be used to perform all the physical IO interactions. + * + * The controller supports different update modes: + * mode0+1 16 step gray (4bit) + * mode2+3 4 step gray (2bit) + * mode4+5 2 step gray (1bit) + * - mode4 is described as "without LUT" + * mode7 automatic selection of update mode + * + * The most interesting difference to the K1900 is the ability to do screen + * updates in an asynchronous fashion. Where the K1900 needs to wait for the + * current update to complete, the K1901 can process later updates already. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/list.h> +#include <linux/firmware.h> +#include <linux/gpio.h> +#include <linux/pm_runtime.h> + +#include <video/auo_k190xfb.h> + +#include "auo_k190x.h" + +/* + * AUO-K1901 specific commands + */ + +#define AUOK1901_CMD_LUT_INTERFACE 0x0005 +#define AUOK1901_CMD_DMA_START 0x1001 +#define AUOK1901_CMD_CURSOR_START 0x1007 +#define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP +#define AUOK1901_CMD_DDMA_START 0x1009 + +#define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14) +#define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14) +#define AUOK1901_INIT_SINGLE_GATE (0 << 13) +#define AUOK1901_INIT_DOUBLE_GATE (1 << 13) + +/* Bits to pixels + * Mode 15-12 11-8 7-4 3-0 + * format2 2 T 1 T + * format3 1 T 2 T + * format4 T 2 T 1 + * format5 T 1 T 2 + * + * halftone modes: + * format6 2 2 1 1 + * format7 1 1 2 2 + */ +#define AUOK1901_INIT_FORMAT2 (1 << 7) +#define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6)) +#define AUOK1901_INIT_FORMAT4 (1 << 8) +#define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6)) +#define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7)) +#define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6)) + +/* res[4] to bit 10 + * res[3-0] to bits 5-2 + */ +#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \ + | ((_res & 0xf) << 2)) + +/* + * portrait / landscape orientation in AUOK1901_CMD_DMA_START + */ +#define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13) + +/* + * equivalent to 1 << 11, needs the ~ to have same rotation like K1900 + */ +#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10) + +static void auok1901_init(struct auok190xfb_par *par) +{ + struct auok190x_board *board = par->board; + u16 init_param = 0; + + init_param |= AUOK190X_INIT_INVERSE_WHITE; + init_param |= AUOK190X_INIT_FORMAT0; + init_param |= AUOK1901_INIT_RESOLUTION(par->resolution); + init_param |= AUOK190X_INIT_SHIFT_LEFT; + + auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param); + + /* let the controller finish */ + board->wait_for_rdy(par); +} + +static void auok1901_update_region(struct auok190xfb_par *par, int mode, + u16 y1, u16 y2) +{ + struct device *dev = par->info->device; + unsigned char *buf = (unsigned char *)par->info->screen_base; + int xres = par->info->var.xres; + u16 args[5]; + + pm_runtime_get_sync(dev); + + mutex_lock(&(par->io_lock)); + + /* y1 and y2 must be a multiple of 2 so drop the lowest bit */ + y1 &= 0xfffe; + y2 &= 0xfffe; + + dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n", + 1, y1+1, xres, y2-y1, mode); + + /* K1901: first transfer the region data */ + args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1; + args[1] = y1 + 1; + args[2] = xres; + args[3] = y2 - y1; + buf += y1 * xres; + auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4, + args, ((y2 - y1) * xres)/2, + (u16 *) buf); + auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP); + + /* K1901: second tell the controller to update the region with mode */ + args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation); + args[1] = 1; + args[2] = y1 + 1; + args[3] = xres; + args[4] = y2 - y1; + auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args); + + par->update_cnt++; + + mutex_unlock(&(par->io_lock)); + + pm_runtime_mark_last_busy(dev); + 
pm_runtime_put_autosuspend(dev); +} + +static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par, + u16 y1, u16 y2) +{ + int mode; + + if (par->update_mode < 0) { + mode = AUOK190X_UPDATE_MODE(1); + par->last_mode = -1; + } else { + mode = AUOK190X_UPDATE_MODE(par->update_mode); + par->last_mode = par->update_mode; + } + + if (par->flash) + mode |= AUOK190X_UPDATE_NONFLASH; + + auok1901_update_region(par, mode, y1, y2); +} + +static void auok1901fb_dpy_update(struct auok190xfb_par *par) +{ + int mode; + + /* When doing full updates, wait for the controller to be ready + * This will hopefully catch some hangs of the K1901 + */ + par->board->wait_for_rdy(par); + + if (par->update_mode < 0) { + mode = AUOK190X_UPDATE_MODE(0); + par->last_mode = -1; + } else { + mode = AUOK190X_UPDATE_MODE(par->update_mode); + par->last_mode = par->update_mode; + } + + if (par->flash) + mode |= AUOK190X_UPDATE_NONFLASH; + + auok1901_update_region(par, mode, 0, par->info->var.yres); + par->update_cnt = 0; +} + +static bool auok1901fb_need_refresh(struct auok190xfb_par *par) +{ + return (par->update_cnt > 10); +} + +static int __devinit auok1901fb_probe(struct platform_device *pdev) +{ + struct auok190x_init_data init; + struct auok190x_board *board; + + /* pick up board specific routines */ + board = pdev->dev.platform_data; + if (!board) + return -EINVAL; + + /* fill temporary init struct for common init */ + init.id = "auo_k1901fb"; + init.board = board; + init.update_partial = auok1901fb_dpy_update_pages; + init.update_all = auok1901fb_dpy_update; + init.need_refresh = auok1901fb_need_refresh; + init.init = auok1901_init; + + return auok190x_common_probe(pdev, &init); +} + +static int __devexit auok1901fb_remove(struct platform_device *pdev) +{ + return auok190x_common_remove(pdev); +} + +static struct platform_driver auok1901fb_driver = { + .probe = auok1901fb_probe, + .remove = __devexit_p(auok1901fb_remove), + .driver = { + .owner = THIS_MODULE, + .name = "auo_k1901fb", + .pm = &auok190x_pm, + }, +}; +module_platform_driver(auok1901fb_driver); + +MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller"); +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c new file mode 100644 index 000000000000..77da6a2f43dc --- /dev/null +++ b/drivers/video/auo_k190x.c @@ -0,0 +1,1046 @@ +/* + * Common code for AUO-K190X framebuffer drivers + * + * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/gpio.h> +#include <linux/pm_runtime.h> +#include <linux/fb.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/regulator/consumer.h> + +#include <video/auo_k190xfb.h> + +#include "auo_k190x.h" + +struct panel_info { + int w; + int h; +}; + +/* table of panel specific parameters to be indexed into by the board drivers */ +static struct panel_info panel_table[] = { + /* standard 6" */ + [AUOK190X_RESOLUTION_800_600] = { + .w = 800, + .h = 600, + }, + /* standard 9" */ + [AUOK190X_RESOLUTION_1024_768] = { + .w = 1024, + .h = 768, + }, +}; + +/* + * private I80 interface to the board driver + */ + +static void auok190x_issue_data(struct auok190xfb_par *par, u16 data) +{ + par->board->set_ctl(par, AUOK190X_I80_WR, 0); + par->board->set_hdb(par, data); + par->board->set_ctl(par, AUOK190X_I80_WR, 1); +} + +static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data) +{ + par->board->set_ctl(par, AUOK190X_I80_DC, 0); + auok190x_issue_data(par, data); + par->board->set_ctl(par, AUOK190X_I80_DC, 1); +} + +static int auok190x_issue_pixels(struct auok190xfb_par *par, int size, + u16 *data) +{ + struct device *dev = par->info->device; + int i; + u16 tmp; + + if (size & 3) { + dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n", + size); + return -EINVAL; + } + + for (i = 0; i < (size >> 1); i++) { + par->board->set_ctl(par, AUOK190X_I80_WR, 0); + + /* simple reduction of 8bit staticgray to 4bit gray + * combines 4 * 4bit pixel values into a 16bit value + */ + tmp = (data[2*i] & 0xF0) >> 4; + tmp |= (data[2*i] & 0xF000) >> 8; + tmp |= (data[2*i+1] & 0xF0) << 4; + tmp |= (data[2*i+1] & 0xF000); + + par->board->set_hdb(par, tmp); + par->board->set_ctl(par, AUOK190X_I80_WR, 1); + } + + return 0; +} + +static u16 auok190x_read_data(struct auok190xfb_par *par) +{ + u16 data; + + par->board->set_ctl(par, AUOK190X_I80_OE, 0); + data = par->board->get_hdb(par); + par->board->set_ctl(par, AUOK190X_I80_OE, 1); + + return data; +} + +/* + * Command interface for the controller drivers + */ + +void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data) +{ + par->board->set_ctl(par, AUOK190X_I80_CS, 0); + auok190x_issue_cmd(par, data); + par->board->set_ctl(par, AUOK190X_I80_CS, 1); +} +EXPORT_SYMBOL_GPL(auok190x_send_command_nowait); + +void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv) +{ + int i; + + par->board->set_ctl(par, AUOK190X_I80_CS, 0); + auok190x_issue_cmd(par, cmd); + + for (i = 0; i < argc; i++) + auok190x_issue_data(par, argv[i]); + par->board->set_ctl(par, AUOK190X_I80_CS, 1); +} +EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait); + +int auok190x_send_command(struct auok190xfb_par *par, u16 data) +{ + int ret; + + ret = par->board->wait_for_rdy(par); + if (ret) + return ret; + + auok190x_send_command_nowait(par, data); + return 0; +} +EXPORT_SYMBOL_GPL(auok190x_send_command); + +int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv) +{ + int ret; + + ret = par->board->wait_for_rdy(par); + if (ret) + return ret; + + auok190x_send_cmdargs_nowait(par, cmd, argc, argv); + return 0; +} +EXPORT_SYMBOL_GPL(auok190x_send_cmdargs); + +int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv) +{ + int i, ret; + + ret = par->board->wait_for_rdy(par); + if (ret) + return ret; + + par->board->set_ctl(par, AUOK190X_I80_CS, 0); + auok190x_issue_cmd(par, cmd); 
+ + for (i = 0; i < argc; i++) + argv[i] = auok190x_read_data(par); + par->board->set_ctl(par, AUOK190X_I80_CS, 1); + + return 0; +} +EXPORT_SYMBOL_GPL(auok190x_read_cmdargs); + +void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv, int size, u16 *data) +{ + int i; + + par->board->set_ctl(par, AUOK190X_I80_CS, 0); + + auok190x_issue_cmd(par, cmd); + + for (i = 0; i < argc; i++) + auok190x_issue_data(par, argv[i]); + + auok190x_issue_pixels(par, size, data); + + par->board->set_ctl(par, AUOK190X_I80_CS, 1); +} +EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait); + +int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv, int size, u16 *data) +{ + int ret; + + ret = par->board->wait_for_rdy(par); + if (ret) + return ret; + + auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data); + + return 0; +} +EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels); + +/* + * fbdefio callbacks - common on both controllers. + */ + +static void auok190xfb_dpy_first_io(struct fb_info *info) +{ + /* tell runtime-pm that we wish to use the device in a short time */ + pm_runtime_get(info->device); +} + +/* this is called back from the deferred io workqueue */ +static void auok190xfb_dpy_deferred_io(struct fb_info *info, + struct list_head *pagelist) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + struct auok190xfb_par *par = info->par; + u16 yres = info->var.yres; + u16 xres = info->var.xres; + u16 y1 = 0, h = 0; + int prev_index = -1; + struct page *cur; + int h_inc; + int threshold; + + if (!list_empty(pagelist)) + /* the device resume should've been requested through first_io, + * if the resume did not finish until now, wait for it. + */ + pm_runtime_barrier(info->device); + else + /* We reached this via the fsync or some other way. + * In either case the first_io function did not run, + * so we runtime_resume the device here synchronously. + */ + pm_runtime_get_sync(info->device); + + /* Do a full screen update every n updates to prevent + * excessive darkening of the Sipix display. + * If we do this, there is no need to walk the pages. + */ + if (par->need_refresh(par)) { + par->update_all(par); + goto out; + } + + /* height increment is fixed per page */ + h_inc = DIV_ROUND_UP(PAGE_SIZE , xres); + + /* calculate number of pages from pixel height */ + threshold = par->consecutive_threshold / h_inc; + if (threshold < 1) + threshold = 1; + + /* walk the written page list and swizzle the data */ + list_for_each_entry(cur, &fbdefio->pagelist, lru) { + if (prev_index < 0) { + /* just starting so assign first page */ + y1 = (cur->index << PAGE_SHIFT) / xres; + h = h_inc; + } else if ((cur->index - prev_index) <= threshold) { + /* page is within our threshold for single updates */ + h += h_inc * (cur->index - prev_index); + } else { + /* page not consecutive, issue previous update first */ + par->update_partial(par, y1, y1 + h); + + /* start over with our non consecutive page */ + y1 = (cur->index << PAGE_SHIFT) / xres; + h = h_inc; + } + prev_index = cur->index; + } + + /* if we still have any pages to update we do so now */ + if (h >= yres) + /* its a full screen update, just do it */ + par->update_all(par); + else + par->update_partial(par, y1, min((u16) (y1 + h), yres)); + +out: + pm_runtime_mark_last_busy(info->device); + pm_runtime_put_autosuspend(info->device); +} + +/* + * framebuffer operations + */ + +/* + * this is the slow path from userspace. they can seek and write to + * the fb. 
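To make the deferred-io arithmetic above concrete, assume PAGE_SIZE is 4096 and the standard 800x600 panel with the 8-bit grayscale layout that auok190x_issue_pixels() reduces to 4 bits: each dirty page then covers h_inc = DIV_ROUND_UP(4096, 800) = 6 scanlines, a page with index 10 starts at scanline y1 = (10 * 4096) / 800 = 51, dirty pages whose indices fall within the derived threshold are merged into one update_partial() call, and a merged region at least yres lines tall falls back to update_all().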
it's inefficient to do anything less than a full screen draw + */ +static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct auok190xfb_par *par = info->par; + unsigned long p = *ppos; + void *dst; + int err = 0; + unsigned long total_size; + + if (info->state != FBINFO_STATE_RUNNING) + return -EPERM; + + total_size = info->fix.smem_len; + + if (p > total_size) + return -EFBIG; + + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + + if (count + p > total_size) { + if (!err) + err = -ENOSPC; + + count = total_size - p; + } + + dst = (void *)(info->screen_base + p); + + if (copy_from_user(dst, buf, count)) + err = -EFAULT; + + if (!err) + *ppos += count; + + par->update_all(par); + + return (err) ? err : count; +} + +static void auok190xfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + struct auok190xfb_par *par = info->par; + + sys_fillrect(info, rect); + + par->update_all(par); +} + +static void auok190xfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + struct auok190xfb_par *par = info->par; + + sys_copyarea(info, area); + + par->update_all(par); +} + +static void auok190xfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + struct auok190xfb_par *par = info->par; + + sys_imageblit(info, image); + + par->update_all(par); +} + +static int auok190xfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + if (info->var.xres != var->xres || info->var.yres != var->yres || + info->var.xres_virtual != var->xres_virtual || + info->var.yres_virtual != var->yres_virtual) { + pr_info("%s: Resolution not supported: X%u x Y%u\n", + __func__, var->xres, var->yres); + return -EINVAL; + } + + /* + * Memory limit + */ + + if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) { + pr_info("%s: Memory Limit requested yres_virtual = %u\n", + __func__, var->yres_virtual); + return -ENOMEM; + } + + return 0; +} + +static struct fb_ops auok190xfb_ops = { + .owner = THIS_MODULE, + .fb_read = fb_sys_read, + .fb_write = auok190xfb_write, + .fb_fillrect = auok190xfb_fillrect, + .fb_copyarea = auok190xfb_copyarea, + .fb_imageblit = auok190xfb_imageblit, + .fb_check_var = auok190xfb_check_var, +}; + +/* + * Controller-functions common to both K1900 and K1901 + */ + +static int auok190x_read_temperature(struct auok190xfb_par *par) +{ + struct device *dev = par->info->device; + u16 data[4]; + int temp; + + pm_runtime_get_sync(dev); + + mutex_lock(&(par->io_lock)); + + auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data); + + mutex_unlock(&(par->io_lock)); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + /* sanitize and split of half-degrees for now */ + temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1); + + /* handle positive and negative temperatures */ + if (temp >= 201) + return (255 - temp + 1) * (-1); + else + return temp; +} + +static void auok190x_identify(struct auok190xfb_par *par) +{ + struct device *dev = par->info->device; + u16 data[4]; + + pm_runtime_get_sync(dev); + + mutex_lock(&(par->io_lock)); + + auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data); + + mutex_unlock(&(par->io_lock)); + + par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK; + + par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]); + par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]); + par->panel_model = AUOK190X_VERSION_MODEL(data[2]); + + par->tcon_version = AUOK190X_VERSION_TCON(data[3]); + 
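The temperature handling in auok190x_read_temperature above is easier to follow with numbers. The controller reports a 9-bit value whose lowest bit apparently carries half degrees; the driver masks with AUOK190X_VERSION_TEMP_MASK (0x1ff), shifts that bit away and then maps values of 201 and above to negative temperatures, as in the code:

	raw & 0x1ff = 0x032  ->  0x032 >> 1 =  25  ->  +25 degrees C
	raw & 0x1ff = 0x1ec  ->  0x1ec >> 1 = 246  ->  -(255 - 246 + 1) = -10 degrees C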
par->lut_version = AUOK190X_VERSION_LUT(data[3]); + + dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x", + par->panel_size_int, par->panel_size_float, par->panel_model, + par->epd_type, par->tcon_version, par->lut_version); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); +} + +/* + * Sysfs functions + */ + +static ssize_t update_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct auok190xfb_par *par = info->par; + + return sprintf(buf, "%d\n", par->update_mode); +} + +static ssize_t update_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct auok190xfb_par *par = info->par; + int mode, ret; + + ret = kstrtoint(buf, 10, &mode); + if (ret) + return ret; + + par->update_mode = mode; + + /* if we enter a better mode, do a full update */ + if (par->last_mode > 1 && mode < par->last_mode) + par->update_all(par); + + return count; +} + +static ssize_t flash_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct auok190xfb_par *par = info->par; + + return sprintf(buf, "%d\n", par->flash); +} + +static ssize_t flash_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct auok190xfb_par *par = info->par; + int flash, ret; + + ret = kstrtoint(buf, 10, &flash); + if (ret) + return ret; + + if (flash > 0) + par->flash = 1; + else + par->flash = 0; + + return count; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct auok190xfb_par *par = info->par; + int temp; + + temp = auok190x_read_temperature(par); + return sprintf(buf, "%d\n", temp); +} + +static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store); +static DEVICE_ATTR(flash, 0644, flash_show, flash_store); +static DEVICE_ATTR(temp, 0644, temp_show, NULL); + +static struct attribute *auok190x_attributes[] = { + &dev_attr_update_mode.attr, + &dev_attr_flash.attr, + &dev_attr_temp.attr, + NULL +}; + +static const struct attribute_group auok190x_attr_group = { + .attrs = auok190x_attributes, +}; + +static int auok190x_power(struct auok190xfb_par *par, bool on) +{ + struct auok190x_board *board = par->board; + int ret; + + if (on) { + /* We should maintain POWER up for at least 80ms before set + * RST_N and SLP_N to high (TCON spec 20100803_v35 p59) + */ + ret = regulator_enable(par->regulator); + if (ret) + return ret; + + msleep(200); + gpio_set_value(board->gpio_nrst, 1); + gpio_set_value(board->gpio_nsleep, 1); + msleep(200); + } else { + regulator_disable(par->regulator); + gpio_set_value(board->gpio_nrst, 0); + gpio_set_value(board->gpio_nsleep, 0); + } + + return 0; +} + +/* + * Recovery - powercycle the controller + */ + +static void auok190x_recover(struct auok190xfb_par *par) +{ + auok190x_power(par, 0); + msleep(100); + auok190x_power(par, 1); + + par->init(par); + + /* wait for init to complete */ + par->board->wait_for_rdy(par); +} + +/* + * Power-management + */ + +#ifdef CONFIG_PM +static int auok190x_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fb_info *info = platform_get_drvdata(pdev); + struct auok190xfb_par *par = info->par; + struct auok190x_board *board = 
par->board; + u16 standby_param; + + /* take and keep the lock until we are resumed, as the controller + * will never reach the non-busy state when in standby mode + */ + mutex_lock(&(par->io_lock)); + + if (par->standby) { + dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n"); + mutex_unlock(&(par->io_lock)); + return 0; + } + + /* according to runtime_pm.txt runtime_suspend only means, that the + * device will not process data and will not communicate with the CPU + * As we hold the lock, this stays true even without standby + */ + if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { + dev_dbg(dev, "runtime suspend without standby\n"); + goto finish; + } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) { + /* for some TCON versions STANDBY expects a parameter (0) but + * it seems the real tcon version has to be determined yet. + */ + dev_dbg(dev, "runtime suspend with additional empty param\n"); + standby_param = 0; + auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1, + &standby_param); + } else { + dev_dbg(dev, "runtime suspend without param\n"); + auok190x_send_command(par, AUOK190X_CMD_STANDBY); + } + + msleep(64); + +finish: + par->standby = 1; + + return 0; +} + +static int auok190x_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fb_info *info = platform_get_drvdata(pdev); + struct auok190xfb_par *par = info->par; + struct auok190x_board *board = par->board; + + if (!par->standby) { + dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n"); + return 0; + } + + if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { + dev_dbg(dev, "runtime resume without standby\n"); + } else { + /* when in standby, controller is always busy + * and only accepts the wakeup command + */ + dev_dbg(dev, "runtime resume from standby\n"); + auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP); + + msleep(160); + + /* wait for the controller to be ready and release the lock */ + board->wait_for_rdy(par); + } + + par->standby = 0; + + mutex_unlock(&(par->io_lock)); + + return 0; +} + +static int auok190x_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fb_info *info = platform_get_drvdata(pdev); + struct auok190xfb_par *par = info->par; + struct auok190x_board *board = par->board; + int ret; + + dev_dbg(dev, "suspend\n"); + if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { + /* suspend via powering off the ic */ + dev_dbg(dev, "suspend with broken standby\n"); + + auok190x_power(par, 0); + } else { + dev_dbg(dev, "suspend using sleep\n"); + + /* the sleep state can only be entered from the standby state. + * pm_runtime_get_noresume gets called before the suspend call. + * So the devices usage count is >0 but it is not necessarily + * active. 
+ */ + if (!pm_runtime_status_suspended(dev)) { + ret = auok190x_runtime_suspend(dev); + if (ret < 0) { + dev_err(dev, "auok190x_runtime_suspend failed with %d\n", + ret); + return ret; + } + par->manual_standby = 1; + } + + gpio_direction_output(board->gpio_nsleep, 0); + } + + msleep(100); + + return 0; +} + +static int auok190x_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fb_info *info = platform_get_drvdata(pdev); + struct auok190xfb_par *par = info->par; + struct auok190x_board *board = par->board; + + dev_dbg(dev, "resume\n"); + if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) { + dev_dbg(dev, "resume with broken standby\n"); + + auok190x_power(par, 1); + + par->init(par); + } else { + dev_dbg(dev, "resume from sleep\n"); + + /* device should be in runtime suspend when we were suspended + * and pm_runtime_put_sync gets called after this function. + * So there is no need to touch the standby mode here at all. + */ + gpio_direction_output(board->gpio_nsleep, 1); + msleep(100); + + /* an additional init call seems to be necessary after sleep */ + auok190x_runtime_resume(dev); + par->init(par); + + /* if we were runtime-suspended before, suspend again*/ + if (!par->manual_standby) + auok190x_runtime_suspend(dev); + else + par->manual_standby = 0; + } + + return 0; +} +#endif + +const struct dev_pm_ops auok190x_pm = { + SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume) +}; +EXPORT_SYMBOL_GPL(auok190x_pm); + +/* + * Common probe and remove code + */ + +int __devinit auok190x_common_probe(struct platform_device *pdev, + struct auok190x_init_data *init) +{ + struct auok190x_board *board = init->board; + struct auok190xfb_par *par; + struct fb_info *info; + struct panel_info *panel; + int videomemorysize, ret; + unsigned char *videomemory; + + /* check board contents */ + if (!board->init || !board->cleanup || !board->wait_for_rdy + || !board->set_ctl || !board->set_hdb || !board->get_hdb + || !board->setup_irq) + return -EINVAL; + + info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev); + if (!info) + return -ENOMEM; + + par = info->par; + par->info = info; + par->board = board; + par->recover = auok190x_recover; + par->update_partial = init->update_partial; + par->update_all = init->update_all; + par->need_refresh = init->need_refresh; + par->init = init->init; + + /* init update modes */ + par->update_cnt = 0; + par->update_mode = -1; + par->last_mode = -1; + par->flash = 0; + + par->regulator = regulator_get(info->device, "vdd"); + if (IS_ERR(par->regulator)) { + ret = PTR_ERR(par->regulator); + dev_err(info->device, "Failed to get regulator: %d\n", ret); + goto err_reg; + } + + ret = board->init(par); + if (ret) { + dev_err(info->device, "board init failed, %d\n", ret); + goto err_board; + } + + ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep"); + if (ret) { + dev_err(info->device, "could not request sleep gpio, %d\n", + ret); + goto err_gpio1; + } + + ret = gpio_direction_output(board->gpio_nsleep, 0); + if (ret) { + dev_err(info->device, "could not set sleep gpio, %d\n", ret); + goto err_gpio2; + } + + ret = gpio_request(board->gpio_nrst, "AUOK190x reset"); + if (ret) { + dev_err(info->device, "could not request reset gpio, %d\n", + ret); + goto err_gpio2; + } + + ret = gpio_direction_output(board->gpio_nrst, 0); + if (ret) { + dev_err(info->device, "could not set reset gpio, %d\n", ret); + goto err_gpio3; + } + + ret = 
auok190x_power(par, 1); + if (ret) { + dev_err(info->device, "could not power on the device, %d\n", + ret); + goto err_gpio3; + } + + mutex_init(&par->io_lock); + + init_waitqueue_head(&par->waitq); + + ret = par->board->setup_irq(par->info); + if (ret) { + dev_err(info->device, "could not setup ready-irq, %d\n", ret); + goto err_irq; + } + + /* wait for init to complete */ + par->board->wait_for_rdy(par); + + /* + * From here on the controller can talk to us + */ + + /* initialise fix, var, resolution and rotation */ + + strlcpy(info->fix.id, init->id, 16); + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; + info->fix.xpanstep = 0; + info->fix.ypanstep = 0; + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + + info->var.bits_per_pixel = 8; + info->var.grayscale = 1; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + + panel = &panel_table[board->resolution]; + + /* if 90 degree rotation, switch width and height */ + if (board->rotation & 1) { + info->var.xres = panel->h; + info->var.yres = panel->w; + info->var.xres_virtual = panel->h; + info->var.yres_virtual = panel->w; + info->fix.line_length = panel->h; + } else { + info->var.xres = panel->w; + info->var.yres = panel->h; + info->var.xres_virtual = panel->w; + info->var.yres_virtual = panel->h; + info->fix.line_length = panel->w; + } + + par->resolution = board->resolution; + par->rotation = board->rotation; + + /* videomemory handling */ + + videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE); + videomemory = vmalloc(videomemorysize); + if (!videomemory) { + ret = -ENOMEM; + goto err_irq; + } + + memset(videomemory, 0, videomemorysize); + info->screen_base = (char *)videomemory; + info->fix.smem_len = videomemorysize; + + info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB; + info->fbops = &auok190xfb_ops; + + /* deferred io init */ + + info->fbdefio = devm_kzalloc(info->device, + sizeof(struct fb_deferred_io), + GFP_KERNEL); + if (!info->fbdefio) { + dev_err(info->device, "Failed to allocate memory\n"); + ret = -ENOMEM; + goto err_defio; + } + + dev_dbg(info->device, "targetting %d frames per second\n", board->fps); + info->fbdefio->delay = HZ / board->fps; + info->fbdefio->first_io = auok190xfb_dpy_first_io, + info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io, + fb_deferred_io_init(info); + + /* color map */ + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret < 0) { + dev_err(info->device, "Failed to allocate colormap\n"); + goto err_cmap; + } + + /* controller init */ + + par->consecutive_threshold = 100; + par->init(par); + auok190x_identify(par); + + platform_set_drvdata(pdev, info); + + ret = register_framebuffer(info); + if (ret < 0) + goto err_regfb; + + ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group); + if (ret) + goto err_sysfs; + + dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n", + info->node, info->var.xres, info->var.yres, + videomemorysize >> 10); + + /* increase autosuspend_delay when we use alternative methods + * for runtime_pm + */ + par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) + ? 
1000 : 200; + + pm_runtime_set_active(info->device); + pm_runtime_enable(info->device); + pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay); + pm_runtime_use_autosuspend(info->device); + + return 0; + +err_sysfs: + unregister_framebuffer(info); +err_regfb: + fb_dealloc_cmap(&info->cmap); +err_cmap: + fb_deferred_io_cleanup(info); + kfree(info->fbdefio); +err_defio: + vfree((void *)info->screen_base); +err_irq: + auok190x_power(par, 0); +err_gpio3: + gpio_free(board->gpio_nrst); +err_gpio2: + gpio_free(board->gpio_nsleep); +err_gpio1: + board->cleanup(par); +err_board: + regulator_put(par->regulator); +err_reg: + framebuffer_release(info); + + return ret; +} +EXPORT_SYMBOL_GPL(auok190x_common_probe); + +int __devexit auok190x_common_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + struct auok190xfb_par *par = info->par; + struct auok190x_board *board = par->board; + + pm_runtime_disable(info->device); + + sysfs_remove_group(&info->device->kobj, &auok190x_attr_group); + + unregister_framebuffer(info); + + fb_dealloc_cmap(&info->cmap); + + fb_deferred_io_cleanup(info); + kfree(info->fbdefio); + + vfree((void *)info->screen_base); + + auok190x_power(par, 0); + + gpio_free(board->gpio_nrst); + gpio_free(board->gpio_nsleep); + + board->cleanup(par); + + regulator_put(par->regulator); + + framebuffer_release(info); + + return 0; +} +EXPORT_SYMBOL_GPL(auok190x_common_remove); + +MODULE_DESCRIPTION("Common code for AUO-K190X controllers"); +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h new file mode 100644 index 000000000000..e35af1f51b28 --- /dev/null +++ b/drivers/video/auo_k190x.h @@ -0,0 +1,129 @@ +/* + * Private common definitions for AUO-K190X framebuffer drivers + * + * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* + * I80 interface specific defines + */ + +#define AUOK190X_I80_CS 0x01 +#define AUOK190X_I80_DC 0x02 +#define AUOK190X_I80_WR 0x03 +#define AUOK190X_I80_OE 0x04 + +/* + * AUOK190x commands, common to both controllers + */ + +#define AUOK190X_CMD_INIT 0x0000 +#define AUOK190X_CMD_STANDBY 0x0001 +#define AUOK190X_CMD_WAKEUP 0x0002 +#define AUOK190X_CMD_TCON_RESET 0x0003 +#define AUOK190X_CMD_DATA_STOP 0x1002 +#define AUOK190X_CMD_LUT_START 0x1003 +#define AUOK190X_CMD_DISP_REFRESH 0x1004 +#define AUOK190X_CMD_DISP_RESET 0x1005 +#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D +#define AUOK190X_CMD_PRE_DISPLAY_STOP 0x100F +#define AUOK190X_CMD_FLASH_W 0x2000 +#define AUOK190X_CMD_FLASH_E 0x2001 +#define AUOK190X_CMD_FLASH_STS 0x2002 +#define AUOK190X_CMD_FRAMERATE 0x3000 +#define AUOK190X_CMD_READ_VERSION 0x4000 +#define AUOK190X_CMD_READ_STATUS 0x4001 +#define AUOK190X_CMD_READ_LUT 0x4003 +#define AUOK190X_CMD_DRIVERTIMING 0x5000 +#define AUOK190X_CMD_LBALANCE 0x5001 +#define AUOK190X_CMD_AGINGMODE 0x6000 +#define AUOK190X_CMD_AGINGEXIT 0x6001 + +/* + * Common settings for AUOK190X_CMD_INIT + */ + +#define AUOK190X_INIT_DATA_FILTER (0 << 12) +#define AUOK190X_INIT_DATA_BYPASS (1 << 12) +#define AUOK190X_INIT_INVERSE_WHITE (0 << 9) +#define AUOK190X_INIT_INVERSE_BLACK (1 << 9) +#define AUOK190X_INIT_SCAN_DOWN (0 << 1) +#define AUOK190X_INIT_SCAN_UP (1 << 1) +#define AUOK190X_INIT_SHIFT_LEFT (0 << 0) +#define AUOK190X_INIT_SHIFT_RIGHT (1 << 0) + +/* Common bits to pixels + * Mode 15-12 11-8 7-4 3-0 + * format0 4 3 2 1 + * format1 3 4 1 2 + */ + +#define AUOK190X_INIT_FORMAT0 0 +#define AUOK190X_INIT_FORMAT1 (1 << 6) + +/* + * settings for AUOK190X_CMD_RESET + */ + +#define AUOK190X_RESET_TCON (0 << 0) +#define AUOK190X_RESET_NORMAL (1 << 0) +#define AUOK190X_RESET_PON (1 << 1) + +/* + * AUOK190X_CMD_VERSION + */ + +#define AUOK190X_VERSION_TEMP_MASK (0x1ff) +#define AUOK190X_VERSION_EPD_MASK (0xff) +#define AUOK190X_VERSION_SIZE_INT(_val) ((_val & 0xfc00) >> 10) +#define AUOK190X_VERSION_SIZE_FLOAT(_val) ((_val & 0x3c0) >> 6) +#define AUOK190X_VERSION_MODEL(_val) (_val & 0x3f) +#define AUOK190X_VERSION_LUT(_val) (_val & 0xff) +#define AUOK190X_VERSION_TCON(_val) ((_val & 0xff00) >> 8) + +/* + * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901 + */ + +#define AUOK190X_UPDATE_MODE(_res) ((_res & 0x7) << 12) +#define AUOK190X_UPDATE_NONFLASH (1 << 15) + +/* + * track panel specific parameters for common init + */ + +struct auok190x_init_data { + char *id; + struct auok190x_board *board; + + void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2); + void (*update_all)(struct auok190xfb_par *par); + bool (*need_refresh)(struct auok190xfb_par *par); + void (*init)(struct auok190xfb_par *par); +}; + + +extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data); +extern int auok190x_send_command(struct auok190xfb_par *par, u16 data); +extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv); +extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv); +extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, + u16 cmd, int argc, u16 *argv, + int size, u16 *data); +extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv, int size, + u16 *data); +extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd, + int argc, u16 *argv); + +extern int auok190x_common_probe(struct platform_device *pdev, + struct 
auok190x_init_data *init); +extern int auok190x_common_remove(struct platform_device *pdev); + +extern const struct dev_pm_ops auok190x_pm; diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c index 1a268a294478..33ea874c87d2 100644 --- a/drivers/video/bfin_adv7393fb.c +++ b/drivers/video/bfin_adv7393fb.c @@ -414,14 +414,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, if (ret) { dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); ret = -EBUSY; - goto out_8; + goto free_fbdev; } } if (peripheral_request_list(ppi_pins, DRIVER_NAME)) { dev_err(&client->dev, "requesting PPI peripheral failed\n"); ret = -EFAULT; - goto out_8; + goto free_gpio; } fbdev->fb_mem = @@ -432,7 +432,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n", (u32) fbdev->fb_len); ret = -ENOMEM; - goto out_7; + goto free_ppi_pins; } fbdev->info.screen_base = (void *)fbdev->fb_mem; @@ -464,27 +464,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, if (!fbdev->info.pseudo_palette) { dev_err(&client->dev, "failed to allocate pseudo_palette\n"); ret = -ENOMEM; - goto out_6; + goto free_fb_mem; } if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { dev_err(&client->dev, "failed to allocate colormap (%d entries)\n", BFIN_LCD_NBR_PALETTE_ENTRIES); ret = -EFAULT; - goto out_5; + goto free_palette; } if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) { dev_err(&client->dev, "unable to request PPI DMA\n"); ret = -EFAULT; - goto out_4; + goto free_cmap; } if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0, "PPI ERROR", fbdev) < 0) { dev_err(&client->dev, "unable to request PPI ERROR IRQ\n"); ret = -EFAULT; - goto out_3; + goto free_ch_ppi; } fbdev->open = 0; @@ -494,14 +494,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, if (ret) { dev_err(&client->dev, "i2c attach: init error\n"); - goto out_1; + goto free_irq_ppi; } if (register_framebuffer(&fbdev->info) < 0) { dev_err(&client->dev, "unable to register framebuffer\n"); ret = -EFAULT; - goto out_1; + goto free_irq_ppi; } dev_info(&client->dev, "fb%d: %s frame buffer device\n", @@ -512,7 +512,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, if (!entry) { dev_err(&client->dev, "unable to create /proc entry\n"); ret = -EFAULT; - goto out_0; + goto free_fb; } entry->read_proc = adv7393_read_proc; @@ -521,22 +521,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client, return 0; - out_0: +free_fb: unregister_framebuffer(&fbdev->info); - out_1: +free_irq_ppi: free_irq(IRQ_PPI_ERROR, fbdev); - out_3: +free_ch_ppi: free_dma(CH_PPI); - out_4: - dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem, - fbdev->dma_handle); - out_5: +free_cmap: fb_dealloc_cmap(&fbdev->info.cmap); - out_6: +free_palette: kfree(fbdev->info.pseudo_palette); - out_7: +free_fb_mem: + dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem, + fbdev->dma_handle); +free_ppi_pins: peripheral_free_list(ppi_pins); - out_8: +free_gpio: + if (ANOMALY_05000400) + gpio_free(P_IDENT(P_PPI0_FS3)); +free_fbdev: kfree(fbdev); return ret; diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c index f56699d8122a..eae46f6457e2 100644 --- a/drivers/video/cobalt_lcdfb.c +++ b/drivers/video/cobalt_lcdfb.c @@ -1,7 +1,8 @@ /* - * Cobalt server LCD frame buffer driver. + * Cobalt/SEAD3 LCD frame buffer driver. 
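The bfin_adv7393fb_probe() relabelling above illustrates the error-unwind convention used throughout this series: every goto target is named after the resource it releases and the unwind runs in reverse acquisition order. The rename also moves the dma_free_coherent() call and adds the previously missing gpio_free() so that the cleanup actually mirrors the acquisition path. A minimal sketch of the pattern, with generic names that are not taken from the driver:

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto free_a;

	ret = acquire_c();
	if (ret)
		goto free_b;

	return 0;

free_b:
	release_b();
free_a:
	release_a();
	return ret;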
* * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> + * Copyright (C) 2012 MIPS Technologies, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -62,6 +63,7 @@ #define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK) #define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE) +#ifdef CONFIG_MIPS_COBALT static inline void lcd_write_control(struct fb_info *info, u8 control) { writel((u32)control << 24, info->screen_base); @@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info) { return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24; } +#else + +#define LCD_CTL 0x00 +#define LCD_DATA 0x08 +#define CPLD_STATUS 0x10 +#define CPLD_DATA 0x18 + +static inline void cpld_wait(struct fb_info *info) +{ + do { + } while (readl(info->screen_base + CPLD_STATUS) & 1); +} + +static inline void lcd_write_control(struct fb_info *info, u8 control) +{ + cpld_wait(info); + writel(control, info->screen_base + LCD_CTL); +} + +static inline u8 lcd_read_control(struct fb_info *info) +{ + cpld_wait(info); + readl(info->screen_base + LCD_CTL); + cpld_wait(info); + return readl(info->screen_base + CPLD_DATA) & 0xff; +} + +static inline void lcd_write_data(struct fb_info *info, u8 data) +{ + cpld_wait(info); + writel(data, info->screen_base + LCD_DATA); +} + +static inline u8 lcd_read_data(struct fb_info *info) +{ + cpld_wait(info); + readl(info->screen_base + LCD_DATA); + cpld_wait(info); + return readl(info->screen_base + CPLD_DATA) & 0xff; +} +#endif static int lcd_busy_wait(struct fb_info *info) { diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index f8babbeee275..345d96230978 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c @@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev) err = fb_alloc_cmap(&info->cmap, 256, 0); if (err) - goto failed; + goto failed_cmap; err = ep93xxfb_alloc_videomem(info); if (err) - goto failed; + goto failed_videomem; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { err = -ENXIO; - goto failed; + goto failed_resource; } /* @@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev) fbi->mmio_base = ioremap(res->start, resource_size(res)); if (!fbi->mmio_base) { err = -ENXIO; - goto failed; + goto failed_resource; } strcpy(info->fix.id, pdev->name); @@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev) if (err == 0) { dev_err(info->dev, "No suitable video mode found\n"); err = -EINVAL; - goto failed; + goto failed_mode; } if (mach_info->setup) { err = mach_info->setup(pdev); if (err) - return err; + goto failed_mode; } err = ep93xxfb_check_var(&info->var, info); if (err) - goto failed; + goto failed_check; fbi->clk = clk_get(info->dev, NULL); if (IS_ERR(fbi->clk)) { err = PTR_ERR(fbi->clk); fbi->clk = NULL; - goto failed; + goto failed_check; } ep93xxfb_set_par(info); @@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev) return 0; failed: - if (fbi->clk) - clk_put(fbi->clk); - if (fbi->mmio_base) - iounmap(fbi->mmio_base); - ep93xxfb_dealloc_videomem(info); - if (&info->cmap) - fb_dealloc_cmap(&info->cmap); + clk_put(fbi->clk); +failed_check: if (fbi->mach_info->teardown) fbi->mach_info->teardown(pdev); +failed_mode: + iounmap(fbi->mmio_base); +failed_resource: + ep93xxfb_dealloc_videomem(info); +failed_videomem: + fb_dealloc_cmap(&info->cmap); +failed_cmap: kfree(info); platform_set_drvdata(pdev, 
NULL); diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c index 2a4481cf260c..a36b2d28280e 100644 --- a/drivers/video/exynos/exynos_dp_core.c +++ b/drivers/video/exynos/exynos_dp_core.c @@ -21,14 +21,14 @@ #include <video/exynos_dp.h> -#include <plat/cpu.h> - #include "exynos_dp_core.h" static int exynos_dp_init_dp(struct exynos_dp_device *dp) { exynos_dp_reset(dp); + exynos_dp_swreset(dp); + /* SW defined function Normal operation */ exynos_dp_enable_sw_function(dp); @@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) int lane_count; u8 buf[5]; - u8 *adjust_request; + u8 adjust_request[2]; u8 voltage_swing; u8 pre_emphasis; u8 training_lane; @@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) /* set training pattern 2 for EQ */ exynos_dp_set_training_pattern(dp, TRAINING_PTN2); - adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 - - DPCD_ADDR_LANE0_1_STATUS); + adjust_request[0] = link_status[4]; + adjust_request[1] = link_status[5]; exynos_dp_get_adjust_train(dp, adjust_request); @@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) u8 buf[5]; u32 reg; - u8 *adjust_request; + u8 adjust_request[2]; udelay(400); @@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) lane_count = dp->link_train.lane_count; if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { - adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 - - DPCD_ADDR_LANE0_1_STATUS); + adjust_request[0] = link_status[4]; + adjust_request[1] = link_status[5]; if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) { /* traing pattern Set to Normal */ @@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - mdelay(100); + udelay(1); } /* Set to use the register calculated M/N video */ @@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - mdelay(100); + mdelay(1); } if (retval != 0) @@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) return -EINVAL; } - dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL); + dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), + GFP_KERNEL); if (!dp) { dev_err(&pdev->dev, "no memory for device data\n"); return -ENOMEM; @@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) dp->clock = clk_get(&pdev->dev, "dp"); if (IS_ERR(dp->clock)) { dev_err(&pdev->dev, "failed to get clock\n"); - ret = PTR_ERR(dp->clock); - goto err_dp; + return PTR_ERR(dp->clock); } clk_enable(dp->clock); @@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) goto err_clock; } - res = request_mem_region(res->start, resource_size(res), - dev_name(&pdev->dev)); - if (!res) { - dev_err(&pdev->dev, "failed to request registers region\n"); - ret = -EINVAL; - goto err_clock; - } - - dp->res = res; - - dp->reg_base = ioremap(res->start, resource_size(res)); + dp->reg_base = devm_request_and_ioremap(&pdev->dev, res); if (!dp->reg_base) { dev_err(&pdev->dev, "failed to ioremap\n"); ret = -ENOMEM; - goto err_req_region; + goto err_clock; } dp->irq = platform_get_irq(pdev, 0); if (!dp->irq) { dev_err(&pdev->dev, "failed to get irq\n"); ret = -ENODEV; - goto err_ioremap; + goto err_clock; } - ret = request_irq(dp->irq, exynos_dp_irq_handler, 0, - "exynos-dp", dp); + ret = 
devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0, + "exynos-dp", dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); - goto err_ioremap; + goto err_clock; } dp->video_info = pdata->video_info; @@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) ret = exynos_dp_detect_hpd(dp); if (ret) { dev_err(&pdev->dev, "unable to detect hpd\n"); - goto err_irq; + goto err_clock; } exynos_dp_handle_edid(dp); @@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) dp->video_info->link_rate); if (ret) { dev_err(&pdev->dev, "unable to do link train\n"); - goto err_irq; + goto err_clock; } exynos_dp_enable_scramble(dp, 1); @@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev) ret = exynos_dp_config_video(dp, dp->video_info); if (ret) { dev_err(&pdev->dev, "unable to config video\n"); - goto err_irq; + goto err_clock; } platform_set_drvdata(pdev, dp); return 0; -err_irq: - free_irq(dp->irq, dp); -err_ioremap: - iounmap(dp->reg_base); -err_req_region: - release_mem_region(res->start, resource_size(res)); err_clock: clk_put(dp->clock); -err_dp: - kfree(dp); return ret; } @@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev) if (pdata && pdata->phy_exit) pdata->phy_exit(); - free_irq(dp->irq, dp); - iounmap(dp->reg_base); - clk_disable(dp->clock); clk_put(dp->clock); - release_mem_region(dp->res->start, resource_size(dp->res)); - - kfree(dp); - return 0; } diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h index 90ceaca0fa24..1e0f998e0c9f 100644 --- a/drivers/video/exynos/exynos_dp_core.h +++ b/drivers/video/exynos/exynos_dp_core.h @@ -26,7 +26,6 @@ struct link_train { struct exynos_dp_device { struct device *dev; - struct resource *res; struct clk *clock; unsigned int irq; void __iomem *reg_base; @@ -39,8 +38,10 @@ struct exynos_dp_device { void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable); void exynos_dp_stop_video(struct exynos_dp_device *dp); void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable); +void exynos_dp_init_analog_param(struct exynos_dp_device *dp); void exynos_dp_init_interrupt(struct exynos_dp_device *dp); void exynos_dp_reset(struct exynos_dp_device *dp); +void exynos_dp_swreset(struct exynos_dp_device *dp); void exynos_dp_config_interrupt(struct exynos_dp_device *dp); u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c index 6548afa0e3d2..6ce76d56c3a1 100644 --- a/drivers/video/exynos/exynos_dp_reg.c +++ b/drivers/video/exynos/exynos_dp_reg.c @@ -16,8 +16,6 @@ #include <video/exynos_dp.h> -#include <plat/cpu.h> - #include "exynos_dp_core.h" #include "exynos_dp_reg.h" @@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable) writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); } +void exynos_dp_init_analog_param(struct exynos_dp_device *dp) +{ + u32 reg; + + reg = TX_TERMINAL_CTRL_50_OHM; + writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1); + + reg = SEL_24M | TX_DVDD_BIT_1_0625V; + writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2); + + reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; + writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); + + reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | + TX_CUR1_2X | TX_CUR_8_MA; + writel(reg, dp->reg_base + 
EXYNOS_DP_PLL_FILTER_CTL_1); + + reg = CH3_AMP_400_MV | CH2_AMP_400_MV | + CH1_AMP_400_MV | CH0_AMP_400_MV; + writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL); +} + void exynos_dp_init_interrupt(struct exynos_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ @@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp) { u32 reg; - writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); - exynos_dp_stop_video(dp); exynos_dp_enable_video_mute(dp, 0); @@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp) writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); + exynos_dp_init_analog_param(dp); exynos_dp_init_interrupt(dp); } +void exynos_dp_swreset(struct exynos_dp_device *dp) +{ + writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); +} + void exynos_dp_config_interrupt(struct exynos_dp_device *dp) { u32 reg; @@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, void exynos_dp_init_analog_func(struct exynos_dp_device *dp) { u32 reg; + int timeout_loop = 0; exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); @@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp) writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); /* Power up PLL */ - if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) + if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { exynos_dp_set_pll_power_down(dp, 0); + while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { + timeout_loop++; + if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { + dev_err(dp->dev, "failed to get pll lock status\n"); + return; + } + usleep_range(10, 20); + } + } + /* Enable Serdes FIFO function and Link symbol clock domain module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h index 42f608e2a43e..125b27cd57ae 100644 --- a/drivers/video/exynos/exynos_dp_reg.h +++ b/drivers/video/exynos/exynos_dp_reg.h @@ -24,6 +24,12 @@ #define EXYNOS_DP_LANE_MAP 0x35C +#define EXYNOS_DP_ANALOG_CTL_1 0x370 +#define EXYNOS_DP_ANALOG_CTL_2 0x374 +#define EXYNOS_DP_ANALOG_CTL_3 0x378 +#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C +#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380 + #define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390 #define EXYNOS_DP_COMMON_INT_STA_1 0x3C4 @@ -166,6 +172,29 @@ #define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0) #define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0) +/* EXYNOS_DP_ANALOG_CTL_1 */ +#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4) + +/* EXYNOS_DP_ANALOG_CTL_2 */ +#define SEL_24M (0x1 << 3) +#define TX_DVDD_BIT_1_0625V (0x4 << 0) + +/* EXYNOS_DP_ANALOG_CTL_3 */ +#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5) +#define VCO_BIT_600_MICRO (0x5 << 0) + +/* EXYNOS_DP_PLL_FILTER_CTL_1 */ +#define PD_RING_OSC (0x1 << 6) +#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4) +#define TX_CUR1_2X (0x1 << 2) +#define TX_CUR_8_MA (0x2 << 0) + +/* EXYNOS_DP_TX_AMP_TUNING_CTL */ +#define CH3_AMP_400_MV (0x0 << 24) +#define CH2_AMP_400_MV (0x0 << 16) +#define CH1_AMP_400_MV (0x0 << 8) +#define CH0_AMP_400_MV (0x0 << 0) + /* EXYNOS_DP_AUX_HW_RETRY_CTL */ #define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8) #define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3) diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c index 557091dc0e97..6c1f5c314a42 100644 --- a/drivers/video/exynos/exynos_mipi_dsi.c +++ b/drivers/video/exynos/exynos_mipi_dsi.c @@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data 
*to_dsim_plat(struct platform_device } static struct regulator_bulk_data supplies[] = { - { .supply = "vdd10", }, + { .supply = "vdd11", }, { .supply = "vdd18", }, }; @@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim) /* set display timing. */ exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config); + exynos_mipi_dsi_init_interrupt(dsim); + /* * data from Display controller(FIMD) is transferred in video mode * but in case of command mode, all settigs is updated to registers. @@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev) goto err_platform_get_irq; } + init_completion(&dsim_wr_comp); + init_completion(&dsim_rd_comp); + platform_set_drvdata(pdev, dsim); + ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler, - IRQF_SHARED, pdev->name, dsim); + IRQF_SHARED, dev_name(&pdev->dev), dsim); if (ret != 0) { dev_err(&pdev->dev, "failed to request dsim irq\n"); ret = -EINVAL; goto err_bind; } - init_completion(&dsim_wr_comp); - init_completion(&dsim_rd_comp); - - /* enable interrupt */ + /* enable interrupts */ exynos_mipi_dsi_init_interrupt(dsim); /* initialize mipi-dsi client(lcd panel). */ if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe) dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev); - /* in case that mipi got enabled at bootloader. */ - if (dsim_pd->enabled) - goto out; + /* in case mipi-dsi has been enabled by bootloader */ + if (dsim_pd->enabled) { + exynos_mipi_regulator_enable(dsim); + goto done; + } /* lcd panel power on. */ if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on) @@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev) dsim->suspended = false; -out: +done: platform_set_drvdata(pdev, dsim); - dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n", - (dsim_config->e_interface == DSIM_COMMAND) ? - "CPU" : "RGB"); + dev_dbg(&pdev->dev, "%s() completed sucessfuly (%s mode)\n", __func__, + dsim_config->e_interface == DSIM_COMMAND ? 
"CPU" : "RGB"); return 0; @@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_mipi_dsi_suspend(struct platform_device *pdev, - pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int exynos_mipi_dsi_suspend(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; @@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev, return 0; } -static int exynos_mipi_dsi_resume(struct platform_device *pdev) +static int exynos_mipi_dsi_resume(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; @@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev) return 0; } -#else -#define exynos_mipi_dsi_suspend NULL -#define exynos_mipi_dsi_resume NULL #endif +static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume) +}; + static struct platform_driver exynos_mipi_dsi_driver = { .probe = exynos_mipi_dsi_probe, .remove = __devexit_p(exynos_mipi_dsi_remove), - .suspend = exynos_mipi_dsi_suspend, - .resume = exynos_mipi_dsi_resume, .driver = { .name = "exynos-mipi-dsim", .owner = THIS_MODULE, + .pm = &exynos_mipi_dsi_pm_ops, }, }; diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c index 14909c1d3832..47b533a183be 100644 --- a/drivers/video/exynos/exynos_mipi_dsi_common.c +++ b/drivers/video/exynos/exynos_mipi_dsi_common.c @@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = { irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id) { - unsigned int intsrc = 0; - unsigned int intmsk = 0; - struct mipi_dsim_device *dsim = NULL; - - dsim = dev_id; - if (!dsim) { - dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n", - __func__); - return IRQ_HANDLED; + struct mipi_dsim_device *dsim = dev_id; + unsigned int intsrc, intmsk; + + if (dsim == NULL) { + dev_err(dsim->dev, "%s: wrong parameter\n", __func__); + return IRQ_NONE; } intsrc = exynos_mipi_dsi_read_interrupt(dsim); intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim); + intmsk = ~intmsk & intsrc; - intmsk = ~(intmsk) & intsrc; - - switch (intmsk) { - case INTMSK_RX_DONE: + if (intsrc & INTMSK_RX_DONE) { complete(&dsim_rd_comp); dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n"); - break; - case INTMSK_FIFO_EMPTY: + } + if (intsrc & INTMSK_FIFO_EMPTY) { complete(&dsim_wr_comp); dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n"); - break; - default: - break; } exynos_mipi_dsi_clear_interrupt(dsim, intmsk); @@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim, if (dsim_config->auto_vertical_cnt == 0) { exynos_mipi_dsi_set_main_disp_vporch(dsim, dsim_config->cmd_allow, - timing->upper_margin, - timing->lower_margin); + timing->lower_margin, + timing->upper_margin); exynos_mipi_dsi_set_main_disp_hporch(dsim, - timing->left_margin, - timing->right_margin); + timing->right_margin, + timing->left_margin); exynos_mipi_dsi_set_main_disp_sync_area(dsim, timing->vsync_len, timing->hsync_len); diff --git a/drivers/video/exynos/s6e8ax0.c 
b/drivers/video/exynos/s6e8ax0.c index 4aa9ac6218bf..05d080b63bc0 100644 --- a/drivers/video/exynos/s6e8ax0.c +++ b/drivers/video/exynos/s6e8ax0.c @@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd) 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8 }; + static const unsigned char data_to_send_panel_reverse[] = { + 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d, + 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08, + 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, + 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1 + }; - ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, - data_to_send, ARRAY_SIZE(data_to_send)); + if (lcd->dsim_dev->panel_reverse) + ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, + data_to_send_panel_reverse, + ARRAY_SIZE(data_to_send_panel_reverse)); + else + ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, + data_to_send, ARRAY_SIZE(data_to_send)); } static void s6e8ax0_display_cond(struct s6e8ax0 *lcd) diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index c27e153d8882..1ddeb11659d4 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -23,7 +23,7 @@ #include <linux/rmap.h> #include <linux/pagemap.h> -struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) +static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) { void *screen_base = (void __force *) info->screen_base; struct page *page; @@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); + /* first write in this cycle, notify the driver */ + if (fbdefio->first_io && list_empty(&fbdefio->pagelist)) + fbdefio->first_io(info); + /* * We want the page to remain locked from ->page_mkwrite until * the PTE is marked dirty to avoid page_mkclean() being called diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 67afa9c2289d..a55e3669d135 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc); */ void framebuffer_release(struct fb_info *info) { + if (!info) + return; kfree(info->apertures); kfree(info); } diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 6af3f16754f0..458c00664ade 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info) diu_ops.set_pixel_clock(var->pixclock); out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */ - out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */ out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */ out_be32(&hw->plut, 0x01F5F666); diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c index 02fd2263610c..bdcbfbae2777 100644 --- a/drivers/video/intelfb/intelfbdrv.c +++ b/drivers/video/intelfb/intelfbdrv.c @@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev, + dinfo->fb.size); if (!dinfo->aperture.virtual) { ERR_MSG("Cannot remap FB region.\n"); + agp_backend_release(bridge); cleanup(dinfo); return -ENODEV; } @@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev, INTEL_REG_SIZE); if (!dinfo->mmio_base) { ERR_MSG("Cannot remap MMIO region.\n"); + agp_backend_release(bridge); cleanup(dinfo); return -ENODEV; } diff --git a/drivers/video/mb862xx/mb862xx-i2c.c 
b/drivers/video/mb862xx/mb862xx-i2c.c index 273769bb8deb..c87e17afb3e2 100644 --- a/drivers/video/mb862xx/mb862xx-i2c.c +++ b/drivers/video/mb862xx/mb862xx-i2c.c @@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last) return 1; } -void mb862xx_i2c_stop(struct i2c_adapter *adap) +static void mb862xx_i2c_stop(struct i2c_adapter *adap) { struct mb862xxfb_par *par = adap->algo_data; diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c index 11a7a333701d..00ce1f34b496 100644 --- a/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/mb862xx/mb862xxfbdrv.c @@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev, static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL); -irqreturn_t mb862xx_intr(int irq, void *dev_id) +static irqreturn_t mb862xx_intr(int irq, void *dev_id) { struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id; unsigned long reg_ist, mask; diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c index 55bf6196b7a0..ab0a8e527333 100644 --- a/drivers/video/mbx/mbxfb.c +++ b/drivers/video/mbx/mbxfb.c @@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev) mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, res_size(mfbi->fb_req)); - if (!mfbi->reg_virt_addr) { + if (!mfbi->fb_virt_addr) { dev_err(&dev->dev, "failed to ioremap frame buffer\n"); ret = -EINVAL; goto err4; diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 6c6bc578d0fc..abbe691047bd 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev) return 0; } +static void mxsfb_shutdown(struct platform_device *pdev) +{ + struct fb_info *fb_info = platform_get_drvdata(pdev); + struct mxsfb_info *host = to_imxfb_host(fb_info); + + /* + * Force stop the LCD controller as keeping it running during reboot + * might interfere with the BootROM's boot mode pads sampling. + */ + writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR); +} + static struct platform_device_id mxsfb_devtype[] = { { .name = "imx23-fb", @@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype); static struct platform_driver mxsfb_driver = { .probe = mxsfb_probe, .remove = __devexit_p(mxsfb_remove), + .shutdown = mxsfb_shutdown, .id_table = mxsfb_devtype, .driver = { .name = DRIVER_NAME, diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig index 1e7536d9a8fc..b48f95f0dfe2 100644 --- a/drivers/video/omap/Kconfig +++ b/drivers/video/omap/Kconfig @@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID the Mobile Industry Processor Interface DBI-C/DCS specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3) -config FB_OMAP_BOOTLOADER_INIT - bool "Check bootloader initialization" - depends on FB_OMAP - help - Say Y here if you want to enable checking if the bootloader has - already initialized the display controller. In this case the - driver will skip the initialization. 
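The first_io hook added to fb_defio.c above pairs with the auo_k190x deferred-io code earlier in this series: it fires on the first page fault of a dirty cycle, before the delayed work runs, so a driver can start resuming its controller as early as possible. A driver would wire it up roughly like this (a sketch; the callback names and the delay value are placeholders):

	static struct fb_deferred_io example_defio = {
		.delay       = HZ / 10,                  /* flush dirty pages at ~10 Hz */
		.first_io    = example_dpy_first_io,     /* e.g. pm_runtime_get(info->device) */
		.deferred_io = example_dpy_deferred_io,  /* walks the pagelist and pushes updates */
	};

	info->fbdefio = &example_defio;
	fb_deferred_io_init(info);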
- config FB_OMAP_CONSISTENT_DMA_SIZE int "Consistent DMA memory size (MB)" depends on FB_OMAP diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c index 74e7cf078505..ad741c3d1ae1 100644 --- a/drivers/video/omap2/displays/panel-acx565akm.c +++ b/drivers/video/omap2/displays/panel-acx565akm.c @@ -739,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev, } } -static void acx_panel_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - static int acx_panel_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { @@ -762,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = { .resume = acx_panel_resume, .set_timings = acx_panel_set_timings, - .get_timings = acx_panel_get_timings, .check_timings = acx_panel_check_timings, .get_recommended_bpp = acx_get_recommended_bpp, diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c index 30fe4dfeb227..e42f9dc22123 100644 --- a/drivers/video/omap2/displays/panel-generic-dpi.c +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = { .name = "innolux_at080tn52", }, + + /* Mitsubishi AA084SB01 */ + { + { + .x_res = 800, + .y_res = 600, + .pixel_clock = 40000, + + .hsw = 1, + .hfp = 254, + .hbp = 1, + + .vsw = 1, + .vfp = 26, + .vbp = 1, + }, + .config = OMAP_DSS_LCD_TFT, + .name = "mitsubishi_aa084sb01", + }, + /* EDT ET0500G0DH6 */ + { + { + .x_res = 800, + .y_res = 480, + .pixel_clock = 33260, + + .hsw = 128, + .hfp = 216, + .hbp = 40, + + .vsw = 2, + .vfp = 35, + .vbp = 10, + }, + .config = OMAP_DSS_LCD_TFT, + .name = "edt_et0500g0dh6", + }, + + /* Prime-View PD050VL1 */ + { + { + .x_res = 640, + .y_res = 480, + + .pixel_clock = 25000, + + .hsw = 96, + .hfp = 18, + .hbp = 46, + + .vsw = 2, + .vfp = 10, + .vbp = 33, + }, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, + .name = "primeview_pd050vl1", + }, + + /* Prime-View PM070WL4 */ + { + { + .x_res = 800, + .y_res = 480, + + .pixel_clock = 32000, + + .hsw = 128, + .hfp = 42, + .hbp = 86, + + .vsw = 2, + .vfp = 10, + .vbp = 33, + }, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, + .name = "primeview_pm070wl4", + }, + + /* Prime-View PD104SLF */ + { + { + .x_res = 800, + .y_res = 600, + + .pixel_clock = 40000, + + .hsw = 128, + .hfp = 42, + .hbp = 86, + + .vsw = 4, + .vfp = 1, + .vbp = 23, + }, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, + .name = "primeview_pd104slf", + }, }; struct panel_drv_data { @@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev, dpi_set_timings(dssdev, timings); } -static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { @@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = { .resume = generic_dpi_panel_resume, .set_timings = generic_dpi_panel_set_timings, - .get_timings = generic_dpi_panel_get_timings, .check_timings = generic_dpi_panel_check_timings, .driver = { diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c index 
dc9408dc93d1..4a34cdc1371b 100644 --- a/drivers/video/omap2/displays/panel-n8x0.c +++ b/drivers/video/omap2/displays/panel-n8x0.c @@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev) return 0; } -static void n8x0_panel_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { @@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = { .get_resolution = n8x0_panel_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, - .get_timings = n8x0_panel_get_timings, - .driver = { .name = "n8x0_panel", .owner = THIS_MODULE, diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index b2dd88b48420..2ce9992f403b 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -30,7 +30,6 @@ #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/slab.h> -#include <linux/regulator/consumer.h> #include <linux/mutex.h> #include <video/omapdss.h> @@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); static int taal_panel_reset(struct omap_dss_device *dssdev); -struct panel_regulator { - struct regulator *regulator; - const char *name; - int min_uV; - int max_uV; -}; - -static void free_regulators(struct panel_regulator *regulators, int n) -{ - int i; - - for (i = 0; i < n; i++) { - /* disable/put in reverse order */ - regulator_disable(regulators[n - i - 1].regulator); - regulator_put(regulators[n - i - 1].regulator); - } -} - -static int init_regulators(struct omap_dss_device *dssdev, - struct panel_regulator *regulators, int n) -{ - int r, i, v; - - for (i = 0; i < n; i++) { - struct regulator *reg; - - reg = regulator_get(&dssdev->dev, regulators[i].name); - if (IS_ERR(reg)) { - dev_err(&dssdev->dev, "failed to get regulator %s\n", - regulators[i].name); - r = PTR_ERR(reg); - goto err; - } - - /* FIXME: better handling of fixed vs. 
variable regulators */ - v = regulator_get_voltage(reg); - if (v < regulators[i].min_uV || v > regulators[i].max_uV) { - r = regulator_set_voltage(reg, regulators[i].min_uV, - regulators[i].max_uV); - if (r) { - dev_err(&dssdev->dev, - "failed to set regulator %s voltage\n", - regulators[i].name); - regulator_put(reg); - goto err; - } - } - - r = regulator_enable(reg); - if (r) { - dev_err(&dssdev->dev, "failed to enable regulator %s\n", - regulators[i].name); - regulator_put(reg); - goto err; - } - - regulators[i].regulator = reg; - } - - return 0; - -err: - free_regulators(regulators, i); - - return r; -} - /** * struct panel_config - panel configuration * @name: panel name @@ -150,8 +82,6 @@ struct panel_config { unsigned int low; } reset_sequence; - struct panel_regulator *regulators; - int num_regulators; }; enum { @@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = { .update_status = taal_bl_update_status, }; -static void taal_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - static void taal_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres) { @@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev) atomic_set(&td->do_update, 0); - r = init_regulators(dssdev, panel_config->regulators, - panel_config->num_regulators); - if (r) - goto err_reg; - td->workqueue = create_singlethread_workqueue("taal_esd"); if (td->workqueue == NULL) { dev_err(&dssdev->dev, "can't create ESD workqueue\n"); @@ -1087,8 +1006,6 @@ err_bl: err_rst_gpio: destroy_workqueue(td->workqueue); err_wq: - free_regulators(panel_config->regulators, panel_config->num_regulators); -err_reg: kfree(td); err: return r; @@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev) /* reset, to be sure that the panel is in a valid state */ taal_hw_reset(dssdev); - free_regulators(td->panel_config->regulators, - td->panel_config->num_regulators); - if (gpio_is_valid(panel_data->reset_gpio)) gpio_free(panel_data->reset_gpio); @@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = { .run_test = taal_run_test, .memory_read = taal_memory_read, - .get_timings = taal_get_timings, - .driver = { .name = "taal", .owner = THIS_MODULE, diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c index 52637fa8fda8..bff306e041ca 100644 --- a/drivers/video/omap2/displays/panel-tfp410.c +++ b/drivers/video/omap2/displays/panel-tfp410.c @@ -47,13 +47,9 @@ struct panel_drv_data { struct mutex lock; int pd_gpio; -}; -static inline struct tfp410_platform_data -*get_pdata(const struct omap_dss_device *dssdev) -{ - return dssdev->data; -} + struct i2c_adapter *i2c_adapter; +}; static int tfp410_power_on(struct omap_dss_device *dssdev) { @@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev) goto err0; if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value(ddata->pd_gpio, 1); + gpio_set_value_cansleep(ddata->pd_gpio, 1); return 0; err0: @@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev) return; if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value(ddata->pd_gpio, 0); + gpio_set_value_cansleep(ddata->pd_gpio, 0); omapdss_dpi_display_disable(dssdev); } static int tfp410_probe(struct omap_dss_device *dssdev) { - struct tfp410_platform_data *pdata = get_pdata(dssdev); struct panel_drv_data *ddata; int r; + int i2c_bus_num; - ddata = kzalloc(sizeof(*ddata), GFP_KERNEL); + ddata = devm_kzalloc(&dssdev->dev, 
sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev) ddata->dssdev = dssdev; mutex_init(&ddata->lock); - if (pdata) + if (dssdev->data) { + struct tfp410_platform_data *pdata = dssdev->data; + ddata->pd_gpio = pdata->power_down_gpio; - else + i2c_bus_num = pdata->i2c_bus_num; + } else { ddata->pd_gpio = -1; + i2c_bus_num = -1; + } if (gpio_is_valid(ddata->pd_gpio)) { r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW, @@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev) if (r) { dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n", ddata->pd_gpio); - ddata->pd_gpio = -1; + return r; } } + if (i2c_bus_num != -1) { + struct i2c_adapter *adapter; + + adapter = i2c_get_adapter(i2c_bus_num); + if (!adapter) { + dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n", + i2c_bus_num); + r = -EINVAL; + goto err_i2c; + } + + ddata->i2c_adapter = adapter; + } + dev_set_drvdata(&dssdev->dev, ddata); return 0; +err_i2c: + if (gpio_is_valid(ddata->pd_gpio)) + gpio_free(ddata->pd_gpio); + return r; } static void __exit tfp410_remove(struct omap_dss_device *dssdev) @@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev) mutex_lock(&ddata->lock); + if (ddata->i2c_adapter) + i2c_put_adapter(ddata->i2c_adapter); + if (gpio_is_valid(ddata->pd_gpio)) gpio_free(ddata->pd_gpio); dev_set_drvdata(&dssdev->dev, NULL); mutex_unlock(&ddata->lock); - - kfree(ddata); } static int tfp410_enable(struct omap_dss_device *dssdev) @@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev, u8 *edid, int len) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); - struct tfp410_platform_data *pdata = get_pdata(dssdev); - struct i2c_adapter *adapter; int r, l, bytes_read; mutex_lock(&ddata->lock); - if (pdata->i2c_bus_num == 0) { + if (!ddata->i2c_adapter) { r = -ENODEV; goto err; } - adapter = i2c_get_adapter(pdata->i2c_bus_num); - if (!adapter) { - dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n", - pdata->i2c_bus_num); - r = -EINVAL; - goto err; - } - l = min(EDID_LENGTH, len); - r = tfp410_ddc_read(adapter, edid, l, 0); + r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0); if (r) goto err; @@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev, if (len > EDID_LENGTH && edid[0x7e] > 0) { l = min(EDID_LENGTH, len - EDID_LENGTH); - r = tfp410_ddc_read(adapter, edid + EDID_LENGTH, + r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH, l, EDID_LENGTH); if (r) goto err; @@ -319,21 +329,15 @@ err: static bool tfp410_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); - struct tfp410_platform_data *pdata = get_pdata(dssdev); - struct i2c_adapter *adapter; unsigned char out; int r; mutex_lock(&ddata->lock); - if (pdata->i2c_bus_num == 0) - goto out; - - adapter = i2c_get_adapter(pdata->i2c_bus_num); - if (!adapter) + if (!ddata->i2c_adapter) goto out; - r = tfp410_ddc_read(adapter, &out, 1, 0); + r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0); mutex_unlock(&ddata->lock); diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 32f3fcd7f0f0..4b6448b3c31f 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = { static int 
tpo_td043_power_on(struct tpo_td043_device *tpo_td043) { int nreset_gpio = tpo_td043->nreset_gpio; + int r; if (tpo_td043->powered_on) return 0; - regulator_enable(tpo_td043->vcc_reg); + r = regulator_enable(tpo_td043->vcc_reg); + if (r != 0) + return r; - /* wait for regulator to stabilize */ + /* wait for panel to stabilize */ msleep(160); if (gpio_is_valid(nreset_gpio)) @@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev) gpio_free(nreset_gpio); } +static void tpo_td043_set_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + dpi_set_timings(dssdev, timings); +} + +static int tpo_td043_check_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + return dpi_check_timings(dssdev, timings); +} + static struct omap_dss_driver tpo_td043_driver = { .probe = tpo_td043_probe, .remove = tpo_td043_remove, @@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = { .set_mirror = tpo_td043_set_hmirror, .get_mirror = tpo_td043_get_hmirror, + .set_timings = tpo_td043_set_timings, + .check_timings = tpo_td043_check_timings, + .driver = { .name = "tpo_td043mtea1_panel", .owner = THIS_MODULE, diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 7be7c06a249e..43324e5ed25f 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI HDMI Interface. This adds the High Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI specification. +config OMAP4_DSS_HDMI_AUDIO + bool + depends on OMAP4_DSS_HDMI + config OMAP2_DSS_SDI bool "SDI support" depends on ARCH_OMAP3 @@ -90,15 +94,6 @@ config OMAP2_DSS_DSI See http://www.mipi.org/ for DSI spesifications. -config OMAP2_DSS_FAKE_VSYNC - bool "Fake VSYNC irq from manual update displays" - default n - help - If this is selected, DSI will generate a fake DISPC VSYNC interrupt - when DSI has sent a frame. This is only needed with DSI or RFBI - displays using manual mode, and you want VSYNC to, for example, - time animation. 
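[Editor's note] The panel-tpo-td043mtea1.c hunk above changes tpo_td043_power_on() to propagate the return value of regulator_enable() instead of ignoring it. Below is a minimal, standalone C sketch of that check-and-propagate pattern; the stubbed enable function, error code and names are illustrative only and are not the driver's real API.

    #include <stdio.h>

    /* Stand-in for regulator_enable(): 0 on success, negative errno on failure. */
    static int fake_regulator_enable(int *powered)
    {
            if (!powered)
                    return -22;     /* -EINVAL, illustrative */
            *powered = 1;
            return 0;
    }

    /* Mirrors the fixed power-on flow: bail out early if the supply fails. */
    static int panel_power_on(int *powered)
    {
            int r = fake_regulator_enable(powered);
            if (r != 0)
                    return r;       /* propagate the error instead of dropping it */
            /* the real driver then waits for the panel to stabilize (msleep(160)) */
            return 0;
    }

    int main(void)
    {
            int powered = 0;
            printf("power_on -> %d (powered=%d)\n", panel_power_on(&powered), powered);
            printf("power_on(NULL) -> %d\n", panel_power_on(NULL));
            return 0;
    }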
- config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" range 0 32 diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c index b10b3bc1931e..ab22cc224f3e 100644 --- a/drivers/video/omap2/dss/apply.c +++ b/drivers/video/omap2/dss/apply.c @@ -99,6 +99,11 @@ struct mgr_priv_data { /* If true, a display is enabled using this manager */ bool enabled; + + bool extra_info_dirty; + bool shadow_extra_info_dirty; + + struct omap_video_timings timings; }; static struct { @@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr) } static int dss_check_settings_low(struct omap_overlay_manager *mgr, - struct omap_dss_device *dssdev, bool applying) + bool applying) { struct omap_overlay_info *oi; struct omap_overlay_manager_info *mi; @@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr, mp = get_mgr_priv(mgr); + if (!mp->enabled) + return 0; + if (applying && mp->user_info_dirty) mi = &mp->user_info; else @@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr, ois[ovl->id] = oi; } - return dss_mgr_check(mgr, dssdev, mi, ois); + return dss_mgr_check(mgr, mi, &mp->timings, ois); } /* * check manager and overlay settings using overlay_info from data->info */ -static int dss_check_settings(struct omap_overlay_manager *mgr, - struct omap_dss_device *dssdev) +static int dss_check_settings(struct omap_overlay_manager *mgr) { - return dss_check_settings_low(mgr, dssdev, false); + return dss_check_settings_low(mgr, false); } /* * check manager and overlay settings using overlay_info from ovl->info if * dirty and from data->info otherwise */ -static int dss_check_settings_apply(struct omap_overlay_manager *mgr, - struct omap_dss_device *dssdev) +static int dss_check_settings_apply(struct omap_overlay_manager *mgr) { - return dss_check_settings_low(mgr, dssdev, true); + return dss_check_settings_low(mgr, true); } static bool need_isr(void) @@ -261,6 +267,20 @@ static bool need_isr(void) if (mp->shadow_info_dirty) return true; + /* + * NOTE: we don't check extra_info flags for disabled + * managers, once the manager is enabled, the extra_info + * related manager changes will be taken in by HW. 
+ */ + + /* to write new values to registers */ + if (mp->extra_info_dirty) + return true; + + /* to set GO bit */ + if (mp->shadow_extra_info_dirty) + return true; + list_for_each_entry(ovl, &mgr->overlays, list) { struct ovl_priv_data *op; @@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr) mp = get_mgr_priv(mgr); - if (mp->shadow_info_dirty) + if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty) return true; list_for_each_entry(ovl, &mgr->overlays, list) { @@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr) /* returns true if an extra_info field is currently being updated */ static bool extra_info_update_ongoing(void) { - const int num_ovls = omap_dss_get_num_overlays(); - struct ovl_priv_data *op; - struct omap_overlay *ovl; - struct mgr_priv_data *mp; + const int num_mgrs = dss_feat_get_num_mgrs(); int i; - for (i = 0; i < num_ovls; ++i) { - ovl = omap_dss_get_overlay(i); - op = get_ovl_priv(ovl); - - if (!ovl->manager) - continue; + for (i = 0; i < num_mgrs; ++i) { + struct omap_overlay_manager *mgr; + struct omap_overlay *ovl; + struct mgr_priv_data *mp; - mp = get_mgr_priv(ovl->manager); + mgr = omap_dss_get_overlay_manager(i); + mp = get_mgr_priv(mgr); if (!mp->enabled) continue; @@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void) if (!mp->updating) continue; - if (op->extra_info_dirty || op->shadow_extra_info_dirty) + if (mp->extra_info_dirty || mp->shadow_extra_info_dirty) return true; + + list_for_each_entry(ovl, &mgr->overlays, list) { + struct ovl_priv_data *op = get_ovl_priv(ovl); + + if (op->extra_info_dirty || op->shadow_extra_info_dirty) + return true; + } } return false; @@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) oi = &op->info; + mp = get_mgr_priv(ovl->manager); + replication = dss_use_replication(ovl->manager->device, oi->color_mode); ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; - r = dispc_ovl_setup(ovl->id, oi, ilace, replication); + r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings); if (r) { /* * We can't do much here, as this function can be called from @@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) return; } - mp = get_mgr_priv(ovl->manager); - op->info_dirty = false; if (mp->updating) op->shadow_info_dirty = true; @@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr) } } +static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + DSSDBGF("%d", mgr->id); + + if (!mp->extra_info_dirty) + return; + + dispc_mgr_set_timings(mgr->id, &mp->timings); + + mp->extra_info_dirty = false; + if (mp->updating) + mp->shadow_extra_info_dirty = true; +} + static void dss_write_regs_common(void) { const int num_mgrs = omap_dss_get_num_overlay_managers(); @@ -646,7 +685,7 @@ static void dss_write_regs(void) if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) continue; - r = dss_check_settings(mgr, mgr->device); + r = dss_check_settings(mgr); if (r) { DSSERR("cannot write registers for manager %s: " "illegal configuration\n", mgr->name); @@ -654,6 +693,7 @@ static void dss_write_regs(void) } dss_mgr_write_regs(mgr); + dss_mgr_write_regs_extra(mgr); } } @@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr) mp = get_mgr_priv(mgr); mp->shadow_info_dirty = false; + mp->shadow_extra_info_dirty = false; list_for_each_entry(ovl, &mgr->overlays, list) { op = get_ovl_priv(ovl); @@ -711,7 +752,7 
@@ void dss_mgr_start_update(struct omap_overlay_manager *mgr) WARN_ON(mp->updating); - r = dss_check_settings(mgr, mgr->device); + r = dss_check_settings(mgr); if (r) { DSSERR("cannot start manual update: illegal configuration\n"); spin_unlock_irqrestore(&data_lock, flags); @@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr) } dss_mgr_write_regs(mgr); + dss_mgr_write_regs_extra(mgr); dss_write_regs_common(); @@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) spin_lock_irqsave(&data_lock, flags); - r = dss_check_settings_apply(mgr, mgr->device); + r = dss_check_settings_apply(mgr); if (r) { spin_unlock_irqrestore(&data_lock, flags); DSSERR("failed to apply settings: illegal configuration.\n"); @@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl, bool use_fifo_merge) { struct ovl_priv_data *op = get_ovl_priv(ovl); - struct omap_dss_device *dssdev; u32 fifo_low, fifo_high; if (!op->enabled && !op->enabling) return; - dssdev = ovl->manager->device; - dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high, - use_fifo_merge); + use_fifo_merge, ovl_manual_update(ovl)); dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); } @@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr) mp->enabled = true; - r = dss_check_settings(mgr, mgr->device); + r = dss_check_settings(mgr); if (r) { DSSERR("failed to enable manager %d: check_settings failed\n", mgr->id); @@ -1225,6 +1264,35 @@ err: return r; } +static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr, + struct omap_video_timings *timings) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->timings = *timings; + mp->extra_info_dirty = true; +} + +void dss_mgr_set_timings(struct omap_overlay_manager *mgr, + struct omap_video_timings *timings) +{ + unsigned long flags; + + mutex_lock(&apply_lock); + + spin_lock_irqsave(&data_lock, flags); + + dss_apply_mgr_timings(mgr, timings); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + wait_pending_extra_info_updates(); + + mutex_unlock(&apply_lock); +} int dss_ovl_set_info(struct omap_overlay *ovl, struct omap_overlay_info *info) @@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl) op->enabling = true; - r = dss_check_settings(ovl->manager, ovl->manager->device); + r = dss_check_settings(ovl->manager); if (r) { DSSERR("failed to enable overlay %d: check_settings failed\n", ovl->id); diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index e8a120771ac6..72ded9cd2cb0 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -43,6 +43,8 @@ static struct { struct regulator *vdds_dsi_reg; struct regulator *vdds_sdi_reg; + + const char *default_display_name; } core; static char *def_disp_name; @@ -54,9 +56,6 @@ bool dss_debug; module_param_named(debug, dss_debug, bool, 0644); #endif -static int omap_dss_register_device(struct omap_dss_device *); -static void omap_dss_unregister_device(struct omap_dss_device *); - /* REGULATORS */ struct regulator *dss_get_vdds_dsi(void) @@ -87,6 +86,51 @@ struct regulator *dss_get_vdds_sdi(void) return reg; } +int dss_get_ctx_loss_count(struct device *dev) +{ + struct omap_dss_board_info *board_data = core.pdev->dev.platform_data; + int cnt; + + if (!board_data->get_context_loss_count) + return -ENOENT; + + cnt = board_data->get_context_loss_count(dev); + + WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt); + + return cnt; +} + +int 
dss_dsi_enable_pads(int dsi_id, unsigned lane_mask) +{ + struct omap_dss_board_info *board_data = core.pdev->dev.platform_data; + + if (!board_data->dsi_enable_pads) + return -ENOENT; + + return board_data->dsi_enable_pads(dsi_id, lane_mask); +} + +void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask) +{ + struct omap_dss_board_info *board_data = core.pdev->dev.platform_data; + + if (!board_data->dsi_enable_pads) + return; + + return board_data->dsi_disable_pads(dsi_id, lane_mask); +} + +int dss_set_min_bus_tput(struct device *dev, unsigned long tput) +{ + struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; + + if (pdata->set_min_bus_tput) + return pdata->set_min_bus_tput(dev, tput); + else + return 0; +} + #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) static int dss_debug_show(struct seq_file *s, void *unused) { @@ -121,34 +165,6 @@ static int dss_initialize_debugfs(void) debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, &dss_debug_dump_clocks, &dss_debug_fops); -#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir, - &dispc_dump_irqs, &dss_debug_fops); -#endif - -#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS) - dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops); -#endif - - debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, - &dss_dump_regs, &dss_debug_fops); - debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir, - &dispc_dump_regs, &dss_debug_fops); -#ifdef CONFIG_OMAP2_DSS_RFBI - debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir, - &rfbi_dump_regs, &dss_debug_fops); -#endif -#ifdef CONFIG_OMAP2_DSS_DSI - dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops); -#endif -#ifdef CONFIG_OMAP2_DSS_VENC - debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir, - &venc_dump_regs, &dss_debug_fops); -#endif -#ifdef CONFIG_OMAP4_DSS_HDMI - debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir, - &hdmi_dump_regs, &dss_debug_fops); -#endif return 0; } @@ -157,6 +173,19 @@ static void dss_uninitialize_debugfs(void) if (dss_debugfs_dir) debugfs_remove_recursive(dss_debugfs_dir); } + +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) +{ + struct dentry *d; + + d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, + write, &dss_debug_fops); + + if (IS_ERR(d)) + return PTR_ERR(d); + + return 0; +} #else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ static inline int dss_initialize_debugfs(void) { @@ -165,14 +194,18 @@ static inline int dss_initialize_debugfs(void) static inline void dss_uninitialize_debugfs(void) { } +static inline int dss_debugfs_create_file(const char *name, + void (*write)(struct seq_file *)) +{ + return 0; +} #endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ /* PLATFORM DEVICE */ -static int omap_dss_probe(struct platform_device *pdev) +static int __init omap_dss_probe(struct platform_device *pdev) { struct omap_dss_board_info *pdata = pdev->dev.platform_data; int r; - int i; core.pdev = pdev; @@ -187,28 +220,13 @@ static int omap_dss_probe(struct platform_device *pdev) if (r) goto err_debugfs; - for (i = 0; i < pdata->num_devices; ++i) { - struct omap_dss_device *dssdev = pdata->devices[i]; - - r = omap_dss_register_device(dssdev); - if (r) { - DSSERR("device %d %s register failed %d\n", i, - dssdev->name ?: "unnamed", r); - - while (--i >= 0) - omap_dss_unregister_device(pdata->devices[i]); - - goto err_register; - } - - if (def_disp_name && strcmp(def_disp_name, 
dssdev->name) == 0) - pdata->default_device = dssdev; - } + if (def_disp_name) + core.default_display_name = def_disp_name; + else if (pdata->default_device) + core.default_display_name = pdata->default_device->name; return 0; -err_register: - dss_uninitialize_debugfs(); err_debugfs: return r; @@ -216,17 +234,11 @@ err_debugfs: static int omap_dss_remove(struct platform_device *pdev) { - struct omap_dss_board_info *pdata = pdev->dev.platform_data; - int i; - dss_uninitialize_debugfs(); dss_uninit_overlays(pdev); dss_uninit_overlay_managers(pdev); - for (i = 0; i < pdata->num_devices; ++i) - omap_dss_unregister_device(pdata->devices[i]); - return 0; } @@ -251,7 +263,6 @@ static int omap_dss_resume(struct platform_device *pdev) } static struct platform_driver omap_dss_driver = { - .probe = omap_dss_probe, .remove = omap_dss_remove, .shutdown = omap_dss_shutdown, .suspend = omap_dss_suspend, @@ -326,7 +337,6 @@ static int dss_driver_probe(struct device *dev) int r; struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver); struct omap_dss_device *dssdev = to_dss_device(dev); - struct omap_dss_board_info *pdata = core.pdev->dev.platform_data; bool force; DSSDBG("driver_probe: dev %s/%s, drv %s\n", @@ -335,7 +345,8 @@ static int dss_driver_probe(struct device *dev) dss_init_device(core.pdev, dssdev); - force = pdata->default_device == dssdev; + force = core.default_display_name && + strcmp(core.default_display_name, dssdev->name) == 0; dss_recheck_connections(dssdev, force); r = dssdrv->probe(dssdev); @@ -381,6 +392,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver) if (dssdriver->get_recommended_bpp == NULL) dssdriver->get_recommended_bpp = omapdss_default_get_recommended_bpp; + if (dssdriver->get_timings == NULL) + dssdriver->get_timings = omapdss_default_get_timings; return driver_register(&dssdriver->driver); } @@ -427,27 +440,38 @@ static void omap_dss_dev_release(struct device *dev) reset_device(dev, 0); } -static int omap_dss_register_device(struct omap_dss_device *dssdev) +int omap_dss_register_device(struct omap_dss_device *dssdev, + struct device *parent, int disp_num) { - static int dev_num; - WARN_ON(!dssdev->driver_name); reset_device(&dssdev->dev, 1); dssdev->dev.bus = &dss_bus_type; - dssdev->dev.parent = &dss_bus; + dssdev->dev.parent = parent; dssdev->dev.release = omap_dss_dev_release; - dev_set_name(&dssdev->dev, "display%d", dev_num++); + dev_set_name(&dssdev->dev, "display%d", disp_num); return device_register(&dssdev->dev); } -static void omap_dss_unregister_device(struct omap_dss_device *dssdev) +void omap_dss_unregister_device(struct omap_dss_device *dssdev) { device_unregister(&dssdev->dev); } +static int dss_unregister_dss_dev(struct device *dev, void *data) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + omap_dss_unregister_device(dssdev); + return 0; +} + +void omap_dss_unregister_child_devices(struct device *parent) +{ + device_for_each_child(parent, NULL, dss_unregister_dss_dev); +} + /* BUS */ -static int omap_dss_bus_register(void) +static int __init omap_dss_bus_register(void) { int r; @@ -469,12 +493,56 @@ static int omap_dss_bus_register(void) } /* INIT */ +static int (*dss_output_drv_reg_funcs[])(void) __initdata = { +#ifdef CONFIG_OMAP2_DSS_DPI + dpi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_SDI + sdi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_RFBI + rfbi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_VENC + venc_init_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_DSI + 
dsi_init_platform_driver, +#endif +#ifdef CONFIG_OMAP4_DSS_HDMI + hdmi_init_platform_driver, +#endif +}; + +static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = { +#ifdef CONFIG_OMAP2_DSS_DPI + dpi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_SDI + sdi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_RFBI + rfbi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_VENC + venc_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_DSI + dsi_uninit_platform_driver, +#endif +#ifdef CONFIG_OMAP4_DSS_HDMI + hdmi_uninit_platform_driver, +#endif +}; + +static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)]; static int __init omap_dss_register_drivers(void) { int r; + int i; - r = platform_driver_register(&omap_dss_driver); + r = platform_driver_probe(&omap_dss_driver, omap_dss_probe); if (r) return r; @@ -490,40 +558,18 @@ static int __init omap_dss_register_drivers(void) goto err_dispc; } - r = rfbi_init_platform_driver(); - if (r) { - DSSERR("Failed to initialize rfbi platform driver\n"); - goto err_rfbi; - } - - r = venc_init_platform_driver(); - if (r) { - DSSERR("Failed to initialize venc platform driver\n"); - goto err_venc; - } - - r = dsi_init_platform_driver(); - if (r) { - DSSERR("Failed to initialize DSI platform driver\n"); - goto err_dsi; - } - - r = hdmi_init_platform_driver(); - if (r) { - DSSERR("Failed to initialize hdmi\n"); - goto err_hdmi; + /* + * It's ok if the output-driver register fails. It happens, for example, + * when there is no output-device (e.g. SDI for OMAP4). + */ + for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) { + r = dss_output_drv_reg_funcs[i](); + if (r == 0) + dss_output_drv_loaded[i] = true; } return 0; -err_hdmi: - dsi_uninit_platform_driver(); -err_dsi: - venc_uninit_platform_driver(); -err_venc: - rfbi_uninit_platform_driver(); -err_rfbi: - dispc_uninit_platform_driver(); err_dispc: dss_uninit_platform_driver(); err_dss: @@ -534,10 +580,13 @@ err_dss: static void __exit omap_dss_unregister_drivers(void) { - hdmi_uninit_platform_driver(); - dsi_uninit_platform_driver(); - venc_uninit_platform_driver(); - rfbi_uninit_platform_driver(); + int i; + + for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) { + if (dss_output_drv_loaded[i]) + dss_output_drv_unreg_funcs[i](); + } + dispc_uninit_platform_driver(); dss_uninit_platform_driver(); diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index ee30937482e1..4749ac356469 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx) return __raw_readl(dispc.base + idx); } -static int dispc_get_ctx_loss_count(void) -{ - struct device *dev = &dispc.pdev->dev; - struct omap_display_platform_data *pdata = dev->platform_data; - struct omap_dss_board_info *board_data = pdata->board_data; - int cnt; - - if (!board_data->get_context_loss_count) - return -ENOENT; - - cnt = board_data->get_context_loss_count(dev); - - WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt); - - return cnt; -} - #define SR(reg) \ dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ @@ -251,7 +234,7 @@ static void dispc_save_context(void) if (dss_has_feature(FEAT_CORE_CLK_DIV)) SR(DIVISOR); - dispc.ctx_loss_cnt = dispc_get_ctx_loss_count(); + dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev); dispc.ctx_valid = true; DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt); @@ -266,7 +249,7 @@ 
static void dispc_restore_context(void) if (!dispc.ctx_valid) return; - ctx = dispc_get_ctx_loss_count(); + ctx = dss_get_ctx_loss_count(&dispc.pdev->dev); if (ctx >= 0 && ctx == dispc.ctx_loss_cnt) return; @@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel) return false; } -static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel) -{ - struct omap_overlay_manager *mgr = - omap_dss_get_overlay_manager(channel); - - return mgr ? mgr->device : NULL; -} - u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) { switch (channel) { @@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; default: BUG(); + return 0; } } @@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel) return 0; default: BUG(); + return 0; } } @@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane, case OMAP_DSS_COLOR_XRGB16_1555: m = 0xf; break; default: - BUG(); break; + BUG(); return; } } else { switch (color_mode) { @@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane, case OMAP_DSS_COLOR_XRGB16_1555: m = 0xf; break; default: - BUG(); break; + BUG(); return; } } REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); } +static void dispc_ovl_configure_burst_type(enum omap_plane plane, + enum omap_dss_rotation_type rotation_type) +{ + if (dss_has_feature(FEAT_BURST_2D) == 0) + return; + + if (rotation_type == OMAP_DSS_ROT_TILER) + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29); + else + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29); +} + void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) { int shift; @@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) break; default: BUG(); + return; } val = FLD_MOD(val, chan, shift, shift); @@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane) break; default: BUG(); + return 0; } val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); @@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable) REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift); } -void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height) +static void dispc_mgr_set_size(enum omap_channel channel, u16 width, + u16 height) { u32 val; - BUG_ON((width > (1 << 11)) || (height > (1 << 11))); - val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - dispc_write_reg(DISPC_SIZE_MGR(channel), val); -} -void dispc_set_digit_size(u16 width, u16 height) -{ - u32 val; - BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); + dispc_write_reg(DISPC_SIZE_MGR(channel), val); } static void dispc_read_plane_fifo_sizes(void) @@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable) } void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, - u32 *fifo_low, u32 *fifo_high, bool use_fifomerge) + u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, + bool manual_update) { /* * All sizes are in bytes. 
Both the buffer and burst are made of @@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, * combined fifo size */ - if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { + if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { *fifo_low = ovl_fifo_size - burst_size * 2; *fifo_high = total_fifo_size - burst_size; } else { @@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane, dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp); } +static void dispc_ovl_set_accu_uv(enum omap_plane plane, + u16 orig_width, u16 orig_height, u16 out_width, u16 out_height, + bool ilace, enum omap_color_mode color_mode, u8 rotation) +{ + int h_accu2_0, h_accu2_1; + int v_accu2_0, v_accu2_1; + int chroma_hinc, chroma_vinc; + int idx; + + struct accu { + s8 h0_m, h0_n; + s8 h1_m, h1_n; + s8 v0_m, v0_n; + s8 v1_m, v1_n; + }; + + const struct accu *accu_table; + const struct accu *accu_val; + + static const struct accu accu_nv12[4] = { + { 0, 1, 0, 1 , -1, 2, 0, 1 }, + { 1, 2, -3, 4 , 0, 1, 0, 1 }, + { -1, 1, 0, 1 , -1, 2, 0, 1 }, + { -1, 2, -1, 2 , -1, 1, 0, 1 }, + }; + + static const struct accu accu_nv12_ilace[4] = { + { 0, 1, 0, 1 , -3, 4, -1, 4 }, + { -1, 4, -3, 4 , 0, 1, 0, 1 }, + { -1, 1, 0, 1 , -1, 4, -3, 4 }, + { -3, 4, -3, 4 , -1, 1, 0, 1 }, + }; + + static const struct accu accu_yuv[4] = { + { 0, 1, 0, 1, 0, 1, 0, 1 }, + { 0, 1, 0, 1, 0, 1, 0, 1 }, + { -1, 1, 0, 1, 0, 1, 0, 1 }, + { 0, 1, 0, 1, -1, 1, 0, 1 }, + }; + + switch (rotation) { + case OMAP_DSS_ROT_0: + idx = 0; + break; + case OMAP_DSS_ROT_90: + idx = 1; + break; + case OMAP_DSS_ROT_180: + idx = 2; + break; + case OMAP_DSS_ROT_270: + idx = 3; + break; + default: + BUG(); + return; + } + + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + if (ilace) + accu_table = accu_nv12_ilace; + else + accu_table = accu_nv12; + break; + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + accu_table = accu_yuv; + break; + default: + BUG(); + return; + } + + accu_val = &accu_table[idx]; + + chroma_hinc = 1024 * orig_width / out_width; + chroma_vinc = 1024 * orig_height / out_height; + + h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024; + h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024; + v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024; + v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024; + + dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0); + dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1); +} + static void dispc_ovl_set_scaling_common(enum omap_plane plane, u16 orig_width, u16 orig_height, u16 out_width, u16 out_height, @@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane, REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); return; } + + dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width, + out_height, ilace, color_mode, rotation); + switch (color_mode) { case OMAP_DSS_COLOR_NV12: /* UV is subsampled by 2 vertically*/ @@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane, break; default: BUG(); + return; } if (out_width != orig_width) @@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane, REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); /* set V scaling */ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 
1 : 0, 6, 6); - - dispc_ovl_set_vid_accu2_0(plane, 0x80, 0); - dispc_ovl_set_vid_accu2_1(plane, 0x80, 0); } static void dispc_ovl_set_scaling(enum omap_plane plane, @@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode) return 32; default: BUG(); + return 0; } } @@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps) return 1 - (-pixels + 1) * ps; else BUG(); + return 0; } static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, @@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, enum omap_color_mode color_mode, bool fieldmode, unsigned int field_offset, unsigned *offset0, unsigned *offset1, - s32 *row_inc, s32 *pix_inc) + s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim) { u8 ps; @@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, else *offset0 = 0; - *row_inc = pixinc(1 + (screen_width - width) + - (fieldmode ? screen_width : 0), - ps); - *pix_inc = pixinc(1, ps); + *row_inc = pixinc(1 + + (y_predecim * screen_width - x_predecim * width) + + (fieldmode ? screen_width : 0), ps); + *pix_inc = pixinc(x_predecim, ps); break; case OMAP_DSS_ROT_0 + 4: @@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, *offset0 = field_offset * screen_width * ps; else *offset0 = 0; - *row_inc = pixinc(1 - (screen_width + width) - - (fieldmode ? screen_width : 0), - ps); - *pix_inc = pixinc(1, ps); + *row_inc = pixinc(1 - + (y_predecim * screen_width + x_predecim * width) - + (fieldmode ? screen_width : 0), ps); + *pix_inc = pixinc(x_predecim, ps); break; default: BUG(); + return; } } @@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, enum omap_color_mode color_mode, bool fieldmode, unsigned int field_offset, unsigned *offset0, unsigned *offset1, - s32 *row_inc, s32 *pix_inc) + s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim) { u8 ps; u16 fbw, fbh; @@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 + field_offset * screen_width * ps; else *offset0 = *offset1; - *row_inc = pixinc(1 + (screen_width - fbw) + - (fieldmode ? screen_width : 0), - ps); - *pix_inc = pixinc(1, ps); + *row_inc = pixinc(1 + + (y_predecim * screen_width - fbw * x_predecim) + + (fieldmode ? screen_width : 0), ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(x_predecim, 2 * ps); + else + *pix_inc = pixinc(x_predecim, ps); break; case OMAP_DSS_ROT_90: *offset1 = screen_width * (fbh - 1) * ps; @@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 + field_offset * ps; else *offset0 = *offset1; - *row_inc = pixinc(screen_width * (fbh - 1) + 1 + - (fieldmode ? 1 : 0), ps); - *pix_inc = pixinc(-screen_width, ps); + *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) + + y_predecim + (fieldmode ? 1 : 0), ps); + *pix_inc = pixinc(-x_predecim * screen_width, ps); break; case OMAP_DSS_ROT_180: *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; @@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, else *offset0 = *offset1; *row_inc = pixinc(-1 - - (screen_width - fbw) - - (fieldmode ? screen_width : 0), - ps); - *pix_inc = pixinc(-1, ps); + (y_predecim * screen_width - fbw * x_predecim) - + (fieldmode ? 
screen_width : 0), ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(-x_predecim, 2 * ps); + else + *pix_inc = pixinc(-x_predecim, ps); break; case OMAP_DSS_ROT_270: *offset1 = (fbw - 1) * ps; @@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 - field_offset * ps; else *offset0 = *offset1; - *row_inc = pixinc(-screen_width * (fbh - 1) - 1 - - (fieldmode ? 1 : 0), ps); - *pix_inc = pixinc(screen_width, ps); + *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) - + y_predecim - (fieldmode ? 1 : 0), ps); + *pix_inc = pixinc(x_predecim * screen_width, ps); break; /* mirroring */ @@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 + field_offset * screen_width * ps; else *offset0 = *offset1; - *row_inc = pixinc(screen_width * 2 - 1 + + *row_inc = pixinc(y_predecim * screen_width * 2 - 1 + (fieldmode ? screen_width : 0), ps); - *pix_inc = pixinc(-1, ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(-x_predecim, 2 * ps); + else + *pix_inc = pixinc(-x_predecim, ps); break; case OMAP_DSS_ROT_90 + 4: @@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 + field_offset * ps; else *offset0 = *offset1; - *row_inc = pixinc(-screen_width * (fbh - 1) + 1 + - (fieldmode ? 1 : 0), + *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) + + y_predecim + (fieldmode ? 1 : 0), ps); - *pix_inc = pixinc(screen_width, ps); + *pix_inc = pixinc(x_predecim * screen_width, ps); break; case OMAP_DSS_ROT_180 + 4: @@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 - field_offset * screen_width * ps; else *offset0 = *offset1; - *row_inc = pixinc(1 - screen_width * 2 - + *row_inc = pixinc(1 - y_predecim * screen_width * 2 - (fieldmode ? screen_width : 0), ps); - *pix_inc = pixinc(1, ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(x_predecim, 2 * ps); + else + *pix_inc = pixinc(x_predecim, ps); break; case OMAP_DSS_ROT_270 + 4: @@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror, *offset0 = *offset1 - field_offset * ps; else *offset0 = *offset1; - *row_inc = pixinc(screen_width * (fbh - 1) - 1 - - (fieldmode ? 1 : 0), + *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) - + y_predecim - (fieldmode ? 1 : 0), ps); - *pix_inc = pixinc(-screen_width, ps); + *pix_inc = pixinc(-x_predecim * screen_width, ps); break; default: BUG(); + return; + } +} + +static void calc_tiler_rotation_offset(u16 screen_width, u16 width, + enum omap_color_mode color_mode, bool fieldmode, + unsigned int field_offset, unsigned *offset0, unsigned *offset1, + s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim) +{ + u8 ps; + + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + case OMAP_DSS_COLOR_CLUT2: + case OMAP_DSS_COLOR_CLUT4: + case OMAP_DSS_COLOR_CLUT8: + BUG(); + return; + default: + ps = color_mode_to_bpp(color_mode) / 8; + break; } + + DSSDBG("scrw %d, width %d\n", screen_width, width); + + /* + * field 0 = even field = bottom field + * field 1 = odd field = top field + */ + *offset1 = 0; + if (field_offset) + *offset0 = *offset1 + field_offset * screen_width * ps; + else + *offset0 = *offset1; + *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) + + (fieldmode ? 
screen_width : 0), ps); + if (color_mode == OMAP_DSS_COLOR_YUV2 || + color_mode == OMAP_DSS_COLOR_UYVY) + *pix_inc = pixinc(x_predecim, 2 * ps); + else + *pix_inc = pixinc(x_predecim, ps); } -static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width, +/* + * This function is used to avoid synclosts in OMAP3, because of some + * undocumented horizontal position and timing related limitations. + */ +static int check_horiz_timing_omap3(enum omap_channel channel, + const struct omap_video_timings *t, u16 pos_x, + u16 width, u16 height, u16 out_width, u16 out_height) +{ + int DS = DIV_ROUND_UP(height, out_height); + unsigned long nonactive, lclk, pclk; + static const u8 limits[3] = { 8, 10, 20 }; + u64 val, blank; + int i; + + nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width; + pclk = dispc_mgr_pclk_rate(channel); + if (dispc_mgr_is_lcd(channel)) + lclk = dispc_mgr_lclk_rate(channel); + else + lclk = dispc_fclk_rate(); + + i = 0; + if (out_height < height) + i++; + if (out_width < width) + i++; + blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk); + DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]); + if (blank <= limits[i]) + return -EINVAL; + + /* + * Pixel data should be prepared before visible display point starts. + * So, atleast DS-2 lines must have already been fetched by DISPC + * during nonactive - pos_x period. + */ + val = div_u64((u64)(nonactive - pos_x) * lclk, pclk); + DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n", + val, max(0, DS - 2) * width); + if (val < max(0, DS - 2) * width) + return -EINVAL; + + /* + * All lines need to be refilled during the nonactive period of which + * only one line can be loaded during the active period. So, atleast + * DS - 1 lines should be loaded during nonactive period. 
+ */ + val = div_u64((u64)nonactive * lclk, pclk); + DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n", + val, max(0, DS - 1) * width); + if (val < max(0, DS - 1) * width) + return -EINVAL; + + return 0; +} + +static unsigned long calc_core_clk_five_taps(enum omap_channel channel, + const struct omap_video_timings *mgr_timings, u16 width, u16 height, u16 out_width, u16 out_height, enum omap_color_mode color_mode) { - u32 fclk = 0; + u32 core_clk = 0; u64 tmp, pclk = dispc_mgr_pclk_rate(channel); if (height <= out_height && width <= out_width) return (unsigned long) pclk; if (height > out_height) { - struct omap_dss_device *dssdev = dispc_mgr_get_device(channel); - unsigned int ppl = dssdev->panel.timings.x_res; + unsigned int ppl = mgr_timings->x_res; tmp = pclk * height * out_width; do_div(tmp, 2 * out_height * ppl); - fclk = tmp; + core_clk = tmp; if (height > 2 * out_height) { if (ppl == out_width) @@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width, tmp = pclk * (height - 2 * out_height) * out_width; do_div(tmp, 2 * out_height * (ppl - out_width)); - fclk = max(fclk, (u32) tmp); + core_clk = max_t(u32, core_clk, tmp); } } if (width > out_width) { tmp = pclk * width; do_div(tmp, out_width); - fclk = max(fclk, (u32) tmp); + core_clk = max_t(u32, core_clk, tmp); if (color_mode == OMAP_DSS_COLOR_RGB24U) - fclk <<= 1; + core_clk <<= 1; } - return fclk; + return core_clk; } -static unsigned long calc_fclk(enum omap_channel channel, u16 width, +static unsigned long calc_core_clk(enum omap_channel channel, u16 width, u16 height, u16 out_width, u16 out_height) { unsigned int hf, vf; @@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width, } static int dispc_ovl_calc_scaling(enum omap_plane plane, - enum omap_channel channel, u16 width, u16 height, - u16 out_width, u16 out_height, - enum omap_color_mode color_mode, bool *five_taps) + enum omap_channel channel, + const struct omap_video_timings *mgr_timings, + u16 width, u16 height, u16 out_width, u16 out_height, + enum omap_color_mode color_mode, bool *five_taps, + int *x_predecim, int *y_predecim, u16 pos_x) { struct omap_overlay *ovl = omap_dss_get_overlay(plane); const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); const int maxsinglelinewidth = dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); - unsigned long fclk = 0; + const int max_decim_limit = 16; + unsigned long core_clk = 0; + int decim_x, decim_y, error, min_factor; + u16 in_width, in_height, in_width_max = 0; if (width == out_width && height == out_height) return 0; @@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) return -EINVAL; - if (out_width < width / maxdownscale || - out_width > width * 8) + *x_predecim = max_decim_limit; + *y_predecim = max_decim_limit; + + if (color_mode == OMAP_DSS_COLOR_CLUT1 || + color_mode == OMAP_DSS_COLOR_CLUT2 || + color_mode == OMAP_DSS_COLOR_CLUT4 || + color_mode == OMAP_DSS_COLOR_CLUT8) { + *x_predecim = 1; + *y_predecim = 1; + *five_taps = false; + return 0; + } + + decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale); + decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale); + + min_factor = min(decim_x, decim_y); + + if (decim_x > *x_predecim || out_width > width * 8) return -EINVAL; - if (out_height < height / maxdownscale || - out_height > height * 8) + if (decim_y > *y_predecim || out_height > height * 8) return -EINVAL; 
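[Editor's note] The dispc.c hunks above introduce calc_core_clk_five_taps(), whose vertical-downscaling term requires core_clk >= pclk * height * out_width / (2 * out_height * ppl). The standalone C sketch below reproduces just that arithmetic with 64-bit intermediate math (mirroring do_div() in the kernel code); the pixel clock and geometry are made-up example numbers, not real panel timings.

    #include <stdint.h>
    #include <stdio.h>

    /* Vertical five-tap core-clock bound, pclk given in kHz to keep numbers small. */
    static uint64_t five_tap_core_clk_khz(uint64_t pclk_khz,
                                          uint32_t height, uint32_t out_height,
                                          uint32_t out_width, uint32_t ppl)
    {
            uint64_t tmp = pclk_khz * height * out_width;
            return tmp / (2ULL * out_height * ppl);
    }

    int main(void)
    {
            /* e.g. downscaling 1280x720 content to 640x360 on an 800-pixel-wide panel */
            uint64_t clk = five_tap_core_clk_khz(33260, 720, 360, 640, 800);
            printf("required core clock >= %llu kHz\n", (unsigned long long)clk);
            return 0;
    }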
if (cpu_is_omap24xx()) { - if (width > maxsinglelinewidth) - DSSERR("Cannot scale max input width exceeded"); *five_taps = false; - fclk = calc_fclk(channel, width, height, out_width, - out_height); + + do { + in_height = DIV_ROUND_UP(height, decim_y); + in_width = DIV_ROUND_UP(width, decim_x); + core_clk = calc_core_clk(channel, in_width, in_height, + out_width, out_height); + error = (in_width > maxsinglelinewidth || !core_clk || + core_clk > dispc_core_clk_rate()); + if (error) { + if (decim_x == decim_y) { + decim_x = min_factor; + decim_y++; + } else { + swap(decim_x, decim_y); + if (decim_x < decim_y) + decim_x++; + } + } + } while (decim_x <= *x_predecim && decim_y <= *y_predecim && + error); + + if (in_width > maxsinglelinewidth) { + DSSERR("Cannot scale max input width exceeded"); + return -EINVAL; + } } else if (cpu_is_omap34xx()) { - if (width > (maxsinglelinewidth * 2)) { + + do { + in_height = DIV_ROUND_UP(height, decim_y); + in_width = DIV_ROUND_UP(width, decim_x); + core_clk = calc_core_clk_five_taps(channel, mgr_timings, + in_width, in_height, out_width, out_height, + color_mode); + + error = check_horiz_timing_omap3(channel, mgr_timings, + pos_x, in_width, in_height, out_width, + out_height); + + if (in_width > maxsinglelinewidth) + if (in_height > out_height && + in_height < out_height * 2) + *five_taps = false; + if (!*five_taps) + core_clk = calc_core_clk(channel, in_width, + in_height, out_width, out_height); + error = (error || in_width > maxsinglelinewidth * 2 || + (in_width > maxsinglelinewidth && *five_taps) || + !core_clk || core_clk > dispc_core_clk_rate()); + if (error) { + if (decim_x == decim_y) { + decim_x = min_factor; + decim_y++; + } else { + swap(decim_x, decim_y); + if (decim_x < decim_y) + decim_x++; + } + } + } while (decim_x <= *x_predecim && decim_y <= *y_predecim + && error); + + if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width, + height, out_width, out_height)){ + DSSERR("horizontal timing too tight\n"); + return -EINVAL; + } + + if (in_width > (maxsinglelinewidth * 2)) { DSSERR("Cannot setup scaling"); DSSERR("width exceeds maximum width possible"); return -EINVAL; } - fclk = calc_fclk_five_taps(channel, width, height, out_width, - out_height, color_mode); - if (width > maxsinglelinewidth) { - if (height > out_height && height < out_height * 2) - *five_taps = false; - else { - DSSERR("cannot setup scaling with five taps"); - return -EINVAL; - } + + if (in_width > maxsinglelinewidth && *five_taps) { + DSSERR("cannot setup scaling with five taps"); + return -EINVAL; } - if (!*five_taps) - fclk = calc_fclk(channel, width, height, out_width, - out_height); } else { - if (width > maxsinglelinewidth) { + int decim_x_min = decim_x; + in_height = DIV_ROUND_UP(height, decim_y); + in_width_max = dispc_core_clk_rate() / + DIV_ROUND_UP(dispc_mgr_pclk_rate(channel), + out_width); + decim_x = DIV_ROUND_UP(width, in_width_max); + + decim_x = decim_x > decim_x_min ? 
decim_x : decim_x_min; + if (decim_x > *x_predecim) + return -EINVAL; + + do { + in_width = DIV_ROUND_UP(width, decim_x); + } while (decim_x <= *x_predecim && + in_width > maxsinglelinewidth && decim_x++); + + if (in_width > maxsinglelinewidth) { DSSERR("Cannot scale width exceeds max line width"); return -EINVAL; } - fclk = calc_fclk(channel, width, height, out_width, - out_height); + + core_clk = calc_core_clk(channel, in_width, in_height, + out_width, out_height); } - DSSDBG("required fclk rate = %lu Hz\n", fclk); - DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); + DSSDBG("required core clk rate = %lu Hz\n", core_clk); + DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate()); - if (!fclk || fclk > dispc_fclk_rate()) { + if (!core_clk || core_clk > dispc_core_clk_rate()) { DSSERR("failed to set up scaling, " - "required fclk rate = %lu Hz, " - "current fclk rate = %lu Hz\n", - fclk, dispc_fclk_rate()); + "required core clk rate = %lu Hz, " + "current core clk rate = %lu Hz\n", + core_clk, dispc_core_clk_rate()); return -EINVAL; } + *x_predecim = decim_x; + *y_predecim = decim_y; return 0; } int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication) + bool ilace, bool replication, + const struct omap_video_timings *mgr_timings) { struct omap_overlay *ovl = omap_dss_get_overlay(plane); bool five_taps = true; @@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, s32 pix_inc; u16 frame_height = oi->height; unsigned int field_offset = 0; - u16 outw, outh; + u16 in_height = oi->height; + u16 in_width = oi->width; + u16 out_width, out_height; enum omap_channel channel; + int x_predecim = 1, y_predecim = 1; channel = dispc_ovl_get_channel_out(plane); @@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, if (oi->paddr == 0) return -EINVAL; - outw = oi->out_width == 0 ? oi->width : oi->out_width; - outh = oi->out_height == 0 ? oi->height : oi->out_height; + out_width = oi->out_width == 0 ? oi->width : oi->out_width; + out_height = oi->out_height == 0 ? oi->height : oi->out_height; - if (ilace && oi->height == outh) + if (ilace && oi->height == out_height) fieldmode = 1; if (ilace) { if (fieldmode) - oi->height /= 2; + in_height /= 2; oi->pos_y /= 2; - outh /= 2; + out_height /= 2; DSSDBG("adjusting for ilace: height %d, pos_y %d, " "out_height %d\n", - oi->height, oi->pos_y, outh); + in_height, oi->pos_y, out_height); } if (!dss_feat_color_mode_supported(plane, oi->color_mode)) return -EINVAL; - r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height, - outw, outh, oi->color_mode, - &five_taps); + r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width, + in_height, out_width, out_height, oi->color_mode, + &five_taps, &x_predecim, &y_predecim, oi->pos_x); if (r) return r; + in_width = DIV_ROUND_UP(in_width, x_predecim); + in_height = DIV_ROUND_UP(in_height, y_predecim); + if (oi->color_mode == OMAP_DSS_COLOR_YUV2 || oi->color_mode == OMAP_DSS_COLOR_UYVY || oi->color_mode == OMAP_DSS_COLOR_NV12) @@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, * so the integer part must be added to the base address of the * bottom field. */ - if (!oi->height || oi->height == outh) + if (!in_height || in_height == out_height) field_offset = 0; else - field_offset = oi->height / outh / 2; + field_offset = in_height / out_height / 2; } /* Fields are independent but interleaved in memory. 
*/ if (fieldmode) field_offset = 1; - if (oi->rotation_type == OMAP_DSS_ROT_DMA) + offset0 = 0; + offset1 = 0; + row_inc = 0; + pix_inc = 0; + + if (oi->rotation_type == OMAP_DSS_ROT_TILER) + calc_tiler_rotation_offset(oi->screen_width, in_width, + oi->color_mode, fieldmode, field_offset, + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); + else if (oi->rotation_type == OMAP_DSS_ROT_DMA) calc_dma_rotation_offset(oi->rotation, oi->mirror, - oi->screen_width, oi->width, frame_height, + oi->screen_width, in_width, frame_height, oi->color_mode, fieldmode, field_offset, - &offset0, &offset1, &row_inc, &pix_inc); + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); else calc_vrfb_rotation_offset(oi->rotation, oi->mirror, - oi->screen_width, oi->width, frame_height, + oi->screen_width, in_width, frame_height, oi->color_mode, fieldmode, field_offset, - &offset0, &offset1, &row_inc, &pix_inc); + &offset0, &offset1, &row_inc, &pix_inc, + x_predecim, y_predecim); DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", offset0, offset1, row_inc, pix_inc); dispc_ovl_set_color_mode(plane, oi->color_mode); + dispc_ovl_configure_burst_type(plane, oi->rotation_type); + dispc_ovl_set_ba0(plane, oi->paddr + offset0); dispc_ovl_set_ba1(plane, oi->paddr + offset1); @@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, dispc_ovl_set_row_inc(plane, row_inc); dispc_ovl_set_pix_inc(plane, pix_inc); - DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width, - oi->height, outw, outh); + DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width, + in_height, out_width, out_height); dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y); - dispc_ovl_set_pic_size(plane, oi->width, oi->height); + dispc_ovl_set_pic_size(plane, in_width, in_height); if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) { - dispc_ovl_set_scaling(plane, oi->width, oi->height, - outw, outh, - ilace, five_taps, fieldmode, + dispc_ovl_set_scaling(plane, in_width, in_height, out_width, + out_height, ilace, five_taps, fieldmode, oi->color_mode, oi->rotation); - dispc_ovl_set_vid_size(plane, outw, outh); + dispc_ovl_set_vid_size(plane, out_width, out_height); dispc_ovl_set_vid_color_conv(plane, cconv); } @@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel) return !!REG_GET(DISPC_CONTROL, 1, 1); else if (channel == OMAP_DSS_CHANNEL_LCD2) return !!REG_GET(DISPC_CONTROL2, 0, 0); - else + else { BUG(); + return false; + } } void dispc_mgr_enable(enum omap_channel channel, bool enable) @@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable) REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); } +static bool _dispc_mgr_size_ok(u16 width, u16 height) +{ + return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) && + height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT); +} + static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, int vsw, int vfp, int vbp) { @@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, return true; } -bool dispc_lcd_timings_ok(struct omap_video_timings *timings) +bool dispc_mgr_timings_ok(enum omap_channel channel, + const struct omap_video_timings *timings) { - return _dispc_lcd_timings_ok(timings->hsw, timings->hfp, - timings->hbp, timings->vsw, - timings->vfp, timings->vbp); + bool timings_ok; + + timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res); + + if (dispc_mgr_is_lcd(channel)) + timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw, 
+ timings->hfp, timings->hbp, + timings->vsw, timings->vfp, + timings->vbp); + + return timings_ok; } static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, @@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, } /* change name to mode? */ -void dispc_mgr_set_lcd_timings(enum omap_channel channel, +void dispc_mgr_set_timings(enum omap_channel channel, struct omap_video_timings *timings) { unsigned xtot, ytot; unsigned long ht, vt; + struct omap_video_timings t = *timings; + + DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res); - if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp, - timings->hbp, timings->vsw, - timings->vfp, timings->vbp)) + if (!dispc_mgr_timings_ok(channel, &t)) { BUG(); + return; + } + + if (dispc_mgr_is_lcd(channel)) { + _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw, + t.vfp, t.vbp); + + xtot = t.x_res + t.hfp + t.hsw + t.hbp; + ytot = t.y_res + t.vfp + t.vsw + t.vbp; - _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp, - timings->hbp, timings->vsw, timings->vfp, - timings->vbp); + ht = (timings->pixel_clock * 1000) / xtot; + vt = (timings->pixel_clock * 1000) / xtot / ytot; - dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res); + DSSDBG("pck %u\n", timings->pixel_clock); + DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", + t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp); - xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp; - ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp; + DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); + } else { + enum dss_hdmi_venc_clk_source_select source; - ht = (timings->pixel_clock * 1000) / xtot; - vt = (timings->pixel_clock * 1000) / xtot / ytot; + source = dss_get_hdmi_venc_clk_source(); - DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res, - timings->y_res); - DSSDBG("pck %u\n", timings->pixel_clock); - DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", - timings->hsw, timings->hfp, timings->hbp, - timings->vsw, timings->vfp, timings->vbp); + if (source == DSS_VENC_TV_CLK) + t.y_res /= 2; + } - DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); + dispc_mgr_set_size(channel, t.x_res, t.y_res); } static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div, @@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void) break; default: BUG(); + return 0; } return r; @@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) break; default: BUG(); + return 0; } return r / lcd; @@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) return r / pcd; } else { - struct omap_dss_device *dssdev = - dispc_mgr_get_device(channel); + enum dss_hdmi_venc_clk_source_select source; - switch (dssdev->type) { - case OMAP_DISPLAY_TYPE_VENC: + source = dss_get_hdmi_venc_clk_source(); + + switch (source) { + case DSS_VENC_TV_CLK: return venc_get_pixel_clock(); - case OMAP_DISPLAY_TYPE_HDMI: + case DSS_HDMI_M_PCLK: return hdmi_get_pixel_clock(); default: BUG(); + return 0; } } } +unsigned long dispc_core_clk_rate(void) +{ + int lcd; + unsigned long fclk = dispc_fclk_rate(); + + if (dss_has_feature(FEAT_CORE_CLK_DIV)) + lcd = REG_GET(DISPC_DIVISOR, 23, 16); + else + lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16); + + return fclk / lcd; +} + void dispc_dump_clocks(struct seq_file *s) { int lcd, pcd; @@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s) } #endif -void dispc_dump_regs(struct seq_file *s) +static 
void dispc_dump_regs(struct seq_file *s) { int i, j; const char *mgr_names[] = { @@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask, return 0; } -#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC -void dispc_fake_vsync_irq(void) -{ - u32 irqstatus = DISPC_IRQ_VSYNC; - int i; - - WARN_ON(!in_interrupt()); - - for (i = 0; i < DISPC_MAX_NR_ISRS; i++) { - struct omap_dispc_isr_data *isr_data; - isr_data = &dispc.registered_isr[i]; - - if (!isr_data->isr) - continue; - - if (isr_data->mask & irqstatus) - isr_data->isr(isr_data->arg, irqstatus); - } -} -#endif - static void _omap_dispc_initialize_irq(void) { unsigned long flags; @@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void) } /* DISPC HW IP initialisation */ -static int omap_dispchw_probe(struct platform_device *pdev) +static int __init omap_dispchw_probe(struct platform_device *pdev) { u32 rev; int r = 0; @@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev) dispc_runtime_put(); + dss_debugfs_create_file("dispc", dispc_dump_regs); + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + dss_debugfs_create_file("dispc_irq", dispc_dump_irqs); +#endif return 0; err_runtime_get: @@ -3407,7 +3736,7 @@ err_runtime_get: return r; } -static int omap_dispchw_remove(struct platform_device *pdev) +static int __exit omap_dispchw_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); @@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev) static int dispc_runtime_suspend(struct device *dev) { dispc_save_context(); - dss_runtime_put(); return 0; } static int dispc_runtime_resume(struct device *dev) { - int r; - - r = dss_runtime_get(); - if (r < 0) - return r; - dispc_restore_context(); return 0; @@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = { }; static struct platform_driver omap_dispchw_driver = { - .probe = omap_dispchw_probe, - .remove = omap_dispchw_remove, + .remove = __exit_p(omap_dispchw_remove), .driver = { .name = "omapdss_dispc", .owner = THIS_MODULE, @@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = { }, }; -int dispc_init_platform_driver(void) +int __init dispc_init_platform_driver(void) { - return platform_driver_register(&omap_dispchw_driver); + return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe); } -void dispc_uninit_platform_driver(void) +void __exit dispc_uninit_platform_driver(void) { - return platform_driver_unregister(&omap_dispchw_driver); + platform_driver_unregister(&omap_dispchw_driver); } diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h index 5836bd1650f9..f278080e1063 100644 --- a/drivers/video/omap2/dss/dispc.h +++ b/drivers/video/omap2/dss/dispc.h @@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel) return 0x03AC; default: BUG(); + return 0; } } @@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) return 0x03B0; default: BUG(); + return 0; } } @@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel) return 0x0064; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0400; default: BUG(); + return 0; } } @@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel) return 0x0068; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0404; default: BUG(); + return 0; } } @@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel) return 0x006C; 
case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0408; default: BUG(); + return 0; } } @@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel) return 0x0070; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x040C; default: BUG(); + return 0; } } @@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) return 0x03CC; default: BUG(); + return 0; } } @@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) return 0x01D4; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C0; default: BUG(); + return 0; } } @@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) return 0x01D8; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C4; default: BUG(); + return 0; } } @@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) return 0x01DC; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C8; default: BUG(); + return 0; } } @@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) return 0x0220; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03BC; default: BUG(); + return 0; } } @@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) return 0x0224; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B8; default: BUG(); + return 0; } } @@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) return 0x0228; case OMAP_DSS_CHANNEL_DIGIT: BUG(); + return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B4; default: BUG(); + return 0; } } @@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane) return 0x0300; default: BUG(); + return 0; } } @@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane) return 0x0008; default: BUG(); + return 0; } } @@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane) return 0x000C; default: BUG(); + return 0; } } @@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0544; case OMAP_DSS_VIDEO2: @@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane) return 0x0310; default: BUG(); + return 0; } } @@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0548; case OMAP_DSS_VIDEO2: @@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane) return 0x0314; default: BUG(); + return 0; } } @@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane) return 0x009C; default: BUG(); + return 0; } } @@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane) return 0x00A8; default: BUG(); + return 0; } } @@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane) return 0x0070; default: BUG(); + return 0; } } @@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0568; case OMAP_DSS_VIDEO2: @@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane) return 0x032C; default: BUG(); + return 0; } } @@ -441,6 +476,7 @@ static 
inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane) return 0x008C; default: BUG(); + return 0; } } @@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane) return 0x0088; default: BUG(); + return 0; } } @@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane) return 0x00A4; default: BUG(); + return 0; } } @@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane) return 0x0098; default: BUG(); + return 0; } } @@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane) case OMAP_DSS_VIDEO2: case OMAP_DSS_VIDEO3: BUG(); + return 0; default: BUG(); + return 0; } } @@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane) case OMAP_DSS_VIDEO2: case OMAP_DSS_VIDEO3: BUG(); + return 0; default: BUG(); + return 0; } } @@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x0024; @@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane) return 0x0090; default: BUG(); + return 0; } } @@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0580; case OMAP_DSS_VIDEO2: @@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane) return 0x0424; default: BUG(); + return 0; } } @@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x0028; @@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane) return 0x0094; default: BUG(); + return 0; } } @@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x002C; @@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane) return 0x0000; default: BUG(); + return 0; } } @@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0584; case OMAP_DSS_VIDEO2: @@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane) return 0x0428; default: BUG(); + return 0; } } @@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x0030; @@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane) return 0x0004; default: BUG(); + return 0; } } @@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0588; case OMAP_DSS_VIDEO2: @@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane) return 0x042C; default: BUG(); + return 0; } } @@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x0034 + i * 0x8; @@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i) return 0x0010 + i * 0x8; default: BUG(); + return 0; } } @@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum 
omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x058C + i * 0x8; case OMAP_DSS_VIDEO2: @@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i) return 0x0430 + i * 0x8; default: BUG(); + return 0; } } @@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: return 0x0038 + i * 0x8; @@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i) return 0x0014 + i * 0x8; default: BUG(); + return 0; } } @@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0590 + i * 8; case OMAP_DSS_VIDEO2: @@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i) return 0x0434 + i * 0x8; default: BUG(); + return 0; } } @@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: case OMAP_DSS_VIDEO2: case OMAP_DSS_VIDEO3: return 0x0074 + i * 0x4; default: BUG(); + return 0; } } @@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x0124 + i * 0x4; case OMAP_DSS_VIDEO2: @@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i) return 0x0050 + i * 0x4; default: BUG(); + return 0; } } @@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i) switch (plane) { case OMAP_DSS_GFX: BUG(); + return 0; case OMAP_DSS_VIDEO1: return 0x05CC + i * 0x4; case OMAP_DSS_VIDEO2: @@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i) return 0x0470 + i * 0x4; default: BUG(); + return 0; } } @@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane) return 0x00A0; default: BUG(); + return 0; } } #endif diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index 4424c198dbcd..249010630370 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) return 24; default: BUG(); + return 0; } } EXPORT_SYMBOL(omapdss_default_get_recommended_bpp); +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings) +{ + *timings = dssdev->panel.timings; +} +EXPORT_SYMBOL(omapdss_default_get_timings); + /* Checks if replication logic should be used. 
Only use for active matrix, * when overlay is in RGB12U or RGB16 mode, and LCD interface is * 18bpp or 24bpp */ @@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev, break; default: BUG(); + return false; } return bpp > 16; @@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev, int i; int r; - switch (dssdev->type) { -#ifdef CONFIG_OMAP2_DSS_DPI - case OMAP_DISPLAY_TYPE_DPI: - r = dpi_init_display(dssdev); - break; -#endif -#ifdef CONFIG_OMAP2_DSS_RFBI - case OMAP_DISPLAY_TYPE_DBI: - r = rfbi_init_display(dssdev); - break; -#endif -#ifdef CONFIG_OMAP2_DSS_VENC - case OMAP_DISPLAY_TYPE_VENC: - r = venc_init_display(dssdev); - break; -#endif -#ifdef CONFIG_OMAP2_DSS_SDI - case OMAP_DISPLAY_TYPE_SDI: - r = sdi_init_display(dssdev); - break; -#endif -#ifdef CONFIG_OMAP2_DSS_DSI - case OMAP_DISPLAY_TYPE_DSI: - r = dsi_init_display(dssdev); - break; -#endif - case OMAP_DISPLAY_TYPE_HDMI: - r = hdmi_init_display(dssdev); - break; - default: - DSSERR("Support for display '%s' not compiled in.\n", - dssdev->name); - return; - } - - if (r) { - DSSERR("failed to init display %s\n", dssdev->name); - return; - } - /* create device sysfs files */ i = 0; while ((attr = display_sysfs_attrs[i++]) != NULL) { diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index faaf305fda27..8c2056c9537b 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) t->pixel_clock = pck; } - dispc_mgr_set_lcd_timings(dssdev->manager->id, t); + dss_mgr_set_timings(dssdev->manager, t); return 0; } @@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) goto err_reg_enable; } - r = dss_runtime_get(); - if (r) - goto err_get_dss; - r = dispc_runtime_get(); if (r) goto err_get_dispc; @@ -244,8 +240,6 @@ err_dsi_pll_init: err_get_dsi: dispc_runtime_put(); err_get_dispc: - dss_runtime_put(); -err_get_dss: if (cpu_is_omap34xx()) regulator_disable(dpi.vdds_dsi_reg); err_reg_enable: @@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev) } dispc_runtime_put(); - dss_runtime_put(); if (cpu_is_omap34xx()) regulator_disable(dpi.vdds_dsi_reg); @@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev, DSSDBG("dpi_set_timings\n"); dssdev->panel.timings = *timings; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { - r = dss_runtime_get(); - if (r) - return; - r = dispc_runtime_get(); - if (r) { - dss_runtime_put(); + if (r) return; - } dpi_set_mode(dssdev); - dispc_mgr_go(dssdev->manager->id); dispc_runtime_put(); - dss_runtime_put(); + } else { + dss_mgr_set_timings(dssdev->manager, timings); } } EXPORT_SYMBOL(dpi_set_timings); @@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, unsigned long pck; struct dispc_clock_info dispc_cinfo; - if (!dispc_lcd_timings_ok(timings)) + if (dss_mgr_check_timings(dssdev->manager, timings)) return -EINVAL; if (timings->pixel_clock == 0) @@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, } EXPORT_SYMBOL(dpi_check_timings); -int dpi_init_display(struct omap_dss_device *dssdev) +static int __init dpi_init_display(struct omap_dss_device *dssdev) { DSSDBG("init_display\n"); @@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev) return 0; } -int dpi_init(void) +static void __init dpi_probe_pdata(struct platform_device *pdev) { + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int i, r; + + for (i = 0; i < 
pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; + + if (dssdev->type != OMAP_DISPLAY_TYPE_DPI) + continue; + + r = dpi_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } + + r = omap_dss_register_device(dssdev, &pdev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } +} + +static int __init omap_dpi_probe(struct platform_device *pdev) +{ + dpi_probe_pdata(pdev); + + return 0; +} + +static int __exit omap_dpi_remove(struct platform_device *pdev) +{ + omap_dss_unregister_child_devices(&pdev->dev); + return 0; } -void dpi_exit(void) +static struct platform_driver omap_dpi_driver = { + .remove = __exit_p(omap_dpi_remove), + .driver = { + .name = "omapdss_dpi", + .owner = THIS_MODULE, + }, +}; + +int __init dpi_init_platform_driver(void) { + return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe); } +void __exit dpi_uninit_platform_driver(void) +{ + platform_driver_unregister(&omap_dpi_driver); +} diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 210a3c4f6150..ec363d8390ed 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -256,14 +256,13 @@ struct dsi_data { struct platform_device *pdev; void __iomem *base; + int module_id; + int irq; struct clk *dss_clk; struct clk *sys_clk; - int (*enable_pads)(int dsi_id, unsigned lane_mask); - void (*disable_pads)(int dsi_id, unsigned lane_mask); - struct dsi_clock_info current_cinfo; bool vdds_dsi_enabled; @@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module) return dsi_pdev_map[module]; } -static inline int dsi_get_dsidev_id(struct platform_device *dsidev) -{ - return dsidev->id; -} - static inline void dsi_write_reg(struct platform_device *dsidev, const struct dsi_reg idx, u32 val) { @@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) return 16; default: BUG(); + return 0; } } @@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) static unsigned long dsi_fclk_rate(struct platform_device *dsidev) { unsigned long r; - int dsi_module = dsi_get_dsidev_id(dsidev); struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { + if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ r = clk_get_rate(dsi->dss_clk); } else { @@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev, } /* calculate clock rates using dividers in cinfo */ -static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, +static int dsi_calc_clock_rates(struct platform_device *dsidev, struct dsi_clock_info *cinfo) { - struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max) @@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, if (cinfo->regm_dsi > dsi->regm_dsi_max) return -EINVAL; - if (cinfo->use_sys_clk) { - cinfo->clkin = clk_get_rate(dsi->sys_clk); - /* XXX it is unclear if highfreq should be used - * with DSS_SYS_CLK source also */ - cinfo->highfreq = 0; - } else { - cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id); - - if (cinfo->clkin < 32000000) - cinfo->highfreq = 0; - else - cinfo->highfreq = 1; - } - - cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 
2 : 1)); + cinfo->clkin = clk_get_rate(dsi->sys_clk); + cinfo->fint = cinfo->clkin / cinfo->regn; if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min) return -EINVAL; @@ -1378,27 +1358,21 @@ retry: memset(&cur, 0, sizeof(cur)); cur.clkin = dss_sys_clk; - cur.use_sys_clk = 1; - cur.highfreq = 0; - /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ - /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ + /* 0.75MHz < Fint = clkin / regn < 2.1MHz */ /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) { - if (cur.highfreq == 0) - cur.fint = cur.clkin / cur.regn; - else - cur.fint = cur.clkin / (2 * cur.regn); + cur.fint = cur.clkin / cur.regn; if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min) continue; - /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ + /* DSIPHY(MHz) = (2 * regm / regn) * clkin */ for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) { unsigned long a, b; a = 2 * cur.regm * (cur.clkin/1000); - b = cur.regn * (cur.highfreq + 1); + b = cur.regn; cur.clkin4ddr = a / b * 1000; if (cur.clkin4ddr > 1800 * 1000 * 1000) @@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev, DSSDBGF(); - dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk; - dsi->current_cinfo.highfreq = cinfo->highfreq; - + dsi->current_cinfo.clkin = cinfo->clkin; dsi->current_cinfo.fint = cinfo->fint; dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr; dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk = @@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev, DSSDBG("DSI Fint %ld\n", cinfo->fint); - DSSDBG("clkin (%s) rate %ld, highfreq %d\n", - cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree", - cinfo->clkin, - cinfo->highfreq); + DSSDBG("clkin rate %ld\n", cinfo->clkin); /* DSIPHY == CLKIN4DDR */ - DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n", + DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n", cinfo->regm, cinfo->regn, cinfo->clkin, - cinfo->highfreq + 1, cinfo->clkin4ddr); DSSDBG("Data rate on 1 DSI lane %ld Mbps\n", @@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev, if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ - l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1, - 11, 11); /* DSI_PLL_CLKSEL */ - l = FLD_MOD(l, cinfo->highfreq, - 12, 12); /* DSI_PLL_HIGHFREQ */ l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ @@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct dsi_clock_info *cinfo = &dsi->current_cinfo; enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; - int dsi_module = dsi_get_dsidev_id(dsidev); + int dsi_module = dsi->module_id; dispc_clk_src = dss_get_dispc_clk_source(); dsi_clk_src = dss_get_dsi_clk_source(dsi_module); @@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); - seq_printf(s, "dsi pll source = %s\n", - cinfo->use_sys_clk ? 
"dss_sys_clk" : "pclkfree"); + seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin); seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); @@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; struct dsi_irq_stats stats; - int dsi_module = dsi_get_dsidev_id(dsidev); spin_lock_irqsave(&dsi->irq_stats_lock, flags); @@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev, #define PIS(x) \ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); - seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1); + seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1); PIS(VC0); PIS(VC1); PIS(VC2); @@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s) dsi_dump_dsidev_irqs(dsidev, s); } - -void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, - const struct file_operations *debug_fops) -{ - struct platform_device *dsidev; - - dsidev = dsi_get_dsidev_from_id(0); - if (dsidev) - debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir, - &dsi1_dump_irqs, debug_fops); - - dsidev = dsi_get_dsidev_from_id(1); - if (dsidev) - debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir, - &dsi2_dump_irqs, debug_fops); -} #endif static void dsi_dump_dsidev_regs(struct platform_device *dsidev, @@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s) dsi_dump_dsidev_regs(dsidev, s); } -void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, - const struct file_operations *debug_fops) -{ - struct platform_device *dsidev; - - dsidev = dsi_get_dsidev_from_id(0); - if (dsidev) - debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir, - &dsi1_dump_regs, debug_fops); - - dsidev = dsi_get_dsidev_from_id(1); - if (dsidev) - debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir, - &dsi2_dump_regs, debug_fops); -} enum dsi_cio_power_state { DSI_COMPLEXIO_POWER_OFF = 0x0, DSI_COMPLEXIO_POWER_ON = 0x1, @@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev) return 1365 * 3; /* 1365x24 bits */ default: BUG(); + return 0; } } @@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev) DSSDBGF(); - r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); + r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); if (r) return r; @@ -2447,7 +2379,7 @@ err_cio_pwr: dsi_cio_disable_lane_override(dsidev); err_scp_clk_dom: dsi_disable_scp_clk(dsidev); - dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); + dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); return r; } @@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev) dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); dsi_disable_scp_clk(dsidev); - dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); + dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev)); } static void dsi_config_tx_fifo(struct platform_device *dsidev, @@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev, if (add + size > 4) { DSSERR("Illegal FIFO configuration\n"); BUG(); + return; } v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); @@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev, if (add + size > 4) { DSSERR("Illegal FIFO configuration\n"); BUG(); + return; } v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); @@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel) return dsi_sync_vc_l4(dsidev, 
channel); default: BUG(); + return -EINVAL; } } @@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev, data = reqdata[0] | (reqdata[1] << 8); } else { BUG(); + return -EINVAL; } r = dsi_vc_send_short(dsidev, channel, data_type, data, 0); @@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel, goto err; } - BUG(); err: DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel, type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS"); @@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev) dsi_write_reg(dsidev, DSI_CTRL, r); } +/* + * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3 + * results in maximum transition time for data and clock lanes to enter and + * exit HS mode. Hence, this is the scenario where the least amount of command + * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS + * clock cycles that can be used to interleave command mode data in HS so that + * all scenarios are satisfied. + */ +static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs, + int exit_hs, int exiths_clk, int ddr_pre, int ddr_post) +{ + int transition; + + /* + * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition + * time of data lanes only, if it isn't set, we need to consider HS + * transition time of both data and clock lanes. HS transition time + * of Scenario 3 is considered. + */ + if (ddr_alwon) { + transition = enter_hs + exit_hs + max(enter_hs, 2) + 1; + } else { + int trans1, trans2; + trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1; + trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre + + enter_hs + 1; + transition = max(trans1, trans2); + } + + return blank > transition ? blank - transition : 0; +} + +/* + * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1 + * results in maximum transition time for data lanes to enter and exit LP mode. + * Hence, this is the scenario where the least amount of command mode data can + * be interleaved. We program the minimum amount of bytes that can be + * interleaved in LP so that all scenarios are satisfied. 
+ */ +static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, + int lp_clk_div, int tdsi_fclk) +{ + int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */ + int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */ + int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */ + int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */ + int lp_inter; /* cmd mode data that can be interleaved, in bytes */ + + /* maximum LP transition time according to Scenario 1 */ + trans_lp = exit_hs + max(enter_hs, 2) + 1; + + /* CLKIN4DDR = 16 * TXBYTECLKHS */ + tlp_avail = thsbyte_clk * (blank - trans_lp); + + ttxclkesc = tdsi_fclk / lp_clk_div; + + lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - + 26) / 16; + + return max(lp_inter, 0); +} + +static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int blanking_mode; + int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode; + int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div; + int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat; + int tclk_trail, ths_exit, exiths_clk; + bool ddr_alwon; + struct omap_video_timings *timings = &dssdev->panel.timings; + int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); + int ndl = dsi->num_lanes_used - 1; + int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1; + int hsa_interleave_hs = 0, hsa_interleave_lp = 0; + int hfp_interleave_hs = 0, hfp_interleave_lp = 0; + int hbp_interleave_hs = 0, hbp_interleave_lp = 0; + int bl_interleave_hs = 0, bl_interleave_lp = 0; + u32 r; + + r = dsi_read_reg(dsidev, DSI_CTRL); + blanking_mode = FLD_GET(r, 20, 20); + hfp_blanking_mode = FLD_GET(r, 21, 21); + hbp_blanking_mode = FLD_GET(r, 22, 22); + hsa_blanking_mode = FLD_GET(r, 23, 23); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING1); + hbp = FLD_GET(r, 11, 0); + hfp = FLD_GET(r, 23, 12); + hsa = FLD_GET(r, 31, 24); + + r = dsi_read_reg(dsidev, DSI_CLK_TIMING); + ddr_clk_post = FLD_GET(r, 7, 0); + ddr_clk_pre = FLD_GET(r, 15, 8); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING7); + exit_hs_mode_lat = FLD_GET(r, 15, 0); + enter_hs_mode_lat = FLD_GET(r, 31, 16); + + r = dsi_read_reg(dsidev, DSI_CLK_CTRL); + lp_clk_div = FLD_GET(r, 12, 0); + ddr_alwon = FLD_GET(r, 13, 13); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); + ths_exit = FLD_GET(r, 7, 0); + + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); + tclk_trail = FLD_GET(r, 15, 8); + + exiths_clk = ths_exit + tclk_trail; + + width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8); + bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl); + + if (!hsa_blanking_mode) { + hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + hsa_interleave_lp = dsi_compute_interleave_lp(hsa, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!hfp_blanking_mode) { + hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + hfp_interleave_lp = dsi_compute_interleave_lp(hfp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!hbp_blanking_mode) { + hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + + hbp_interleave_lp = 
dsi_compute_interleave_lp(hbp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + if (!blanking_mode) { + bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon, + enter_hs_mode_lat, exit_hs_mode_lat, + exiths_clk, ddr_clk_pre, ddr_clk_post); + + bl_interleave_lp = dsi_compute_interleave_lp(bllp, + enter_hs_mode_lat, exit_hs_mode_lat, + lp_clk_div, dsi_fclk_hsdiv); + } + + DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n", + hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs, + bl_interleave_hs); + + DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n", + hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp, + bl_interleave_lp); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING4); + r = FLD_MOD(r, hsa_interleave_hs, 23, 16); + r = FLD_MOD(r, hfp_interleave_hs, 15, 8); + r = FLD_MOD(r, hbp_interleave_hs, 7, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING4, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING5); + r = FLD_MOD(r, hsa_interleave_lp, 23, 16); + r = FLD_MOD(r, hfp_interleave_lp, 15, 8); + r = FLD_MOD(r, hbp_interleave_lp, 7, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING5, r); + + r = dsi_read_reg(dsidev, DSI_VM_TIMING6); + r = FLD_MOD(r, bl_interleave_hs, 31, 15); + r = FLD_MOD(r, bl_interleave_lp, 16, 0); + dsi_write_reg(dsidev, DSI_VM_TIMING6, r); +} + static int dsi_proto_config(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) break; default: BUG(); + return -EINVAL; } r = dsi_read_reg(dsidev, DSI_CTRL); @@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { dsi_config_vp_sync_events(dssdev); dsi_config_blanking_modes(dssdev); + dsi_config_cmd_mode_interleaving(dssdev); } dsi_vc_initial_config(dsidev, 0); @@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) break; default: BUG(); + return -EINVAL; }; dsi_if_enable(dsidev, false); @@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask) __cancel_delayed_work(&dsi->framedone_timeout_work); dsi_handle_framedone(dsidev, 0); - -#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC - dispc_fake_vsync_irq(); -#endif } int omap_dsi_update(struct omap_dss_device *dssdev, int channel, @@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) dispc_mgr_enable_stallmode(dssdev->manager->id, true); dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); - dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings); + dss_mgr_set_timings(dssdev->manager, &timings); } else { dispc_mgr_enable_stallmode(dssdev->manager->id, false); dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); - dispc_mgr_set_lcd_timings(dssdev->manager->id, - &dssdev->panel.timings); + dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); } dispc_mgr_set_lcd_display_type(dssdev->manager->id, @@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) struct dsi_clock_info cinfo; int r; - /* we always use DSS_CLK_SYSCK as input clock */ - cinfo.use_sys_clk = true; cinfo.regn = dssdev->clocks.dsi.regn; cinfo.regm = dssdev->clocks.dsi.regm; cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc; cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi; - r = dsi_calc_clock_rates(dssdev, &cinfo); + r = dsi_calc_clock_rates(dsidev, &cinfo); if (r) { DSSERR("Failed to calc dsi clocks\n"); return 
r; @@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) static int dsi_display_init_dsi(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - int dsi_module = dsi_get_dsidev_id(dsidev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int r; r = dsi_pll_init(dsidev, true, true); @@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) goto err1; dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); - dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src); + dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src); dss_select_lcd_clk_source(dssdev->manager->id, dssdev->clocks.dispc.channel.lcd_clk_src); @@ -4396,7 +4507,7 @@ err3: dsi_cio_uninit(dssdev); err2: dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); - dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); err1: @@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - int dsi_module = dsi_get_dsidev_id(dsidev); if (enter_ulps && !dsi->ulps_enabled) dsi_enter_ulps(dsidev); @@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, dsi_vc_enable(dsidev, 3, 0); dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); - dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); dsi_cio_uninit(dssdev); dsi_pll_uninit(dsidev, disconnect_lanes); @@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) } EXPORT_SYMBOL(omapdss_dsi_enable_te); -int dsi_init_display(struct omap_dss_device *dssdev) +static int __init dsi_init_display(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); @@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev) clk_put(dsi->sys_clk); } +static void __init dsi_probe_pdata(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_dss_board_info *pdata = dsidev->dev.platform_data; + int i, r; + + for (i = 0; i < pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; + + if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) + continue; + + if (dssdev->phy.dsi.module != dsi->module_id) + continue; + + r = dsi_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } + + r = omap_dss_register_device(dssdev, &dsidev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } +} + /* DSI1 HW IP initialisation */ -static int omap_dsihw_probe(struct platform_device *dsidev) +static int __init omap_dsihw_probe(struct platform_device *dsidev) { - struct omap_display_platform_data *dss_plat_data; - struct omap_dss_board_info *board_info; u32 rev; - int r, i, dsi_module = dsi_get_dsidev_id(dsidev); + int r, i; struct resource *dsi_mem; struct dsi_data *dsi; @@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev) if (!dsi) return -ENOMEM; + dsi->module_id = dsidev->id; dsi->pdev = 
dsidev; - dsi_pdev_map[dsi_module] = dsidev; + dsi_pdev_map[dsi->module_id] = dsidev; dev_set_drvdata(&dsidev->dev, dsi); - dss_plat_data = dsidev->dev.platform_data; - board_info = dss_plat_data->board_data; - dsi->enable_pads = board_info->dsi_enable_pads; - dsi->disable_pads = board_info->dsi_disable_pads; - spin_lock_init(&dsi->irq_lock); spin_lock_init(&dsi->errors_lock); dsi->errors = 0; @@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev) else dsi->num_lanes_supported = 3; + dsi_probe_pdata(dsidev); + dsi_runtime_put(dsidev); + if (dsi->module_id == 0) + dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs); + else if (dsi->module_id == 1) + dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs); + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + if (dsi->module_id == 0) + dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs); + else if (dsi->module_id == 1) + dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs); +#endif return 0; err_runtime_get: @@ -4790,12 +4935,14 @@ err_runtime_get: return r; } -static int omap_dsihw_remove(struct platform_device *dsidev) +static int __exit omap_dsihw_remove(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); WARN_ON(dsi->scp_clk_refcount > 0); + omap_dss_unregister_child_devices(&dsidev->dev); + pm_runtime_disable(&dsidev->dev); dsi_put_clocks(dsidev); @@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev) static int dsi_runtime_suspend(struct device *dev) { dispc_runtime_put(); - dss_runtime_put(); return 0; } @@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev) { int r; - r = dss_runtime_get(); - if (r) - goto err_get_dss; - r = dispc_runtime_get(); if (r) - goto err_get_dispc; + return r; return 0; - -err_get_dispc: - dss_runtime_put(); -err_get_dss: - return r; } static const struct dev_pm_ops dsi_pm_ops = { @@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = { }; static struct platform_driver omap_dsihw_driver = { - .probe = omap_dsihw_probe, - .remove = omap_dsihw_remove, + .remove = __exit_p(omap_dsihw_remove), .driver = { .name = "omapdss_dsi", .owner = THIS_MODULE, @@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = { }, }; -int dsi_init_platform_driver(void) +int __init dsi_init_platform_driver(void) { - return platform_driver_register(&omap_dsihw_driver); + return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe); } -void dsi_uninit_platform_driver(void) +void __exit dsi_uninit_platform_driver(void) { - return platform_driver_unregister(&omap_dsihw_driver); + platform_driver_unregister(&omap_dsihw_driver); } diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index bd2d5e159463..6ea1ff149f6f 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -62,6 +62,9 @@ struct dss_reg { #define REG_FLD_MOD(idx, val, start, end) \ dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) +static int dss_runtime_get(void); +static void dss_runtime_put(void); + static struct { struct platform_device *pdev; void __iomem *base; @@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s) dss_runtime_put(); } -void dss_dump_regs(struct seq_file *s) +static void dss_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) @@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) break; default: BUG(); + return; } dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, 
&end); @@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module, enum omap_dss_clk_source clk_src) { struct platform_device *dsidev; - int b; + int b, pos; switch (clk_src) { case OMAP_DSS_CLK_SRC_FCK: @@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module, break; default: BUG(); + return; } - REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ + pos = dsi_module == 0 ? 1 : 10; + REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */ dss.dsi_clk_source[dsi_module] = clk_src; } @@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel, break; default: BUG(); + return; } pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; @@ -706,7 +713,7 @@ static void dss_put_clocks(void) clk_put(dss.dss_clk); } -int dss_runtime_get(void) +static int dss_runtime_get(void) { int r; @@ -717,7 +724,7 @@ int dss_runtime_get(void) return r < 0 ? r : 0; } -void dss_runtime_put(void) +static void dss_runtime_put(void) { int r; @@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s) #endif /* DSS HW IP initialisation */ -static int omap_dsshw_probe(struct platform_device *pdev) +static int __init omap_dsshw_probe(struct platform_device *pdev) { struct resource *dss_mem; u32 rev; @@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev) dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - r = dpi_init(); - if (r) { - DSSERR("Failed to initialize DPI\n"); - goto err_dpi; - } - - r = sdi_init(); - if (r) { - DSSERR("Failed to initialize SDI\n"); - goto err_sdi; - } - rev = dss_read_reg(DSS_REVISION); printk(KERN_INFO "OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); dss_runtime_put(); + dss_debugfs_create_file("dss", dss_dump_regs); + return 0; -err_sdi: - dpi_exit(); -err_dpi: - dss_runtime_put(); + err_runtime_get: pm_runtime_disable(&pdev->dev); dss_put_clocks(); return r; } -static int omap_dsshw_remove(struct platform_device *pdev) +static int __exit omap_dsshw_remove(struct platform_device *pdev) { - dpi_exit(); - sdi_exit(); - pm_runtime_disable(&pdev->dev); dss_put_clocks(); @@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev) static int dss_runtime_suspend(struct device *dev) { dss_save_context(); + dss_set_min_bus_tput(dev, 0); return 0; } static int dss_runtime_resume(struct device *dev) { + int r; + /* + * Set an arbitrarily high tput request to ensure OPP100. + * What we should really do is to make a request to stay in OPP100, + * without any tput requirements, but that is not currently possible + * via the PM layer. 
+ */ + + r = dss_set_min_bus_tput(dev, 1000000000); + if (r) + return r; + dss_restore_context(); return 0; } @@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = { }; static struct platform_driver omap_dsshw_driver = { - .probe = omap_dsshw_probe, - .remove = omap_dsshw_remove, + .remove = __exit_p(omap_dsshw_remove), .driver = { .name = "omapdss_dss", .owner = THIS_MODULE, @@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = { }, }; -int dss_init_platform_driver(void) +int __init dss_init_platform_driver(void) { - return platform_driver_register(&omap_dsshw_driver); + return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe); } void dss_uninit_platform_driver(void) { - return platform_driver_unregister(&omap_dsshw_driver); + platform_driver_unregister(&omap_dsshw_driver); } diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index d4b3dff2ead3..dd1092ceaeef 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -150,9 +150,6 @@ struct dsi_clock_info { u16 regm_dsi; /* OMAP3: REGM4 * OMAP4: REGM5 */ u16 lp_clk_div; - - u8 highfreq; - bool use_sys_clk; }; struct seq_file; @@ -162,6 +159,16 @@ struct platform_device; struct bus_type *dss_get_bus(void); struct regulator *dss_get_vdds_dsi(void); struct regulator *dss_get_vdds_sdi(void); +int dss_get_ctx_loss_count(struct device *dev); +int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask); +void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask); +int dss_set_min_bus_tput(struct device *dev, unsigned long tput); +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)); + +int omap_dss_register_device(struct omap_dss_device *dssdev, + struct device *parent, int disp_num); +void omap_dss_unregister_device(struct omap_dss_device *dssdev); +void omap_dss_unregister_child_devices(struct device *parent); /* apply */ void dss_apply_init(void); @@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr, int dss_mgr_set_device(struct omap_overlay_manager *mgr, struct omap_dss_device *dssdev); int dss_mgr_unset_device(struct omap_overlay_manager *mgr); +void dss_mgr_set_timings(struct omap_overlay_manager *mgr, + struct omap_video_timings *timings); +const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr); bool dss_ovl_is_enabled(struct omap_overlay *ovl); int dss_ovl_enable(struct omap_overlay *ovl); @@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev); void dss_uninit_overlay_managers(struct platform_device *pdev); int dss_mgr_simple_check(struct omap_overlay_manager *mgr, const struct omap_overlay_manager_info *info); +int dss_mgr_check_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings); int dss_mgr_check(struct omap_overlay_manager *mgr, - struct omap_dss_device *dssdev, struct omap_overlay_manager_info *info, + const struct omap_video_timings *mgr_timings, struct omap_overlay_info **overlay_infos); /* overlay */ @@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr); void dss_recheck_connections(struct omap_dss_device *dssdev, bool force); int dss_ovl_simple_check(struct omap_overlay *ovl, const struct omap_overlay_info *info); -int dss_ovl_check(struct omap_overlay *ovl, - struct omap_overlay_info *info, struct omap_dss_device *dssdev); +int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, + const struct omap_video_timings *mgr_timings); /* DSS */ -int 
dss_init_platform_driver(void); +int dss_init_platform_driver(void) __init; void dss_uninit_platform_driver(void); -int dss_runtime_get(void); -void dss_runtime_put(void); - void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); -void dss_dump_regs(struct seq_file *s); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) void dss_debug_dump_clocks(struct seq_file *s); #endif @@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck, struct dispc_clock_info *dispc_cinfo); /* SDI */ -#ifdef CONFIG_OMAP2_DSS_SDI -int sdi_init(void); -void sdi_exit(void); -int sdi_init_display(struct omap_dss_device *display); -#else -static inline int sdi_init(void) -{ - return 0; -} -static inline void sdi_exit(void) -{ -} -#endif +int sdi_init_platform_driver(void) __init; +void sdi_uninit_platform_driver(void) __exit; /* DSI */ #ifdef CONFIG_OMAP2_DSS_DSI @@ -285,19 +282,14 @@ static inline void sdi_exit(void) struct dentry; struct file_operations; -int dsi_init_platform_driver(void); -void dsi_uninit_platform_driver(void); +int dsi_init_platform_driver(void) __init; +void dsi_uninit_platform_driver(void) __exit; int dsi_runtime_get(struct platform_device *dsidev); void dsi_runtime_put(struct platform_device *dsidev); void dsi_dump_clocks(struct seq_file *s); -void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, - const struct file_operations *debug_fops); -void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, - const struct file_operations *debug_fops); -int dsi_init_display(struct omap_dss_device *display); void dsi_irq_handler(void); u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); @@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); struct platform_device *dsi_get_dsidev_from_id(int module); #else -static inline int dsi_init_platform_driver(void) -{ - return 0; -} -static inline void dsi_uninit_platform_driver(void) -{ -} static inline int dsi_runtime_get(struct platform_device *dsidev) { return 0; @@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module) #endif /* DPI */ -#ifdef CONFIG_OMAP2_DSS_DPI -int dpi_init(void); -void dpi_exit(void); -int dpi_init_display(struct omap_dss_device *dssdev); -#else -static inline int dpi_init(void) -{ - return 0; -} -static inline void dpi_exit(void) -{ -} -#endif +int dpi_init_platform_driver(void) __init; +void dpi_uninit_platform_driver(void) __exit; /* DISPC */ -int dispc_init_platform_driver(void); -void dispc_uninit_platform_driver(void); +int dispc_init_platform_driver(void) __init; +void dispc_uninit_platform_driver(void) __exit; void dispc_dump_clocks(struct seq_file *s); -void dispc_dump_irqs(struct seq_file *s); -void dispc_dump_regs(struct seq_file *s); void dispc_irq_handler(void); -void dispc_fake_vsync_irq(void); int dispc_runtime_get(void); void dispc_runtime_put(void); @@ -409,12 +380,12 @@ void dispc_disable_sidle(void); void dispc_lcd_enable_signal_polarity(bool act_high); void dispc_lcd_enable_signal(bool enable); void dispc_pck_free_enable(bool enable); -void dispc_set_digit_size(u16 width, u16 height); void dispc_enable_fifomerge(bool enable); void dispc_enable_gamma_table(bool enable); void dispc_set_loadmode(enum 
omap_dss_load_mode mode); -bool dispc_lcd_timings_ok(struct omap_video_timings *timings); +bool dispc_mgr_timings_ok(enum omap_channel channel, + const struct omap_video_timings *timings); unsigned long dispc_fclk_rate(void); void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo); @@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, - u32 *fifo_low, u32 *fifo_high, bool use_fifomerge); + u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, + bool manual_update); int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication); + bool ilace, bool replication, + const struct omap_video_timings *mgr_timings); int dispc_ovl_enable(enum omap_plane plane, bool enable); void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel); void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable); -void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height); u32 dispc_mgr_get_vsync_irq(enum omap_channel channel); u32 dispc_mgr_get_framedone_irq(enum omap_channel channel); bool dispc_mgr_go_busy(enum omap_channel channel); @@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable); void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); void dispc_mgr_set_lcd_display_type(enum omap_channel channel, enum omap_lcd_display_type type); -void dispc_mgr_set_lcd_timings(enum omap_channel channel, +void dispc_mgr_set_timings(enum omap_channel channel, struct omap_video_timings *timings); void dispc_mgr_set_pol_freq(enum omap_channel channel, enum omap_panel_config config, u8 acbi, u8 acb); unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); +unsigned long dispc_core_clk_rate(void); int dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); int dispc_mgr_get_clock_div(enum omap_channel channel, @@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel, /* VENC */ #ifdef CONFIG_OMAP2_DSS_VENC -int venc_init_platform_driver(void); -void venc_uninit_platform_driver(void); -void venc_dump_regs(struct seq_file *s); -int venc_init_display(struct omap_dss_device *display); +int venc_init_platform_driver(void) __init; +void venc_uninit_platform_driver(void) __exit; unsigned long venc_get_pixel_clock(void); #else -static inline int venc_init_platform_driver(void) -{ - return 0; -} -static inline void venc_uninit_platform_driver(void) -{ -} static inline unsigned long venc_get_pixel_clock(void) { WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__); @@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void) /* HDMI */ #ifdef CONFIG_OMAP4_DSS_HDMI -int hdmi_init_platform_driver(void); -void hdmi_uninit_platform_driver(void); -int hdmi_init_display(struct omap_dss_device *dssdev); +int hdmi_init_platform_driver(void) __init; +void hdmi_uninit_platform_driver(void) __exit; unsigned long hdmi_get_pixel_clock(void); -void hdmi_dump_regs(struct seq_file *s); #else -static inline int hdmi_init_display(struct omap_dss_device *dssdev) -{ - return 0; -} -static inline int hdmi_init_platform_driver(void) -{ - return 0; -} -static inline void hdmi_uninit_platform_driver(void) -{ -} static inline unsigned long 
hdmi_get_pixel_clock(void) { WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__); @@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len); bool omapdss_hdmi_detect(void); int hdmi_panel_init(void); void hdmi_panel_exit(void); +#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO +int hdmi_audio_enable(void); +void hdmi_audio_disable(void); +int hdmi_audio_start(void); +void hdmi_audio_stop(void); +bool hdmi_mode_has_audio(void); +int hdmi_audio_config(struct omap_dss_audio *audio); +#endif /* RFBI */ -#ifdef CONFIG_OMAP2_DSS_RFBI -int rfbi_init_platform_driver(void); -void rfbi_uninit_platform_driver(void); -void rfbi_dump_regs(struct seq_file *s); -int rfbi_init_display(struct omap_dss_device *display); -#else -static inline int rfbi_init_platform_driver(void) -{ - return 0; -} -static inline void rfbi_uninit_platform_driver(void) -{ -} -#endif +int rfbi_init_platform_driver(void) __init; +void rfbi_uninit_platform_driver(void) __exit; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index ce14aa6dd672..938709724f0c 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -52,6 +52,8 @@ struct omap_dss_features { const char * const *clksrc_names; const struct dss_param_range *dss_params; + const enum omap_dss_rotation_type supported_rotation_types; + const u32 buffer_size_unit; const u32 burst_size_unit; }; @@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = { * scaler cannot scale a image with width more than 768. */ [FEAT_PARAM_LINEWIDTH] = { 1, 768 }, + [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 }, + [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 }, }; static const struct dss_param_range omap3_dss_param_range[] = { @@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = { [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, + [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 }, + [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 }, }; static const struct dss_param_range omap4_dss_param_range[] = { @@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = { [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, + [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 }, + [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 }, }; static const enum dss_feat_id omap2_dss_feat_list[] = { @@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = { FEAT_FIR_COEF_V, FEAT_ALPHA_FREE_ZORDER, FEAT_FIFO_MERGE, + FEAT_BURST_2D, }; static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = { @@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = { FEAT_FIR_COEF_V, FEAT_ALPHA_FREE_ZORDER, FEAT_FIFO_MERGE, + FEAT_BURST_2D, }; static const enum dss_feat_id omap4_dss_feat_list[] = { @@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = { FEAT_FIR_COEF_V, FEAT_ALPHA_FREE_ZORDER, FEAT_FIFO_MERGE, + FEAT_BURST_2D, }; /* OMAP2 DSS Features */ @@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = { .overlay_caps = omap2_dss_overlay_caps, .clksrc_names = omap2_dss_clk_source_names, .dss_params = omap2_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, .burst_size_unit = 8, }; @@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = { .overlay_caps = 
omap3430_dss_overlay_caps, .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, .burst_size_unit = 8, }; @@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = { .overlay_caps = omap3630_dss_overlay_caps, .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, .burst_size_unit = 8, }; @@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { .overlay_caps = omap4_dss_overlay_caps, .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, .burst_size_unit = 16, }; @@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = { .overlay_caps = omap4_dss_overlay_caps, .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, .burst_size_unit = 16, }; @@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = { .overlay_caps = omap4_dss_overlay_caps, .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, + .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, .burst_size_unit = 16, }; @@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = { .pll_enable = ti_hdmi_4xxx_pll_enable, .pll_disable = ti_hdmi_4xxx_pll_disable, .video_enable = ti_hdmi_4xxx_wp_video_start, + .video_disable = ti_hdmi_4xxx_wp_video_stop, .dump_wrapper = ti_hdmi_4xxx_wp_dump, .dump_core = ti_hdmi_4xxx_core_dump, .dump_pll = ti_hdmi_4xxx_pll_dump, .dump_phy = ti_hdmi_4xxx_phy_dump, -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) .audio_enable = ti_hdmi_4xxx_wp_audio_enable, + .audio_disable = ti_hdmi_4xxx_wp_audio_disable, + .audio_start = ti_hdmi_4xxx_audio_start, + .audio_stop = ti_hdmi_4xxx_audio_stop, + .audio_config = ti_hdmi_4xxx_audio_config, #endif }; @@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end) *end = omap_current_dss_features->reg_fields[id].end; } +bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type) +{ + return omap_current_dss_features->supported_rotation_types & rot_type; +} + void dss_features_init(void) { if (cpu_is_omap24xx()) diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index c332e7ddfce1..bdf469f080e7 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -62,6 +62,7 @@ enum dss_feat_id { FEAT_FIFO_MERGE, /* An unknown HW bug causing the normal FIFO thresholds not to work */ FEAT_OMAP3_DSI_FIFO_BUG, + FEAT_BURST_2D, }; /* DSS register field id */ @@ -91,6 +92,8 @@ enum dss_range_param { FEAT_PARAM_DSIPLL_LPDIV, FEAT_PARAM_DOWNSCALE, FEAT_PARAM_LINEWIDTH, + FEAT_PARAM_MGR_WIDTH, + FEAT_PARAM_MGR_HEIGHT, }; /* DSS Feature Functions */ @@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ u32 dss_feat_get_burst_size_unit(void); /* in bytes */ +bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type 
rot_type); + bool dss_has_feature(enum dss_feat_id id); void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); void dss_features_init(void); diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index c4b4f6950a92..8195c7166d20 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -33,12 +33,6 @@ #include <linux/pm_runtime.h> #include <linux/clk.h> #include <video/omapdss.h> -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) -#include <sound/soc.h> -#include <sound/pcm_params.h> -#include "ti_hdmi_4xxx_ip.h" -#endif #include "ti_hdmi.h" #include "dss.h" @@ -63,7 +57,6 @@ static struct { struct mutex lock; - struct omap_display_platform_data *pdata; struct platform_device *pdev; struct hdmi_ip_data ip_data; @@ -130,25 +123,12 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); - /* - * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled. - * This should be removed later. - */ - r = dss_runtime_get(); - if (r < 0) - goto err_get_dss; - r = pm_runtime_get_sync(&hdmi.pdev->dev); WARN_ON(r < 0); if (r < 0) - goto err_get_hdmi; + return r; return 0; - -err_get_hdmi: - dss_runtime_put(); -err_get_dss: - return r; } static void hdmi_runtime_put(void) @@ -159,15 +139,9 @@ static void hdmi_runtime_put(void) r = pm_runtime_put_sync(&hdmi.pdev->dev); WARN_ON(r < 0); - - /* - * HACK: This is added to complement the dss_runtime_get() call in - * hdmi_runtime_get(). This should be removed later. - */ - dss_runtime_put(); } -int hdmi_init_display(struct omap_dss_device *dssdev) +static int __init hdmi_init_display(struct omap_dss_device *dssdev) { DSSDBG("init_display\n"); @@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data); - hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); + hdmi.ip_data.ops->video_disable(&hdmi.ip_data); /* config the PLL and PHY hdmi_set_pll_pwrfirst */ r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data); @@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) dispc_enable_gamma_table(0); /* tv size */ - dispc_set_digit_size(dssdev->panel.timings.x_res, - dssdev->panel.timings.y_res); + dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); - hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1); + r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data); + if (r) + goto err_vid_enable; r = dss_mgr_enable(dssdev->manager); if (r) @@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) return 0; err_mgr_enable: - hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); + hdmi.ip_data.ops->video_disable(&hdmi.ip_data); +err_vid_enable: hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); err: @@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev) { dss_mgr_disable(dssdev->manager); - hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); + hdmi.ip_data.ops->video_disable(&hdmi.ip_data); hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); hdmi_runtime_put(); @@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev) r = hdmi_power_on(dssdev); if (r) DSSERR("failed to power on device\n"); + } else { + dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); } } -void hdmi_dump_regs(struct seq_file *s) +static void hdmi_dump_regs(struct seq_file *s) { mutex_lock(&hdmi.lock); @@ -555,248 +533,201 @@ 
void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&hdmi.lock); } -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) - -static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd, - struct snd_soc_dai *dai) +static int hdmi_get_clocks(struct platform_device *pdev) { - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_codec *codec = rtd->codec; - struct platform_device *pdev = to_platform_device(codec->dev); - struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec); - int err = 0; + struct clk *clk; - if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) { - dev_err(&pdev->dev, "Cannot enable/disable audio\n"); - return -ENODEV; + clk = clk_get(&pdev->dev, "sys_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get sys_clk\n"); + return PTR_ERR(clk); } - switch (cmd) { - case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - ip_data->ops->audio_enable(ip_data, true); - break; - case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ip_data->ops->audio_enable(ip_data, false); - break; - default: - err = -EINVAL; - } - return err; -} - -static int hdmi_audio_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_codec *codec = rtd->codec; - struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec); - struct hdmi_audio_format audio_format; - struct hdmi_audio_dma audio_dma; - struct hdmi_core_audio_config core_cfg; - struct hdmi_core_infoframe_audio aud_if_cfg; - int err, n, cts; - enum hdmi_core_audio_sample_freq sample_freq; - - switch (params_format(params)) { - case SNDRV_PCM_FORMAT_S16_LE: - core_cfg.i2s_cfg.word_max_length = - HDMI_AUDIO_I2S_MAX_WORD_20BITS; - core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS; - core_cfg.i2s_cfg.in_length_bits = - HDMI_AUDIO_I2S_INPUT_LENGTH_16; - core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT; - audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES; - audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS; - audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT; - audio_dma.transfer_size = 0x10; - break; - case SNDRV_PCM_FORMAT_S24_LE: - core_cfg.i2s_cfg.word_max_length = - HDMI_AUDIO_I2S_MAX_WORD_24BITS; - core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS; - core_cfg.i2s_cfg.in_length_bits = - HDMI_AUDIO_I2S_INPUT_LENGTH_24; - audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE; - audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS; - audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT; - core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; - audio_dma.transfer_size = 0x20; - break; - default: + hdmi.sys_clk = clk; + + return 0; +} + +static void hdmi_put_clocks(void) +{ + if (hdmi.sys_clk) + clk_put(hdmi.sys_clk); +} + +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) +int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts) +{ + u32 deep_color; + bool deep_color_correct = false; + u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock; + + if (n == NULL || cts == NULL) return -EINVAL; - } - switch (params_rate(params)) { + /* TODO: When implemented, query deep color mode here. */ + deep_color = 100; + + /* + * When using deep color, the default N value (as in the HDMI + * specification) yields to an non-integer CTS. 
Hence, we + * modify it while keeping the restrictions described in + * section 7.2.1 of the HDMI 1.4a specification. + */ + switch (sample_freq) { case 32000: - sample_freq = HDMI_AUDIO_FS_32000; + case 48000: + case 96000: + case 192000: + if (deep_color == 125) + if (pclk == 27027 || pclk == 74250) + deep_color_correct = true; + if (deep_color == 150) + if (pclk == 27027) + deep_color_correct = true; break; case 44100: - sample_freq = HDMI_AUDIO_FS_44100; - break; - case 48000: - sample_freq = HDMI_AUDIO_FS_48000; + case 88200: + case 176400: + if (deep_color == 125) + if (pclk == 27027) + deep_color_correct = true; break; default: return -EINVAL; } - err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts); - if (err < 0) - return err; - - /* Audio wrapper config */ - audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; - audio_format.active_chnnls_msk = 0x03; - audio_format.type = HDMI_AUDIO_TYPE_LPCM; - audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; - /* Disable start/stop signals of IEC 60958 blocks */ - audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF; + if (deep_color_correct) { + switch (sample_freq) { + case 32000: + *n = 8192; + break; + case 44100: + *n = 12544; + break; + case 48000: + *n = 8192; + break; + case 88200: + *n = 25088; + break; + case 96000: + *n = 16384; + break; + case 176400: + *n = 50176; + break; + case 192000: + *n = 32768; + break; + default: + return -EINVAL; + } + } else { + switch (sample_freq) { + case 32000: + *n = 4096; + break; + case 44100: + *n = 6272; + break; + case 48000: + *n = 6144; + break; + case 88200: + *n = 12544; + break; + case 96000: + *n = 12288; + break; + case 176400: + *n = 25088; + break; + case 192000: + *n = 24576; + break; + default: + return -EINVAL; + } + } + /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */ + *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); - audio_dma.block_size = 0xC0; - audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; - audio_dma.fifo_threshold = 0x20; /* in number of samples */ + return 0; +} - hdmi_wp_audio_config_dma(ip_data, &audio_dma); - hdmi_wp_audio_config_format(ip_data, &audio_format); +int hdmi_audio_enable(void) +{ + DSSDBG("audio_enable\n"); - /* - * I2S config - */ - core_cfg.i2s_cfg.en_high_bitrate_aud = false; - /* Only used with high bitrate audio */ - core_cfg.i2s_cfg.cbit_order = false; - /* Serial data and word select should change on sck rising edge */ - core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING; - core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM; - /* Set I2S word select polarity */ - core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT; - core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST; - /* Set serial data to word select shift. See Phillips spec. 
*/ - core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT; - /* Enable one of the four available serial data channels */ - core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN; - - /* Core audio config */ - core_cfg.freq_sample = sample_freq; - core_cfg.n = n; - core_cfg.cts = cts; - if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) { - core_cfg.aud_par_busclk = 0; - core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW; - core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK); - } else { - core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8); - core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW; - core_cfg.use_mclk = true; - } + return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data); +} - if (core_cfg.use_mclk) - core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS; - core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH; - core_cfg.en_spdif = false; - /* Use sample frequency from channel status word */ - core_cfg.fs_override = true; - /* Enable ACR packets */ - core_cfg.en_acr_pkt = true; - /* Disable direct streaming digital audio */ - core_cfg.en_dsd_audio = false; - /* Use parallel audio interface */ - core_cfg.en_parallel_aud_input = true; - - hdmi_core_audio_config(ip_data, &core_cfg); +void hdmi_audio_disable(void) +{ + DSSDBG("audio_disable\n"); - /* - * Configure packet - * info frame audio see doc CEA861-D page 74 - */ - aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM; - aud_if_cfg.db1_channel_count = 2; - aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM; - aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM; - aud_if_cfg.db4_channel_alloc = 0x00; - aud_if_cfg.db5_downmix_inh = false; - aud_if_cfg.db5_lsv = 0; - - hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg); - return 0; + hdmi.ip_data.ops->audio_disable(&hdmi.ip_data); } -static int hdmi_audio_startup(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) +int hdmi_audio_start(void) { - if (!hdmi.ip_data.cfg.cm.mode) { - pr_err("Current video settings do not support audio.\n"); - return -EIO; - } - return 0; + DSSDBG("audio_start\n"); + + return hdmi.ip_data.ops->audio_start(&hdmi.ip_data); } -static int hdmi_audio_codec_probe(struct snd_soc_codec *codec) +void hdmi_audio_stop(void) { - struct hdmi_ip_data *priv = &hdmi.ip_data; + DSSDBG("audio_stop\n"); - snd_soc_codec_set_drvdata(codec, priv); - return 0; + hdmi.ip_data.ops->audio_stop(&hdmi.ip_data); } -static struct snd_soc_codec_driver hdmi_audio_codec_drv = { - .probe = hdmi_audio_codec_probe, -}; +bool hdmi_mode_has_audio(void) +{ + if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI) + return true; + else + return false; +} -static struct snd_soc_dai_ops hdmi_audio_codec_ops = { - .hw_params = hdmi_audio_hw_params, - .trigger = hdmi_audio_trigger, - .startup = hdmi_audio_startup, -}; +int hdmi_audio_config(struct omap_dss_audio *audio) +{ + return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio); +} -static struct snd_soc_dai_driver hdmi_codec_dai_drv = { - .name = "hdmi-audio-codec", - .playback = { - .channels_min = 2, - .channels_max = 2, - .rates = SNDRV_PCM_RATE_32000 | - SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE, - }, - .ops = &hdmi_audio_codec_ops, -}; #endif -static int hdmi_get_clocks(struct platform_device *pdev) +static void __init hdmi_probe_pdata(struct platform_device *pdev) { - struct clk *clk; + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int r, i; - clk = clk_get(&pdev->dev, "sys_clk"); - if (IS_ERR(clk)) { - DSSERR("can't get sys_clk\n"); - 
return PTR_ERR(clk); - } + for (i = 0; i < pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; - hdmi.sys_clk = clk; + if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI) + continue; - return 0; -} + r = hdmi_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } -static void hdmi_put_clocks(void) -{ - if (hdmi.sys_clk) - clk_put(hdmi.sys_clk); + r = omap_dss_register_device(dssdev, &pdev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } } /* HDMI HW IP initialisation */ -static int omapdss_hdmihw_probe(struct platform_device *pdev) +static int __init omapdss_hdmihw_probe(struct platform_device *pdev) { struct resource *hdmi_mem; int r; - hdmi.pdata = pdev->dev.platform_data; hdmi.pdev = pdev; mutex_init(&hdmi.lock); @@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) hdmi_panel_init(); -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) + dss_debugfs_create_file("hdmi", hdmi_dump_regs); + + hdmi_probe_pdata(pdev); - /* Register ASoC codec DAI */ - r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, - &hdmi_codec_dai_drv, 1); - if (r) { - DSSERR("can't register ASoC HDMI audio codec\n"); - return r; - } -#endif return 0; } -static int omapdss_hdmihw_remove(struct platform_device *pdev) +static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) { - hdmi_panel_exit(); + omap_dss_unregister_child_devices(&pdev->dev); -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) - snd_soc_unregister_codec(&pdev->dev); -#endif + hdmi_panel_exit(); pm_runtime_disable(&pdev->dev); @@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev) clk_disable(hdmi.sys_clk); dispc_runtime_put(); - dss_runtime_put(); return 0; } @@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev) { int r; - r = dss_runtime_get(); - if (r < 0) - goto err_get_dss; - r = dispc_runtime_get(); if (r < 0) - goto err_get_dispc; - + return r; clk_enable(hdmi.sys_clk); return 0; - -err_get_dispc: - dss_runtime_put(); -err_get_dss: - return r; } static const struct dev_pm_ops hdmi_pm_ops = { @@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = { }; static struct platform_driver omapdss_hdmihw_driver = { - .probe = omapdss_hdmihw_probe, - .remove = omapdss_hdmihw_remove, + .remove = __exit_p(omapdss_hdmihw_remove), .driver = { .name = "omapdss_hdmi", .owner = THIS_MODULE, @@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = { }, }; -int hdmi_init_platform_driver(void) +int __init hdmi_init_platform_driver(void) { - return platform_driver_register(&omapdss_hdmihw_driver); + return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe); } -void hdmi_uninit_platform_driver(void) +void __exit hdmi_uninit_platform_driver(void) { - return platform_driver_unregister(&omapdss_hdmihw_driver); + platform_driver_unregister(&omapdss_hdmihw_driver); } diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c index 533d5dc634d2..1179e3c4b1c7 100644 --- a/drivers/video/omap2/dss/hdmi_panel.c +++ b/drivers/video/omap2/dss/hdmi_panel.c @@ -30,7 +30,12 @@ #include "dss.h" static struct { - struct mutex hdmi_lock; + /* This protects the panel ops, mainly when accessing the HDMI IP. */ + struct mutex lock; +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) + /* This protects the audio ops, specifically. 
*/ + spinlock_t audio_lock; +#endif } hdmi; @@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev) } +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) +static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev) +{ + unsigned long flags; + int r; + + mutex_lock(&hdmi.lock); + spin_lock_irqsave(&hdmi.audio_lock, flags); + + /* enable audio only if the display is active and supports audio */ + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || + !hdmi_mode_has_audio()) { + DSSERR("audio not supported or display is off\n"); + r = -EPERM; + goto err; + } + + r = hdmi_audio_enable(); + + if (!r) + dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED; + +err: + spin_unlock_irqrestore(&hdmi.audio_lock, flags); + mutex_unlock(&hdmi.lock); + return r; +} + +static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev) +{ + unsigned long flags; + + spin_lock_irqsave(&hdmi.audio_lock, flags); + + hdmi_audio_disable(); + + dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED; + + spin_unlock_irqrestore(&hdmi.audio_lock, flags); +} + +static int hdmi_panel_audio_start(struct omap_dss_device *dssdev) +{ + unsigned long flags; + int r; + + spin_lock_irqsave(&hdmi.audio_lock, flags); + /* + * No need to check the panel state. It was checked when transitioning + * to AUDIO_ENABLED. + */ + if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) { + DSSERR("audio start from invalid state\n"); + r = -EPERM; + goto err; + } + + r = hdmi_audio_start(); + + if (!r) + dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING; + +err: + spin_unlock_irqrestore(&hdmi.audio_lock, flags); + return r; +} + +static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev) +{ + unsigned long flags; + + spin_lock_irqsave(&hdmi.audio_lock, flags); + + hdmi_audio_stop(); + dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED; + + spin_unlock_irqrestore(&hdmi.audio_lock, flags); +} + +static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev) +{ + bool r = false; + + mutex_lock(&hdmi.lock); + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) + goto err; + + if (!hdmi_mode_has_audio()) + goto err; + + r = true; +err: + mutex_unlock(&hdmi.lock); + return r; +} + +static int hdmi_panel_audio_config(struct omap_dss_device *dssdev, + struct omap_dss_audio *audio) +{ + unsigned long flags; + int r; + + mutex_lock(&hdmi.lock); + spin_lock_irqsave(&hdmi.audio_lock, flags); + + /* config audio only if the display is active and supports audio */ + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || + !hdmi_mode_has_audio()) { + DSSERR("audio not supported or display is off\n"); + r = -EPERM; + goto err; + } + + r = hdmi_audio_config(audio); + + if (!r) + dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED; + +err: + spin_unlock_irqrestore(&hdmi.audio_lock, flags); + mutex_unlock(&hdmi.lock); + return r; +} + +#else +static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev) +{ + return -EPERM; +} + +static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev) +{ +} + +static int hdmi_panel_audio_start(struct omap_dss_device *dssdev) +{ + return -EPERM; +} + +static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev) +{ +} + +static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev) +{ + return false; +} + +static int hdmi_panel_audio_config(struct omap_dss_device *dssdev, + struct omap_dss_audio *audio) +{ + return -EPERM; +} +#endif + static int hdmi_panel_enable(struct omap_dss_device *dssdev) { int r = 0; DSSDBG("ENTER hdmi_panel_enable\n"); - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); if
(dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; @@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; err: - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } static void hdmi_panel_disable(struct omap_dss_device *dssdev) { - mutex_lock(&hdmi.hdmi_lock); - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + mutex_lock(&hdmi.lock); + + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + /* + * TODO: notify audio users that the display was disabled. For + * now, disable audio locally to not break our audio state + * machine. + */ + hdmi_panel_audio_disable(dssdev); omapdss_hdmi_display_disable(dssdev); + } dssdev->state = OMAP_DSS_DISPLAY_DISABLED; - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); } static int hdmi_panel_suspend(struct omap_dss_device *dssdev) { int r = 0; - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = -EINVAL; goto err; } - dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; + /* + * TODO: notify audio users that the display was suspended. For now, + * disable audio locally to not break our audio state machine. + */ + hdmi_panel_audio_disable(dssdev); + dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; omapdss_hdmi_display_disable(dssdev); err: - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } @@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev) { int r = 0; - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { r = -EINVAL; @@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev) DSSERR("failed to power on\n"); goto err; } + /* TODO: notify audio users that the panel resumed. */ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; err: - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } @@ -141,11 +315,11 @@ err: static void hdmi_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); *timings = dssdev->panel.timings; - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); } static void hdmi_set_timings(struct omap_dss_device *dssdev, @@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev, { DSSDBG("hdmi_set_timings\n"); - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); + + /* + * TODO: notify audio users that there was a timings change. For + * now, disable audio locally to not break our audio state machine. 
+ */ + hdmi_panel_audio_disable(dssdev); dssdev->panel.timings = *timings; omapdss_hdmi_display_set_timing(dssdev); - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); } static int hdmi_check_timings(struct omap_dss_device *dssdev, @@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev, DSSDBG("hdmi_check_timings\n"); - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); r = omapdss_hdmi_display_check_timing(dssdev, timings); - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } @@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len) { int r; - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = omapdss_hdmi_display_enable(dssdev); @@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len) dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) omapdss_hdmi_display_disable(dssdev); err: - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } @@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev) { int r; - mutex_lock(&hdmi.hdmi_lock); + mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { r = omapdss_hdmi_display_enable(dssdev); @@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev) dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) omapdss_hdmi_display_disable(dssdev); err: - mutex_unlock(&hdmi.hdmi_lock); + mutex_unlock(&hdmi.lock); return r; } @@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = { .check_timings = hdmi_check_timings, .read_edid = hdmi_read_edid, .detect = hdmi_detect, + .audio_enable = hdmi_panel_audio_enable, + .audio_disable = hdmi_panel_audio_disable, + .audio_start = hdmi_panel_audio_start, + .audio_stop = hdmi_panel_audio_stop, + .audio_supported = hdmi_panel_audio_supported, + .audio_config = hdmi_panel_audio_config, .driver = { .name = "hdmi_panel", .owner = THIS_MODULE, @@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = { int hdmi_panel_init(void) { - mutex_init(&hdmi.hdmi_lock); + mutex_init(&hdmi.lock); + +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) + spin_lock_init(&hdmi.audio_lock); +#endif omap_dss_register_driver(&hdmi_driver); diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index e7364603f6a1..0cbcde4c688a 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr, return 0; } +int dss_mgr_check_timings(struct omap_overlay_manager *mgr, + const struct omap_video_timings *timings) +{ + if (!dispc_mgr_timings_ok(mgr->id, timings)) { + DSSERR("check_manager: invalid timings\n"); + return -EINVAL; + } + + return 0; +} + int dss_mgr_check(struct omap_overlay_manager *mgr, - struct omap_dss_device *dssdev, struct omap_overlay_manager_info *info, + const struct omap_video_timings *mgr_timings, struct omap_overlay_info **overlay_infos) { struct omap_overlay *ovl; @@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr, return r; } + r = dss_mgr_check_timings(mgr, mgr_timings); + if (r) + return r; + list_for_each_entry(ovl, &mgr->overlays, list) { struct omap_overlay_info *oi; int r; @@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr, if (oi == NULL) continue; - r = dss_ovl_check(ovl, oi, dssdev); + r = dss_ovl_check(ovl, oi, mgr_timings); if (r) return r; } diff --git a/drivers/video/omap2/dss/overlay.c 
b/drivers/video/omap2/dss/overlay.c index 6e821810deec..b0ba60f88dd2 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl, return -EINVAL; } + if (dss_feat_rotation_type_supported(info->rotation_type) == 0) { + DSSERR("check_overlay: rotation type %d not supported\n", + info->rotation_type); + return -EINVAL; + } + return 0; } -int dss_ovl_check(struct omap_overlay *ovl, - struct omap_overlay_info *info, struct omap_dss_device *dssdev) +int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, + const struct omap_video_timings *mgr_timings) { u16 outw, outh; u16 dw, dh; - if (dssdev == NULL) - return 0; - - dssdev->driver->get_resolution(dssdev, &dw, &dh); + dw = mgr_timings->x_res; + dh = mgr_timings->y_res; if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { outw = info->width; diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 788a0ef6323a..3d8c206e90e5 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; + struct omap_video_timings timings = { + .hsw = 1, + .hfp = 1, + .hbp = 1, + .vsw = 1, + .vfp = 0, + .vbp = 0, + .x_res = width, + .y_res = height, + }; /*BUG_ON(callback == 0);*/ BUG_ON(rfbi.framedone_callback != NULL); DSSDBG("rfbi_transfer_area %dx%d\n", width, height); - dispc_mgr_set_lcd_size(dssdev->manager->id, width, height); + dss_mgr_set_timings(dssdev->manager, &timings); dispc_mgr_enable(dssdev->manager->id, true); @@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, u16 *x, u16 *y, u16 *w, u16 *h) { u16 dw, dh; + struct omap_video_timings timings = { + .hsw = 1, + .hfp = 1, + .hbp = 1, + .vsw = 1, + .vfp = 0, + .vbp = 0, + .x_res = *w, + .y_res = *h, + }; dssdev->driver->get_resolution(dssdev, &dw, &dh); @@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, if (*w == 0 || *h == 0) return -EINVAL; - dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h); + dss_mgr_set_timings(dssdev->manager, &timings); return 0; } @@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev, } EXPORT_SYMBOL(omap_rfbi_update); -void rfbi_dump_regs(struct seq_file *s) +static void rfbi_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) @@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev) } EXPORT_SYMBOL(omapdss_rfbi_display_disable); -int rfbi_init_display(struct omap_dss_device *dssdev) +static int __init rfbi_init_display(struct omap_dss_device *dssdev) { rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; return 0; } +static void __init rfbi_probe_pdata(struct platform_device *pdev) +{ + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int i, r; + + for (i = 0; i < pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; + + if (dssdev->type != OMAP_DISPLAY_TYPE_DBI) + continue; + + r = rfbi_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } + + r = omap_dss_register_device(dssdev, &pdev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } +} + /* RFBI HW IP initialisation */ -static int omap_rfbihw_probe(struct platform_device *pdev) 
+static int __init omap_rfbihw_probe(struct platform_device *pdev) { u32 rev; struct resource *rfbi_mem; @@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev) rfbi_runtime_put(); + dss_debugfs_create_file("rfbi", rfbi_dump_regs); + + rfbi_probe_pdata(pdev); + return 0; err_runtime_get: @@ -963,8 +1011,9 @@ err_runtime_get: return r; } -static int omap_rfbihw_remove(struct platform_device *pdev) +static int __exit omap_rfbihw_remove(struct platform_device *pdev) { + omap_dss_unregister_child_devices(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } @@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev) static int rfbi_runtime_suspend(struct device *dev) { dispc_runtime_put(); - dss_runtime_put(); return 0; } @@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev) { int r; - r = dss_runtime_get(); - if (r < 0) - goto err_get_dss; - r = dispc_runtime_get(); if (r < 0) - goto err_get_dispc; + return r; return 0; - -err_get_dispc: - dss_runtime_put(); -err_get_dss: - return r; } static const struct dev_pm_ops rfbi_pm_ops = { @@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = { }; static struct platform_driver omap_rfbihw_driver = { - .probe = omap_rfbihw_probe, - .remove = omap_rfbihw_remove, + .remove = __exit_p(omap_rfbihw_remove), .driver = { .name = "omapdss_rfbi", .owner = THIS_MODULE, @@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = { }, }; -int rfbi_init_platform_driver(void) +int __init rfbi_init_platform_driver(void) { - return platform_driver_register(&omap_rfbihw_driver); + return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe); } -void rfbi_uninit_platform_driver(void) +void __exit rfbi_uninit_platform_driver(void) { - return platform_driver_unregister(&omap_rfbihw_driver); + platform_driver_unregister(&omap_rfbihw_driver); } diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 8266ca0d666b..3a43dc2a9b46 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/regulator/consumer.h> #include <linux/export.h> +#include <linux/platform_device.h> #include <video/omapdss.h> #include "dss.h" @@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_reg_enable; - r = dss_runtime_get(); - if (r) - goto err_get_dss; - r = dispc_runtime_get(); if (r) goto err_get_dispc; @@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) } - dispc_mgr_set_lcd_timings(dssdev->manager->id, t); + dss_mgr_set_timings(dssdev->manager, t); r = dss_set_clock_div(&dss_cinfo); if (r) @@ -137,8 +134,6 @@ err_set_dss_clock_div: err_calc_clock_div: dispc_runtime_put(); err_get_dispc: - dss_runtime_put(); -err_get_dss: regulator_disable(sdi.vdds_sdi_reg); err_reg_enable: omap_dss_stop_device(dssdev); @@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev) dss_sdi_disable(); dispc_runtime_put(); - dss_runtime_put(); regulator_disable(sdi.vdds_sdi_reg); @@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev) } EXPORT_SYMBOL(omapdss_sdi_display_disable); -int sdi_init_display(struct omap_dss_device *dssdev) +static int __init sdi_init_display(struct omap_dss_device *dssdev) { DSSDBG("SDI init\n"); @@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev) return 0; } -int sdi_init(void) +static void __init sdi_probe_pdata(struct platform_device 
*pdev) +{ + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int i, r; + + for (i = 0; i < pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; + + if (dssdev->type != OMAP_DISPLAY_TYPE_SDI) + continue; + + r = sdi_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } + + r = omap_dss_register_device(dssdev, &pdev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } +} + +static int __init omap_sdi_probe(struct platform_device *pdev) { + sdi_probe_pdata(pdev); + + return 0; +} + +static int __exit omap_sdi_remove(struct platform_device *pdev) +{ + omap_dss_unregister_child_devices(&pdev->dev); + return 0; } -void sdi_exit(void) +static struct platform_driver omap_sdi_driver = { + .remove = __exit_p(omap_sdi_remove), + .driver = { + .name = "omapdss_sdi", + .owner = THIS_MODULE, + }, +}; + +int __init sdi_init_platform_driver(void) +{ + return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe); +} + +void __exit sdi_uninit_platform_driver(void) { + platform_driver_unregister(&omap_sdi_driver); } diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h index 1f58b84d6901..e734cb444bc7 100644 --- a/drivers/video/omap2/dss/ti_hdmi.h +++ b/drivers/video/omap2/dss/ti_hdmi.h @@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops { void (*pll_disable)(struct hdmi_ip_data *ip_data); - void (*video_enable)(struct hdmi_ip_data *ip_data, bool start); + int (*video_enable)(struct hdmi_ip_data *ip_data); + + void (*video_disable)(struct hdmi_ip_data *ip_data); void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s); @@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops { void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s); -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) - void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start); +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) + int (*audio_enable)(struct hdmi_ip_data *ip_data); + + void (*audio_disable)(struct hdmi_ip_data *ip_data); + + int (*audio_start)(struct hdmi_ip_data *ip_data); + + void (*audio_stop)(struct hdmi_ip_data *ip_data); + + int (*audio_config)(struct hdmi_ip_data *ip_data, + struct omap_dss_audio *audio); #endif }; @@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len); bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data); -void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start); +int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data); +void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data); int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data); @@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) -void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable); +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) +int 
hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts); +int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data); +void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data); +int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data); +void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data); +int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data, + struct omap_dss_audio *audio); #endif #endif diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c index bfe6fe65c8be..4dae1b291079 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c @@ -29,9 +29,14 @@ #include <linux/string.h> #include <linux/seq_file.h> #include <linux/gpio.h> +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) +#include <sound/asound.h> +#include <sound/asoundef.h> +#endif #include "ti_hdmi_4xxx_ip.h" #include "dss.h" +#include "dss_features.h" static inline void hdmi_write_reg(void __iomem *base_addr, const u16 idx, u32 val) @@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data) REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio), - NULL, hpd_irq_handler, - IRQF_DISABLED | IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, "hpd", ip_data); + NULL, hpd_irq_handler, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, "hpd", ip_data); if (r) { DSSERR("HPD IRQ request failed\n"); hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); @@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings, } -void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start) +int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data) +{ + REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31); + return 0; +} + +void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data) { - REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31); + REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31); } static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt, @@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) #define CORE_REG(i, name) name(i) #define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\ - hdmi_read_reg(hdmi_pll_base(ip_data), r)) -#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \ + hdmi_read_reg(hdmi_core_sys_base(ip_data), r)) +#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\ + hdmi_read_reg(hdmi_av_base(ip_data), r)) +#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \ (i < 10) ? 
32 - strlen(#r) : 31 - strlen(#r), " ", \ - hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r))) + hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r))) DUMPCORE(HDMI_CORE_SYS_VND_IDL); DUMPCORE(HDMI_CORE_SYS_DEV_IDL); @@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) DUMPCORE(HDMI_CORE_SYS_SRST); DUMPCORE(HDMI_CORE_CTRL1); DUMPCORE(HDMI_CORE_SYS_SYS_STAT); + DUMPCORE(HDMI_CORE_SYS_DE_DLY); + DUMPCORE(HDMI_CORE_SYS_DE_CTRL); + DUMPCORE(HDMI_CORE_SYS_DE_TOP); + DUMPCORE(HDMI_CORE_SYS_DE_CNTL); + DUMPCORE(HDMI_CORE_SYS_DE_CNTH); + DUMPCORE(HDMI_CORE_SYS_DE_LINL); + DUMPCORE(HDMI_CORE_SYS_DE_LINH_1); DUMPCORE(HDMI_CORE_SYS_VID_ACEN); DUMPCORE(HDMI_CORE_SYS_VID_MODE); DUMPCORE(HDMI_CORE_SYS_INTR_STATE); @@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) DUMPCORE(HDMI_CORE_SYS_INTR4); DUMPCORE(HDMI_CORE_SYS_UMASK1); DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL); - DUMPCORE(HDMI_CORE_SYS_DE_DLY); - DUMPCORE(HDMI_CORE_SYS_DE_CTRL); - DUMPCORE(HDMI_CORE_SYS_DE_TOP); - DUMPCORE(HDMI_CORE_SYS_DE_CNTL); - DUMPCORE(HDMI_CORE_SYS_DE_CNTH); - DUMPCORE(HDMI_CORE_SYS_DE_LINL); - DUMPCORE(HDMI_CORE_SYS_DE_LINH_1); - DUMPCORE(HDMI_CORE_DDC_CMD); - DUMPCORE(HDMI_CORE_DDC_STATUS); DUMPCORE(HDMI_CORE_DDC_ADDR); + DUMPCORE(HDMI_CORE_DDC_SEGM); DUMPCORE(HDMI_CORE_DDC_OFFSET); DUMPCORE(HDMI_CORE_DDC_COUNT1); DUMPCORE(HDMI_CORE_DDC_COUNT2); + DUMPCORE(HDMI_CORE_DDC_STATUS); + DUMPCORE(HDMI_CORE_DDC_CMD); DUMPCORE(HDMI_CORE_DDC_DATA); - DUMPCORE(HDMI_CORE_DDC_SEGM); - DUMPCORE(HDMI_CORE_AV_HDMI_CTRL); - DUMPCORE(HDMI_CORE_AV_DPD); - DUMPCORE(HDMI_CORE_AV_PB_CTRL1); - DUMPCORE(HDMI_CORE_AV_PB_CTRL2); - DUMPCORE(HDMI_CORE_AV_AVI_TYPE); - DUMPCORE(HDMI_CORE_AV_AVI_VERS); - DUMPCORE(HDMI_CORE_AV_AVI_LEN); - DUMPCORE(HDMI_CORE_AV_AVI_CHSUM); + DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL); + DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL1); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL2); + DUMPCOREAV(HDMI_CORE_AV_N_SVAL3); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2); + DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2); + DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3); + DUMPCOREAV(HDMI_CORE_AV_AUD_MODE); + DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL); + DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS); + DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S); + DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH); + DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP); + DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4); + DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5); + DUMPCOREAV(HDMI_CORE_AV_ASRC); + DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN); + DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL); + DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2); + DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3); + DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL); + DUMPCOREAV(HDMI_CORE_AV_DPD); + DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1); + DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2); + DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE); + DUMPCOREAV(HDMI_CORE_AV_AVI_VERS); + DUMPCOREAV(HDMI_CORE_AV_AVI_LEN); + DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM); for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE); + DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE); + DUMPCOREAV(HDMI_CORE_AV_SPD_VERS); + DUMPCOREAV(HDMI_CORE_AV_SPD_LEN); + 
DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM); for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE); + DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN); + DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM); for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE); + DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE); + DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS); + DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN); + DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM); for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE); + DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE); for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE); + DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1); for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++) - DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE); - - DUMPCORE(HDMI_CORE_AV_ACR_CTRL); - DUMPCORE(HDMI_CORE_AV_FREQ_SVAL); - DUMPCORE(HDMI_CORE_AV_N_SVAL1); - DUMPCORE(HDMI_CORE_AV_N_SVAL2); - DUMPCORE(HDMI_CORE_AV_N_SVAL3); - DUMPCORE(HDMI_CORE_AV_CTS_SVAL1); - DUMPCORE(HDMI_CORE_AV_CTS_SVAL2); - DUMPCORE(HDMI_CORE_AV_CTS_SVAL3); - DUMPCORE(HDMI_CORE_AV_CTS_HVAL1); - DUMPCORE(HDMI_CORE_AV_CTS_HVAL2); - DUMPCORE(HDMI_CORE_AV_CTS_HVAL3); - DUMPCORE(HDMI_CORE_AV_AUD_MODE); - DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL); - DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS); - DUMPCORE(HDMI_CORE_AV_SWAP_I2S); - DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH); - DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP); - DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL); - DUMPCORE(HDMI_CORE_AV_I2S_CHST0); - DUMPCORE(HDMI_CORE_AV_I2S_CHST1); - DUMPCORE(HDMI_CORE_AV_I2S_CHST2); - DUMPCORE(HDMI_CORE_AV_I2S_CHST4); - DUMPCORE(HDMI_CORE_AV_I2S_CHST5); - DUMPCORE(HDMI_CORE_AV_ASRC); - DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN); - DUMPCORE(HDMI_CORE_AV_HDMI_CTRL); - DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT); - DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1); - DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2); - DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3); - DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL); - DUMPCORE(HDMI_CORE_AV_DPD); - DUMPCORE(HDMI_CORE_AV_PB_CTRL1); - DUMPCORE(HDMI_CORE_AV_PB_CTRL2); - DUMPCORE(HDMI_CORE_AV_AVI_TYPE); - DUMPCORE(HDMI_CORE_AV_AVI_VERS); - DUMPCORE(HDMI_CORE_AV_AVI_LEN); - DUMPCORE(HDMI_CORE_AV_AVI_CHSUM); - DUMPCORE(HDMI_CORE_AV_SPD_TYPE); - DUMPCORE(HDMI_CORE_AV_SPD_VERS); - DUMPCORE(HDMI_CORE_AV_SPD_LEN); - DUMPCORE(HDMI_CORE_AV_SPD_CHSUM); - DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE); - DUMPCORE(HDMI_CORE_AV_AUDIO_VERS); - DUMPCORE(HDMI_CORE_AV_AUDIO_LEN); - DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM); - DUMPCORE(HDMI_CORE_AV_MPEG_TYPE); - DUMPCORE(HDMI_CORE_AV_MPEG_VERS); - DUMPCORE(HDMI_CORE_AV_MPEG_LEN); - DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM); - DUMPCORE(HDMI_CORE_AV_CP_BYTE1); - DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID); + DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE); + + DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID); } void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) @@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL); } -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) -void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data, +#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) +static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data, struct hdmi_audio_format *aud_fmt) { u32 r; @@ -1037,7 +1045,7 
@@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data, hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r); } -void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data, +static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data, struct hdmi_audio_dma *aud_dma) { u32 r; @@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data, hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r); } -void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, +static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data, struct hdmi_core_audio_config *cfg) { u32 r; @@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1); - /* I2S parameters */ - REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4, - cfg->freq_sample, 3, 0); - + /* + * Set IEC-60958-3 channel status word. It is passed to the IP + * just as it is received. The user of the driver is responsible + * for its contents. + */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0, + cfg->iec60958_cfg->status[0]); + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1, + cfg->iec60958_cfg->status[1]); + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2, + cfg->iec60958_cfg->status[2]); + /* yes, this is correct: status[3] goes to CHST4 register */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4, + cfg->iec60958_cfg->status[3]); + /* yes, this is correct: status[4] goes to CHST5 register */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, + cfg->iec60958_cfg->status[4]); + + /* set I2S parameters */ r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL); - r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7); r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6); - r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5); r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4); - r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3); r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2); r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1); r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0); hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r); - r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5); - r = FLD_MOD(r, cfg->freq_sample, 7, 4); - r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1); - r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0); - hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r); - REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0); @@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2); r = FLD_MOD(r, cfg->en_spdif, 1, 1); hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r); + + /* Audio channel mappings */ + /* TODO: Make channel mapping dynamic. For now, map channels + * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as + * HDMI speaker order is different. See CEA-861 Section 6.6.2. 
+ */ + hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78); + REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5); } -void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, - struct hdmi_core_infoframe_audio *info_aud) +static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data, + struct snd_cea_861_aud_if *info_aud) { - u8 val; u8 sum = 0, checksum = 0; void __iomem *av_base = hdmi_av_base(ip_data); @@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a); sum += 0x84 + 0x001 + 0x00a; - val = (info_aud->db1_coding_type << 4) - | (info_aud->db1_channel_count - 1); - hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val); - sum += val; + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), + info_aud->db1_ct_cc); + sum += info_aud->db1_ct_cc; - val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size; - hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val); - sum += val; + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), + info_aud->db2_sf_ss); + sum += info_aud->db2_sf_ss; - hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00); + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3); + sum += info_aud->db3; - val = info_aud->db4_channel_alloc; - hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val); - sum += val; + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca); + sum += info_aud->db4_ca; - val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3); - hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val); - sum += val; + hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), + info_aud->db5_dminh_lsv); + sum += info_aud->db5_dminh_lsv; hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00); hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00); @@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, */ } -int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data, - u32 sample_freq, u32 *n, u32 *cts) +int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data, + struct omap_dss_audio *audio) { - u32 r; - u32 deep_color = 0; - u32 pclk = ip_data->cfg.timings.pixel_clock; - - if (n == NULL || cts == NULL) + struct hdmi_audio_format audio_format; + struct hdmi_audio_dma audio_dma; + struct hdmi_core_audio_config core; + int err, n, cts, channel_count; + unsigned int fs_nr; + bool word_length_16b = false; + + if (!audio || !audio->iec || !audio->cea || !ip_data) return -EINVAL; + + core.iec60958_cfg = audio->iec; /* - * Obtain current deep color configuration. This needed - * to calculate the TMDS clock based on the pixel clock. + * In the IEC-60958 status word, check if the audio sample word length + * is 16-bit as several optimizations can be performed in such case. */ - r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0); - switch (r) { - case 1: /* No deep color selected */ - deep_color = 100; + if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)) + if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16) + word_length_16b = true; + + /* I2S configuration. See Phillips' specification */ + if (word_length_16b) + core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT; + else + core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + /* + * The I2S input word length is twice the lenght given in the IEC-60958 + * status word. If the word size is greater than + * 20 bits, increment by one. 
+ */ + core.i2s_cfg.in_length_bits = audio->iec->status[4] + & IEC958_AES4_CON_WORDLEN; + if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24) + core.i2s_cfg.in_length_bits++; + core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING; + core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM; + core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST; + core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT; + + /* convert sample frequency to a number */ + switch (audio->iec->status[3] & IEC958_AES3_CON_FS) { + case IEC958_AES3_CON_FS_32000: + fs_nr = 32000; + break; + case IEC958_AES3_CON_FS_44100: + fs_nr = 44100; + break; + case IEC958_AES3_CON_FS_48000: + fs_nr = 48000; break; - case 2: /* 10-bit deep color selected */ - deep_color = 125; + case IEC958_AES3_CON_FS_88200: + fs_nr = 88200; break; - case 3: /* 12-bit deep color selected */ - deep_color = 150; + case IEC958_AES3_CON_FS_96000: + fs_nr = 96000; + break; + case IEC958_AES3_CON_FS_176400: + fs_nr = 176400; + break; + case IEC958_AES3_CON_FS_192000: + fs_nr = 192000; break; default: return -EINVAL; } - switch (sample_freq) { - case 32000: - if ((deep_color == 125) && ((pclk == 54054) - || (pclk == 74250))) - *n = 8192; - else - *n = 4096; + err = hdmi_compute_acr(fs_nr, &n, &cts); + + /* Audio clock regeneration settings */ + core.n = n; + core.cts = cts; + if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) { + core.aud_par_busclk = 0; + core.cts_mode = HDMI_AUDIO_CTS_MODE_SW; + core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK); + } else { + core.aud_par_busclk = (((128 * 31) - 1) << 8); + core.cts_mode = HDMI_AUDIO_CTS_MODE_HW; + core.use_mclk = true; + } + + if (core.use_mclk) + core.mclk_mode = HDMI_AUDIO_MCLK_128FS; + + /* Audio channels settings */ + channel_count = (audio->cea->db1_ct_cc & + CEA861_AUDIO_INFOFRAME_DB1CC) + 1; + + switch (channel_count) { + case 2: + audio_format.active_chnnls_msk = 0x03; + break; + case 3: + audio_format.active_chnnls_msk = 0x07; + break; + case 4: + audio_format.active_chnnls_msk = 0x0f; + break; + case 5: + audio_format.active_chnnls_msk = 0x1f; break; - case 44100: - *n = 6272; + case 6: + audio_format.active_chnnls_msk = 0x3f; break; - case 48000: - if ((deep_color == 125) && ((pclk == 54054) - || (pclk == 74250))) - *n = 8192; - else - *n = 6144; + case 7: + audio_format.active_chnnls_msk = 0x7f; + break; + case 8: + audio_format.active_chnnls_msk = 0xff; break; default: - *n = 0; return -EINVAL; } - /* Calculate CTS. 
See HDMI 1.3a or 1.4a specifications */ - *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); + /* + * the HDMI IP needs to enable four stereo channels when transmitting + * more than 2 audio channels + */ + if (channel_count == 2) { + audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; + core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN; + core.layout = HDMI_AUDIO_LAYOUT_2CH; + } else { + audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS; + core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN | + HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN | + HDMI_AUDIO_I2S_SD3_EN; + core.layout = HDMI_AUDIO_LAYOUT_8CH; + } + + core.en_spdif = false; + /* use sample frequency from channel status word */ + core.fs_override = true; + /* enable ACR packets */ + core.en_acr_pkt = true; + /* disable direct streaming digital audio */ + core.en_dsd_audio = false; + /* use parallel audio interface */ + core.en_parallel_aud_input = true; + + /* DMA settings */ + if (word_length_16b) + audio_dma.transfer_size = 0x10; + else + audio_dma.transfer_size = 0x20; + audio_dma.block_size = 0xC0; + audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; + audio_dma.fifo_threshold = 0x20; /* in number of samples */ + + /* audio FIFO format settings */ + if (word_length_16b) { + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT; + } else { + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + } + audio_format.type = HDMI_AUDIO_TYPE_LPCM; + audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; + /* disable start/stop signals of IEC 60958 blocks */ + audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON; + + /* configure DMA and audio FIFO format*/ + ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma); + ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format); + + /* configure the core*/ + ti_hdmi_4xxx_core_audio_config(ip_data, &core); + + /* configure CEA 861 audio infoframe*/ + ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea); return 0; } -void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable) +int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data) +{ + REG_FLD_MOD(hdmi_wp_base(ip_data), + HDMI_WP_AUDIO_CTRL, true, 31, 31); + return 0; +} + +void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data) +{ + REG_FLD_MOD(hdmi_wp_base(ip_data), + HDMI_WP_AUDIO_CTRL, false, 31, 31); +} + +int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data) { REG_FLD_MOD(hdmi_av_base(ip_data), - HDMI_CORE_AV_AUD_MODE, enable, 0, 0); + HDMI_CORE_AV_AUD_MODE, true, 0, 0); REG_FLD_MOD(hdmi_wp_base(ip_data), - HDMI_WP_AUDIO_CTRL, enable, 31, 31); + HDMI_WP_AUDIO_CTRL, true, 30, 30); + return 0; +} + +void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data) +{ + REG_FLD_MOD(hdmi_av_base(ip_data), + HDMI_CORE_AV_AUD_MODE, false, 0, 0); REG_FLD_MOD(hdmi_wp_base(ip_data), - HDMI_WP_AUDIO_CTRL, enable, 30, 30); + HDMI_WP_AUDIO_CTRL, false, 30, 30); } #endif diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h index a14d1a0e6e41..8366ae19e82e 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h @@ -24,11 +24,6 @@ #include <linux/string.h> #include <video/omapdss.h> #include "ti_hdmi.h" -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - 
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) -#include <sound/soc.h> -#include <sound/pcm_params.h> -#endif /* HDMI Wrapper */ @@ -57,6 +52,13 @@ #define HDMI_CORE_SYS_SRST 0x14 #define HDMI_CORE_CTRL1 0x20 #define HDMI_CORE_SYS_SYS_STAT 0x24 +#define HDMI_CORE_SYS_DE_DLY 0xC8 +#define HDMI_CORE_SYS_DE_CTRL 0xCC +#define HDMI_CORE_SYS_DE_TOP 0xD0 +#define HDMI_CORE_SYS_DE_CNTL 0xD8 +#define HDMI_CORE_SYS_DE_CNTH 0xDC +#define HDMI_CORE_SYS_DE_LINL 0xE0 +#define HDMI_CORE_SYS_DE_LINH_1 0xE4 #define HDMI_CORE_SYS_VID_ACEN 0x124 #define HDMI_CORE_SYS_VID_MODE 0x128 #define HDMI_CORE_SYS_INTR_STATE 0x1C0 @@ -66,50 +68,24 @@ #define HDMI_CORE_SYS_INTR4 0x1D0 #define HDMI_CORE_SYS_UMASK1 0x1D4 #define HDMI_CORE_SYS_TMDS_CTRL 0x208 -#define HDMI_CORE_SYS_DE_DLY 0xC8 -#define HDMI_CORE_SYS_DE_CTRL 0xCC -#define HDMI_CORE_SYS_DE_TOP 0xD0 -#define HDMI_CORE_SYS_DE_CNTL 0xD8 -#define HDMI_CORE_SYS_DE_CNTH 0xDC -#define HDMI_CORE_SYS_DE_LINL 0xE0 -#define HDMI_CORE_SYS_DE_LINH_1 0xE4 + #define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1 #define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1 -#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1 +#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1 #define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1 /* HDMI DDC E-DID */ -#define HDMI_CORE_DDC_CMD 0x3CC -#define HDMI_CORE_DDC_STATUS 0x3C8 #define HDMI_CORE_DDC_ADDR 0x3B4 +#define HDMI_CORE_DDC_SEGM 0x3B8 #define HDMI_CORE_DDC_OFFSET 0x3BC #define HDMI_CORE_DDC_COUNT1 0x3C0 #define HDMI_CORE_DDC_COUNT2 0x3C4 +#define HDMI_CORE_DDC_STATUS 0x3C8 +#define HDMI_CORE_DDC_CMD 0x3CC #define HDMI_CORE_DDC_DATA 0x3D0 -#define HDMI_CORE_DDC_SEGM 0x3B8 /* HDMI IP Core Audio Video */ -#define HDMI_CORE_AV_HDMI_CTRL 0xBC -#define HDMI_CORE_AV_DPD 0xF4 -#define HDMI_CORE_AV_PB_CTRL1 0xF8 -#define HDMI_CORE_AV_PB_CTRL2 0xFC -#define HDMI_CORE_AV_AVI_TYPE 0x100 -#define HDMI_CORE_AV_AVI_VERS 0x104 -#define HDMI_CORE_AV_AVI_LEN 0x108 -#define HDMI_CORE_AV_AVI_CHSUM 0x10C -#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110) -#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15 -#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190) -#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27 -#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210) -#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10 -#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290) -#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27 -#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300) -#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31 -#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380) -#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31 #define HDMI_CORE_AV_ACR_CTRL 0x4 #define HDMI_CORE_AV_FREQ_SVAL 0x8 #define HDMI_CORE_AV_N_SVAL1 0xC @@ -148,25 +124,39 @@ #define HDMI_CORE_AV_AVI_VERS 0x104 #define HDMI_CORE_AV_AVI_LEN 0x108 #define HDMI_CORE_AV_AVI_CHSUM 0x10C +#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110) #define HDMI_CORE_AV_SPD_TYPE 0x180 #define HDMI_CORE_AV_SPD_VERS 0x184 #define HDMI_CORE_AV_SPD_LEN 0x188 #define HDMI_CORE_AV_SPD_CHSUM 0x18C +#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190) #define HDMI_CORE_AV_AUDIO_TYPE 0x200 #define HDMI_CORE_AV_AUDIO_VERS 0x204 #define HDMI_CORE_AV_AUDIO_LEN 0x208 #define HDMI_CORE_AV_AUDIO_CHSUM 0x20C +#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210) #define HDMI_CORE_AV_MPEG_TYPE 0x280 #define HDMI_CORE_AV_MPEG_VERS 0x284 #define HDMI_CORE_AV_MPEG_LEN 0x288 #define HDMI_CORE_AV_MPEG_CHSUM 0x28C +#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290) +#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300) #define HDMI_CORE_AV_CP_BYTE1 0x37C +#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380) #define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC + 
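/*
 * Editorial aside, not part of this patch: hdmi_compute_acr(), called
 * from ti_hdmi_4xxx_audio_config() above, is not shown in these hunks.
 * For reference, the N/CTS pair it produces must satisfy the HDMI Audio
 * Clock Regeneration relation 128 * fs = f_TMDS * N / CTS, so given a
 * recommended N for the sample rate, a software-mode CTS can be derived
 * roughly as in the sketch below (the function name and the kHz unit are
 * assumptions for illustration; div_u64() is from <linux/math64.h>):
 */
static inline u32 example_hdmi_cts(u32 tmds_khz, u32 n, u32 fs)
{
	/* CTS = f_TMDS * N / (128 * fs), with the TMDS clock given in kHz */
	return (u32)div_u64((u64)tmds_khz * 1000 * n, 128 * fs);
}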
#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4 #define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4 #define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4 #define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4 +#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15 +#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27 +#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10 +#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27 +#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31 +#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31 + /* PLL */ #define PLLCTRL_PLL_CONTROL 0x0 @@ -284,35 +274,6 @@ enum hdmi_core_infoframe { HDMI_INFOFRAME_AVI_DB5PR_8 = 7, HDMI_INFOFRAME_AVI_DB5PR_9 = 8, HDMI_INFOFRAME_AVI_DB5PR_10 = 9, - HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0, - HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1, - HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2, - HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3, - HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4, - HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5, - HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6, - HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7, - HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8, - HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9, - HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10, - HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11, - HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12, - HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13, - HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14, - HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0, - HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1, - HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2, - HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3, - HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4, - HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5, - HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6, - HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7, - HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0, - HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1, - HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2, - HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3, - HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0, - HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1 }; enum hdmi_packing_mode { @@ -322,17 +283,6 @@ enum hdmi_packing_mode { HDMI_PACK_ALREADYPACKED = 7 }; -enum hdmi_core_audio_sample_freq { - HDMI_AUDIO_FS_32000 = 0x3, - HDMI_AUDIO_FS_44100 = 0x0, - HDMI_AUDIO_FS_48000 = 0x2, - HDMI_AUDIO_FS_88200 = 0x8, - HDMI_AUDIO_FS_96000 = 0xA, - HDMI_AUDIO_FS_176400 = 0xC, - HDMI_AUDIO_FS_192000 = 0xE, - HDMI_AUDIO_FS_NOT_INDICATED = 0x1 -}; - enum hdmi_core_audio_layout { HDMI_AUDIO_LAYOUT_2CH = 0, HDMI_AUDIO_LAYOUT_8CH = 1 @@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig { }; enum hdmi_audio_i2s_config { - HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0, - HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1, HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0, HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1, - HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0, - HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1, - HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0, - HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1, - HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6, - HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2, - HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4, - HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5, - HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1, - HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6, - HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2, - HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4, - HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5, HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0, HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1, HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0, HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1, - HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0, - HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2, - HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12, - HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4, - HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8, - HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10, - HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13, - HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5, - 
HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9, - HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11, HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0, HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1, HDMI_AUDIO_I2S_SD0_EN = 1, @@ -446,20 +371,6 @@ struct hdmi_core_video_config { enum hdmi_core_tclkselclkmult tclk_sel_clkmult; }; -/* - * Refer to section 8.2 in HDMI 1.3 specification for - * details about infoframe databytes - */ -struct hdmi_core_infoframe_audio { - u8 db1_coding_type; - u8 db1_channel_count; - u8 db2_sample_freq; - u8 db2_sample_size; - u8 db4_channel_alloc; - bool db5_downmix_inh; - u8 db5_lsv; /* Level shift values for downmix */ -}; - struct hdmi_core_packet_enable_repeat { u32 audio_pkt; u32 audio_pkt_repeat; @@ -496,15 +407,10 @@ struct hdmi_audio_dma { }; struct hdmi_core_audio_i2s_config { - u8 word_max_length; - u8 word_length; u8 in_length_bits; u8 justification; - u8 en_high_bitrate_aud; u8 sck_edge_mode; - u8 cbit_order; u8 vbit; - u8 ws_polarity; u8 direction; u8 shift; u8 active_sds; @@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config { struct hdmi_core_audio_config { struct hdmi_core_audio_i2s_config i2s_cfg; - enum hdmi_core_audio_sample_freq freq_sample; + struct snd_aes_iec958 *iec60958_cfg; bool fs_override; u32 n; u32 cts; @@ -527,17 +433,4 @@ struct hdmi_core_audio_config { bool en_spdif; }; -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) -int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data, - u32 sample_freq, u32 *n, u32 *cts); -void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, - struct hdmi_core_infoframe_audio *info_aud); -void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, - struct hdmi_core_audio_config *cfg); -void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data, - struct hdmi_audio_dma *aud_dma); -void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data, - struct hdmi_audio_format *aud_fmt); -#endif #endif diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 9c3daf71750c..2b8973931ff4 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config( return &venc_config_ntsc_trm; BUG(); + return NULL; } static int venc_power_on(struct omap_dss_device *dssdev) @@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev) venc_write_reg(VENC_OUTPUT_CONTROL, l); - dispc_set_digit_size(dssdev->panel.timings.x_res, - dssdev->panel.timings.y_res/2); + dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); - regulator_enable(venc.vdda_dac_reg); + r = regulator_enable(venc.vdda_dac_reg); + if (r) + goto err; if (dssdev->platform_enable) dssdev->platform_enable(dssdev); @@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void) return 13500000; } +static ssize_t display_output_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + const char *ret; + + switch (dssdev->phy.venc.type) { + case OMAP_DSS_VENC_TYPE_COMPOSITE: + ret = "composite"; + break; + case OMAP_DSS_VENC_TYPE_SVIDEO: + ret = "svideo"; + break; + default: + return -EINVAL; + } + + return snprintf(buf, PAGE_SIZE, "%s\n", ret); +} + +static ssize_t display_output_type_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + enum omap_dss_venc_type new_type; + + if (sysfs_streq("composite", buf)) + new_type = OMAP_DSS_VENC_TYPE_COMPOSITE; 
+ else if (sysfs_streq("svideo", buf)) + new_type = OMAP_DSS_VENC_TYPE_SVIDEO; + else + return -EINVAL; + + mutex_lock(&venc.venc_lock); + + if (dssdev->phy.venc.type != new_type) { + dssdev->phy.venc.type = new_type; + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + venc_power_off(dssdev); + venc_power_on(dssdev); + } + } + + mutex_unlock(&venc.venc_lock); + + return size; +} + +static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR, + display_output_type_show, display_output_type_store); + /* driver */ static int venc_panel_probe(struct omap_dss_device *dssdev) { dssdev->panel.timings = omap_dss_pal_timings; - return 0; + return device_create_file(&dssdev->dev, &dev_attr_output_type); } static void venc_panel_remove(struct omap_dss_device *dssdev) { + device_remove_file(&dssdev->dev, &dev_attr_output_type); } static int venc_panel_enable(struct omap_dss_device *dssdev) @@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev) return venc_panel_enable(dssdev); } -static void venc_get_timings(struct omap_dss_device *dssdev, - struct omap_video_timings *timings) -{ - *timings = dssdev->panel.timings; -} - static void venc_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { @@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev, /* turn the venc off and on to get new timings to use */ venc_panel_disable(dssdev); venc_panel_enable(dssdev); + } else { + dss_mgr_set_timings(dssdev->manager, timings); } } @@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = { .get_resolution = omapdss_default_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, - .get_timings = venc_get_timings, .set_timings = venc_set_timings, .check_timings = venc_check_timings, @@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = { }; /* driver end */ -int venc_init_display(struct omap_dss_device *dssdev) +static int __init venc_init_display(struct omap_dss_device *dssdev) { DSSDBG("init_display\n"); @@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev) return 0; } -void venc_dump_regs(struct seq_file *s) +static void venc_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) @@ -779,8 +828,32 @@ static void venc_put_clocks(void) clk_put(venc.tv_dac_clk); } +static void __init venc_probe_pdata(struct platform_device *pdev) +{ + struct omap_dss_board_info *pdata = pdev->dev.platform_data; + int r, i; + + for (i = 0; i < pdata->num_devices; ++i) { + struct omap_dss_device *dssdev = pdata->devices[i]; + + if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) + continue; + + r = venc_init_display(dssdev); + if (r) { + DSSERR("device %s init failed: %d\n", dssdev->name, r); + continue; + } + + r = omap_dss_register_device(dssdev, &pdev->dev, i); + if (r) + DSSERR("device %s register failed: %d\n", + dssdev->name, r); + } +} + /* VENC HW IP initialisation */ -static int omap_venchw_probe(struct platform_device *pdev) +static int __init omap_venchw_probe(struct platform_device *pdev) { u8 rev_id; struct resource *venc_mem; @@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev) if (r) goto err_reg_panel_driver; + dss_debugfs_create_file("venc", venc_dump_regs); + + venc_probe_pdata(pdev); + return 0; err_reg_panel_driver: @@ -833,12 +910,15 @@ err_runtime_get: return r; } -static int omap_venchw_remove(struct platform_device *pdev) +static int __exit omap_venchw_remove(struct platform_device *pdev) { + 
omap_dss_unregister_child_devices(&pdev->dev); + if (venc.vdda_dac_reg != NULL) { regulator_put(venc.vdda_dac_reg); venc.vdda_dac_reg = NULL; } + omap_dss_unregister_driver(&venc_driver); pm_runtime_disable(&pdev->dev); @@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev) clk_disable(venc.tv_dac_clk); dispc_runtime_put(); - dss_runtime_put(); return 0; } @@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev) { int r; - r = dss_runtime_get(); - if (r < 0) - goto err_get_dss; - r = dispc_runtime_get(); if (r < 0) - goto err_get_dispc; + return r; if (venc.tv_dac_clk) clk_enable(venc.tv_dac_clk); return 0; - -err_get_dispc: - dss_runtime_put(); -err_get_dss: - return r; } static const struct dev_pm_ops venc_pm_ops = { @@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = { }; static struct platform_driver omap_venchw_driver = { - .probe = omap_venchw_probe, - .remove = omap_venchw_remove, + .remove = __exit_p(omap_venchw_remove), .driver = { .name = "omapdss_venc", .owner = THIS_MODULE, @@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = { }, }; -int venc_init_platform_driver(void) +int __init venc_init_platform_driver(void) { if (cpu_is_omap44xx()) return 0; - return platform_driver_register(&omap_venchw_driver); + return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe); } -void venc_uninit_platform_driver(void) +void __exit venc_uninit_platform_driver(void) { if (cpu_is_omap44xx()) return; - return platform_driver_unregister(&omap_venchw_driver); + platform_driver_unregister(&omap_venchw_driver); } diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index 6a09ef87e14f..c6cf372d22c5 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) DBG("omapfb_setup_plane\n"); - if (ofbi->num_overlays != 1) { + if (ofbi->num_overlays == 0) { r = -EINVAL; goto out; } @@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi) { struct omapfb_info *ofbi = FB2OFB(fbi); - if (ofbi->num_overlays != 1) { + if (ofbi->num_overlays == 0) { memset(pi, 0, sizeof(*pi)); } else { struct omap_overlay *ovl; @@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) down_write_nested(&rg->lock, rg->id); atomic_inc(&rg->lock_count); + if (rg->size == size && rg->type == mi->type) + goto out; + if (atomic_read(&rg->map_count)) { r = -EBUSY; goto out; @@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi) } } - if (rg->size != size || rg->type != mi->type) { - r = omapfb_realloc_fbmem(fbi, size, mi->type); - if (r) { - dev_err(fbdev->dev, "realloc fbmem failed\n"); - goto out; - } + r = omapfb_realloc_fbmem(fbi, size, mi->type); + if (r) { + dev_err(fbdev->dev, "realloc fbmem failed\n"); + goto out; } out: diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index b00db4068d21..3450ea0966c9 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot) break; default: BUG(); + return 0; } offset *= vrfb->bytespp; @@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries, fbnum = simple_strtoul(p, &p, 10); - if (p == 
param) + if (p == start) return -EINVAL; if (*p != ':') @@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, return 0; } -static int omapfb_probe(struct platform_device *pdev) +static int __init omapfb_probe(struct platform_device *pdev) { struct omapfb2_device *fbdev = NULL; int r = 0; @@ -2448,7 +2449,7 @@ err0: return r; } -static int omapfb_remove(struct platform_device *pdev) +static int __exit omapfb_remove(struct platform_device *pdev) { struct omapfb2_device *fbdev = platform_get_drvdata(pdev); @@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev) } static struct platform_driver omapfb_driver = { - .probe = omapfb_probe, - .remove = omapfb_remove, + .remove = __exit_p(omapfb_remove), .driver = { .name = "omapfb", .owner = THIS_MODULE, @@ -2474,7 +2474,7 @@ static int __init omapfb_init(void) { DBG("omapfb_init\n"); - if (platform_driver_register(&omapfb_driver)) { + if (platform_driver_probe(&omapfb_driver, omapfb_probe)) { printk(KERN_ERR "failed to register omapfb driver\n"); return -ENODEV; } diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h index c0bdc9b54ecf..30361a09aecd 100644 --- a/drivers/video/omap2/omapfb/omapfb.h +++ b/drivers/video/omap2/omapfb/omapfb.h @@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data( /* This should never happen */ BUG(); + return NULL; } static inline void omapfb_lock(struct omapfb2_device *fbdev) diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c index 4e5b960c32c8..7e990220ad2a 100644 --- a/drivers/video/omap2/vrfb.c +++ b/drivers/video/omap2/vrfb.c @@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr, pixel_size_exp = 2; else if (bytespp == 2) pixel_size_exp = 1; - else + else { BUG(); + return; + } vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp; vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT); diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c index 1d71c08a818f..0b4ae0cebeda 100644 --- a/drivers/video/pxa3xx-gcu.c +++ b/drivers/video/pxa3xx-gcu.c @@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv) ret = wait_event_interruptible_timeout(priv->wait_idle, !priv->shared->hw_running, HZ*4); - if (ret < 0) + if (ret != 0) break; - if (ret > 0) - continue; - if (gc_readl(priv, REG_GCRBEXHR) == rbexhr && priv->shared->num_interrupts == num) { QERROR("TIMEOUT"); diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index f3105160bf98..5f9d8e69029e 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c @@ -47,7 +47,7 @@ #ifdef CONFIG_FB_S3C_DEBUG_REGWRITE #undef writel #define writel(v, r) do { \ - printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \ + pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \ __raw_writel(v, r); \ } while (0) #endif /* FB_S3C_DEBUG_REGWRITE */ @@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info) u32 alpha = 0; u32 data; u32 pagewidth; - int clkdiv; dev_dbg(sfb->dev, "setting framebuffer parameters\n"); @@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info) /* disable the window whilst we update it */ writel(0, regs + WINCON(win_no)); - /* use platform specified window as the basis for the lcd timings */ - - if (win_no == sfb->pdata->default_win) { - clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); - - data = sfb->pdata->vidcon0; - data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); - - if (clkdiv > 1) - data |= VIDCON0_CLKVAL_F(clkdiv-1) | 
VIDCON0_CLKDIR; - else - data &= ~VIDCON0_CLKDIR; /* 1:1 clock */ - - /* write the timing data to the panel */ - - if (sfb->variant.is_2443) - data |= (1 << 5); - - writel(data, regs + VIDCON0); - + if (!sfb->output_on) s3c_fb_enable(sfb, 1); - data = VIDTCON0_VBPD(var->upper_margin - 1) | - VIDTCON0_VFPD(var->lower_margin - 1) | - VIDTCON0_VSPW(var->vsync_len - 1); - - writel(data, regs + sfb->variant.vidtcon); - - data = VIDTCON1_HBPD(var->left_margin - 1) | - VIDTCON1_HFPD(var->right_margin - 1) | - VIDTCON1_HSPW(var->hsync_len - 1); - - /* VIDTCON1 */ - writel(data, regs + sfb->variant.vidtcon + 4); - - data = VIDTCON2_LINEVAL(var->yres - 1) | - VIDTCON2_HOZVAL(var->xres - 1) | - VIDTCON2_LINEVAL_E(var->yres - 1) | - VIDTCON2_HOZVAL_E(var->xres - 1); - writel(data, regs + sfb->variant.vidtcon + 8); - } - /* write the buffer address */ /* start and end registers stride is 8 */ @@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info) struct s3c_fb *sfb = win->parent; unsigned int index = win->index; u32 wincon; + u32 output_on = sfb->output_on; dev_dbg(sfb->dev, "blank mode %d\n", blank_mode); @@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info) shadow_protect_win(win, 1); writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4)); - shadow_protect_win(win, 0); /* Check the enabled state to see if we need to be running the * main LCD interface, as if there are no active windows then * it is highly likely that we also do not need to output * anything. */ - - /* We could do something like the following code, but the current - * system of using framebuffer events means that we cannot make - * the distinction between just window 0 being inactive and all - * the windows being down. - * - * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0); - */ - - /* we're stuck with this until we can do something about overriding - * the power control using the blanking event for a single fb. - */ - if (index == sfb->pdata->default_win) { - shadow_protect_win(win, 1); - s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0); - shadow_protect_win(win, 0); - } + s3c_fb_enable(sfb, sfb->enabled ? 1 : 0); + shadow_protect_win(win, 0); pm_runtime_put_sync(sfb->dev); - return 0; + return output_on == sfb->output_on; } /** @@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = { * * Calculate the pixel clock when none has been given through platform data. */ -static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode) +static void s3c_fb_missing_pixclock(struct fb_videomode *mode) { u64 pixclk = 1000000000000ULL; u32 div; @@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb, dev_dbg(sfb->dev, "allocating memory for display\n"); - real_size = windata->win_mode.xres * windata->win_mode.yres; + real_size = windata->xres * windata->yres; virt_size = windata->virtual_x * windata->virtual_y; dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n", - real_size, windata->win_mode.xres, windata->win_mode.yres, + real_size, windata->xres, windata->yres, virt_size, windata->virtual_x, windata->virtual_y); size = (real_size > virt_size) ? 
real_size : virt_size; @@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no, struct s3c_fb_win **res) { struct fb_var_screeninfo *var; - struct fb_videomode *initmode; + struct fb_videomode initmode; struct s3c_fb_pd_win *windata; struct s3c_fb_win *win; struct fb_info *fbinfo; @@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no, } windata = sfb->pdata->win[win_no]; - initmode = &windata->win_mode; + initmode = *sfb->pdata->vtiming; WARN_ON(windata->max_bpp == 0); - WARN_ON(windata->win_mode.xres == 0); - WARN_ON(windata->win_mode.yres == 0); + WARN_ON(windata->xres == 0); + WARN_ON(windata->yres == 0); win = fbinfo->par; *res = win; @@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no, } /* setup the initial video mode from the window */ - fb_videomode_to_var(&fbinfo->var, initmode); + initmode.xres = windata->xres; + initmode.yres = windata->yres; + fb_videomode_to_var(&fbinfo->var, &initmode); fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; fbinfo->fix.accel = FB_ACCEL_NONE; @@ -1339,6 +1286,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no, } /** + * s3c_fb_set_rgb_timing() - set video timing for rgb interface. + * @sfb: The base resources for the hardware. + * + * Set horizontal and vertical lcd rgb interface timing. + */ +static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb) +{ + struct fb_videomode *vmode = sfb->pdata->vtiming; + void __iomem *regs = sfb->regs; + int clkdiv; + u32 data; + + if (!vmode->pixclock) + s3c_fb_missing_pixclock(vmode); + + clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock); + + data = sfb->pdata->vidcon0; + data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); + + if (clkdiv > 1) + data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR; + else + data &= ~VIDCON0_CLKDIR; /* 1:1 clock */ + + if (sfb->variant.is_2443) + data |= (1 << 5); + writel(data, regs + VIDCON0); + + data = VIDTCON0_VBPD(vmode->upper_margin - 1) | + VIDTCON0_VFPD(vmode->lower_margin - 1) | + VIDTCON0_VSPW(vmode->vsync_len - 1); + writel(data, regs + sfb->variant.vidtcon); + + data = VIDTCON1_HBPD(vmode->left_margin - 1) | + VIDTCON1_HFPD(vmode->right_margin - 1) | + VIDTCON1_HSPW(vmode->hsync_len - 1); + writel(data, regs + sfb->variant.vidtcon + 4); + + data = VIDTCON2_LINEVAL(vmode->yres - 1) | + VIDTCON2_HOZVAL(vmode->xres - 1) | + VIDTCON2_LINEVAL_E(vmode->yres - 1) | + VIDTCON2_HOZVAL_E(vmode->xres - 1); + writel(data, regs + sfb->variant.vidtcon + 8); +} + +/** * s3c_fb_clear_win() - clear hardware window registers. * @sfb: The base resources for the hardware. * @win: The window to process. 
@@ -1481,15 +1475,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev) writel(0xffffff, regs + WKEYCON1); } + s3c_fb_set_rgb_timing(sfb); + /* we have the register setup, start allocating framebuffers */ for (win = 0; win < fbdrv->variant.nr_windows; win++) { if (!pd->win[win]) continue; - if (!pd->win[win]->win_mode.pixclock) - s3c_fb_missing_pixclock(&pd->win[win]->win_mode); - ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win], &sfb->windows[win]); if (ret < 0) { @@ -1564,6 +1557,8 @@ static int s3c_fb_suspend(struct device *dev) struct s3c_fb_win *win; int win_no; + pm_runtime_get_sync(sfb->dev); + for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) { win = sfb->windows[win_no]; if (!win) @@ -1577,6 +1572,9 @@ static int s3c_fb_suspend(struct device *dev) clk_disable(sfb->lcd_clk); clk_disable(sfb->bus_clk); + + pm_runtime_put_sync(sfb->dev); + return 0; } @@ -1589,6 +1587,8 @@ static int s3c_fb_resume(struct device *dev) int win_no; u32 reg; + pm_runtime_get_sync(sfb->dev); + clk_enable(sfb->bus_clk); if (!sfb->variant.has_clksel) @@ -1623,6 +1623,8 @@ static int s3c_fb_resume(struct device *dev) shadow_protect_win(win, 0); } + s3c_fb_set_rgb_timing(sfb); + /* restore framebuffers */ for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) { win = sfb->windows[win_no]; @@ -1633,6 +1635,8 @@ static int s3c_fb_resume(struct device *dev) s3c_fb_set_par(win->fbinfo); } + pm_runtime_put_sync(sfb->dev); + return 0; } #endif diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index eafb19da2c07..930e550e752a 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c @@ -31,6 +31,7 @@ #include "sh_mobile_lcdcfb.h" +/* HDMI Core Control Register (HTOP0) */ #define HDMI_SYSTEM_CTRL 0x00 /* System control */ #define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control, bits 19..16 of 20-bit N for Audio Clock Regeneration packet */ @@ -201,6 +202,68 @@ #define HDMI_REVISION_ID 0xF1 /* Revision ID */ #define HDMI_TEST_MODE 0xFE /* Test mode */ +/* HDMI Control Register (HTOP1) */ +#define HDMI_HTOP1_TEST_MODE 0x0000 /* Test mode */ +#define HDMI_HTOP1_VIDEO_INPUT 0x0008 /* VideoInput */ +#define HDMI_HTOP1_CORE_RSTN 0x000C /* CoreResetn */ +#define HDMI_HTOP1_PLLBW 0x0018 /* PLLBW */ +#define HDMI_HTOP1_CLK_TO_PHY 0x001C /* Clk to Phy */ +#define HDMI_HTOP1_VIDEO_INPUT2 0x0020 /* VideoInput2 */ +#define HDMI_HTOP1_TISEMP0_1 0x0024 /* tisemp0-1 */ +#define HDMI_HTOP1_TISEMP2_C 0x0028 /* tisemp2-c */ +#define HDMI_HTOP1_TISIDRV 0x002C /* tisidrv */ +#define HDMI_HTOP1_TISEN 0x0034 /* tisen */ +#define HDMI_HTOP1_TISDREN 0x0038 /* tisdren */ +#define HDMI_HTOP1_CISRANGE 0x003C /* cisrange */ +#define HDMI_HTOP1_ENABLE_SELECTOR 0x0040 /* Enable Selector */ +#define HDMI_HTOP1_MACRO_RESET 0x0044 /* Macro reset */ +#define HDMI_HTOP1_PLL_CALIBRATION 0x0048 /* PLL calibration */ +#define HDMI_HTOP1_RE_CALIBRATION 0x004C /* Re-calibration */ +#define HDMI_HTOP1_CURRENT 0x0050 /* Current */ +#define HDMI_HTOP1_PLL_LOCK_DETECT 0x0054 /* PLL lock detect */ +#define HDMI_HTOP1_PHY_TEST_MODE 0x0058 /* PHY Test Mode */ +#define HDMI_HTOP1_CLK_SET 0x0080 /* Clock Set */ +#define HDMI_HTOP1_DDC_FAIL_SAFE 0x0084 /* DDC fail safe */ +#define HDMI_HTOP1_PRBS 0x0088 /* PRBS */ +#define HDMI_HTOP1_EDID_AINC_CONTROL 0x008C /* EDID ainc Control */ +#define HDMI_HTOP1_HTOP_DCL_MODE 0x00FC /* Deep Coloer Mode */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0 0x0100 /* Deep Color:FRC COEF0 */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1 0x0104 /* Deep Color:FRC COEF1 */ 
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2 0x0108 /* Deep Color:FRC COEF2 */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3 0x010C /* Deep Color:FRC COEF3 */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C 0x0110 /* Deep Color:FRC COEF0C */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C 0x0114 /* Deep Color:FRC COEF1C */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C 0x0118 /* Deep Color:FRC COEF2C */ +#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C 0x011C /* Deep Color:FRC COEF3C */ +#define HDMI_HTOP1_HTOP_DCL_FRC_MODE 0x0120 /* Deep Color:FRC Mode */ +#define HDMI_HTOP1_HTOP_DCL_RECT_START1 0x0124 /* Deep Color:Rect Start1 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1 0x0128 /* Deep Color:Rect Size1 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_START2 0x012C /* Deep Color:Rect Start2 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2 0x0130 /* Deep Color:Rect Size2 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_START3 0x0134 /* Deep Color:Rect Start3 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3 0x0138 /* Deep Color:Rect Size3 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_START4 0x013C /* Deep Color:Rect Start4 */ +#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4 0x0140 /* Deep Color:Rect Size4 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1 0x0144 /* Deep Color:Fil Para Y1_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2 0x0148 /* Deep Color:Fil Para Y1_2 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1 0x014C /* Deep Color:Fil Para CB1_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2 0x0150 /* Deep Color:Fil Para CB1_2 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1 0x0154 /* Deep Color:Fil Para CR1_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2 0x0158 /* Deep Color:Fil Para CR1_2 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1 0x015C /* Deep Color:Fil Para Y2_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2 0x0160 /* Deep Color:Fil Para Y2_2 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1 0x0164 /* Deep Color:Fil Para CB2_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2 0x0168 /* Deep Color:Fil Para CB2_2 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1 0x016C /* Deep Color:Fil Para CR2_1 */ +#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */ +#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */ +#define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */ + enum hotplug_state { HDMI_HOTPLUG_DISCONNECTED, HDMI_HOTPLUG_CONNECTED, @@ -211,6 +274,7 @@ struct sh_hdmi { struct sh_mobile_lcdc_entity entity; void __iomem *base; + void __iomem *htop1; enum hotplug_state hp_state; /* hot-plug status */ u8 preprogrammed_vic; /* use a pre-programmed VIC or the external mode */ @@ -222,20 +286,66 @@ struct sh_hdmi { struct delayed_work edid_work; struct fb_videomode mode; struct fb_monspecs monspec; + + /* register access functions */ + void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg); + u8 (*read)(struct sh_hdmi *hdmi, u8 reg); }; #define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity) -static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) +static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg) { iowrite8(data, hdmi->base + reg); } -static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) +static u8 
__hdmi_read8(struct sh_hdmi *hdmi, u8 reg) { return ioread8(hdmi->base + reg); } +static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg) +{ + iowrite32((u32)data, hdmi->base + (reg * 4)); + udelay(100); +} + +static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg) +{ + return (u8)ioread32(hdmi->base + (reg * 4)); +} + +static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) +{ + hdmi->write(hdmi, data, reg); +} + +static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) +{ + return hdmi->read(hdmi, reg); +} + +static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg) +{ + u8 val = hdmi_read(hdmi, reg); + + val &= ~mask; + val |= (data & mask); + + hdmi_write(hdmi, val, reg); +} + +static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg) +{ + iowrite32(data, hdmi->htop1 + reg); + udelay(100); +} + +static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg) +{ + return ioread32(hdmi->htop1 + reg); +} + /* * HDMI sound */ @@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi) msleep(10); /* PS mode b->d, reset PLLA and PLLB */ - hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL); udelay(10); - hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL); } static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi, @@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate, /* Read EDID */ dev_dbg(hdmi->dev, "Read back EDID code:"); for (i = 0; i < 128; i++) { - edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW); + edid[i] = (hdmi->htop1) ? + (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) : + hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW); #ifdef DEBUG if ((i % 16) == 0) { printk(KERN_CONT "\n"); @@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id) u8 status1, status2, mask1, mask2; /* mode_b and PLLA and PLLB reset */ - hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL); /* How long shall reset be held? */ udelay(10); /* mode_b and PLLA and PLLB reset release */ - hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL); status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1); status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2); @@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity) */ if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) { /* PS mode d->e. 
All functions are active */ - hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL); dev_dbg(hdmi->dev, "HDMI running\n"); } @@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity) dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi); /* PS mode e->a */ - hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL); + hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL); } static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = { @@ -1110,10 +1222,58 @@ out: dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi); } +static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi) +{ + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE); + hdmi_htop1_write(hdmi, 0x0000000b, 0x0010); + hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2); + hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1); + hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2); + hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE); + hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW); + hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN); + hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN); + hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR); + hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET); + hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE); + msleep(100); + hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR); + msleep(100); + hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR); + hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET); + hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN); + hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY); + hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2); + hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET); +} + static int __init sh_hdmi_probe(struct platform_device *pdev) { struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 
0); + struct resource *htop1_res; int irq = platform_get_irq(pdev, 0), ret; struct sh_hdmi *hdmi; long rate; @@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) if (!res || !pdata || irq < 0) return -ENODEV; + htop1_res = NULL; + if (pdata->flags & HDMI_HAS_HTOP1) { + htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!htop1_res) { + dev_err(&pdev->dev, "htop1 needs register base\n"); + return -EINVAL; + } + } + hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) { dev_err(&pdev->dev, "Cannot allocate device data\n"); @@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) goto egetclk; } + /* select register access functions */ + if (pdata->flags & HDMI_32BIT_REG) { + hdmi->write = __hdmi_write32; + hdmi->read = __hdmi_read32; + } else { + hdmi->write = __hdmi_write8; + hdmi->read = __hdmi_read8; + } + /* An arbitrary relaxed pixclock just to get things started: from standard 480p */ rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037)); if (rate > 0) @@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); + /* init interrupt polarity */ + if (pdata->flags & HDMI_OUTPUT_PUSH_PULL) + hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL); + + if (pdata->flags & HDMI_OUTPUT_POLARITY_HI) + hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL); + + /* enable htop1 register if needed */ + if (htop1_res) { + hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res)); + if (!hdmi->htop1) { + dev_err(&pdev->dev, "control register region already claimed\n"); + ret = -ENOMEM; + goto emap_htop1; + } + sh_hdmi_htop1_init(hdmi); + } + /* Product and revision IDs are 0 in sh-mobile version */ dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID)); @@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) ecodec: free_irq(irq, hdmi); ereqirq: + if (hdmi->htop1) + iounmap(hdmi->htop1); +emap_htop1: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); iounmap(hdmi->base); @@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); clk_disable(hdmi->hdmi_clk); clk_put(hdmi->hdmi_clk); + if (hdmi->htop1) + iounmap(hdmi->htop1); iounmap(hdmi->base); release_mem_region(res->start, resource_size(res)); kfree(hdmi); diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h index aff73842d877..85d6738b6c64 100644 --- a/drivers/video/sis/init.h +++ b/drivers/video/sis/init.h @@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[] = {0x68, 0x69, 0x00, 0x6b}; static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00}; static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e}; -static const unsigned short SiS_DRAMType[17][5]={ - {0x0C,0x0A,0x02,0x40,0x39}, - {0x0D,0x0A,0x01,0x40,0x48}, - {0x0C,0x09,0x02,0x20,0x35}, - {0x0D,0x09,0x01,0x20,0x44}, - {0x0C,0x08,0x02,0x10,0x31}, - {0x0D,0x08,0x01,0x10,0x40}, - {0x0C,0x0A,0x01,0x20,0x34}, - {0x0C,0x09,0x01,0x08,0x32}, - {0x0B,0x08,0x02,0x08,0x21}, - {0x0C,0x08,0x01,0x08,0x30}, - {0x0A,0x08,0x02,0x04,0x11}, - {0x0B,0x0A,0x01,0x10,0x28}, - {0x09,0x08,0x02,0x02,0x01}, - {0x0B,0x09,0x01,0x08,0x24}, - {0x0B,0x08,0x01,0x04,0x20}, - {0x0A,0x08,0x01,0x02,0x10}, - {0x09,0x08,0x01,0x01,0x00} -}; - -static const unsigned short SiS_SDRDRAM_TYPE[13][5] = -{ - { 2,12, 9,64,0x35}, - { 1,13, 
9,64,0x44}, - { 2,12, 8,32,0x31}, - { 2,11, 9,32,0x25}, - { 1,12, 9,32,0x34}, - { 1,13, 8,32,0x40}, - { 2,11, 8,16,0x21}, - { 1,12, 8,16,0x30}, - { 1,11, 9,16,0x24}, - { 1,11, 8, 8,0x20}, - { 2, 9, 8, 4,0x01}, - { 1,10, 8, 4,0x10}, - { 1, 9, 8, 2,0x00} -}; - -static const unsigned short SiS_DDRDRAM_TYPE[4][5] = -{ - { 2,12, 9,64,0x35}, - { 2,12, 8,32,0x31}, - { 2,11, 8,16,0x21}, - { 2, 9, 8, 4,0x01} -}; - static const unsigned char SiS_MDA_DAC[] = { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 078ca2167d6f..a7a48db64ce2 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo) return 1; /* 32bit */ } +static const unsigned short __devinitconst SiS_DRAMType[17][5] = { + {0x0C,0x0A,0x02,0x40,0x39}, + {0x0D,0x0A,0x01,0x40,0x48}, + {0x0C,0x09,0x02,0x20,0x35}, + {0x0D,0x09,0x01,0x20,0x44}, + {0x0C,0x08,0x02,0x10,0x31}, + {0x0D,0x08,0x01,0x10,0x40}, + {0x0C,0x0A,0x01,0x20,0x34}, + {0x0C,0x09,0x01,0x08,0x32}, + {0x0B,0x08,0x02,0x08,0x21}, + {0x0C,0x08,0x01,0x08,0x30}, + {0x0A,0x08,0x02,0x04,0x11}, + {0x0B,0x0A,0x01,0x10,0x28}, + {0x09,0x08,0x02,0x02,0x01}, + {0x0B,0x09,0x01,0x08,0x24}, + {0x0B,0x08,0x01,0x04,0x20}, + {0x0A,0x08,0x01,0x02,0x10}, + {0x09,0x08,0x01,0x01,0x00} +}; + static int __devinit sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth, int PseudoRankCapacity, int PseudoAdrPinCount, @@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth unsigned short sr14; unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid; unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage; - static const unsigned short SiS_DRAMType[17][5] = { - {0x0C,0x0A,0x02,0x40,0x39}, - {0x0D,0x0A,0x01,0x40,0x48}, - {0x0C,0x09,0x02,0x20,0x35}, - {0x0D,0x09,0x01,0x20,0x44}, - {0x0C,0x08,0x02,0x10,0x31}, - {0x0D,0x08,0x01,0x10,0x40}, - {0x0C,0x0A,0x01,0x20,0x34}, - {0x0C,0x09,0x01,0x08,0x32}, - {0x0B,0x08,0x02,0x08,0x21}, - {0x0C,0x08,0x01,0x08,0x30}, - {0x0A,0x08,0x02,0x04,0x11}, - {0x0B,0x0A,0x01,0x10,0x28}, - {0x09,0x08,0x02,0x02,0x01}, - {0x0B,0x09,0x01,0x08,0x24}, - {0x0B,0x08,0x01,0x04,0x20}, - {0x0A,0x08,0x01,0x02,0x10}, - {0x09,0x08,0x01,0x01,0x00} - }; - for(k = 0; k <= 16; k++) { + for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) { RankCapacity = buswidth * SiS_DRAMType[k][3]; diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c index 30f7a815a62b..5b6abc6de84b 100644 --- a/drivers/video/skeletonfb.c +++ b/drivers/video/skeletonfb.c @@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void) */ module_init(xxxfb_init); -module_exit(xxxfb_remove); +module_exit(xxxfb_exit); MODULE_LICENSE("GPL"); diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c index ccbfef5e828f..af3ef27ad36c 100644 --- a/drivers/video/smscufx.c +++ b/drivers/video/smscufx.c @@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y, } } -int ufx_handle_damage(struct ufx_data *dev, int x, int y, +static int ufx_handle_damage(struct ufx_data *dev, int x, int y, int width, int height) { size_t packed_line_len = ALIGN((width * 2), 4); @@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user) struct fb_deferred_io *fbdefio; - fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); if (fbdefio) { fbdefio->delay = 
UFX_DEFIO_WRITE_DELAY; diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c index 7af1e8166182..8af64148294b 100644 --- a/drivers/video/udlfb.c +++ b/drivers/video/udlfb.c @@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user) struct fb_deferred_io *fbdefio; - fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); + fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); if (fbdefio) { fbdefio->delay = DL_DEFIO_WRITE_DELAY; diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index 0c8837565bc7..c80e770e1800 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c @@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file) static ssize_t viafb_dfph_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { - char buf[20]; - u8 reg_val = 0; - unsigned long length; - if (count < 1) - return -EINVAL; - length = count > 20 ? 20 : count; - if (copy_from_user(&buf[0], buffer, length)) - return -EFAULT; - buf[length - 1] = '\0'; /*Ensure end string */ - if (kstrtou8(buf, 0, ®_val) < 0) - return -EINVAL; + int err; + u8 reg_val; + err = kstrtou8_from_user(buffer, count, 0, ®_val); + if (err) + return err; + viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f); return count; } @@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file) static ssize_t viafb_dfpl_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { - char buf[20]; - u8 reg_val = 0; - unsigned long length; - if (count < 1) - return -EINVAL; - length = count > 20 ? 20 : count; - if (copy_from_user(&buf[0], buffer, length)) - return -EFAULT; - buf[length - 1] = '\0'; /*Ensure end string */ - if (kstrtou8(buf, 0, ®_val) < 0) - return -EINVAL; + int err; + u8 reg_val; + err = kstrtou8_from_user(buffer, count, 0, ®_val); + if (err) + return err; + viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f); return count; } diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index dcb79521e6c8..89f264c67420 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) } /* returns 0 if the page was successfully put into frontswap, -1 if not */ -static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, +static int tmem_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, * returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) 
*/ -static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, +static int tmem_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -362,8 +362,8 @@ static int __init no_frontswap(char *s) __setup("nofrontswap", no_frontswap); static struct frontswap_ops __initdata tmem_frontswap_ops = { - .put_page = tmem_frontswap_put_page, - .get_page = tmem_frontswap_get_page, + .store = tmem_frontswap_store, + .load = tmem_frontswap_load, .invalidate_page = tmem_frontswap_flush_page, .invalidate_area = tmem_frontswap_flush_area, .init = tmem_frontswap_init diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index a1e6c990cd41..e3dd2a1e2bfc 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -68,24 +68,6 @@ static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) return current_fsgid(); } -/** - * v9fs_dentry_from_dir_inode - helper function to get the dentry from - * dir inode. - * - */ - -static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) -{ - struct dentry *dentry; - - spin_lock(&inode->i_lock); - /* Directory should have only one entry. */ - BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); - dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); - spin_unlock(&inode->i_lock); - return dentry; -} - static int v9fs_test_inode_dotl(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); @@ -415,7 +397,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, if (dir->i_mode & S_ISGID) omode |= S_ISGID; - dir_dentry = v9fs_dentry_from_dir_inode(dir); + dir_dentry = dentry->d_parent; dfid = v9fs_fid_lookup(dir_dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); @@ -793,7 +775,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, dir->i_ino, old_dentry->d_name.name, dentry->d_name.name); v9ses = v9fs_inode2v9ses(dir); - dir_dentry = v9fs_dentry_from_dir_inode(dir); + dir_dentry = dentry->d_parent; dfid = v9fs_fid_lookup(dir_dentry); if (IS_ERR(dfid)) return PTR_ERR(dfid); @@ -858,7 +840,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, return -EINVAL; v9ses = v9fs_inode2v9ses(dir); - dir_dentry = v9fs_dentry_from_dir_inode(dir); + dir_dentry = dentry->d_parent; dfid = v9fs_fid_lookup(dir_dentry); if (IS_ERR(dfid)) { err = PTR_ERR(dfid); diff --git a/fs/affs/affs.h b/fs/affs/affs.h index 45a0ce45d7b4..1fceb320d2f2 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -18,14 +18,6 @@ #define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey]) #define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)]) -#ifdef __LITTLE_ENDIAN -#define BO_EXBITS 0x18UL -#elif defined(__BIG_ENDIAN) -#define BO_EXBITS 0x00UL -#else -#error Endianness must be known for affs to work. 
-#endif - #define AFFS_HEAD(bh) ((struct affs_head *)(bh)->b_data) #define AFFS_TAIL(sb, bh) ((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail))) #define AFFS_ROOT_HEAD(bh) ((struct affs_root_head *)(bh)->b_data) @@ -134,9 +134,9 @@ static int aio_setup_ring(struct kioctx *ctx) info->mmap_size = nr_pages * PAGE_SIZE; dprintk("attempting mmap of %lu bytes\n", info->mmap_size); down_write(&ctx->mm->mmap_sem); - info->mmap_base = do_mmap(NULL, 0, info->mmap_size, - PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, - 0); + info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, + PROT_READ|PROT_WRITE, + MAP_ANONYMOUS|MAP_PRIVATE, 0); if (IS_ERR((void *)info->mmap_base)) { up_write(&ctx->mm->mmap_sem); info->mmap_size = 0; diff --git a/fs/attr.c b/fs/attr.c index 584620e5dee5..0da90951d277 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr) return -EPERM; } + if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) { + if (attr->ia_size != inode->i_size) + inode_inc_iversion(inode); + } + if ((ia_valid & ATTR_MODE)) { umode_t amode = attr->ia_mode; /* Flag setting protected by i_mutex */ diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index e658dd134b95..1b52956afe33 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -329,7 +329,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, if (!size) return addr; - down_write(¤t->mm->mmap_sem); /* * total_size is the size of the ELF (interpreter) image. * The _first_ mmap needs to know the full size, otherwise @@ -340,13 +339,12 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, */ if (total_size) { total_size = ELF_PAGEALIGN(total_size); - map_addr = do_mmap(filep, addr, total_size, prot, type, off); + map_addr = vm_mmap(filep, addr, total_size, prot, type, off); if (!BAD_ADDR(map_addr)) - do_munmap(current->mm, map_addr+size, total_size-size); + vm_munmap(map_addr+size, total_size-size); } else - map_addr = do_mmap(filep, addr, size, prot, type, off); + map_addr = vm_mmap(filep, addr, size, prot, type, off); - up_write(¤t->mm->mmap_sem); return(map_addr); } diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 6b2daf99fab8..178cb70acc26 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -562,7 +562,7 @@ static int load_flat_file(struct linux_binprm * bprm, realdatastart = (unsigned long) -ENOMEM; printk("Unable to allocate RAM for process data, errno %d\n", (int)-realdatastart); - do_munmap(current->mm, textpos, text_len); + vm_munmap(textpos, text_len); ret = realdatastart; goto err; } @@ -586,8 +586,8 @@ static int load_flat_file(struct linux_binprm * bprm, } if (IS_ERR_VALUE(result)) { printk("Unable to read data+bss, errno %d\n", (int)-result); - do_munmap(current->mm, textpos, text_len); - do_munmap(current->mm, realdatastart, len); + vm_munmap(textpos, text_len); + vm_munmap(realdatastart, len); ret = result; goto err; } @@ -654,7 +654,7 @@ static int load_flat_file(struct linux_binprm * bprm, } if (IS_ERR_VALUE(result)) { printk("Unable to read code+data+bss, errno %d\n",(int)-result); - do_munmap(current->mm, textpos, text_len + data_len + extra + + vm_munmap(textpos, text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long)); ret = result; goto err; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0151ca1ac657..0236d03c6732 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2974,7 +2974,6 @@ int btrfs_readpage(struct file *file, struct page *page); void btrfs_evict_inode(struct 
inode *inode); int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); int btrfs_dirty_inode(struct inode *inode); -int btrfs_update_time(struct file *file); struct inode *btrfs_alloc_inode(struct super_block *sb); void btrfs_destroy_inode(struct inode *inode); int btrfs_drop_inode(struct inode *inode); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index e887ee62b6d4..614f34a899c2 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -13,15 +13,14 @@ parent_root_objectid) / 4) #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4) -static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, - int connectable) +static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, + struct inode *parent) { struct btrfs_fid *fid = (struct btrfs_fid *)fh; - struct inode *inode = dentry->d_inode; int len = *max_len; int type; - if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) { + if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) { *max_len = BTRFS_FID_SIZE_CONNECTABLE; return 255; } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { @@ -36,19 +35,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; - if (connectable && !S_ISDIR(inode->i_mode)) { - struct inode *parent; + if (parent) { u64 parent_root_id; - spin_lock(&dentry->d_lock); - - parent = dentry->d_parent->d_inode; fid->parent_objectid = BTRFS_I(parent)->location.objectid; fid->parent_gen = parent->i_generation; parent_root_id = BTRFS_I(parent)->root->objectid; - spin_unlock(&dentry->d_lock); - if (parent_root_id != fid->root_objectid) { fid->parent_root_objectid = parent_root_id; len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 876cddd6b2f0..70dc8ca73e25 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1433,7 +1433,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, goto out; } - err = btrfs_update_time(file); + err = file_update_time(file); if (err) { mutex_unlock(&inode->i_mutex); goto out; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 19a0d85b451c..81296c57405a 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -77,7 +77,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, return ERR_PTR(-ENOENT); } - inode->i_mapping->flags &= ~__GFP_FS; + mapping_set_gfp_mask(inode->i_mapping, + mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); return inode; } @@ -367,7 +368,7 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode, static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) { - u64 *val; + __le64 *val; io_ctl_map_page(io_ctl, 1); @@ -390,7 +391,7 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) { - u64 *gen; + __le64 *gen; /* * Skip the crc area. If we don't check crcs then we just have a 64bit diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9991adc0960..f6ab6f5e635a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4475,46 +4475,18 @@ int btrfs_dirty_inode(struct inode *inode) * This is a copy of file_update_time. We need this so we can return error on * ENOSPC for updating the inode in the case of file write and mmap writes. 
*/ -int btrfs_update_time(struct file *file) +static int btrfs_update_time(struct inode *inode, struct timespec *now, + int flags) { - struct inode *inode = file->f_path.dentry->d_inode; - struct timespec now; - int ret; - enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; - - /* First try to exhaust all avenues to not sync */ - if (IS_NOCMTIME(inode)) - return 0; - - now = current_fs_time(inode->i_sb); - if (!timespec_equal(&inode->i_mtime, &now)) - sync_it = S_MTIME; - - if (!timespec_equal(&inode->i_ctime, &now)) - sync_it |= S_CTIME; - - if (IS_I_VERSION(inode)) - sync_it |= S_VERSION; - - if (!sync_it) - return 0; - - /* Finally allowed to write? Takes lock. */ - if (mnt_want_write_file(file)) - return 0; - - /* Only change inode inside the lock region */ - if (sync_it & S_VERSION) + if (flags & S_VERSION) inode_inc_iversion(inode); - if (sync_it & S_CTIME) - inode->i_ctime = now; - if (sync_it & S_MTIME) - inode->i_mtime = now; - ret = btrfs_dirty_inode(inode); - if (!ret) - mark_inode_dirty_sync(inode); - mnt_drop_write(file->f_path.mnt); - return ret; + if (flags & S_CTIME) + inode->i_ctime = *now; + if (flags & S_MTIME) + inode->i_mtime = *now; + if (flags & S_ATIME) + inode->i_atime = *now; + return btrfs_dirty_inode(inode); } /* @@ -6565,7 +6537,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (!ret) { - ret = btrfs_update_time(vma->vm_file); + ret = file_update_time(vma->vm_file); reserved = 1; } if (ret) { @@ -7635,6 +7607,7 @@ static const struct inode_operations btrfs_file_inode_operations = { .permission = btrfs_permission, .fiemap = btrfs_fiemap, .get_acl = btrfs_get_acl, + .update_time = btrfs_update_time, }; static const struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, @@ -7645,6 +7618,7 @@ static const struct inode_operations btrfs_special_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .get_acl = btrfs_get_acl, + .update_time = btrfs_update_time, }; static const struct inode_operations btrfs_symlink_inode_operations = { .readlink = generic_readlink, @@ -7658,6 +7632,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .get_acl = btrfs_get_acl, + .update_time = btrfs_update_time, }; const struct dentry_operations btrfs_dentry_operations = { diff --git a/fs/buffer.c b/fs/buffer.c index ad5938ca357c..838a9cf246bd 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3152,7 +3152,7 @@ SYSCALL_DEFINE2(bdflush, int, func, long, data) /* * Buffer-head allocation */ -static struct kmem_cache *bh_cachep; +static struct kmem_cache *bh_cachep __read_mostly; /* * Once the number of bh's in the machine exceeds this level, we start diff --git a/fs/ceph/export.c b/fs/ceph/export.c index fbb2a643ef10..8e1b60e557b6 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -40,38 +40,49 @@ struct ceph_nfs_confh { u32 parent_name_hash; } __attribute__ ((packed)); -static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, - int connectable) +/* + * The presence of @parent_inode here tells us whether NFS wants a + * connectable file handle. However, we want to make a connectionable + * file handle unconditionally so that the MDS gets as much of a hint + * as possible. 
That means we only use @parent_dentry to indicate + * whether nfsd wants a connectable fh, and whether we should indicate + * failure from a too-small @max_len. + */ +static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len, + struct inode *parent_inode) { int type; struct ceph_nfs_fh *fh = (void *)rawfh; struct ceph_nfs_confh *cfh = (void *)rawfh; - struct dentry *parent; - struct inode *inode = dentry->d_inode; int connected_handle_length = sizeof(*cfh)/4; int handle_length = sizeof(*fh)/4; + struct dentry *dentry = d_find_alias(inode); + struct dentry *parent; /* don't re-export snaps */ if (ceph_snap(inode) != CEPH_NOSNAP) return -EINVAL; - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - if (*max_len >= connected_handle_length) { + /* if we found an alias, generate a connectable fh */ + if (*max_len >= connected_handle_length && dentry) { dout("encode_fh %p connectable\n", dentry); - cfh->ino = ceph_ino(dentry->d_inode); + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + cfh->ino = ceph_ino(inode); cfh->parent_ino = ceph_ino(parent->d_inode); cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode, dentry); *max_len = connected_handle_length; type = 2; + spin_unlock(&dentry->d_lock); } else if (*max_len >= handle_length) { - if (connectable) { + if (parent_inode) { + /* nfsd wants connectable */ *max_len = connected_handle_length; type = 255; } else { dout("encode_fh %p\n", dentry); - fh->ino = ceph_ino(dentry->d_inode); + fh->ino = ceph_ino(inode); *max_len = handle_length; type = 1; } @@ -79,7 +90,6 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, *max_len = handle_length; type = 255; } - spin_unlock(&dentry->d_lock); return type; } diff --git a/fs/compat.c b/fs/compat.c index 6556a9ce8a28..6161255fac45 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -871,12 +871,12 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, { int error; struct file *file; + int fput_needed; struct compat_readdir_callback buf; - error = -EBADF; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.result = 0; buf.dirent = dirent; @@ -885,8 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd, if (buf.result) error = buf.result; - fput(file); -out: + fput_light(file, fput_needed); return error; } @@ -953,16 +952,15 @@ asmlinkage long compat_sys_getdents(unsigned int fd, struct file * file; struct compat_linux_dirent __user * lastdirent; struct compat_getdents_callback buf; + int fput_needed; int error; - error = -EFAULT; if (!access_ok(VERIFY_WRITE, dirent, count)) - goto out; + return -EFAULT; - error = -EBADF; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.current_dir = dirent; buf.previous = NULL; @@ -979,8 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd, else error = count - buf.count; } - fput(file); -out: + fput_light(file, fput_needed); return error; } @@ -1041,16 +1038,15 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, struct file * file; struct linux_dirent64 __user * lastdirent; struct compat_getdents_callback64 buf; + int fput_needed; int error; - error = -EFAULT; if (!access_ok(VERIFY_WRITE, dirent, count)) - goto out; + return -EFAULT; - error = -EBADF; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.current_dir = dirent; buf.previous = NULL; @@ -1068,8 +1064,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd, else error = count 
- buf.count; } - fput(file); -out: + fput_light(file, fput_needed); return error; } #endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */ @@ -1547,7 +1542,6 @@ asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg) compat_ptr(a.exp), compat_ptr(a.tvp)); } -#ifdef HAVE_SET_RESTORE_SIGMASK static long do_compat_pselect(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask, @@ -1670,11 +1664,9 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, return ret; } -#endif /* HAVE_SET_RESTORE_SIGMASK */ #ifdef CONFIG_EPOLL -#ifdef HAVE_SET_RESTORE_SIGMASK asmlinkage long compat_sys_epoll_pwait(int epfd, struct compat_epoll_event __user *events, int maxevents, int timeout, @@ -1718,7 +1710,6 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, return err; } -#endif /* HAVE_SET_RESTORE_SIGMASK */ #endif /* CONFIG_EPOLL */ diff --git a/fs/dcache.c b/fs/dcache.c index 4435d8b32904..85c9e2bff8e6 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -683,8 +683,6 @@ EXPORT_SYMBOL(dget_parent); /** * d_find_alias - grab a hashed alias of inode * @inode: inode in question - * @want_discon: flag, used by d_splice_alias, to request - * that only a DISCONNECTED alias be returned. * * If inode has a hashed alias, or is a directory and has any alias, * acquire the reference to alias and return it. Otherwise return NULL. @@ -693,10 +691,9 @@ EXPORT_SYMBOL(dget_parent); * of a filesystem. * * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer - * any other hashed alias over that one unless @want_discon is set, - * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. + * any other hashed alias over that. */ -static struct dentry *__d_find_alias(struct inode *inode, int want_discon) +static struct dentry *__d_find_alias(struct inode *inode) { struct dentry *alias, *discon_alias; @@ -708,7 +705,7 @@ again: if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; - } else if (!want_discon) { + } else { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; @@ -739,7 +736,7 @@ struct dentry *d_find_alias(struct inode *inode) if (!list_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); - de = __d_find_alias(inode, 0); + de = __d_find_alias(inode); spin_unlock(&inode->i_lock); } return de; @@ -1650,9 +1647,8 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) if (inode && S_ISDIR(inode->i_mode)) { spin_lock(&inode->i_lock); - new = __d_find_alias(inode, 1); + new = __d_find_any_alias(inode); if (new) { - BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&inode->i_lock); security_d_instantiate(new, inode); d_move(new, dentry); @@ -2482,7 +2478,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) struct dentry *alias; /* Does an aliased dentry already exist? 
*/ - alias = __d_find_alias(inode, 0); + alias = __d_find_alias(inode); if (alias) { actual = alias; write_seqlock(&rename_lock); @@ -2575,7 +2571,7 @@ static int prepend_path(const struct path *path, bool slash = false; int error = 0; - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); while (dentry != root->dentry || vfsmnt != root->mnt) { struct dentry * parent; @@ -2606,7 +2602,7 @@ static int prepend_path(const struct path *path, error = prepend(buffer, buflen, "/", 1); out: - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return error; global_root: diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index ab35b113003b..a07441a0a878 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -660,11 +660,10 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); char *lower_buf; - size_t lower_bufsiz = PATH_MAX; mm_segment_t old_fs; int rc; - lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); + lower_buf = kmalloc(PATH_MAX, GFP_KERNEL); if (!lower_buf) { rc = -ENOMEM; goto out; @@ -673,58 +672,29 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf, set_fs(get_ds()); rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, (char __user *)lower_buf, - lower_bufsiz); + PATH_MAX); set_fs(old_fs); if (rc < 0) goto out; - lower_bufsiz = rc; rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, - lower_buf, lower_bufsiz); + lower_buf, rc); out: kfree(lower_buf); return rc; } -static int -ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) +static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - char *kbuf; - size_t kbufsiz, copied; + char *buf; + size_t len = PATH_MAX; int rc; - rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); + rc = ecryptfs_readlink_lower(dentry, &buf, &len); if (rc) goto out; - copied = min_t(size_t, bufsiz, kbufsiz); - rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied; - kfree(kbuf); fsstack_copy_attr_atime(dentry->d_inode, ecryptfs_dentry_to_lower(dentry)->d_inode); -out: - return rc; -} - -static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) -{ - char *buf; - int len = PAGE_SIZE, rc; - mm_segment_t old_fs; - - /* Released in ecryptfs_put_link(); only release here on error */ - buf = kmalloc(len, GFP_KERNEL); - if (!buf) { - buf = ERR_PTR(-ENOMEM); - goto out; - } - old_fs = get_fs(); - set_fs(get_ds()); - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); - set_fs(old_fs); - if (rc < 0) { - kfree(buf); - buf = ERR_PTR(rc); - } else - buf[rc] = '\0'; + buf[len] = '\0'; out: nd_set_link(nd, buf); return NULL; @@ -1153,7 +1123,7 @@ out: } const struct inode_operations ecryptfs_symlink_iops = { - .readlink = ecryptfs_readlink, + .readlink = generic_readlink, .follow_link = ecryptfs_follow_link, .put_link = ecryptfs_put_link, .permission = ecryptfs_permission, diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 079d1be65ba9..74598f67efeb 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1853,8 +1853,6 @@ error_return: return error; } -#ifdef HAVE_SET_RESTORE_SIGMASK - /* * Implement the event wait interface for the eventpoll file. It is the kernel * part of the user space epoll_pwait(2). 
@@ -1899,8 +1897,6 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, return error; } -#endif /* HAVE_SET_RESTORE_SIGMASK */ - static int __init eventpoll_init(void) { struct sysinfo si; diff --git a/fs/exec.c b/fs/exec.c index 52c9e2ff6e6b..a79786a8d2c8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -280,10 +280,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); - err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); - if (err) - goto err; - err = insert_vm_struct(mm, vma); if (err) goto err; diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index b05acb796135..b0201ca6e9c6 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -304,24 +304,23 @@ out: /** * export_encode_fh - default export_operations->encode_fh function - * @dentry: the dentry to encode + * @inode: the object to encode * @fh: where to store the file handle fragment * @max_len: maximum length to store there - * @connectable: whether to store parent information + * @parent: parent directory inode, if wanted * * This default encode_fh function assumes that the 32 inode number * is suitable for locating an inode, and that the generation number * can be used to check that it is still valid. It places them in the * filehandle fragment where export_decode_fh expects to find them. */ -static int export_encode_fh(struct dentry *dentry, struct fid *fid, - int *max_len, int connectable) +static int export_encode_fh(struct inode *inode, struct fid *fid, + int *max_len, struct inode *parent) { - struct inode * inode = dentry->d_inode; int len = *max_len; int type = FILEID_INO32_GEN; - if (connectable && (len < 4)) { + if (parent && (len < 4)) { *max_len = 4; return 255; } else if (len < 2) { @@ -332,14 +331,9 @@ static int export_encode_fh(struct dentry *dentry, struct fid *fid, len = 2; fid->i32.ino = inode->i_ino; fid->i32.gen = inode->i_generation; - if (connectable && !S_ISDIR(inode->i_mode)) { - struct inode *parent; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent->d_inode; + if (parent) { fid->i32.parent_ino = parent->i_ino; fid->i32.parent_gen = parent->i_generation; - spin_unlock(&dentry->d_lock); len = 4; type = FILEID_INO32_GEN_PARENT; } @@ -352,11 +346,22 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, { const struct export_operations *nop = dentry->d_sb->s_export_op; int error; + struct dentry *p = NULL; + struct inode *inode = dentry->d_inode, *parent = NULL; + if (connectable && !S_ISDIR(inode->i_mode)) { + p = dget_parent(dentry); + /* + * note that while p might've ceased to be our parent already, + * it's still pinned by and still positive. + */ + parent = p->d_inode; + } if (nop->encode_fh) - error = nop->encode_fh(dentry, fid->raw, max_len, connectable); + error = nop->encode_fh(inode, fid->raw, max_len, parent); else - error = export_encode_fh(dentry, fid, max_len, connectable); + error = export_encode_fh(inode, fid, max_len, parent); + dput(p); return error; } diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 9ed1bb1f319f..c22f17021b6e 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -2,6 +2,8 @@ config EXT4_FS tristate "The Extended 4 (ext4) filesystem" select JBD2 select CRC16 + select CRYPTO + select CRYPTO_CRC32C help This is the next generation of the ext3 filesystem. 
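The new CRYPTO and CRYPTO_CRC32C selects are needed because the metadata checksumming work later in this series computes crc32c through the kernel crypto shash API instead of open-coding a CRC. The following is only an illustrative sketch of that calling pattern, not part of the patch: it assumes a tfm obtained from crypto_alloc_shash("crc32c", 0, 0), and the my_crc32c name is invented for the example (the in-tree helper this mirrors is ext4_chksum(), added further down in ext4.h).

	#include <linux/types.h>
	#include <crypto/hash.h>

	/* tfm is assumed to come from crypto_alloc_shash("crc32c", 0, 0) */
	static u32 my_crc32c(struct crypto_shash *tfm, u32 seed,
			     const void *buf, unsigned int len)
	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(tfm)];
		} desc;

		desc.shash.tfm = tfm;
		desc.shash.flags = 0;
		/* the crc32c shash keeps its running crc at the start of the
		 * descriptor ctx; seeding it here is the same trick
		 * ext4_chksum() relies on */
		*(u32 *)desc.ctx = seed;
		if (crypto_shash_update(&desc.shash, (const u8 *)buf, len))
			return seed;	/* update does not fail for crc32c */
		return *(u32 *)desc.ctx;
	}

Seeding the descriptor with the previous crc is what lets one checksum be chained over the filesystem UUID, the inode number and the buffer contents, which is how the bitmap, extent and superblock checksum helpers below are built up.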
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index c45c41129a35..99b6324290db 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -168,12 +168,14 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, /* If checksum is bad mark all blocks used to prevent allocation * essentially implementing a per-group read-only flag. */ - if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { + if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); ext4_free_group_clusters_set(sb, gdp, 0); ext4_free_inodes_set(sb, gdp, 0); ext4_itable_unused_set(sb, gdp, 0); memset(bh->b_data, 0xff, sb->s_blocksize); + ext4_block_bitmap_csum_set(sb, block_group, gdp, bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); return; } memset(bh->b_data, 0, sb->s_blocksize); @@ -210,6 +212,9 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh, */ ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), sb->s_blocksize * 8, bh->b_data); + ext4_block_bitmap_csum_set(sb, block_group, gdp, bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, gdp); } /* Return the number of free blocks in a block group. It is used when @@ -276,9 +281,9 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, } static int ext4_valid_block_bitmap(struct super_block *sb, - struct ext4_group_desc *desc, - unsigned int block_group, - struct buffer_head *bh) + struct ext4_group_desc *desc, + unsigned int block_group, + struct buffer_head *bh) { ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; @@ -325,6 +330,23 @@ err_out: block_group, bitmap_blk); return 0; } + +void ext4_validate_block_bitmap(struct super_block *sb, + struct ext4_group_desc *desc, + unsigned int block_group, + struct buffer_head *bh) +{ + if (buffer_verified(bh)) + return; + + ext4_lock_group(sb, block_group); + if (ext4_valid_block_bitmap(sb, desc, block_group, bh) && + ext4_block_bitmap_csum_verify(sb, block_group, desc, bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8)) + set_buffer_verified(bh); + ext4_unlock_group(sb, block_group); +} + /** * ext4_read_block_bitmap() * @sb: super block @@ -355,12 +377,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) } if (bitmap_uptodate(bh)) - return bh; + goto verify; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); - return bh; + goto verify; } ext4_lock_group(sb, block_group); if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { @@ -379,7 +401,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) */ set_bitmap_uptodate(bh); unlock_buffer(bh); - return bh; + goto verify; } /* * submit the buffer_head for reading @@ -390,6 +412,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) get_bh(bh); submit_bh(READ, bh); return bh; +verify: + ext4_validate_block_bitmap(sb, desc, block_group, bh); + return bh; } /* Returns 0 on success, 1 on error */ @@ -412,7 +437,7 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, } clear_buffer_new(bh); /* Panic or remount fs read-only if block bitmap is invalid */ - ext4_valid_block_bitmap(sb, desc, block_group, bh); + ext4_validate_block_bitmap(sb, desc, block_group, bh); return 0; } diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c index fa3af81ac565..b319721da26a 100644 --- a/fs/ext4/bitmap.c +++ b/fs/ext4/bitmap.c @@ -29,3 +29,86 @@ unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars) #endif /* 
EXT4FS_DEBUG */ +int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz) +{ + __u32 hi; + __u32 provided, calculated; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo); + calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); + if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) { + hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi); + provided |= (hi << 16); + } else + calculated &= 0xFFFF; + + return provided == calculated; +} + +void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz) +{ + __u32 csum; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); + gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF); + if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) + gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16); +} + +int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz) +{ + __u32 hi; + __u32 provided, calculated; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo); + calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); + if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) { + hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi); + provided |= (hi << 16); + } else + calculated &= 0xFFFF; + + if (provided == calculated) + return 1; + + ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group); + return 0; +} + +void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz) +{ + __u32 csum; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); + gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF); + if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) + gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16); +} diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index b86786202643..aa39e600d159 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -179,6 +179,18 @@ static int ext4_readdir(struct file *filp, continue; } + /* Check the checksum */ + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(inode, + (struct ext4_dir_entry *)bh->b_data)) { + EXT4_ERROR_FILE(filp, 0, "directory fails checksum " + "at offset %llu", + (unsigned long long)filp->f_pos); + filp->f_pos += sb->s_blocksize - offset; + continue; + } + set_buffer_verified(bh); + revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c21b1de51afb..cfc4e01b3c83 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -29,6 +29,7 @@ #include <linux/wait.h> #include <linux/blockgroup_lock.h> #include <linux/percpu_counter.h> +#include <crypto/hash.h> #ifdef __KERNEL__ #include <linux/compat.h> #endif @@ -298,7 +299,9 @@ struct ext4_group_desc 
__le16 bg_free_inodes_count_lo;/* Free inodes count */ __le16 bg_used_dirs_count_lo; /* Directories count */ __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */ - __u32 bg_reserved[2]; /* Likely block/inode bitmap checksum */ + __le32 bg_exclude_bitmap_lo; /* Exclude bitmap for snapshots */ + __le16 bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */ + __le16 bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */ __le16 bg_itable_unused_lo; /* Unused inodes count */ __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */ __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */ @@ -308,9 +311,19 @@ struct ext4_group_desc __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */ __le16 bg_used_dirs_count_hi; /* Directories count MSB */ __le16 bg_itable_unused_hi; /* Unused inodes count MSB */ - __u32 bg_reserved2[3]; + __le32 bg_exclude_bitmap_hi; /* Exclude bitmap block MSB */ + __le16 bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */ + __le16 bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */ + __u32 bg_reserved; }; +#define EXT4_BG_INODE_BITMAP_CSUM_HI_END \ + (offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \ + sizeof(__le16)) +#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END \ + (offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \ + sizeof(__le16)) + /* * Structure of a flex block group info */ @@ -650,7 +663,8 @@ struct ext4_inode { __le16 l_i_file_acl_high; __le16 l_i_uid_high; /* these 2 fields */ __le16 l_i_gid_high; /* were reserved2[0] */ - __u32 l_i_reserved2; + __le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */ + __le16 l_i_reserved; } linux2; struct { __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ @@ -666,7 +680,7 @@ struct ext4_inode { } masix2; } osd2; /* OS dependent 2 */ __le16 i_extra_isize; - __le16 i_pad1; + __le16 i_checksum_hi; /* crc32c(uuid+inum+inode) BE */ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ @@ -768,7 +782,7 @@ do { \ #define i_gid_low i_gid #define i_uid_high osd2.linux2.l_i_uid_high #define i_gid_high osd2.linux2.l_i_gid_high -#define i_reserved2 osd2.linux2.l_i_reserved2 +#define i_checksum_lo osd2.linux2.l_i_checksum_lo #elif defined(__GNU__) @@ -908,6 +922,9 @@ struct ext4_inode_info { */ tid_t i_sync_tid; tid_t i_datasync_tid; + + /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ + __u32 i_csum_seed; }; /* @@ -1001,6 +1018,9 @@ extern void ext4_set_bits(void *bm, int cur, int len); #define EXT4_ERRORS_PANIC 3 /* Panic */ #define EXT4_ERRORS_DEFAULT EXT4_ERRORS_CONTINUE +/* Metadata checksum algorithm codes */ +#define EXT4_CRC32C_CHKSUM 1 + /* * Structure of the super block */ @@ -1087,7 +1107,7 @@ struct ext4_super_block { __le64 s_mmp_block; /* Block for multi-mount protection */ __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ __u8 s_log_groups_per_flex; /* FLEX_BG group size */ - __u8 s_reserved_char_pad; + __u8 s_checksum_type; /* metadata checksum algorithm used */ __le16 s_reserved_pad; __le64 s_kbytes_written; /* nr of lifetime kilobytes written */ __le32 s_snapshot_inum; /* Inode number of active snapshot */ @@ -1113,7 +1133,8 @@ struct ext4_super_block { __le32 s_usr_quota_inum; /* inode for tracking user quota */ __le32 s_grp_quota_inum; /* inode for tracking group quota */ __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ - 
__le32 s_reserved[109]; /* Padding to the end of the block */ + __le32 s_reserved[108]; /* Padding to the end of the block */ + __le32 s_checksum; /* crc32c(superblock) */ }; #define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START) @@ -1176,6 +1197,7 @@ struct ext4_sb_info { struct proc_dir_entry *s_proc; struct kobject s_kobj; struct completion s_kobj_unregister; + struct super_block *s_sb; /* Journaling */ struct journal_s *s_journal; @@ -1266,6 +1288,12 @@ struct ext4_sb_info { /* record the last minlen when FITRIM is called. */ atomic_t s_last_trim_minblks; + + /* Reference to checksum algorithm driver via cryptoapi */ + struct crypto_shash *s_chksum_driver; + + /* Precomputed FS UUID checksum for seeding other checksums */ + __u32 s_csum_seed; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -1414,6 +1442,12 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 #define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100 #define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200 +/* + * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM). When + * METADATA_CSUM is set, group descriptor checksums use the same algorithm as + * all other data structures' checksums. However, the METADATA_CSUM and + * GDT_CSUM bits are mutually exclusive. + */ #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 @@ -1461,7 +1495,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\ EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\ - EXT4_FEATURE_RO_COMPAT_BIGALLOC) + EXT4_FEATURE_RO_COMPAT_BIGALLOC |\ + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) /* * Default values for user and/or group using reserved blocks @@ -1527,6 +1562,18 @@ struct ext4_dir_entry_2 { }; /* + * This is a bogus directory entry at the end of each leaf block that + * records checksums. + */ +struct ext4_dir_entry_tail { + __le32 det_reserved_zero1; /* Pretend to be unused */ + __le16 det_rec_len; /* 12 */ + __u8 det_reserved_zero2; /* Zero name length */ + __u8 det_reserved_ft; /* 0xDE, fake file type */ + __le32 det_checksum; /* crc32c(uuid+inum+dirblock) */ +}; + +/* * Ext4 directory file types. Only the low 3 bits are used. The * other bits are reserved for now. 
*/ @@ -1541,6 +1588,8 @@ struct ext4_dir_entry_2 { #define EXT4_FT_MAX 8 +#define EXT4_FT_DIR_CSUM 0xDE + /* * EXT4_DIR_PAD defines the directory entries boundaries * @@ -1609,6 +1658,25 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) #define DX_HASH_HALF_MD4_UNSIGNED 4 #define DX_HASH_TEA_UNSIGNED 5 +static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, + const void *address, unsigned int length) +{ + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(sbi->s_chksum_driver)]; + } desc; + int err; + + desc.shash.tfm = sbi->s_chksum_driver; + desc.shash.flags = 0; + *(u32 *)desc.ctx = crc; + + err = crypto_shash_update(&desc.shash, address, length); + BUG_ON(err); + + return *(u32 *)desc.ctx; +} + #ifdef __KERNEL__ /* hash info structure used by the directory hash */ @@ -1741,7 +1809,8 @@ struct mmp_struct { __le16 mmp_check_interval; __le16 mmp_pad1; - __le32 mmp_pad2[227]; + __le32 mmp_pad2[226]; + __le32 mmp_checksum; /* crc32c(uuid+mmp_block) */ }; /* arguments passed to the mmp thread */ @@ -1784,8 +1853,24 @@ struct mmpd_data { /* bitmap.c */ extern unsigned int ext4_count_free(struct buffer_head *, unsigned); +void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz); +int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz); +void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz); +int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + struct ext4_group_desc *gdp, + struct buffer_head *bh, int sz); /* balloc.c */ +extern void ext4_validate_block_bitmap(struct super_block *sb, + struct ext4_group_desc *desc, + unsigned int block_group, + struct buffer_head *bh); extern unsigned int ext4_block_group(struct super_block *sb, ext4_fsblk_t blocknr); extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb, @@ -1864,7 +1949,7 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate); /* mballoc.c */ extern long ext4_mb_stats; extern long ext4_mb_max_to_scan; -extern int ext4_mb_init(struct super_block *, int); +extern int ext4_mb_init(struct super_block *); extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); @@ -1936,6 +2021,8 @@ extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); extern int ext4_ext_migrate(struct inode *); /* namei.c */ +extern int ext4_dirent_csum_verify(struct inode *inode, + struct ext4_dir_entry *dirent); extern int ext4_orphan_add(handle_t *, struct inode *); extern int ext4_orphan_del(handle_t *, struct inode *); extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, @@ -1950,6 +2037,10 @@ extern int ext4_group_extend(struct super_block *sb, extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count); /* super.c */ +extern int ext4_superblock_csum_verify(struct super_block *sb, + struct ext4_super_block *es); +extern void ext4_superblock_csum_set(struct super_block *sb, + struct ext4_super_block *es); extern void *ext4_kvmalloc(size_t size, gfp_t flags); extern void *ext4_kvzalloc(size_t size, gfp_t flags); extern void ext4_kvfree(void *ptr); @@ -2025,10 +2116,17 @@ extern void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 
count); extern void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count); -extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group, - struct ext4_group_desc *gdp); -extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group, +extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group, struct ext4_group_desc *gdp); +extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group, + struct ext4_group_desc *gdp); + +static inline int ext4_has_group_desc_csum(struct super_block *sb) +{ + return EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_GDT_CSUM | + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM); +} static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) { @@ -2225,6 +2323,9 @@ static inline void ext4_unlock_group(struct super_block *sb, static inline void ext4_mark_super_dirty(struct super_block *sb) { + struct ext4_super_block *es = EXT4_SB(sb)->s_es; + + ext4_superblock_csum_set(sb, es); if (EXT4_SB(sb)->s_journal == NULL) sb->s_dirt =1; } @@ -2314,6 +2415,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io, /* mmp.c */ extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t); +extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp); +extern int ext4_mmp_csum_verify(struct super_block *sb, + struct mmp_struct *mmp); /* BH_Uninit flag: blocks are allocated but uninitialized on disk */ enum ext4_state_bits { diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 0f58b86e3a02..cb1b2c919963 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -63,9 +63,22 @@ * ext4_inode has i_block array (60 bytes total). * The first 12 bytes store ext4_extent_header; * the remainder stores an array of ext4_extent. + * For non-inode extent blocks, ext4_extent_tail + * follows the array. */ /* + * This is the extent tail on-disk structure. + * All other extent structures are 12 bytes long. It turns out that + * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which + * covers all valid ext4 block sizes. Therefore, this tail structure can be + * crammed into the end of the block without having to rebalance the tree. + */ +struct ext4_extent_tail { + __le32 et_checksum; /* crc32c(uuid+inum+extent_block) */ +}; + +/* * This is the extent on-disk structure. * It's used at the bottom of the tree. */ @@ -101,6 +114,17 @@ struct ext4_extent_header { #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) +#define EXT4_EXTENT_TAIL_OFFSET(hdr) \ + (sizeof(struct ext4_extent_header) + \ + (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max))) + +static inline struct ext4_extent_tail * +find_ext4_extent_tail(struct ext4_extent_header *eh) +{ + return (struct ext4_extent_tail *)(((void *)eh) + + EXT4_EXTENT_TAIL_OFFSET(eh)); +} + /* * Array of ext4_ext_path contains path to some extent. * Creation/lookup routines use it for traversal/splitting/etc. 
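The "block_size % 12 >= 4" claim in the ext4_extent_tail comment above is easy to verify: every power-of-two block size ext4 supports (1k through 64k) leaves at least four spare bytes after an integral number of 12-byte extent slots, exactly the room the 4-byte tail checksum needs. A throwaway userspace check (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bs;

		/* prints 4, 8, 4, 8, 4, 8, 4 -- always >= 4 */
		for (bs = 1024; bs <= 65536; bs <<= 1)
			printf("%5u %% 12 = %u\n", bs, bs % 12);
		return 0;
	}

Because the 12-byte extent header plus eh_max 12-byte entries is itself a multiple of 12, EXT4_EXTENT_TAIL_OFFSET() always ends up block_size % 12 bytes short of the end of the block, so the 4-byte tail fits without rebalancing the tree.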
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index aca179017582..90f7c2e84db1 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -138,16 +138,23 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, } int __ext4_handle_dirty_super(const char *where, unsigned int line, - handle_t *handle, struct super_block *sb) + handle_t *handle, struct super_block *sb, + int now) { struct buffer_head *bh = EXT4_SB(sb)->s_sbh; int err = 0; if (ext4_handle_valid(handle)) { + ext4_superblock_csum_set(sb, + (struct ext4_super_block *)bh->b_data); err = jbd2_journal_dirty_metadata(handle, bh); if (err) ext4_journal_abort_handle(where, line, __func__, bh, handle, err); + } else if (now) { + ext4_superblock_csum_set(sb, + (struct ext4_super_block *)bh->b_data); + mark_buffer_dirty(bh); } else sb->s_dirt = 1; return err; diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 83b20fcf9400..f440e8f1841f 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -213,7 +213,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, struct buffer_head *bh); int __ext4_handle_dirty_super(const char *where, unsigned int line, - handle_t *handle, struct super_block *sb); + handle_t *handle, struct super_block *sb, + int now); #define ext4_journal_get_write_access(handle, bh) \ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) @@ -225,8 +226,10 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line, #define ext4_handle_dirty_metadata(handle, inode, bh) \ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) +#define ext4_handle_dirty_super_now(handle, sb) \ + __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1) #define ext4_handle_dirty_super(handle, sb) \ - __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb)) + __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0) handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index abcdeab67f52..91341ec6e06a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -52,6 +52,46 @@ #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ +static __le32 ext4_extent_block_csum(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u32 csum; + + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, + EXT4_EXTENT_TAIL_OFFSET(eh)); + return cpu_to_le32(csum); +} + +static int ext4_extent_block_csum_verify(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_extent_tail *et; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + et = find_ext4_extent_tail(eh); + if (et->et_checksum != ext4_extent_block_csum(inode, eh)) + return 0; + return 1; +} + +static void ext4_extent_block_csum_set(struct inode *inode, + struct ext4_extent_header *eh) +{ + struct ext4_extent_tail *et; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + et = find_ext4_extent_tail(eh); + et->et_checksum = ext4_extent_block_csum(inode, eh); +} + static int ext4_split_extent(handle_t *handle, struct inode *inode, struct ext4_ext_path *path, @@ -117,6 +157,7 @@ static int __ext4_ext_dirty(const char *where, unsigned int 
line, { int err; if (path->p_bh) { + ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); /* path points to block */ err = __ext4_handle_dirty_metadata(where, line, handle, inode, path->p_bh); @@ -391,6 +432,12 @@ static int __ext4_ext_check(const char *function, unsigned int line, error_msg = "invalid extent entries"; goto corrupted; } + /* Verify checksum on non-root extent tree nodes */ + if (ext_depth(inode) != depth && + !ext4_extent_block_csum_verify(inode, eh)) { + error_msg = "extent tree corrupted"; + goto corrupted; + } return 0; corrupted: @@ -412,6 +459,26 @@ int ext4_ext_check_inode(struct inode *inode) return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); } +static int __ext4_ext_check_block(const char *function, unsigned int line, + struct inode *inode, + struct ext4_extent_header *eh, + int depth, + struct buffer_head *bh) +{ + int ret; + + if (buffer_verified(bh)) + return 0; + ret = ext4_ext_check(inode, eh, depth); + if (ret) + return ret; + set_buffer_verified(bh); + return ret; +} + +#define ext4_ext_check_block(inode, eh, depth, bh) \ + __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh) + #ifdef EXT_DEBUG static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) { @@ -536,7 +603,7 @@ ext4_ext_binsearch_idx(struct inode *inode, } path->p_idx = l - 1; - ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block), + ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), ext4_idx_pblock(path->p_idx)); #ifdef CHECK_BINSEARCH @@ -668,8 +735,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, i = depth; /* walk through the tree */ while (i) { - int need_to_validate = 0; - ext_debug("depth %d: num %d, max %d\n", ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); @@ -688,8 +753,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, put_bh(bh); goto err; } - /* validate the extent entries */ - need_to_validate = 1; } eh = ext_block_hdr(bh); ppos++; @@ -703,7 +766,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, path[ppos].p_hdr = eh; i--; - if (need_to_validate && ext4_ext_check(inode, eh, i)) + if (ext4_ext_check_block(inode, eh, i, bh)) goto err; } @@ -914,6 +977,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, le16_add_cpu(&neh->eh_entries, m); } + ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); @@ -992,6 +1056,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, sizeof(struct ext4_extent_idx) * m); le16_add_cpu(&neh->eh_entries, m); } + ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); @@ -1089,6 +1154,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, else neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; + ext4_extent_block_csum_set(inode, neh); set_buffer_uptodate(bh); unlock_buffer(bh); @@ -1344,7 +1410,8 @@ got_index: return -EIO; eh = ext_block_hdr(bh); /* subtract from p_depth to get proper eh_depth */ - if (ext4_ext_check(inode, eh, path->p_depth - depth)) { + if (ext4_ext_check_block(inode, eh, + path->p_depth - depth, bh)) { put_bh(bh); return -EIO; } @@ -1357,7 +1424,7 @@ got_index: if (bh == NULL) return -EIO; eh = ext_block_hdr(bh); - if (ext4_ext_check(inode, eh, path->p_depth - depth)) { + if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { put_bh(bh); return -EIO; } @@ -2644,8 +2711,8 @@ cont: err = -EIO; break; } - if (ext4_ext_check(inode, 
ext_block_hdr(bh), - depth - i - 1)) { + if (ext4_ext_check_block(inode, ext_block_hdr(bh), + depth - i - 1, bh)) { err = -EIO; break; } @@ -4722,8 +4789,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) /* Now release the pages */ if (last_page_offset > first_page_offset) { - truncate_inode_pages_range(mapping, first_page_offset, - last_page_offset-1); + truncate_pagecache_range(inode, first_page_offset, + last_page_offset - 1); } /* finish any pending end_io work */ diff --git a/fs/ext4/file.c b/fs/ext4/file.c index cb70f1812a70..8c7642a00054 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -95,7 +95,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; int unaligned_aio = 0; - int ret; + ssize_t ret; /* * If we have encountered a bitmap-format file, the size limit diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 9f9acac6c43f..d48e8b14928c 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -70,24 +70,27 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb, ext4_group_t block_group, struct ext4_group_desc *gdp) { - struct ext4_sb_info *sbi = EXT4_SB(sb); - J_ASSERT_BH(bh, buffer_locked(bh)); /* If checksum is bad mark all blocks and inodes use to prevent * allocation, essentially implementing a per-group read-only flag. */ - if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { + if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { ext4_error(sb, "Checksum bad for group %u", block_group); ext4_free_group_clusters_set(sb, gdp, 0); ext4_free_inodes_set(sb, gdp, 0); ext4_itable_unused_set(sb, gdp, 0); memset(bh->b_data, 0xff, sb->s_blocksize); + ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh, + EXT4_INODES_PER_GROUP(sb) / 8); return 0; } memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); + ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh, + EXT4_INODES_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, gdp); return EXT4_INODES_PER_GROUP(sb); } @@ -128,12 +131,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) return NULL; } if (bitmap_uptodate(bh)) - return bh; + goto verify; lock_buffer(bh); if (bitmap_uptodate(bh)) { unlock_buffer(bh); - return bh; + goto verify; } ext4_lock_group(sb, block_group); @@ -141,6 +144,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) ext4_init_inode_bitmap(sb, bh, block_group, desc); set_bitmap_uptodate(bh); set_buffer_uptodate(bh); + set_buffer_verified(bh); ext4_unlock_group(sb, block_group); unlock_buffer(bh); return bh; @@ -154,7 +158,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) */ set_bitmap_uptodate(bh); unlock_buffer(bh); - return bh; + goto verify; } /* * submit the buffer_head for reading @@ -171,6 +175,20 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) block_group, bitmap_blk); return NULL; } + +verify: + ext4_lock_group(sb, block_group); + if (!buffer_verified(bh) && + !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, + EXT4_INODES_PER_GROUP(sb) / 8)) { + ext4_unlock_group(sb, block_group); + put_bh(bh); + ext4_error(sb, "Corrupt inode bitmap - block_group = %u, " + "inode_bitmap = %llu", block_group, bitmap_blk); + return NULL; + } + ext4_unlock_group(sb, block_group); + set_buffer_verified(bh); return bh; } @@ -276,7 +294,9 @@ void ext4_free_inode(handle_t *handle, struct inode 
*inode) ext4_used_dirs_set(sb, gdp, count); percpu_counter_dec(&sbi->s_dirs_counter); } - gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); + ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, + EXT4_INODES_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_inc(&sbi->s_freeinodes_counter); @@ -488,10 +508,12 @@ fallback_retry: for (i = 0; i < ngroups; i++) { grp = (parent_group + i) % ngroups; desc = ext4_get_group_desc(sb, grp, NULL); - grp_free = ext4_free_inodes_count(sb, desc); - if (desc && grp_free && grp_free >= avefreei) { - *group = grp; - return 0; + if (desc) { + grp_free = ext4_free_inodes_count(sb, desc); + if (grp_free && grp_free >= avefreei) { + *group = grp; + return 0; + } } } @@ -709,7 +731,7 @@ repeat_in_this_group: got: /* We may have to initialize the block bitmap if it isn't already */ - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && + if (ext4_has_group_desc_csum(sb) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { struct buffer_head *block_bitmap_bh; @@ -731,8 +753,11 @@ got: gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); - gdp->bg_checksum = ext4_group_desc_csum(sbi, group, - gdp); + ext4_block_bitmap_csum_set(sb, group, gdp, + block_bitmap_bh, + EXT4_BLOCKS_PER_GROUP(sb) / + 8); + ext4_group_desc_csum_set(sb, group, gdp); } ext4_unlock_group(sb, group); @@ -751,7 +776,7 @@ got: goto fail; /* Update the relevant bg descriptor fields */ - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { + if (ext4_has_group_desc_csum(sb)) { int free; struct ext4_group_info *grp = ext4_get_group_info(sb, group); @@ -772,7 +797,10 @@ got: ext4_itable_unused_set(sb, gdp, (EXT4_INODES_PER_GROUP(sb) - ino)); up_read(&grp->alloc_sem); + } else { + ext4_lock_group(sb, group); } + ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); if (S_ISDIR(mode)) { ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); @@ -782,10 +810,12 @@ got: atomic_inc(&sbi->s_flex_groups[f].used_dirs); } } - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { - gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); - ext4_unlock_group(sb, group); + if (ext4_has_group_desc_csum(sb)) { + ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh, + EXT4_INODES_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, group, gdp); } + ext4_unlock_group(sb, group); BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); @@ -850,6 +880,19 @@ got: inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); + /* Precompute checksum seed for inode metadata */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + __u32 csum; + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __le32 inum = cpu_to_le32(inode->i_ino); + __le32 gen = cpu_to_le32(inode->i_generation); + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, + sizeof(inum)); + ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, + sizeof(gen)); + } + ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW); @@ -1140,7 +1183,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, skip_zeroout: ext4_lock_group(sb, group); gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); - gdp->bg_checksum 
= ext4_group_desc_csum(sbi, group, gdp); + ext4_group_desc_csum_set(sb, group, gdp); ext4_unlock_group(sb, group); BUFFER_TRACE(group_desc_bh, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 07eaf565fdcb..02bc8cbe7281 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -47,6 +47,73 @@ #define MPAGE_DA_EXTENT_TAIL 0x01 +static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei) +{ + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u16 csum_lo; + __u16 csum_hi = 0; + __u32 csum; + + csum_lo = raw->i_checksum_lo; + raw->i_checksum_lo = 0; + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && + EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { + csum_hi = raw->i_checksum_hi; + raw->i_checksum_hi = 0; + } + + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, + EXT4_INODE_SIZE(inode->i_sb)); + + raw->i_checksum_lo = csum_lo; + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && + EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) + raw->i_checksum_hi = csum_hi; + + return csum; +} + +static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei) +{ + __u32 provided, calculated; + + if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != + cpu_to_le32(EXT4_OS_LINUX) || + !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + provided = le16_to_cpu(raw->i_checksum_lo); + calculated = ext4_inode_csum(inode, raw, ei); + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && + EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) + provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16; + else + calculated &= 0xFFFF; + + return provided == calculated; +} + +static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, + struct ext4_inode_info *ei) +{ + __u32 csum; + + if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != + cpu_to_le32(EXT4_OS_LINUX) || + !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + csum = ext4_inode_csum(inode, raw, ei); + raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF); + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && + EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) + raw->i_checksum_hi = cpu_to_le16(csum >> 16); +} + static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { @@ -3517,8 +3584,7 @@ make_io: b = table; end = b + EXT4_SB(sb)->s_inode_readahead_blks; num = EXT4_INODES_PER_GROUP(sb); - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) + if (ext4_has_group_desc_csum(sb)) num -= ext4_itable_unused_count(sb, gdp); table += num / inodes_per_block; if (end > table) @@ -3646,6 +3712,39 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) if (ret < 0) goto bad_inode; raw_inode = ext4_raw_inode(&iloc); + + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { + ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > + EXT4_INODE_SIZE(inode->i_sb)) { + EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, + EXT4_INODE_SIZE(inode->i_sb)); + ret = -EIO; + goto bad_inode; + } + } else + ei->i_extra_isize = 0; + + /* Precompute checksum seed for inode metadata */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u32 csum; + __le32 inum = cpu_to_le32(inode->i_ino); + __le32 gen = raw_inode->i_generation; 
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, + sizeof(inum)); + ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, + sizeof(gen)); + } + + if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { + EXT4_ERROR_INODE(inode, "checksum invalid"); + ret = -EIO; + goto bad_inode; + } + inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); @@ -3725,12 +3824,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { - ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); - if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > - EXT4_INODE_SIZE(inode->i_sb)) { - ret = -EIO; - goto bad_inode; - } if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it. */ ei->i_extra_isize = sizeof(struct ext4_inode) - @@ -3742,8 +3835,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) ext4_set_inode_state(inode, EXT4_STATE_XATTR); } - } else - ei->i_extra_isize = 0; + } EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); @@ -3942,7 +4034,7 @@ static int ext4_do_update_inode(handle_t *handle, EXT4_SET_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_LARGE_FILE); ext4_handle_sync(handle); - err = ext4_handle_dirty_super(handle, sb); + err = ext4_handle_dirty_super_now(handle, sb); } } raw_inode->i_generation = cpu_to_le32(inode->i_generation); @@ -3969,6 +4061,8 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } + ext4_inode_csum_set(inode, raw_inode, ei); + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, NULL, bh); if (!err) @@ -4213,7 +4307,8 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, * will return the blocks that include the delayed allocation * blocks for this file. 
*/ - delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; + delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), + EXT4_I(inode)->i_reserved_data_blocks); stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; return 0; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 6eee25591b81..8ad112ae0ade 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) handle_t *handle = NULL; int err, migrate = 0; struct ext4_iloc iloc; - unsigned int oldflags; + unsigned int oldflags, mask, i; unsigned int jflag; if (!inode_owner_or_capable(inode)) @@ -115,8 +115,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (err) goto flags_err; - flags = flags & EXT4_FL_USER_MODIFIABLE; - flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE; + for (i = 0, mask = 1; i < 32; i++, mask <<= 1) { + if (!(mask & EXT4_FL_USER_MODIFIABLE)) + continue; + if (mask & flags) + ext4_set_inode_flag(inode, i); + else + ext4_clear_inode_flag(inode, i); + } ei->i_flags = flags; ext4_set_inode_flags(inode); @@ -152,6 +158,13 @@ flags_out: if (!inode_owner_or_capable(inode)) return -EPERM; + if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + ext4_warning(sb, "Setting inode version is not " + "supported with metadata_csum enabled."); + return -ENOTTY; + } + err = mnt_want_write_file(filp); if (err) return err; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 99ab428bcfa0..1cd6994fc446 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -788,7 +788,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) int first_block; struct super_block *sb; struct buffer_head *bhs; - struct buffer_head **bh; + struct buffer_head **bh = NULL; struct inode *inode; char *data; char *bitmap; @@ -2375,7 +2375,7 @@ static int ext4_groupinfo_create_slab(size_t size) return 0; } -int ext4_mb_init(struct super_block *sb, int needs_recovery) +int ext4_mb_init(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned i, j; @@ -2517,6 +2517,9 @@ int ext4_mb_release(struct super_block *sb) struct ext4_sb_info *sbi = EXT4_SB(sb); struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); + if (sbi->s_proc) + remove_proc_entry("mb_groups", sbi->s_proc); + if (sbi->s_group_info) { for (i = 0; i < ngroups; i++) { grinfo = ext4_get_group_info(sb, i); @@ -2564,8 +2567,6 @@ int ext4_mb_release(struct super_block *sb) } free_percpu(sbi->s_locality_groups); - if (sbi->s_proc) - remove_proc_entry("mb_groups", sbi->s_proc); return 0; } @@ -2797,7 +2798,9 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, } len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; ext4_free_group_clusters_set(sb, gdp, len); - gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); + ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); ext4_unlock_group(sb, ac->ac_b_ex.fe_group); percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); @@ -3071,13 +3074,9 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) { struct ext4_prealloc_space *pa = ac->ac_pa; - int len; - - if (pa && pa->pa_type == MB_INODE_PA) { - len = ac->ac_b_ex.fe_len; - pa->pa_free += len; - } + if (pa && pa->pa_type == MB_INODE_PA) + pa->pa_free += 
ac->ac_b_ex.fe_len; } /* @@ -4636,6 +4635,7 @@ do_more: */ new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); if (!new_entry) { + ext4_mb_unload_buddy(&e4b); err = -ENOMEM; goto error_return; } @@ -4659,7 +4659,9 @@ do_more: ret = ext4_free_group_clusters(sb, gdp) + count_clusters; ext4_free_group_clusters_set(sb, gdp, ret); - gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); + ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, gdp); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); @@ -4803,7 +4805,9 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, mb_free_blocks(NULL, &e4b, bit, count); blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); ext4_free_group_clusters_set(sb, desc, blk_free_count); - desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); + ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, desc); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, EXT4_B2C(sbi, blocks_freed)); diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index ed6548d89165..f99a1311e847 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -6,12 +6,45 @@ #include "ext4.h" +/* Checksumming functions */ +static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + int offset = offsetof(struct mmp_struct, mmp_checksum); + __u32 csum; + + csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset); + + return cpu_to_le32(csum); +} + +int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp); +} + +void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + mmp->mmp_checksum = ext4_mmp_csum(sb, mmp); +} + /* * Write the MMP block using WRITE_SYNC to try to get the block on-disk * faster. */ -static int write_mmp_block(struct buffer_head *bh) +static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) { + struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); + + ext4_mmp_csum_set(sb, mmp); mark_buffer_dirty(bh); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; @@ -59,7 +92,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, } mmp = (struct mmp_struct *)((*bh)->b_data); - if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) + if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC || + !ext4_mmp_csum_verify(sb, mmp)) return -EINVAL; return 0; @@ -120,7 +154,7 @@ static int kmmpd(void *data) mmp->mmp_time = cpu_to_le64(get_seconds()); last_update_time = jiffies; - retval = write_mmp_block(bh); + retval = write_mmp_block(sb, bh); /* * Don't spew too many error messages. Print one every * (s_mmp_update_interval * 60) seconds. 
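
The mmp.c hunks above show the pattern this whole series follows: derive a per-filesystem seed from the UUID once at mount time, then run crc32c over each metadata structure up to (or around) its stored checksum field. Below is a rough user-space sketch of that calculation for an MMP block; the struct layout, the magic value, and the bitwise crc32c helper are simplified stand-ins rather than the kernel's definitions, and the kernel itself feeds the seed through the crypto API's "crc32c" shash via ext4_chksum() instead.

/*
 * Sketch: computing an ext4-style metadata checksum for an MMP block.
 * The helper below mirrors a raw crc32c update (no pre/post inversion),
 * which is how an explicit seed is carried from call to call in the
 * hunks above.  struct mmp_example is a simplified stand-in for
 * struct mmp_struct, not its real layout.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return crc;
}

struct mmp_example {
        uint32_t mmp_magic;             /* "MMP" magic (illustrative value below) */
        uint32_t mmp_seq;
        uint64_t mmp_time;
        /* ...node name, bdev name, check interval elided... */
        uint32_t mmp_checksum;          /* covers every byte before this field */
};

int main(void)
{
        uint8_t uuid[16] = { 0 };       /* s_uuid from the superblock */
        struct mmp_example mmp = { .mmp_magic = 0x004D4D50, .mmp_seq = 1 };

        /* per-filesystem seed: crc32c of the UUID, starting from ~0 */
        uint32_t seed = crc32c_update(~0U, uuid, sizeof(uuid));

        /* checksum the block up to, but not including, the checksum field */
        mmp.mmp_checksum = crc32c_update(seed, &mmp,
                                offsetof(struct mmp_example, mmp_checksum));

        printf("mmp_checksum = 0x%08x\n", (unsigned int)mmp.mmp_checksum);
        return 0;
}

The same seed-then-update shape reappears in the group descriptor, inode, directory and xattr checksums later in this diff; only the seeded input and the covered byte range change.
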
@@ -200,7 +234,7 @@ static int kmmpd(void *data) mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN); mmp->mmp_time = cpu_to_le64(get_seconds()); - retval = write_mmp_block(bh); + retval = write_mmp_block(sb, bh); failed: kfree(data); @@ -299,7 +333,7 @@ skip: seq = mmp_new_seq(); mmp->mmp_seq = cpu_to_le32(seq); - retval = write_mmp_block(bh); + retval = write_mmp_block(sb, bh); if (retval) goto failed; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index e2a3f4b0ff78..5845cd97bf8b 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -145,6 +145,14 @@ struct dx_map_entry u16 size; }; +/* + * This goes at the end of each htree block. + */ +struct dx_tail { + u32 dt_reserved; + __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */ +}; + static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); static inline unsigned dx_get_hash(struct dx_entry *entry); @@ -180,6 +188,230 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, struct inode *inode); +/* checksumming functions */ +#define EXT4_DIRENT_TAIL(block, blocksize) \ + ((struct ext4_dir_entry_tail *)(((void *)(block)) + \ + ((blocksize) - \ + sizeof(struct ext4_dir_entry_tail)))) + +static void initialize_dirent_tail(struct ext4_dir_entry_tail *t, + unsigned int blocksize) +{ + memset(t, 0, sizeof(struct ext4_dir_entry_tail)); + t->det_rec_len = ext4_rec_len_to_disk( + sizeof(struct ext4_dir_entry_tail), blocksize); + t->det_reserved_ft = EXT4_FT_DIR_CSUM; +} + +/* Walk through a dirent block to find a checksum "dirent" at the tail */ +static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode, + struct ext4_dir_entry *de) +{ + struct ext4_dir_entry_tail *t; + +#ifdef PARANOID + struct ext4_dir_entry *d, *top; + + d = de; + top = (struct ext4_dir_entry *)(((void *)de) + + (EXT4_BLOCK_SIZE(inode->i_sb) - + sizeof(struct ext4_dir_entry_tail))); + while (d < top && d->rec_len) + d = (struct ext4_dir_entry *)(((void *)d) + + le16_to_cpu(d->rec_len)); + + if (d != top) + return NULL; + + t = (struct ext4_dir_entry_tail *)d; +#else + t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb)); +#endif + + if (t->det_reserved_zero1 || + le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) || + t->det_reserved_zero2 || + t->det_reserved_ft != EXT4_FT_DIR_CSUM) + return NULL; + + return t; +} + +static __le32 ext4_dirent_csum(struct inode *inode, + struct ext4_dir_entry *dirent, int size) +{ + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + __u32 csum; + + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); + return cpu_to_le32(csum); +} + +int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) +{ + struct ext4_dir_entry_tail *t; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + t = get_dirent_tail(inode, dirent); + if (!t) { + EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir " + "leaf for checksum. 
Please run e2fsck -D."); + return 0; + } + + if (t->det_checksum != ext4_dirent_csum(inode, dirent, + (void *)t - (void *)dirent)) + return 0; + + return 1; +} + +static void ext4_dirent_csum_set(struct inode *inode, + struct ext4_dir_entry *dirent) +{ + struct ext4_dir_entry_tail *t; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + t = get_dirent_tail(inode, dirent); + if (!t) { + EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir " + "leaf for checksum. Please run e2fsck -D."); + return; + } + + t->det_checksum = ext4_dirent_csum(inode, dirent, + (void *)t - (void *)dirent); +} + +static inline int ext4_handle_dirty_dirent_node(handle_t *handle, + struct inode *inode, + struct buffer_head *bh) +{ + ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); + return ext4_handle_dirty_metadata(handle, inode, bh); +} + +static struct dx_countlimit *get_dx_countlimit(struct inode *inode, + struct ext4_dir_entry *dirent, + int *offset) +{ + struct ext4_dir_entry *dp; + struct dx_root_info *root; + int count_offset; + + if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb)) + count_offset = 8; + else if (le16_to_cpu(dirent->rec_len) == 12) { + dp = (struct ext4_dir_entry *)(((void *)dirent) + 12); + if (le16_to_cpu(dp->rec_len) != + EXT4_BLOCK_SIZE(inode->i_sb) - 12) + return NULL; + root = (struct dx_root_info *)(((void *)dp + 12)); + if (root->reserved_zero || + root->info_length != sizeof(struct dx_root_info)) + return NULL; + count_offset = 32; + } else + return NULL; + + if (offset) + *offset = count_offset; + return (struct dx_countlimit *)(((void *)dirent) + count_offset); +} + +static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent, + int count_offset, int count, struct dx_tail *t) +{ + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + __u32 csum, old_csum; + int size; + + size = count_offset + (count * sizeof(struct dx_entry)); + old_csum = t->dt_checksum; + t->dt_checksum = 0; + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); + csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); + t->dt_checksum = old_csum; + + return cpu_to_le32(csum); +} + +static int ext4_dx_csum_verify(struct inode *inode, + struct ext4_dir_entry *dirent) +{ + struct dx_countlimit *c; + struct dx_tail *t; + int count_offset, limit, count; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + c = get_dx_countlimit(inode, dirent, &count_offset); + if (!c) { + EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D."); + return 1; + } + limit = le16_to_cpu(c->limit); + count = le16_to_cpu(c->count); + if (count_offset + (limit * sizeof(struct dx_entry)) > + EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { + EXT4_ERROR_INODE(inode, "metadata_csum set but no space for " + "tree checksum found. Run e2fsck -D."); + return 1; + } + t = (struct dx_tail *)(((struct dx_entry *)c) + limit); + + if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset, + count, t)) + return 0; + return 1; +} + +static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) +{ + struct dx_countlimit *c; + struct dx_tail *t; + int count_offset, limit, count; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + c = get_dx_countlimit(inode, dirent, &count_offset); + if (!c) { + EXT4_ERROR_INODE(inode, "dir seems corrupt? 
Run e2fsck -D."); + return; + } + limit = le16_to_cpu(c->limit); + count = le16_to_cpu(c->count); + if (count_offset + (limit * sizeof(struct dx_entry)) > + EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) { + EXT4_ERROR_INODE(inode, "metadata_csum set but no space for " + "tree checksum. Run e2fsck -D."); + return; + } + t = (struct dx_tail *)(((struct dx_entry *)c) + limit); + + t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t); +} + +static inline int ext4_handle_dirty_dx_node(handle_t *handle, + struct inode *inode, + struct buffer_head *bh) +{ + ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data); + return ext4_handle_dirty_metadata(handle, inode, bh); +} + /* * p is at least 6 bytes before the end of page */ @@ -239,12 +471,20 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - EXT4_DIR_REC_LEN(2) - infosize; + + if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } static inline unsigned dx_node_limit(struct inode *dir) { unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); + + if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + entry_space -= sizeof(struct dx_tail); return entry_space / sizeof(struct dx_entry); } @@ -390,6 +630,15 @@ dx_probe(const struct qstr *d_name, struct inode *dir, goto fail; } + if (!buffer_verified(bh) && + !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { + ext4_warning(dir->i_sb, "Root failed checksum"); + brelse(bh); + *err = ERR_BAD_DX_DIR; + goto fail; + } + set_buffer_verified(bh); + entries = (struct dx_entry *) (((char *)&root->info) + root->info.info_length); @@ -450,6 +699,17 @@ dx_probe(const struct qstr *d_name, struct inode *dir, if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err))) goto fail2; at = entries = ((struct dx_node *) bh->b_data)->entries; + + if (!buffer_verified(bh) && + !ext4_dx_csum_verify(dir, + (struct ext4_dir_entry *)bh->b_data)) { + ext4_warning(dir->i_sb, "Node failed checksum"); + brelse(bh); + *err = ERR_BAD_DX_DIR; + goto fail; + } + set_buffer_verified(bh); + if (dx_get_limit(entries) != dx_node_limit (dir)) { ext4_warning(dir->i_sb, "dx entry: limit != node limit"); @@ -549,6 +809,15 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash, if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at), 0, &err))) return err; /* Failure */ + + if (!buffer_verified(bh) && + !ext4_dx_csum_verify(dir, + (struct ext4_dir_entry *)bh->b_data)) { + ext4_warning(dir->i_sb, "Node failed checksum"); + return -EIO; + } + set_buffer_verified(bh); + p++; brelse(p->bh); p->bh = bh; @@ -577,6 +846,11 @@ static int htree_dirblock_to_tree(struct file *dir_file, if (!(bh = ext4_bread (NULL, dir, block, 0, &err))) return err; + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) + return -EIO; + set_buffer_verified(bh); + de = (struct ext4_dir_entry_2 *) bh->b_data; top = (struct ext4_dir_entry_2 *) ((char *) de + dir->i_sb->s_blocksize - @@ -936,6 +1210,15 @@ restart: brelse(bh); goto next; } + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(dir, + (struct ext4_dir_entry *)bh->b_data)) { + EXT4_ERROR_INODE(dir, "checksumming directory " + "block %lu", (unsigned long)block); + brelse(bh); + goto next; + } + set_buffer_verified(bh); i = search_dirblock(bh, dir, d_name, 
block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { @@ -987,6 +1270,16 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q if (!(bh = ext4_bread(NULL, dir, block, 0, err))) goto errout; + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(dir, + (struct ext4_dir_entry *)bh->b_data)) { + EXT4_ERROR_INODE(dir, "checksumming directory " + "block %lu", (unsigned long)block); + brelse(bh); + *err = -EIO; + goto errout; + } + set_buffer_verified(bh); retval = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); @@ -1037,6 +1330,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EIO); } + if (unlikely(ino == dir->i_ino)) { + EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir", + dentry->d_name.len, + dentry->d_name.name); + return ERR_PTR(-EIO); + } inode = ext4_iget(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { EXT4_ERROR_INODE(dir, @@ -1156,8 +1455,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, char *data1 = (*bh)->b_data, *data2; unsigned split, move, size; struct ext4_dir_entry_2 *de = NULL, *de2; + struct ext4_dir_entry_tail *t; + int csum_size = 0; int err = 0, i; + if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); + bh2 = ext4_append (handle, dir, &newblock, &err); if (!(bh2)) { brelse(*bh); @@ -1204,10 +1509,20 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, /* Fancy dance to stay within two buffers */ de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); de = dx_pack_dirents(data1, blocksize); - de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de, + de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - + (char *) de, blocksize); - de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2, + de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - + (char *) de2, blocksize); + if (csum_size) { + t = EXT4_DIRENT_TAIL(data2, blocksize); + initialize_dirent_tail(t, blocksize); + + t = EXT4_DIRENT_TAIL(data1, blocksize); + initialize_dirent_tail(t, blocksize); + } + dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); @@ -1218,10 +1533,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, de = de2; } dx_insert_block(frame, hash2 + continued, newblock); - err = ext4_handle_dirty_metadata(handle, dir, bh2); + err = ext4_handle_dirty_dirent_node(handle, dir, bh2); if (err) goto journal_error; - err = ext4_handle_dirty_metadata(handle, dir, frame->bh); + err = ext4_handle_dirty_dx_node(handle, dir, frame->bh); if (err) goto journal_error; brelse(bh2); @@ -1258,11 +1573,16 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, unsigned short reclen; int nlen, rlen, err; char *top; + int csum_size = 0; + + if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); reclen = EXT4_DIR_REC_LEN(namelen); if (!de) { de = (struct ext4_dir_entry_2 *)bh->b_data; - top = bh->b_data + blocksize - reclen; + top = bh->b_data + (blocksize - csum_size) - reclen; while ((char *) de <= top) { if (ext4_check_dir_entry(dir, NULL, de, bh, offset)) return -EIO; @@ -1295,11 +1615,8 @@ static int 
add_dirent_to_buf(handle_t *handle, struct dentry *dentry, de = de1; } de->file_type = EXT4_FT_UNKNOWN; - if (inode) { - de->inode = cpu_to_le32(inode->i_ino); - ext4_set_de_type(dir->i_sb, de, inode->i_mode); - } else - de->inode = 0; + de->inode = cpu_to_le32(inode->i_ino); + ext4_set_de_type(dir->i_sb, de, inode->i_mode); de->name_len = namelen; memcpy(de->name, name, namelen); /* @@ -1318,7 +1635,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, dir->i_version++; ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, dir, bh); + err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; @@ -1339,6 +1656,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct dx_frame frames[2], *frame; struct dx_entry *entries; struct ext4_dir_entry_2 *de, *de2; + struct ext4_dir_entry_tail *t; char *data1, *top; unsigned len; int retval; @@ -1346,6 +1664,11 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, struct dx_hash_info hinfo; ext4_lblk_t block; struct fake_dirent *fde; + int csum_size = 0; + + if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); blocksize = dir->i_sb->s_blocksize; dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); @@ -1366,7 +1689,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, brelse(bh); return -EIO; } - len = ((char *) root) + blocksize - (char *) de; + len = ((char *) root) + (blocksize - csum_size) - (char *) de; /* Allocate new block for the 0th block's dirents */ bh2 = ext4_append(handle, dir, &block, &retval); @@ -1382,8 +1705,15 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, top = data1 + len; while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) de = de2; - de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de, + de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - + (char *) de, blocksize); + + if (csum_size) { + t = EXT4_DIRENT_TAIL(data1, blocksize); + initialize_dirent_tail(t, blocksize); + } + /* Initialize the root; the dot dirents already exist */ de = (struct ext4_dir_entry_2 *) (&root->dotdot); de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), @@ -1408,8 +1738,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, frame->bh = bh; bh = bh2; - ext4_handle_dirty_metadata(handle, dir, frame->bh); - ext4_handle_dirty_metadata(handle, dir, bh); + ext4_handle_dirty_dx_node(handle, dir, frame->bh); + ext4_handle_dirty_dirent_node(handle, dir, bh); de = do_split(handle,dir, &bh, frame, &hinfo, &retval); if (!de) { @@ -1445,11 +1775,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, struct inode *dir = dentry->d_parent->d_inode; struct buffer_head *bh; struct ext4_dir_entry_2 *de; + struct ext4_dir_entry_tail *t; struct super_block *sb; int retval; int dx_fallback=0; unsigned blocksize; ext4_lblk_t block, blocks; + int csum_size = 0; + + if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); sb = dir->i_sb; blocksize = sb->s_blocksize; @@ -1468,6 +1804,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, bh = ext4_bread(handle, dir, block, 0, &retval); if(!bh) return retval; + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(dir, 
+ (struct ext4_dir_entry *)bh->b_data)) + return -EIO; + set_buffer_verified(bh); retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); if (retval != -ENOSPC) { brelse(bh); @@ -1484,7 +1825,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return retval; de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; - de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); + de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize); + + if (csum_size) { + t = EXT4_DIRENT_TAIL(bh->b_data, blocksize); + initialize_dirent_tail(t, blocksize); + } + retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); if (retval == 0) @@ -1516,6 +1863,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err))) goto cleanup; + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) + goto journal_error; + set_buffer_verified(bh); + BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) @@ -1583,7 +1935,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, dxtrace(dx_show_index("node", frames[1].entries)); dxtrace(dx_show_index("node", ((struct dx_node *) bh2->b_data)->entries)); - err = ext4_handle_dirty_metadata(handle, dir, bh2); + err = ext4_handle_dirty_dx_node(handle, dir, bh2); if (err) goto journal_error; brelse (bh2); @@ -1609,7 +1961,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, if (err) goto journal_error; } - err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh); + err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh); if (err) { ext4_std_error(inode->i_sb, err); goto cleanup; @@ -1641,12 +1993,17 @@ static int ext4_delete_entry(handle_t *handle, { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; + int csum_size = 0; int i, err; + if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); + i = 0; pde = NULL; de = (struct ext4_dir_entry_2 *) bh->b_data; - while (i < bh->b_size) { + while (i < bh->b_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, i)) return -EIO; if (de == de_del) { @@ -1667,7 +2024,7 @@ static int ext4_delete_entry(handle_t *handle, de->inode = 0; dir->i_version++; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, dir, bh); + err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (unlikely(err)) { ext4_std_error(dir->i_sb, err); return err; @@ -1809,9 +2166,15 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) struct inode *inode; struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; + struct ext4_dir_entry_tail *t; unsigned int blocksize = dir->i_sb->s_blocksize; + int csum_size = 0; int err, retries = 0; + if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + csum_size = sizeof(struct ext4_dir_entry_tail); + if (EXT4_DIR_LINK_MAX(dir)) return -EMLINK; @@ -1852,16 +2215,24 @@ retry: ext4_set_de_type(dir->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(dir->i_ino); - de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1), + de->rec_len = ext4_rec_len_to_disk(blocksize - + (csum_size + EXT4_DIR_REC_LEN(1)), blocksize); de->name_len = 2; strcpy(de->name, ".."); ext4_set_de_type(dir->i_sb, de, S_IFDIR); 
set_nlink(inode, 2); + + if (csum_size) { + t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize); + initialize_dirent_tail(t, blocksize); + } + BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, dir_block); + err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); if (err) goto out_clear_inode; + set_buffer_verified(dir_block); err = ext4_mark_inode_dirty(handle, inode); if (!err) err = ext4_add_entry(handle, dentry, inode); @@ -1911,6 +2282,14 @@ static int empty_dir(struct inode *inode) inode->i_ino); return 1; } + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(inode, + (struct ext4_dir_entry *)bh->b_data)) { + EXT4_ERROR_INODE(inode, "checksum error reading directory " + "lblock 0"); + return -EIO; + } + set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; de1 = ext4_next_entry(de, sb->s_blocksize); if (le32_to_cpu(de->inode) != inode->i_ino || @@ -1942,6 +2321,14 @@ static int empty_dir(struct inode *inode) offset += sb->s_blocksize; continue; } + if (!buffer_verified(bh) && + !ext4_dirent_csum_verify(inode, + (struct ext4_dir_entry *)bh->b_data)) { + EXT4_ERROR_INODE(inode, "checksum error " + "reading directory lblock 0"); + return -EIO; + } + set_buffer_verified(bh); de = (struct ext4_dir_entry_2 *) bh->b_data; } if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) { @@ -2010,7 +2397,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode) /* Insert this inode at the head of the on-disk orphan list... */ NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); - err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); + err = ext4_handle_dirty_super_now(handle, sb); rc = ext4_mark_iloc_dirty(handle, inode, &iloc); if (!err) err = rc; @@ -2083,7 +2470,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) if (err) goto out_brelse; sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); - err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); + err = ext4_handle_dirty_super_now(handle, inode->i_sb); } else { struct ext4_iloc iloc2; struct inode *i_prev = @@ -2442,6 +2829,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval); if (!dir_bh) goto end_rename; + if (!buffer_verified(dir_bh) && + !ext4_dirent_csum_verify(old_inode, + (struct ext4_dir_entry *)dir_bh->b_data)) + goto end_rename; + set_buffer_verified(dir_bh); if (le32_to_cpu(PARENT_INO(dir_bh->b_data, old_dir->i_sb->s_blocksize)) != old_dir->i_ino) goto end_rename; @@ -2472,7 +2864,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ext4_current_time(new_dir); ext4_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); - retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh); + retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh); if (unlikely(retval)) { ext4_std_error(new_dir->i_sb, retval); goto end_rename; @@ -2526,7 +2918,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); - retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh); + retval = ext4_handle_dirty_dirent_node(handle, old_inode, + dir_bh); if (retval) { ext4_std_error(old_dir->i_sb, retval); goto end_rename; diff --git a/fs/ext4/resize.c 
b/fs/ext4/resize.c index 59fa0be27251..7ea6cbb44121 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size) if (flex_gd == NULL) goto out3; + if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) + goto out2; flex_gd->count = flexbg_size; flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) * @@ -796,7 +798,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, ext4_kvfree(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); - err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); + err = ext4_handle_dirty_super_now(handle, sb); if (err) ext4_std_error(sb, err); @@ -968,6 +970,8 @@ static void update_backups(struct super_block *sb, goto exit_err; } + ext4_superblock_csum_set(sb, (struct ext4_super_block *)data); + while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) { struct buffer_head *bh; @@ -1067,6 +1071,54 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb, return err; } +static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block) +{ + struct buffer_head *bh = sb_getblk(sb, block); + if (!bh) + return NULL; + + if (bitmap_uptodate(bh)) + return bh; + + lock_buffer(bh); + if (bh_submit_read(bh) < 0) { + unlock_buffer(bh); + brelse(bh); + return NULL; + } + unlock_buffer(bh); + + return bh; +} + +static int ext4_set_bitmap_checksums(struct super_block *sb, + ext4_group_t group, + struct ext4_group_desc *gdp, + struct ext4_new_group_data *group_data) +{ + struct buffer_head *bh; + + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 0; + + bh = ext4_get_bitmap(sb, group_data->inode_bitmap); + if (!bh) + return -EIO; + ext4_inode_bitmap_csum_set(sb, group, gdp, bh, + EXT4_INODES_PER_GROUP(sb) / 8); + brelse(bh); + + bh = ext4_get_bitmap(sb, group_data->block_bitmap); + if (!bh) + return -EIO; + ext4_block_bitmap_csum_set(sb, group, gdp, bh, + EXT4_BLOCKS_PER_GROUP(sb) / 8); + brelse(bh); + + return 0; +} + /* * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg */ @@ -1093,18 +1145,24 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb, */ gdb_bh = sbi->s_group_desc[gdb_num]; /* Update group descriptor block for new group */ - gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data + + gdp = (struct ext4_group_desc *)(gdb_bh->b_data + gdb_off * EXT4_DESC_SIZE(sb)); memset(gdp, 0, EXT4_DESC_SIZE(sb)); ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); + err = ext4_set_bitmap_checksums(sb, group, gdp, group_data); + if (err) { + ext4_std_error(sb, err); + break; + } + ext4_inode_table_set(sb, gdp, group_data->inode_table); ext4_free_group_clusters_set(sb, gdp, EXT4_B2C(sbi, group_data->free_blocks_count)); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); gdp->bg_flags = cpu_to_le16(*bg_flags); - gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); + ext4_group_desc_csum_set(sb, group, gdp); err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); if (unlikely(err)) { @@ -1343,17 +1401,14 @@ static int ext4_setup_next_flex_gd(struct super_block *sb, (1 + ext4_bg_num_gdb(sb, group + i) + le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; group_data[i].free_blocks_count = blocks_per_group - overhead; - if (EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) + if (ext4_has_group_desc_csum(sb)) 
flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | EXT4_BG_INODE_UNINIT; else flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; } - if (last_group == n_group && - EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) + if (last_group == n_group && ext4_has_group_desc_csum(sb)) /* We need to initialize block bitmap of last group. */ flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 35b5954489ee..eb7aa3e4ef05 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -112,6 +112,48 @@ static struct file_system_type ext3_fs_type = { #define IS_EXT3_SB(sb) (0) #endif +static int ext4_verify_csum_type(struct super_block *sb, + struct ext4_super_block *es) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + return es->s_checksum_type == EXT4_CRC32C_CHKSUM; +} + +static __le32 ext4_superblock_csum(struct super_block *sb, + struct ext4_super_block *es) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + int offset = offsetof(struct ext4_super_block, s_checksum); + __u32 csum; + + csum = ext4_chksum(sbi, ~0, (char *)es, offset); + + return cpu_to_le32(csum); +} + +int ext4_superblock_csum_verify(struct super_block *sb, + struct ext4_super_block *es) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return 1; + + return es->s_checksum == ext4_superblock_csum(sb, es); +} + +void ext4_superblock_csum_set(struct super_block *sb, + struct ext4_super_block *es) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + es->s_checksum = ext4_superblock_csum(sb, es); +} + void *ext4_kvmalloc(size_t size, gfp_t flags) { void *ret; @@ -497,6 +539,7 @@ void __ext4_error(struct super_block *sb, const char *function, printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", sb->s_id, function, line, current->comm, &vaf); va_end(args); + save_error_info(sb, function, line); ext4_handle_error(sb); } @@ -905,6 +948,8 @@ static void ext4_put_super(struct super_block *sb) unlock_super(sb); kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); + if (sbi->s_chksum_driver) + crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); kfree(sbi); } @@ -1922,43 +1967,69 @@ failed: return 0; } -__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, - struct ext4_group_desc *gdp) +static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + struct ext4_group_desc *gdp) { + int offset; __u16 crc = 0; + __le32 le_group = cpu_to_le32(block_group); - if (sbi->s_es->s_feature_ro_compat & - cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { - int offset = offsetof(struct ext4_group_desc, bg_checksum); - __le32 le_group = cpu_to_le32(block_group); - - crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); - crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); - crc = crc16(crc, (__u8 *)gdp, offset); - offset += sizeof(gdp->bg_checksum); /* skip checksum */ - /* for checksum of struct ext4_group_desc do the rest...*/ - if ((sbi->s_es->s_feature_incompat & - cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && - offset < le16_to_cpu(sbi->s_es->s_desc_size)) - crc = crc16(crc, (__u8 *)gdp + offset, - le16_to_cpu(sbi->s_es->s_desc_size) - - offset); + if ((sbi->s_es->s_feature_ro_compat & + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { + /* Use new metadata_csum algorithm */ + __u16 old_csum; + __u32 csum32; + + old_csum = gdp->bg_checksum; + gdp->bg_checksum = 0; + csum32 = 
ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, + sizeof(le_group)); + csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, + sbi->s_desc_size); + gdp->bg_checksum = old_csum; + + crc = csum32 & 0xFFFF; + goto out; } + /* old crc16 code */ + offset = offsetof(struct ext4_group_desc, bg_checksum); + + crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); + crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); + crc = crc16(crc, (__u8 *)gdp, offset); + offset += sizeof(gdp->bg_checksum); /* skip checksum */ + /* for checksum of struct ext4_group_desc do the rest...*/ + if ((sbi->s_es->s_feature_incompat & + cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && + offset < le16_to_cpu(sbi->s_es->s_desc_size)) + crc = crc16(crc, (__u8 *)gdp + offset, + le16_to_cpu(sbi->s_es->s_desc_size) - + offset); + +out: return cpu_to_le16(crc); } -int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, +int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { - if ((sbi->s_es->s_feature_ro_compat & - cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) && - (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp))) + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb), + block_group, gdp))) return 0; return 1; } +void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, + struct ext4_group_desc *gdp) +{ + if (!ext4_has_group_desc_csum(sb)) + return; + gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp); +} + /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, ext4_group_t *first_not_zeroed) @@ -2013,7 +2084,7 @@ static int ext4_check_descriptors(struct super_block *sb, return 0; } ext4_lock_group(sb, i); - if (!ext4_group_desc_csum_verify(sbi, i, gdp)) { + if (!ext4_group_desc_csum_verify(sb, i, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Checksum for group %u failed (%u!=%u)", i, le16_to_cpu(ext4_group_desc_csum(sbi, i, @@ -2417,6 +2488,23 @@ static ssize_t sbi_ui_store(struct ext4_attr *a, return count; } +static ssize_t trigger_test_error(struct ext4_attr *a, + struct ext4_sb_info *sbi, + const char *buf, size_t count) +{ + int len = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (len && buf[len-1] == '\n') + len--; + + if (len) + ext4_error(sbi->s_sb, "%.*s", len, buf); + return count; +} + #define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ @@ -2447,6 +2535,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); +EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error); static struct attribute *ext4_attrs[] = { ATTR_LIST(delayed_allocation_blocks), @@ -2461,6 +2550,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), ATTR_LIST(max_writeback_mb_bump), + ATTR_LIST(trigger_fs_error), NULL, }; @@ -2957,6 +3047,44 @@ static void ext4_destroy_lazyinit_thread(void) kthread_stop(ext4_lazyinit_task); } +static int set_journal_csum_feature_set(struct super_block *sb) +{ + int ret = 1; + int compat, incompat; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + 
/* journal checksum v2 */ + compat = 0; + incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2; + } else { + /* journal checksum v1 */ + compat = JBD2_FEATURE_COMPAT_CHECKSUM; + incompat = 0; + } + + if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { + ret = jbd2_journal_set_features(sbi->s_journal, + compat, 0, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | + incompat); + } else if (test_opt(sb, JOURNAL_CHECKSUM)) { + ret = jbd2_journal_set_features(sbi->s_journal, + compat, 0, + incompat); + jbd2_journal_clear_features(sbi->s_journal, 0, 0, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); + } else { + jbd2_journal_clear_features(sbi->s_journal, + JBD2_FEATURE_COMPAT_CHECKSUM, 0, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | + JBD2_FEATURE_INCOMPAT_CSUM_V2); + } + + return ret; +} + static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); @@ -2993,6 +3121,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto out_free_orig; } sb->s_fs_info = sbi; + sbi->s_sb = sb; sbi->s_mount_opt = 0; sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID); sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID); @@ -3032,13 +3161,54 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ - es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); + es = (struct ext4_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); + /* Warn if metadata_csum and gdt_csum are both set. */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && + EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) + ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " + "redundant flags; please run fsck."); + + /* Check for a known checksum algorithm */ + if (!ext4_verify_csum_type(sb, es)) { + ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " + "unknown checksum algorithm."); + silent = 1; + goto cantfind_ext4; + } + + /* Load the checksum driver */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { + sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); + if (IS_ERR(sbi->s_chksum_driver)) { + ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); + ret = PTR_ERR(sbi->s_chksum_driver); + sbi->s_chksum_driver = NULL; + goto failed_mount; + } + } + + /* Check superblock checksum */ + if (!ext4_superblock_csum_verify(sb, es)) { + ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " + "invalid superblock checksum. 
Run e2fsck?"); + silent = 1; + goto cantfind_ext4; + } + + /* Precompute checksum seed for all metadata */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, + sizeof(es->s_uuid)); + /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); @@ -3200,7 +3370,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "Can't read superblock on 2nd try"); goto failed_mount; } - es = (struct ext4_super_block *)(((char *)bh->b_data) + offset); + es = (struct ext4_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, @@ -3392,6 +3562,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory"); + ret = -ENOMEM; goto failed_mount; } @@ -3449,6 +3620,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); + ret = err; goto failed_mount3; } @@ -3506,26 +3678,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto no_journal; } - if (ext4_blocks_count(es) > 0xffffffffULL && + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) && !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT)) { ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); goto failed_mount_wq; } - if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { - jbd2_journal_set_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); - } else if (test_opt(sb, JOURNAL_CHECKSUM)) { - jbd2_journal_set_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); - jbd2_journal_clear_features(sbi->s_journal, 0, 0, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); - } else { - jbd2_journal_clear_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); + if (!set_journal_csum_feature_set(sb)) { + ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " + "feature set"); + goto failed_mount_wq; } /* We have now updated the journal if required, so we can @@ -3606,7 +3769,8 @@ no_journal: goto failed_mount4; } - ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY); + if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY)) + sb->s_flags |= MS_RDONLY; /* determine the minimum size of new large inodes, if present */ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { @@ -3641,7 +3805,7 @@ no_journal: } ext4_ext_init(sb); - err = ext4_mb_init(sb, needs_recovery); + err = ext4_mb_init(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", err); @@ -3724,6 +3888,8 @@ failed_mount2: brelse(sbi->s_group_desc[i]); ext4_kvfree(sbi->s_group_desc); failed_mount: + if (sbi->s_chksum_driver) + crypto_free_shash(sbi->s_chksum_driver); if (sbi->s_proc) { remove_proc_entry("options", sbi->s_proc); remove_proc_entry(sb->s_id, ext4_proc_root); @@ -3847,7 +4013,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, goto out_bdev; } - es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); + es = (struct ext4_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { @@ -4039,6 +4205,7 @@ static int ext4_commit_super(struct super_block 
*sb, int sync) &EXT4_SB(sb)->s_freeinodes_counter)); sb->s_dirt = 0; BUFFER_TRACE(sbh, "marking dirty"); + ext4_superblock_csum_set(sb, es); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); @@ -4333,7 +4500,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) struct ext4_group_desc *gdp = ext4_get_group_desc(sb, g, NULL); - if (!ext4_group_desc_csum_verify(sbi, g, gdp)) { + if (!ext4_group_desc_csum_verify(sb, g, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_remount: Checksum for group %u failed (%u!=%u)", g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index e88748e55c0f..e56c9ed7d6e3 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -122,6 +122,58 @@ const struct xattr_handler *ext4_xattr_handlers[] = { NULL }; +static __le32 ext4_xattr_block_csum(struct inode *inode, + sector_t block_nr, + struct ext4_xattr_header *hdr) +{ + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); + __u32 csum, old; + + old = hdr->h_checksum; + hdr->h_checksum = 0; + if (le32_to_cpu(hdr->h_refcount) != 1) { + block_nr = cpu_to_le64(block_nr); + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr, + sizeof(block_nr)); + } else + csum = ei->i_csum_seed; + csum = ext4_chksum(sbi, csum, (__u8 *)hdr, + EXT4_BLOCK_SIZE(inode->i_sb)); + hdr->h_checksum = old; + return cpu_to_le32(csum); +} + +static int ext4_xattr_block_csum_verify(struct inode *inode, + sector_t block_nr, + struct ext4_xattr_header *hdr) +{ + if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && + (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) + return 0; + return 1; +} + +static void ext4_xattr_block_csum_set(struct inode *inode, + sector_t block_nr, + struct ext4_xattr_header *hdr) +{ + if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) + return; + + hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); +} + +static inline int ext4_handle_dirty_xattr_block(handle_t *handle, + struct inode *inode, + struct buffer_head *bh) +{ + ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); + return ext4_handle_dirty_metadata(handle, inode, bh); +} + static inline const struct xattr_handler * ext4_xattr_handler(int name_index) { @@ -156,12 +208,22 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end) } static inline int -ext4_xattr_check_block(struct buffer_head *bh) +ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) { + int error; + + if (buffer_verified(bh)) + return 0; + if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) return -EIO; - return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); + if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) + return -EIO; + error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); + if (!error) + set_buffer_verified(bh); + return error; } static inline int @@ -224,7 +286,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, goto cleanup; ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); - if (ext4_xattr_check_block(bh)) { + if (ext4_xattr_check_block(inode, bh)) { bad_block: EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); @@ -369,7 +431,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) goto cleanup; ea_bdebug(bh, "b_count=%d, 
refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); - if (ext4_xattr_check_block(bh)) { + if (ext4_xattr_check_block(inode, bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; @@ -492,7 +554,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, if (ce) mb_cache_entry_release(ce); unlock_buffer(bh); - error = ext4_handle_dirty_metadata(handle, inode, bh); + error = ext4_handle_dirty_xattr_block(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, 1); @@ -662,7 +724,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, ea_bdebug(bs->bh, "b_count=%d, refcount=%d", atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); - if (ext4_xattr_check_block(bs->bh)) { + if (ext4_xattr_check_block(inode, bs->bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; @@ -725,9 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, if (error == -EIO) goto bad_block; if (!error) - error = ext4_handle_dirty_metadata(handle, - inode, - bs->bh); + error = ext4_handle_dirty_xattr_block(handle, + inode, + bs->bh); if (error) goto cleanup; goto inserted; @@ -796,9 +858,9 @@ inserted: ea_bdebug(new_bh, "reusing; refcount now=%d", le32_to_cpu(BHDR(new_bh)->h_refcount)); unlock_buffer(new_bh); - error = ext4_handle_dirty_metadata(handle, - inode, - new_bh); + error = ext4_handle_dirty_xattr_block(handle, + inode, + new_bh); if (error) goto cleanup_dquot; } @@ -855,8 +917,8 @@ getblk_failed: set_buffer_uptodate(new_bh); unlock_buffer(new_bh); ext4_xattr_cache_insert(new_bh); - error = ext4_handle_dirty_metadata(handle, - inode, new_bh); + error = ext4_handle_dirty_xattr_block(handle, + inode, new_bh); if (error) goto cleanup; } @@ -1193,7 +1255,7 @@ retry: error = -EIO; if (!bh) goto cleanup; - if (ext4_xattr_check_block(bh)) { + if (ext4_xattr_check_block(inode, bh)) { EXT4_ERROR_INODE(inode, "bad block %llu", EXT4_I(inode)->i_file_acl); error = -EIO; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 25b7387ff183..91f31ca7d9af 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -27,7 +27,9 @@ struct ext4_xattr_header { __le32 h_refcount; /* reference count */ __le32 h_blocks; /* number of disk blocks used */ __le32 h_hash; /* hash value of all attributes */ - __u32 h_reserved[4]; /* zero right now */ + __le32 h_checksum; /* crc32c(uuid+id+xattrblock) */ + /* id = inum if refcount=1, blknum otherwise */ + __u32 h_reserved[3]; /* zero right now */ }; struct ext4_xattr_ibody_header { diff --git a/fs/fat/inode.c b/fs/fat/inode.c index c2973ea5df9a..a3d81ebf6d86 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -735,10 +735,9 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, } static int -fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable) +fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { int len = *lenp; - struct inode *inode = de->d_inode; u32 ipos_h, ipos_m, ipos_l; if (len < 5) { @@ -754,9 +753,9 @@ fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable) fh[1] = inode->i_generation; fh[2] = ipos_h; fh[3] = ipos_m | MSDOS_I(inode)->i_logstart; - spin_lock(&de->d_lock); - fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart; - spin_unlock(&de->d_lock); + fh[4] = ipos_l; + if (parent) + fh[4] |= MSDOS_I(parent)->i_logstart; return 3; } diff --git a/fs/fcntl.c b/fs/fcntl.c index d078b75572a7..81b70e665bf0 100644 --- 
a/fs/fcntl.c +++ b/fs/fcntl.c @@ -442,28 +442,24 @@ static int check_fcntl_cmd(unsigned cmd) SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct file *filp; + int fput_needed; long err = -EBADF; - filp = fget_raw(fd); + filp = fget_raw_light(fd, &fput_needed); if (!filp) goto out; if (unlikely(filp->f_mode & FMODE_PATH)) { - if (!check_fcntl_cmd(cmd)) { - fput(filp); - goto out; - } + if (!check_fcntl_cmd(cmd)) + goto out1; } err = security_file_fcntl(filp, cmd, arg); - if (err) { - fput(filp); - return err; - } + if (!err) + err = do_fcntl(fd, cmd, arg, filp); - err = do_fcntl(fd, cmd, arg, filp); - - fput(filp); +out1: + fput_light(filp, fput_needed); out: return err; } @@ -473,26 +469,21 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct file * filp; - long err; + long err = -EBADF; + int fput_needed; - err = -EBADF; - filp = fget_raw(fd); + filp = fget_raw_light(fd, &fput_needed); if (!filp) goto out; if (unlikely(filp->f_mode & FMODE_PATH)) { - if (!check_fcntl_cmd(cmd)) { - fput(filp); - goto out; - } + if (!check_fcntl_cmd(cmd)) + goto out1; } err = security_file_fcntl(filp, cmd, arg); - if (err) { - fput(filp); - return err; - } - err = -EBADF; + if (err) + goto out1; switch (cmd) { case F_GETLK64: @@ -507,7 +498,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, err = do_fcntl(fd, cmd, arg, filp); break; } - fput(filp); +out1: + fput_light(filp, fput_needed); out: return err; } diff --git a/fs/file_table.c b/fs/file_table.c index 70f2a0fd6aec..a305d9e2d1b2 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -34,7 +34,6 @@ struct files_stat_struct files_stat = { .max_files = NR_FILE }; -DECLARE_LGLOCK(files_lglock); DEFINE_LGLOCK(files_lglock); /* SLAB cache for file structures */ @@ -421,9 +420,9 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb) */ void file_sb_list_add(struct file *file, struct super_block *sb) { - lg_local_lock(files_lglock); + lg_local_lock(&files_lglock); __file_sb_list_add(file, sb); - lg_local_unlock(files_lglock); + lg_local_unlock(&files_lglock); } /** @@ -436,9 +435,9 @@ void file_sb_list_add(struct file *file, struct super_block *sb) void file_sb_list_del(struct file *file) { if (!list_empty(&file->f_u.fu_list)) { - lg_local_lock_cpu(files_lglock, file_list_cpu(file)); + lg_local_lock_cpu(&files_lglock, file_list_cpu(file)); list_del_init(&file->f_u.fu_list); - lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); + lg_local_unlock_cpu(&files_lglock, file_list_cpu(file)); } } @@ -485,7 +484,7 @@ void mark_files_ro(struct super_block *sb) struct file *f; retry: - lg_global_lock(files_lglock); + lg_global_lock(&files_lglock); do_file_list_for_each_entry(sb, f) { struct vfsmount *mnt; if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) @@ -502,12 +501,12 @@ retry: file_release_write(f); mnt = mntget(f->f_path.mnt); /* This can sleep, so we can't hold the spinlock. 
*/ - lg_global_unlock(files_lglock); + lg_global_unlock(&files_lglock); mnt_drop_write(mnt); mntput(mnt); goto retry; } while_file_list_for_each_entry; - lg_global_unlock(files_lglock); + lg_global_unlock(&files_lglock); } void __init files_init(unsigned long mempages) @@ -525,6 +524,6 @@ void __init files_init(unsigned long mempages) n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); - lg_lock_init(files_lglock); + lg_lock_init(&files_lglock, "files_lglock"); percpu_counter_init(&nr_files, 0); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 504e61b7fd75..9562109d3a87 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -962,7 +962,9 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err) goto out; - file_update_time(file); + err = file_update_time(file); + if (err) + goto out; if (file->f_flags & O_DIRECT) { written = generic_file_direct_write(iocb, iov, &nr_segs, diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 56f6dcf30768..42678a33b7bb 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -627,12 +627,10 @@ static struct dentry *fuse_get_dentry(struct super_block *sb, return ERR_PTR(err); } -static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, - int connectable) +static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len, + struct inode *parent) { - struct inode *inode = dentry->d_inode; - bool encode_parent = connectable && !S_ISDIR(inode->i_mode); - int len = encode_parent ? 6 : 3; + int len = parent ? 6 : 3; u64 nodeid; u32 generation; @@ -648,14 +646,9 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; - if (encode_parent) { - struct inode *parent; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent->d_inode; + if (parent) { nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; - spin_unlock(&dentry->d_lock); fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); @@ -663,7 +656,7 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, } *max_len = len; - return encode_parent ? 0x82 : 0x81; + return parent ? 
0x82 : 0x81; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 70ba891654f8..e8ed6d4a6181 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -28,15 +28,14 @@ #define GFS2_LARGE_FH_SIZE 8 #define GFS2_OLD_FH_SIZE 10 -static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, - int connectable) +static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len, + struct inode *parent) { __be32 *fh = (__force __be32 *)p; - struct inode *inode = dentry->d_inode; struct super_block *sb = inode->i_sb; struct gfs2_inode *ip = GFS2_I(inode); - if (connectable && (*len < GFS2_LARGE_FH_SIZE)) { + if (parent && (*len < GFS2_LARGE_FH_SIZE)) { *len = GFS2_LARGE_FH_SIZE; return 255; } else if (*len < GFS2_SMALL_FH_SIZE) { @@ -50,14 +49,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); *len = GFS2_SMALL_FH_SIZE; - if (!connectable || inode == sb->s_root->d_inode) + if (!parent || inode == sb->s_root->d_inode) return *len; - spin_lock(&dentry->d_lock); - inode = dentry->d_parent->d_inode; - ip = GFS2_I(inode); - igrab(inode); - spin_unlock(&dentry->d_lock); + ip = GFS2_I(parent); fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32); fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); @@ -65,8 +60,6 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); *len = GFS2_LARGE_FH_SIZE; - iput(inode); - return *len; } diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c index 7a5eb2c718c8..cdb84a838068 100644 --- a/fs/hpfs/alloc.c +++ b/fs/hpfs/alloc.c @@ -16,9 +16,9 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; - u32 *bmp; + __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; - if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { + if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } @@ -62,7 +62,7 @@ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) { struct quad_buffer_head qbh; - unsigned *bmp; + __le32 *bmp; unsigned bs = near & ~0x3fff; unsigned nr = (near & 0x3fff) & ~(n - 1); /*unsigned mnr;*/ @@ -236,7 +236,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near) int hpfs_alloc_if_possible(struct super_block *s, secno sec) { struct quad_buffer_head qbh; - u32 *bmp; + __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); @@ -254,7 +254,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec) void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; - u32 *bmp; + __le32 *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*printk("2 - ");*/ if (!n) return; @@ -299,7 +299,7 @@ int hpfs_check_free_dnodes(struct super_block *s, int n) int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; - u32 *bmp; + __le32 *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { @@ -351,7 +351,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno) 
hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; - u32 *bmp; + __le32 *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { return; diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c index 08b503e8ed29..4bae4a4a60b1 100644 --- a/fs/hpfs/anode.c +++ b/fs/hpfs/anode.c @@ -20,7 +20,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode, int c1, c2 = 0; go_down: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; - if (btree->internal) { + if (bp_internal(btree)) { for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { a = le32_to_cpu(btree->u.internal[i].down); @@ -82,7 +82,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi brelse(bh); return -1; } - if (btree->internal) { + if (bp_internal(btree)) { a = le32_to_cpu(btree->u.internal[n].down); btree->u.internal[n].file_secno = cpu_to_le32(-1); mark_buffer_dirty(bh); @@ -129,12 +129,12 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } if (a == node && fnod) { anode->up = cpu_to_le32(node); - anode->btree.fnode_parent = 1; + anode->btree.flags |= BP_fnode_parent; anode->btree.n_used_nodes = btree->n_used_nodes; anode->btree.first_free = btree->first_free; anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); - btree->internal = 1; + btree->flags |= BP_internal; btree->n_free_nodes = 11; btree->n_used_nodes = 1; btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); @@ -184,7 +184,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi hpfs_free_sectors(s, ra, 1); if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(up); - anode->btree.fnode_parent = up == node && fnod; + if (up == node && fnod) + anode->btree.flags |= BP_fnode_parent; + else + anode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } @@ -198,7 +201,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { anode = new_anode; /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ - anode->btree.internal = 1; + anode->btree.flags |= BP_internal; anode->btree.n_used_nodes = 1; anode->btree.n_free_nodes = 59; anode->btree.first_free = cpu_to_le16(16); @@ -215,7 +218,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } if ((anode = hpfs_map_anode(s, na, &bh))) { anode->up = cpu_to_le32(node); - if (fnod) anode->btree.fnode_parent = 1; + if (fnod) + anode->btree.flags |= BP_fnode_parent; mark_buffer_dirty(bh); brelse(bh); } @@ -234,18 +238,19 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi } ranode->up = cpu_to_le32(node); memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); - if (fnod) ranode->btree.fnode_parent = 1; - ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; - if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { + if (fnod) + ranode->btree.flags |= BP_fnode_parent; + ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 
60 : 40) - ranode->btree.n_used_nodes; + if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { struct anode *unode; if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { unode->up = cpu_to_le32(ra); - unode->btree.fnode_parent = 0; + unode->btree.flags &= ~BP_fnode_parent; mark_buffer_dirty(bh1); brelse(bh1); } } - btree->internal = 1; + btree->flags |= BP_internal; btree->n_free_nodes = fnod ? 10 : 58; btree->n_used_nodes = 2; btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); @@ -278,7 +283,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree) int d1, d2; go_down: d2 = 0; - while (btree1->internal) { + while (bp_internal(btree1)) { ano = le32_to_cpu(btree1->u.internal[pos].down); if (level) brelse(bh); if (hpfs_sb(s)->sb_chk) @@ -412,13 +417,13 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs) btree->n_free_nodes = 8; btree->n_used_nodes = 0; btree->first_free = cpu_to_le16(8); - btree->internal = 0; + btree->flags &= ~BP_internal; mark_buffer_dirty(bh); } else hpfs_free_sectors(s, f, 1); brelse(bh); return; } - while (btree->internal) { + while (bp_internal(btree)) { nodes = btree->n_used_nodes + btree->n_free_nodes; for (i = 0; i < btree->n_used_nodes; i++) if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; @@ -479,13 +484,13 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno) struct extended_attribute *ea; struct extended_attribute *ea_end; if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; - if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); + if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree); else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) - if (ea->indirect) - hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); - hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); + if (ea_indirect(ea)) + hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); + hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l)); brelse(bh); hpfs_free_sectors(s, fno, 1); } diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 2fa0089a02a8..b8472f803f4e 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -87,7 +87,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) ret = -EIOERROR; goto out; } - if (!fno->dirflag) { + if (!fnode_is_dir(fno)) { e = 1; hpfs_error(inode->i_sb, "not a directory, fnode %08lx", (unsigned long)inode->i_ino); diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index 1e0e2ac30fd3..3228c524ebe5 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c @@ -153,7 +153,7 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno } de->length = cpu_to_le16(36); de->down = 1; - *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); + *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr); } } @@ -177,7 +177,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { - *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); + *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); @@ -656,7 +656,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) del->down = 0; 
d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); } else if (down) - *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); + *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); @@ -672,7 +672,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno) de_prev->down = 1; dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); } - *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); + *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); @@ -1015,7 +1015,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, kfree(name2); return NULL; } - if (!upf->dirflag) { + if (!fnode_is_dir(upf)) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c index d8b84d113c89..bcaafcd2666a 100644 --- a/fs/hpfs/ea.c +++ b/fs/hpfs/ea.c @@ -23,15 +23,15 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len) return; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; - if (ea->indirect) { + if (ea_indirect(ea)) { if (ea_valuelen(ea) != 8) { - hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", + hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x", ano ? "anode" : "sectors", a, pos); return; } if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4)) return; - hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); + hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea)); } pos += ea->namelen + ea_valuelen(ea) + 5; } @@ -81,7 +81,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { - if (ea->indirect) + if (ea_indirect(ea)) goto indirect; if (ea_valuelen(ea) >= size) return -EINVAL; @@ -91,7 +91,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); - ano = fnode->ea_anode; + ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { ea = (struct extended_attribute *)ex; @@ -101,10 +101,10 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, return -EIO; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO; - if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) + if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) return -EIO; if (!strcmp(ea->name, key)) { - if (ea->indirect) + if (ea_indirect(ea)) goto indirect; if (ea_valuelen(ea) >= size) return -EINVAL; @@ -119,7 +119,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key, indirect: if (ea_len(ea) >= size) return -EINVAL; - if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf)) + if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf)) return -EIO; buf[ea_len(ea)] = 0; return 0; @@ -136,8 +136,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { - if (ea->indirect) - return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); + if (ea_indirect(ea)) + return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; @@ -148,7 +148,7 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); - ano = fnode->ea_anode; + ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { char ex[4 + 255 + 1 + 8]; @@ -159,11 +159,11 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si return NULL; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL; - if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) + if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4)) return NULL; if (!strcmp(ea->name, key)) { - if (ea->indirect) - return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); + if (ea_indirect(ea)) + return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea)); if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { printk("HPFS: out of memory for EA\n"); return NULL; @@ -199,9 +199,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, struct extended_attribute *ea_end = fnode_end_ea(fnode); for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) if (!strcmp(ea->name, key)) { - if (ea->indirect) { + if (ea_indirect(ea)) { if (ea_len(ea) == size) - set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); + set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); } else if (ea_valuelen(ea) == size) { memcpy(ea_data(ea), data, size); } @@ -209,7 +209,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, } a = le32_to_cpu(fnode->ea_secno); len = le32_to_cpu(fnode->ea_size_l); - ano = fnode->ea_anode; + ano = fnode_in_anode(fnode); pos = 0; while (pos < len) { char ex[4 + 255 + 1 + 8]; @@ -220,12 +220,12 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, return; } if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; - if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) + if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 
8 : 0), ex + 4)) return; if (!strcmp(ea->name, key)) { - if (ea->indirect) { + if (ea_indirect(ea)) { if (ea_len(ea) == size) - set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); + set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size); } else { if (ea_valuelen(ea) == size) @@ -246,7 +246,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", (unsigned long)inode->i_ino, - le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); + le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); return; } if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && @@ -276,7 +276,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); fnode->ea_size_s = cpu_to_le16(0); fnode->ea_secno = cpu_to_le32(n); - fnode->ea_anode = cpu_to_le32(0); + fnode->flags &= ~FNODE_anode; mark_buffer_dirty(bh); brelse(bh); } @@ -288,9 +288,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, secno q = hpfs_alloc_sector(s, fno, 1, 0); if (!q) goto bail; fnode->ea_secno = cpu_to_le32(q); - fnode->ea_anode = 0; + fnode->flags &= ~FNODE_anode; len++; - } else if (!fnode->ea_anode) { + } else if (!fnode_in_anode(fnode)) { if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { len++; } else { @@ -310,7 +310,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, anode->u.external[0].length = cpu_to_le32(len); mark_buffer_dirty(bh); brelse(bh); - fnode->ea_anode = 1; + fnode->flags |= FNODE_anode; fnode->ea_secno = cpu_to_le32(a_s);*/ secno new_sec; int i; @@ -338,7 +338,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, len = (pos + 511) >> 9; } } - if (fnode->ea_anode) { + if (fnode_in_anode(fnode)) { if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), 0, len) != -1) { len++; @@ -351,16 +351,16 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, h[1] = strlen(key); h[2] = size & 0xff; h[3] = size >> 8; - if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; - if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; - if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; + if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; fnode->ea_size_l = cpu_to_le32(pos); ret: hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; return; bail: if (le32_to_cpu(fnode->ea_secno)) - if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); + if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + 
((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); } diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h index 8b0650aae328..cce025aff1b1 100644 --- a/fs/hpfs/hpfs.h +++ b/fs/hpfs/hpfs.h @@ -51,11 +51,11 @@ struct hpfs_boot_block u8 n_rootdir_entries[2]; u8 n_sectors_s[2]; u8 media_byte; - u16 sectors_per_fat; - u16 sectors_per_track; - u16 heads_per_cyl; - u32 n_hidden_sectors; - u32 n_sectors_l; /* size of partition */ + __le16 sectors_per_fat; + __le16 sectors_per_track; + __le16 heads_per_cyl; + __le32 n_hidden_sectors; + __le32 n_sectors_l; /* size of partition */ u8 drive_number; u8 mbz; u8 sig_28h; /* 28h */ @@ -63,7 +63,7 @@ struct hpfs_boot_block u8 vol_label[11]; u8 sig_hpfs[8]; /* "HPFS " */ u8 pad[448]; - u16 magic; /* aa55 */ + __le16 magic; /* aa55 */ }; @@ -75,28 +75,28 @@ struct hpfs_boot_block struct hpfs_super_block { - u32 magic; /* f995 e849 */ - u32 magic1; /* fa53 e9c5, more magic? */ + __le32 magic; /* f995 e849 */ + __le32 magic1; /* fa53 e9c5, more magic? */ u8 version; /* version of a filesystem usually 2 */ u8 funcversion; /* functional version - oldest version of filesystem that can understand this disk */ - u16 zero; /* 0 */ - fnode_secno root; /* fnode of root directory */ - secno n_sectors; /* size of filesystem */ - u32 n_badblocks; /* number of bad blocks */ - secno bitmaps; /* pointers to free space bit maps */ - u32 zero1; /* 0 */ - secno badblocks; /* bad block list */ - u32 zero3; /* 0 */ - time32_t last_chkdsk; /* date last checked, 0 if never */ - time32_t last_optimize; /* date last optimized, 0 if never */ - secno n_dir_band; /* number of sectors in dir band */ - secno dir_band_start; /* first sector in dir band */ - secno dir_band_end; /* last sector in dir band */ - secno dir_band_bitmap; /* free space map, 1 dnode per bit */ + __le16 zero; /* 0 */ + __le32 root; /* fnode of root directory */ + __le32 n_sectors; /* size of filesystem */ + __le32 n_badblocks; /* number of bad blocks */ + __le32 bitmaps; /* pointers to free space bit maps */ + __le32 zero1; /* 0 */ + __le32 badblocks; /* bad block list */ + __le32 zero3; /* 0 */ + __le32 last_chkdsk; /* date last checked, 0 if never */ + __le32 last_optimize; /* date last optimized, 0 if never */ + __le32 n_dir_band; /* number of sectors in dir band */ + __le32 dir_band_start; /* first sector in dir band */ + __le32 dir_band_end; /* last sector in dir band */ + __le32 dir_band_bitmap; /* free space map, 1 dnode per bit */ u8 volume_name[32]; /* not used */ - secno user_id_table; /* 8 preallocated sectors - user id */ + __le32 user_id_table; /* 8 preallocated sectors - user id */ u32 zero6[103]; /* 0 */ }; @@ -109,8 +109,8 @@ struct hpfs_super_block struct hpfs_spare_block { - u32 magic; /* f991 1849 */ - u32 magic1; /* fa52 29c5, more magic? */ + __le32 magic; /* f991 1849 */ + __le32 magic1; /* fa52 29c5, more magic? 
*/ #ifdef __LITTLE_ENDIAN u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ @@ -153,21 +153,21 @@ struct hpfs_spare_block u8 mm_contlgulty; u8 unused; - secno hotfix_map; /* info about remapped bad sectors */ - u32 n_spares_used; /* number of hotfixes */ - u32 n_spares; /* number of spares in hotfix map */ - u32 n_dnode_spares_free; /* spare dnodes unused */ - u32 n_dnode_spares; /* length of spare_dnodes[] list, + __le32 hotfix_map; /* info about remapped bad sectors */ + __le32 n_spares_used; /* number of hotfixes */ + __le32 n_spares; /* number of spares in hotfix map */ + __le32 n_dnode_spares_free; /* spare dnodes unused */ + __le32 n_dnode_spares; /* length of spare_dnodes[] list, follows in this block*/ - secno code_page_dir; /* code page directory block */ - u32 n_code_pages; /* number of code pages */ - u32 super_crc; /* on HPFS386 and LAN Server this is + __le32 code_page_dir; /* code page directory block */ + __le32 n_code_pages; /* number of code pages */ + __le32 super_crc; /* on HPFS386 and LAN Server this is checksum of superblock, on normal OS/2 unused */ - u32 spare_crc; /* on HPFS386 checksum of spareblock */ - u32 zero1[15]; /* unused */ - dnode_secno spare_dnodes[100]; /* emergency free dnode list */ - u32 zero2[1]; /* room for more? */ + __le32 spare_crc; /* on HPFS386 checksum of spareblock */ + __le32 zero1[15]; /* unused */ + __le32 spare_dnodes[100]; /* emergency free dnode list */ + __le32 zero2[1]; /* room for more? */ }; /* The bad block list is 4 sectors long. The first word must be zero, @@ -202,18 +202,18 @@ struct hpfs_spare_block struct code_page_directory { - u32 magic; /* 4945 21f7 */ - u32 n_code_pages; /* number of pointers following */ - u32 zero1[2]; + __le32 magic; /* 4945 21f7 */ + __le32 n_code_pages; /* number of pointers following */ + __le32 zero1[2]; struct { - u16 ix; /* index */ - u16 code_page_number; /* code page number */ - u32 bounds; /* matches corresponding word + __le16 ix; /* index */ + __le16 code_page_number; /* code page number */ + __le32 bounds; /* matches corresponding word in data block */ - secno code_page_data; /* sector number of a code_page_data + __le32 code_page_data; /* sector number of a code_page_data containing c.p. array */ - u16 index; /* index in c.p. array in that sector*/ - u16 unknown; /* some unknown value; usually 0; + __le16 index; /* index in c.p. 
array in that sector*/ + __le16 unknown; /* some unknown value; usually 0; 2 in Japanese version */ } array[31]; /* unknown length */ }; @@ -224,19 +224,19 @@ struct code_page_directory struct code_page_data { - u32 magic; /* 8945 21f7 */ - u32 n_used; /* # elements used in c_p_data[] */ - u32 bounds[3]; /* looks a bit like + __le32 magic; /* 8945 21f7 */ + __le32 n_used; /* # elements used in c_p_data[] */ + __le32 bounds[3]; /* looks a bit like (beg1,end1), (beg2,end2) one byte each */ - u16 offs[3]; /* offsets from start of sector + __le16 offs[3]; /* offsets from start of sector to start of c_p_data[ix] */ struct { - u16 ix; /* index */ - u16 code_page_number; /* code page number */ - u16 unknown; /* the same as in cp directory */ + __le16 ix; /* index */ + __le16 code_page_number; /* code page number */ + __le16 unknown; /* the same as in cp directory */ u8 map[128]; /* upcase table for chars 80..ff */ - u16 zero2; + __le16 zero2; } code_page[3]; u8 incognita[78]; }; @@ -278,8 +278,8 @@ struct code_page_data #define DNODE_MAGIC 0x77e40aae struct dnode { - u32 magic; /* 77e4 0aae */ - u32 first_free; /* offset from start of dnode to + __le32 magic; /* 77e4 0aae */ + __le32 first_free; /* offset from start of dnode to first free dir entry */ #ifdef __LITTLE_ENDIAN u8 root_dnode: 1; /* Is it root dnode? */ @@ -293,14 +293,14 @@ struct dnode { u8 root_dnode: 1; /* Is it root dnode? */ #endif u8 increment_me2[3]; - secno up; /* (root dnode) directory's fnode + __le32 up; /* (root dnode) directory's fnode (nonroot) parent dnode */ - dnode_secno self; /* pointer to this dnode */ + __le32 self; /* pointer to this dnode */ u8 dirent[2028]; /* one or more dirents */ }; struct hpfs_dirent { - u16 length; /* offset to next dirent */ + __le16 length; /* offset to next dirent */ #ifdef __LITTLE_ENDIAN u8 first: 1; /* set on phony ^A^A (".") entry */ @@ -346,12 +346,12 @@ struct hpfs_dirent { u8 read_only: 1; /* dos attrib */ #endif - fnode_secno fnode; /* fnode giving allocation info */ - time32_t write_date; /* mtime */ - u32 file_size; /* file length, bytes */ - time32_t read_date; /* atime */ - time32_t creation_date; /* ctime */ - u32 ea_size; /* total EA length, bytes */ + __le32 fnode; /* fnode giving allocation info */ + __le32 write_date; /* mtime */ + __le32 file_size; /* file length, bytes */ + __le32 read_date; /* atime */ + __le32 creation_date; /* ctime */ + __le32 ea_size; /* total EA length, bytes */ u8 no_of_acls; /* number of ACL's (low 3 bits) */ u8 ix; /* code page index (of filename), see struct code_page_data */ @@ -375,50 +375,36 @@ struct hpfs_dirent { struct bplus_leaf_node { - u32 file_secno; /* first file sector in extent */ - u32 length; /* length, sectors */ - secno disk_secno; /* first corresponding disk sector */ + __le32 file_secno; /* first file sector in extent */ + __le32 length; /* length, sectors */ + __le32 disk_secno; /* first corresponding disk sector */ }; struct bplus_internal_node { - u32 file_secno; /* subtree maps sectors < this */ - anode_secno down; /* pointer to subtree */ + __le32 file_secno; /* subtree maps sectors < this */ + __le32 down; /* pointer to subtree */ }; +enum { + BP_hbff = 1, + BP_fnode_parent = 0x20, + BP_binary_search = 0x40, + BP_internal = 0x80 +}; struct bplus_header { -#ifdef __LITTLE_ENDIAN - u8 hbff: 1; /* high bit of first free entry offset */ - u8 flag1234: 4; - u8 fnode_parent: 1; /* ? 
we're pointed to by an fnode, - the data btree or some ea or the - main ea bootage pointer ea_secno */ - /* also can get set in fnodes, which - may be a chkdsk glitch or may mean - this bit is irrelevant in fnodes, - or this interpretation is all wet */ - u8 binary_search: 1; /* suggest binary search (unused) */ - u8 internal: 1; /* 1 -> (internal) tree of anodes - 0 -> (leaf) list of extents */ -#else - u8 internal: 1; /* 1 -> (internal) tree of anodes - 0 -> (leaf) list of extents */ - u8 binary_search: 1; /* suggest binary search (unused) */ - u8 fnode_parent: 1; /* ? we're pointed to by an fnode, + u8 flags; /* bit 0 - high bit of first free entry offset + bit 5 - we're pointed to by an fnode, the data btree or some ea or the - main ea bootage pointer ea_secno */ - /* also can get set in fnodes, which - may be a chkdsk glitch or may mean - this bit is irrelevant in fnodes, - or this interpretation is all wet */ - u8 flag1234: 4; - u8 hbff: 1; /* high bit of first free entry offset */ -#endif + main ea bootage pointer ea_secno + bit 6 - suggest binary search (unused) + bit 7 - 1 -> (internal) tree of anodes + 0 -> (leaf) list of extents */ u8 fill[3]; u8 n_free_nodes; /* free nodes in following array */ u8 n_used_nodes; /* used nodes in following array */ - u16 first_free; /* offset from start of header to + __le16 first_free; /* offset from start of header to first free node in array */ union { struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving @@ -428,6 +414,16 @@ struct bplus_header } u; }; +static inline bool bp_internal(struct bplus_header *bp) +{ + return bp->flags & BP_internal; +} + +static inline bool bp_fnode_parent(struct bplus_header *bp) +{ + return bp->flags & BP_fnode_parent; +} + /* fnode: root of allocation b+ tree, and EA's */ /* Every file and every directory has one fnode, pointed to by the directory @@ -436,62 +432,56 @@ struct bplus_header #define FNODE_MAGIC 0xf7e40aae +enum {FNODE_anode = cpu_to_le16(2), FNODE_dir = cpu_to_le16(256)}; struct fnode { - u32 magic; /* f7e4 0aae */ - u32 zero1[2]; /* read history */ + __le32 magic; /* f7e4 0aae */ + __le32 zero1[2]; /* read history */ u8 len, name[15]; /* true length, truncated name */ - fnode_secno up; /* pointer to file's directory fnode */ - secno acl_size_l; - secno acl_secno; - u16 acl_size_s; + __le32 up; /* pointer to file's directory fnode */ + __le32 acl_size_l; + __le32 acl_secno; + __le16 acl_size_s; u8 acl_anode; u8 zero2; /* history bit count */ - u32 ea_size_l; /* length of disk-resident ea's */ - secno ea_secno; /* first sector of disk-resident ea's*/ - u16 ea_size_s; /* length of fnode-resident ea's */ - -#ifdef __LITTLE_ENDIAN - u8 flag0: 1; - u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ - u8 flag234567: 6; -#else - u8 flag234567: 6; - u8 ea_anode: 1; /* 1 -> ea_secno is an anode */ - u8 flag0: 1; -#endif + __le32 ea_size_l; /* length of disk-resident ea's */ + __le32 ea_secno; /* first sector of disk-resident ea's*/ + __le16 ea_size_s; /* length of fnode-resident ea's */ -#ifdef __LITTLE_ENDIAN - u8 dirflag: 1; /* 1 -> directory. first & only extent - points to dnode. */ - u8 flag9012345: 7; -#else - u8 flag9012345: 7; - u8 dirflag: 1; /* 1 -> directory. first & only extent + __le16 flags; /* bit 1 set -> ea_secno is an anode */ + /* bit 8 set -> directory. first & only extent points to dnode. 
*/ -#endif - struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ union { struct bplus_leaf_node external[8]; struct bplus_internal_node internal[12]; } u; - u32 file_size; /* file length, bytes */ - u32 n_needea; /* number of EA's with NEEDEA set */ + __le32 file_size; /* file length, bytes */ + __le32 n_needea; /* number of EA's with NEEDEA set */ u8 user_id[16]; /* unused */ - u16 ea_offs; /* offset from start of fnode + __le16 ea_offs; /* offset from start of fnode to first fnode-resident ea */ u8 dasd_limit_treshhold; u8 dasd_limit_delta; - u32 dasd_limit; - u32 dasd_usage; + __le32 dasd_limit; + __le32 dasd_usage; u8 ea[316]; /* zero or more EA's, packed together with no alignment padding. (Do not use this name, get here via fnode + ea_offs. I think.) */ }; +static inline bool fnode_in_anode(struct fnode *p) +{ + return (p->flags & FNODE_anode) != 0; +} + +static inline bool fnode_is_dir(struct fnode *p) +{ + return (p->flags & FNODE_dir) != 0; +} + /* anode: 99.44% pure allocation tree */ @@ -499,9 +489,9 @@ struct fnode struct anode { - u32 magic; /* 37e4 0aae */ - anode_secno self; /* pointer to this anode */ - secno up; /* parent anode or fnode */ + __le32 magic; /* 37e4 0aae */ + __le32 self; /* pointer to this anode */ + __le32 up; /* parent anode or fnode */ struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */ union { @@ -509,7 +499,7 @@ struct anode struct bplus_internal_node internal[60]; } u; - u32 fill[3]; /* unused */ + __le32 fill[3]; /* unused */ }; @@ -528,32 +518,23 @@ struct anode run, or in multiple runs. Flags in the fnode tell whether the EA list is immediate, in a single run, or in multiple runs. */ +enum {EA_indirect = 1, EA_anode = 2, EA_needea = 128 }; struct extended_attribute { -#ifdef __LITTLE_ENDIAN - u8 indirect: 1; /* 1 -> value gives sector number + u8 flags; /* bit 0 set -> value gives sector number where real value starts */ - u8 anode: 1; /* 1 -> sector is an anode + /* bit 1 set -> sector is an anode that points to fragmented value */ - u8 flag23456: 5; - u8 needea: 1; /* required ea */ -#else - u8 needea: 1; /* required ea */ - u8 flag23456: 5; - u8 anode: 1; /* 1 -> sector is an anode - that points to fragmented value */ - u8 indirect: 1; /* 1 -> value gives sector number - where real value starts */ -#endif + /* bit 7 set -> required ea */ u8 namelen; /* length of name, bytes */ u8 valuelen_lo; /* length of value, bytes */ u8 valuelen_hi; /* length of value, bytes */ - u8 name[0]; + u8 name[]; /* u8 name[namelen]; ascii attrib name u8 nul; terminating '\0', not counted u8 value[valuelen]; value, arbitrary - if this.indirect, valuelen is 8 and the value is + if this.flags & 1, valuelen is 8 and the value is u32 length; real length of value, bytes secno secno; sector address where it starts if this.anode, the above sector number is the root of an anode tree @@ -561,6 +542,16 @@ struct extended_attribute */ }; +static inline bool ea_indirect(struct extended_attribute *ea) +{ + return ea->flags & EA_indirect; +} + +static inline bool ea_in_anode(struct extended_attribute *ea) +{ + return ea->flags & EA_anode; +} + /* Local Variables: comment-column: 40 diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 6d2d5008fa43..c07ef1f1ced6 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -75,7 +75,7 @@ struct hpfs_sb_info { unsigned char *sb_cp_table; /* code page tables: */ /* 128 bytes uppercasing table & */ /* 128 bytes lowercasing table */ - unsigned *sb_bmp_dir; /* main bitmap directory */ + __le32 *sb_bmp_dir; /* 
main bitmap directory */ unsigned sb_c_bitmap; /* current bitmap */ unsigned sb_max_fwd_alloc; /* max forwad allocation */ int sb_timeshift; @@ -93,7 +93,7 @@ struct quad_buffer_head { static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) { CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); - return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4)); + return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4)); } /* The first dir entry in a dnode */ @@ -141,12 +141,12 @@ static inline struct extended_attribute *next_ea(struct extended_attribute *ea) static inline secno ea_sec(struct extended_attribute *ea) { - return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen))); + return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen))); } static inline secno ea_len(struct extended_attribute *ea) { - return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen))); + return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen))); } static inline char *ea_data(struct extended_attribute *ea) @@ -171,7 +171,7 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src) dst->not_8x3 = n; } -static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) +static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n) { int i; if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; @@ -268,10 +268,10 @@ void hpfs_evict_inode(struct inode *); /* map.c */ -unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); -unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); +__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); +__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); unsigned char *hpfs_load_code_page(struct super_block *, secno); -secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp); +__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp); struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **); struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *); diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index b43066cbdc6a..ed671e0ea784 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -110,7 +110,7 @@ void hpfs_read_inode(struct inode *i) } } } - if (fnode->dirflag) { + if (fnode_is_dir(fnode)) { int n_dnodes, n_subdirs; i->i_mode |= S_IFDIR; i->i_op = &hpfs_dir_iops; diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index a790821366a7..4acb19d78359 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -8,12 +8,12 @@ #include "hpfs_fn.h" -unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh) +__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh) { return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0); } -unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, +__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, struct quad_buffer_head *qbh, char *id) { secno sec; @@ -89,18 +89,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) return cp_table; } -secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp) +__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp) { struct buffer_head *bh; int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) 
>> 21; int i; - secno *b; + __le32 *b; if (!(b = kmalloc(n * 512, GFP_KERNEL))) { printk("HPFS: can't allocate memory for bitmap directory\n"); return NULL; } for (i=0;i<n;i++) { - secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); + __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); if (!d) { kfree(b); return NULL; @@ -130,16 +130,16 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea (unsigned long)ino); goto bail; } - if (!fnode->dirflag) { + if (!fnode_is_dir(fnode)) { if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != - (fnode->btree.internal ? 12 : 8)) { + (bp_internal(&fnode->btree) ? 12 : 8)) { hpfs_error(s, "bad number of nodes in fnode %08lx", (unsigned long)ino); goto bail; } if (le16_to_cpu(fnode->btree.first_free) != - 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { + 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in fnode %08lx", (unsigned long)ino); @@ -187,12 +187,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff goto bail; } if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != - (anode->btree.internal ? 60 : 40)) { + (bp_internal(&anode->btree) ? 60 : 40)) { hpfs_error(s, "bad number of nodes in anode %08x", ano); goto bail; } if (le16_to_cpu(anode->btree.first_free) != - 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) { + 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) { hpfs_error(s, "bad first_free pointer in anode %08x", ano); goto bail; } diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 30dd7b10b507..9083ef8af58c 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -70,7 +70,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) fnode->len = len; memcpy(fnode->name, name, len > 15 ? 15 : len); fnode->up = cpu_to_le32(dir->i_ino); - fnode->dirflag = 1; + fnode->flags |= FNODE_dir; fnode->btree.n_free_nodes = 7; fnode->btree.n_used_nodes = 1; fnode->btree.first_free = cpu_to_le16(0x14); diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 54f6eccb79d9..706a12c083ea 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -572,7 +572,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) mark_buffer_dirty(bh2); } - if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) { + if (spareblock->hotfixes_used || spareblock->n_spares_used) { if (errs >= 2) { printk("HPFS: Hotfixes not supported here, try chkdsk\n"); mark_dirty(s, 0); @@ -645,7 +645,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) root->i_mtime.tv_nsec = 0; root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date)); root->i_ctime.tv_nsec = 0; - hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size); + hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size); hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; diff --git a/fs/inode.c b/fs/inode.c index c474c1d7062b..c99163b1b310 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1487,10 +1487,30 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, return 0; } +/* + * This does the actual work of updating an inodes time or version. Must have + * had called mnt_want_write() before calling this. 
+ */ +static int update_time(struct inode *inode, struct timespec *time, int flags) +{ + if (inode->i_op->update_time) + return inode->i_op->update_time(inode, time, flags); + + if (flags & S_ATIME) + inode->i_atime = *time; + if (flags & S_VERSION) + inode_inc_iversion(inode); + if (flags & S_CTIME) + inode->i_ctime = *time; + if (flags & S_MTIME) + inode->i_mtime = *time; + mark_inode_dirty_sync(inode); + return 0; +} + /** * touch_atime - update the access time - * @mnt: mount the inode is accessed on - * @dentry: dentry accessed + * @path: the &struct path to update * * Update the accessed time on an inode and mark it for writeback. * This function automatically handles read only file systems and media, @@ -1525,12 +1545,83 @@ void touch_atime(struct path *path) if (mnt_want_write(mnt)) return; - inode->i_atime = now; - mark_inode_dirty_sync(inode); + /* + * File systems can error out when updating inodes if they need to + * allocate new space to modify an inode (such is the case for + * Btrfs), but since we touch atime while walking down the path we + * really don't care if we failed to update the atime of the file, + * so just ignore the return value. + */ + update_time(inode, &now, S_ATIME); mnt_drop_write(mnt); } EXPORT_SYMBOL(touch_atime); +/* + * The logic we want is + * + * if suid or (sgid and xgrp) + * remove privs + */ +int should_remove_suid(struct dentry *dentry) +{ + umode_t mode = dentry->d_inode->i_mode; + int kill = 0; + + /* suid always must be killed */ + if (unlikely(mode & S_ISUID)) + kill = ATTR_KILL_SUID; + + /* + * sgid without any exec bits is just a mandatory locking mark; leave + * it alone. If some exec bits are set, it's a real sgid; kill it. + */ + if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) + kill |= ATTR_KILL_SGID; + + if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) + return kill; + + return 0; +} +EXPORT_SYMBOL(should_remove_suid); + +static int __remove_suid(struct dentry *dentry, int kill) +{ + struct iattr newattrs; + + newattrs.ia_valid = ATTR_FORCE | kill; + return notify_change(dentry, &newattrs); +} + +int file_remove_suid(struct file *file) +{ + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = dentry->d_inode; + int killsuid; + int killpriv; + int error = 0; + + /* Fast path for nothing security related */ + if (IS_NOSEC(inode)) + return 0; + + killsuid = should_remove_suid(dentry); + killpriv = security_inode_need_killpriv(dentry); + + if (killpriv < 0) + return killpriv; + if (killpriv) + error = security_inode_killpriv(dentry); + if (!error && killsuid) + error = __remove_suid(dentry, killsuid); + if (!error && (inode->i_sb->s_flags & MS_NOSEC)) + inode->i_flags |= S_NOSEC; + + return error; +} +EXPORT_SYMBOL(file_remove_suid); + /** * file_update_time - update mtime and ctime time * @file: file accessed @@ -1540,18 +1631,20 @@ EXPORT_SYMBOL(touch_atime); * usage in the file write path of filesystems, and filesystems may * choose to explicitly ignore update via this function with the * S_NOCMTIME inode flag, e.g. for network filesystem where these - * timestamps are handled by the server. + * timestamps are handled by the server. This can return an error for + * file systems who need to allocate space in order to update an inode. 
*/ -void file_update_time(struct file *file) +int file_update_time(struct file *file) { struct inode *inode = file->f_path.dentry->d_inode; struct timespec now; - enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; + int sync_it = 0; + int ret; /* First try to exhaust all avenues to not sync */ if (IS_NOCMTIME(inode)) - return; + return 0; now = current_fs_time(inode->i_sb); if (!timespec_equal(&inode->i_mtime, &now)) @@ -1564,21 +1657,16 @@ void file_update_time(struct file *file) sync_it |= S_VERSION; if (!sync_it) - return; + return 0; /* Finally allowed to write? Takes lock. */ if (mnt_want_write_file(file)) - return; + return 0; - /* Only change inode inside the lock region */ - if (sync_it & S_VERSION) - inode_inc_iversion(inode); - if (sync_it & S_CTIME) - inode->i_ctime = now; - if (sync_it & S_MTIME) - inode->i_mtime = now; - mark_inode_dirty_sync(inode); + ret = update_time(inode, &now, sync_it); mnt_drop_write_file(file); + + return ret; } EXPORT_SYMBOL(file_update_time); diff --git a/fs/internal.h b/fs/internal.h index 9962c59ba280..18bc216ea09d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -56,7 +56,7 @@ extern int sb_prepare_remount_readonly(struct super_block *); extern void __init mnt_init(void); -DECLARE_BRLOCK(vfsmount_lock); +extern struct lglock vfsmount_lock; /* @@ -100,6 +100,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, extern long do_handle_open(int mountdirfd, struct file_handle __user *ufh, int open_flag); +extern int open_check_o_direct(struct file *f); /* * inode.c diff --git a/fs/isofs/export.c b/fs/isofs/export.c index dd4687ff30d0..aa4356d09eee 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -107,12 +107,11 @@ static struct dentry *isofs_export_get_parent(struct dentry *child) } static int -isofs_export_encode_fh(struct dentry *dentry, +isofs_export_encode_fh(struct inode *inode, __u32 *fh32, int *max_len, - int connectable) + struct inode *parent) { - struct inode * inode = dentry->d_inode; struct iso_inode_info * ei = ISOFS_I(inode); int len = *max_len; int type = 1; @@ -124,7 +123,7 @@ isofs_export_encode_fh(struct dentry *dentry, * offset of the inode and the upper 16 bits of fh32[1] to * hold the offset of the parent. */ - if (connectable && (len < 5)) { + if (parent && (len < 5)) { *max_len = 5; return 255; } else if (len < 3) { @@ -136,16 +135,12 @@ isofs_export_encode_fh(struct dentry *dentry, fh32[0] = ei->i_iget5_block; fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */ fh32[2] = inode->i_generation; - if (connectable && !S_ISDIR(inode->i_mode)) { - struct inode *parent; + if (parent) { struct iso_inode_info *eparent; - spin_lock(&dentry->d_lock); - parent = dentry->d_parent->d_inode; eparent = ISOFS_I(parent); fh32[3] = eparent->i_iget5_block; fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */ fh32[4] = parent->i_generation; - spin_unlock(&dentry->d_lock); len = 5; type = 2; } diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index f32f346f4b0a..69a48c2944da 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig @@ -1,6 +1,8 @@ config JBD2 tristate select CRC32 + select CRYPTO + select CRYPTO_CRC32C help This is a generic journaling layer for block devices that support both 32-bit and 64-bit block numbers. 
It is currently used by diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 840f70f50792..216f4299f65e 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -85,6 +85,24 @@ nope: __brelse(bh); } +static void jbd2_commit_block_csum_set(journal_t *j, + struct journal_head *descriptor) +{ + struct commit_header *h; + __u32 csum; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return; + + h = (struct commit_header *)(jh2bh(descriptor)->b_data); + h->h_chksum_type = 0; + h->h_chksum_size = 0; + h->h_chksum[0] = 0; + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); + h->h_chksum[0] = cpu_to_be32(csum); +} + /* * Done it all: now submit the commit record. We should have * cleaned up our previous buffers by now, so if we are in abort @@ -128,6 +146,7 @@ static int journal_submit_commit_record(journal_t *journal, tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE; tmp->h_chksum[0] = cpu_to_be32(crc32_sum); } + jbd2_commit_block_csum_set(journal, descriptor); JBUFFER_TRACE(descriptor, "submit commit block"); lock_buffer(bh); @@ -301,6 +320,44 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag, tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1); } +static void jbd2_descr_block_csum_set(journal_t *j, + struct journal_head *descriptor) +{ + struct jbd2_journal_block_tail *tail; + __u32 csum; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return; + + tail = (struct jbd2_journal_block_tail *) + (jh2bh(descriptor)->b_data + j->j_blocksize - + sizeof(struct jbd2_journal_block_tail)); + tail->t_checksum = 0; + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); + tail->t_checksum = cpu_to_be32(csum); +} + +static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, + struct buffer_head *bh, __u32 sequence) +{ + struct page *page = bh->b_page; + __u8 *addr; + __u32 csum; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return; + + sequence = cpu_to_be32(sequence); + addr = kmap_atomic(page, KM_USER0); + csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data), + bh->b_size); + kunmap_atomic(addr, KM_USER0); + + tag->t_checksum = cpu_to_be32(csum); +} /* * jbd2_journal_commit_transaction * @@ -334,6 +391,10 @@ void jbd2_journal_commit_transaction(journal_t *journal) unsigned long first_block; tid_t first_tid; int update_tail; + int csum_size = 0; + + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + csum_size = sizeof(struct jbd2_journal_block_tail); /* * First job: lock down the current transaction and wait for @@ -627,7 +688,9 @@ void jbd2_journal_commit_transaction(journal_t *journal) tag = (journal_block_tag_t *) tagp; write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); - tag->t_flags = cpu_to_be32(tag_flag); + tag->t_flags = cpu_to_be16(tag_flag); + jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh), + commit_transaction->t_tid); tagp += tag_bytes; space_left -= tag_bytes; @@ -643,7 +706,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) if (bufs == journal->j_wbufsize || commit_transaction->t_buffers == NULL || - space_left < tag_bytes + 16) { + space_left < tag_bytes + 16 + csum_size) { jbd_debug(4, "JBD2: Submit %d IOs\n", bufs); @@ -651,8 +714,9 @@ void jbd2_journal_commit_transaction(journal_t *journal) submitting the IOs. "tag" still points to the last tag we set up. 
*/ - tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG); + tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG); + jbd2_descr_block_csum_set(journal, descriptor); start_journal_io: for (i = 0; i < bufs; i++) { struct buffer_head *bh = wbuf[i]; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 1afb701622b0..e9a3c4c85594 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -97,6 +97,43 @@ EXPORT_SYMBOL(jbd2_inode_cache); static void __journal_abort_soft (journal_t *journal, int errno); static int jbd2_journal_create_slab(size_t slab_size); +/* Checksumming functions */ +int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb) +{ + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + return sb->s_checksum_type == JBD2_CRC32C_CHKSUM; +} + +static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb) +{ + __u32 csum, old_csum; + + old_csum = sb->s_checksum; + sb->s_checksum = 0; + csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t)); + sb->s_checksum = old_csum; + + return cpu_to_be32(csum); +} + +int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb) +{ + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + return sb->s_checksum == jbd2_superblock_csum(j, sb); +} + +void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb) +{ + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return; + + sb->s_checksum = jbd2_superblock_csum(j, sb); +} + /* * Helper function used to manage commit timeouts */ @@ -1348,6 +1385,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal) jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", journal->j_errno); sb->s_errno = cpu_to_be32(journal->j_errno); + jbd2_superblock_csum_set(journal, sb); read_unlock(&journal->j_state_lock); jbd2_write_superblock(journal, WRITE_SYNC); @@ -1376,6 +1414,9 @@ static int journal_get_superblock(journal_t *journal) } } + if (buffer_verified(bh)) + return 0; + sb = journal->j_superblock; err = -EINVAL; @@ -1413,6 +1454,43 @@ static int journal_get_superblock(journal_t *journal) goto out; } + if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && + JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { + /* Can't have checksum v1 and v2 on at the same time! 
*/ + printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 " + "at the same time!\n"); + goto out; + } + + if (!jbd2_verify_csum_type(journal, sb)) { + printk(KERN_ERR "JBD: Unknown checksum type\n"); + goto out; + } + + /* Load the checksum driver */ + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { + journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); + if (IS_ERR(journal->j_chksum_driver)) { + printk(KERN_ERR "JBD: Cannot load crc32c driver.\n"); + err = PTR_ERR(journal->j_chksum_driver); + journal->j_chksum_driver = NULL; + goto out; + } + } + + /* Check superblock checksum */ + if (!jbd2_superblock_csum_verify(journal, sb)) { + printk(KERN_ERR "JBD: journal checksum error\n"); + goto out; + } + + /* Precompute checksum seed for all metadata */ + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, + sizeof(sb->s_uuid)); + + set_buffer_verified(bh); + return 0; out: @@ -1564,6 +1642,8 @@ int jbd2_journal_destroy(journal_t *journal) iput(journal->j_inode); if (journal->j_revoke) jbd2_journal_destroy_revoke(journal); + if (journal->j_chksum_driver) + crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_wbuf); kfree(journal); @@ -1653,6 +1733,10 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com int jbd2_journal_set_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { +#define INCOMPAT_FEATURE_ON(f) \ + ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f))) +#define COMPAT_FEATURE_ON(f) \ + ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f))) journal_superblock_t *sb; if (jbd2_journal_check_used_features(journal, compat, ro, incompat)) @@ -1661,16 +1745,54 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) return 0; + /* Asking for checksumming v2 and v1? Only give them v2. 
*/ + if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 && + compat & JBD2_FEATURE_COMPAT_CHECKSUM) + compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM; + jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", compat, ro, incompat); sb = journal->j_superblock; + /* If enabling v2 checksums, update superblock */ + if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) { + sb->s_checksum_type = JBD2_CRC32C_CHKSUM; + sb->s_feature_compat &= + ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); + + /* Load the checksum driver */ + if (journal->j_chksum_driver == NULL) { + journal->j_chksum_driver = crypto_alloc_shash("crc32c", + 0, 0); + if (IS_ERR(journal->j_chksum_driver)) { + printk(KERN_ERR "JBD: Cannot load crc32c " + "driver.\n"); + journal->j_chksum_driver = NULL; + return 0; + } + } + + /* Precompute checksum seed for all metadata */ + if (JBD2_HAS_INCOMPAT_FEATURE(journal, + JBD2_FEATURE_INCOMPAT_CSUM_V2)) + journal->j_csum_seed = jbd2_chksum(journal, ~0, + sb->s_uuid, + sizeof(sb->s_uuid)); + } + + /* If enabling v1 checksums, downgrade superblock */ + if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM)) + sb->s_feature_incompat &= + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2); + sb->s_feature_compat |= cpu_to_be32(compat); sb->s_feature_ro_compat |= cpu_to_be32(ro); sb->s_feature_incompat |= cpu_to_be32(incompat); return 1; +#undef COMPAT_FEATURE_ON +#undef INCOMPAT_FEATURE_ON } /* @@ -1975,10 +2097,16 @@ int jbd2_journal_blocks_per_page(struct inode *inode) */ size_t journal_tag_bytes(journal_t *journal) { + journal_block_tag_t tag; + size_t x = 0; + + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + x += sizeof(tag.t_checksum); + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) - return JBD2_TAG_SIZE64; + return x + JBD2_TAG_SIZE64; else - return JBD2_TAG_SIZE32; + return x + JBD2_TAG_SIZE32; } /* diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index c1a03354a22f..0131e4362534 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -174,6 +174,25 @@ static int jread(struct buffer_head **bhp, journal_t *journal, return 0; } +static int jbd2_descr_block_csum_verify(journal_t *j, + void *buf) +{ + struct jbd2_journal_block_tail *tail; + __u32 provided, calculated; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize - + sizeof(struct jbd2_journal_block_tail)); + provided = tail->t_checksum; + tail->t_checksum = 0; + calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); + tail->t_checksum = provided; + + provided = be32_to_cpu(provided); + return provided == calculated; +} /* * Count the number of in-use tags in a journal descriptor block. 
@@ -186,6 +205,9 @@ static int count_tags(journal_t *journal, struct buffer_head *bh) int nr = 0, size = journal->j_blocksize; int tag_bytes = journal_tag_bytes(journal); + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + size -= sizeof(struct jbd2_journal_block_tail); + tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) <= size) { @@ -193,10 +215,10 @@ static int count_tags(journal_t *journal, struct buffer_head *bh) nr++; tagp += tag_bytes; - if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID))) + if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID))) tagp += 16; - if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG)) + if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG)) break; } @@ -353,6 +375,41 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh, return 0; } +static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) +{ + struct commit_header *h; + __u32 provided, calculated; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + h = buf; + provided = h->h_chksum[0]; + h->h_chksum[0] = 0; + calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); + h->h_chksum[0] = provided; + + provided = be32_to_cpu(provided); + return provided == calculated; +} + +static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, + void *buf, __u32 sequence) +{ + __u32 provided, calculated; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + sequence = cpu_to_be32(sequence); + calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence, + sizeof(sequence)); + calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize); + provided = be32_to_cpu(tag->t_checksum); + + return provided == cpu_to_be32(calculated); +} + static int do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { @@ -366,6 +423,7 @@ static int do_one_pass(journal_t *journal, int blocktype; int tag_bytes = journal_tag_bytes(journal); __u32 crc32_sum = ~0; /* Transactional Checksums */ + int descr_csum_size = 0; /* * First thing is to establish what we expect to find in the log @@ -451,6 +509,18 @@ static int do_one_pass(journal_t *journal, switch(blocktype) { case JBD2_DESCRIPTOR_BLOCK: + /* Verify checksum first */ + if (JBD2_HAS_INCOMPAT_FEATURE(journal, + JBD2_FEATURE_INCOMPAT_CSUM_V2)) + descr_csum_size = + sizeof(struct jbd2_journal_block_tail); + if (descr_csum_size > 0 && + !jbd2_descr_block_csum_verify(journal, + bh->b_data)) { + err = -EIO; + goto failed; + } + /* If it is a valid descriptor block, replay it * in pass REPLAY; if journal_checksums enabled, then * calculate checksums in PASS_SCAN, otherwise, @@ -481,11 +551,11 @@ static int do_one_pass(journal_t *journal, tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data + tag_bytes) - <= journal->j_blocksize) { + <= journal->j_blocksize - descr_csum_size) { unsigned long io_block; tag = (journal_block_tag_t *) tagp; - flags = be32_to_cpu(tag->t_flags); + flags = be16_to_cpu(tag->t_flags); io_block = next_log_block++; wrap(journal, next_log_block); @@ -516,6 +586,19 @@ static int do_one_pass(journal_t *journal, goto skip_write; } + /* Look for block corruption */ + if (!jbd2_block_tag_csum_verify( + journal, tag, obh->b_data, + be32_to_cpu(tmp->h_sequence))) { + brelse(obh); + success = -EIO; + printk(KERN_ERR "JBD: Invalid " + "checksum recovering " + "block %llu in log\n", + blocknr); + continue; + } + /* Find a buffer for the new * data being restored */ nbh 
= __getblk(journal->j_fs_dev, @@ -650,6 +733,19 @@ static int do_one_pass(journal_t *journal, } crc32_sum = ~0; } + if (pass == PASS_SCAN && + !jbd2_commit_block_csum_verify(journal, + bh->b_data)) { + info->end_transaction = next_commit_ID; + + if (!JBD2_HAS_INCOMPAT_FEATURE(journal, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { + journal->j_failed_commit = + next_commit_ID; + brelse(bh); + break; + } + } brelse(bh); next_commit_ID++; continue; @@ -706,6 +802,25 @@ static int do_one_pass(journal_t *journal, return err; } +static int jbd2_revoke_block_csum_verify(journal_t *j, + void *buf) +{ + struct jbd2_journal_revoke_tail *tail; + __u32 provided, calculated; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return 1; + + tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize - + sizeof(struct jbd2_journal_revoke_tail)); + provided = tail->r_checksum; + tail->r_checksum = 0; + calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize); + tail->r_checksum = provided; + + provided = be32_to_cpu(provided); + return provided == calculated; +} /* Scan a revoke record, marking all blocks mentioned as revoked. */ @@ -720,6 +835,9 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, offset = sizeof(jbd2_journal_revoke_header_t); max = be32_to_cpu(header->r_count); + if (!jbd2_revoke_block_csum_verify(journal, header)) + return -EINVAL; + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) record_len = 8; diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index 6973705d6a3d..f30b80b4ce8b 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -578,6 +578,7 @@ static void write_one_revoke_record(journal_t *journal, struct jbd2_revoke_record_s *record, int write_op) { + int csum_size = 0; struct journal_head *descriptor; int offset; journal_header_t *header; @@ -592,9 +593,13 @@ static void write_one_revoke_record(journal_t *journal, descriptor = *descriptorp; offset = *offsetp; + /* Do we need to leave space at the end for a checksum? */ + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + csum_size = sizeof(struct jbd2_journal_revoke_tail); + /* Make sure we have a descriptor with space left for the record */ if (descriptor) { - if (offset == journal->j_blocksize) { + if (offset >= journal->j_blocksize - csum_size) { flush_descriptor(journal, descriptor, offset, write_op); descriptor = NULL; } @@ -631,6 +636,24 @@ static void write_one_revoke_record(journal_t *journal, *offsetp = offset; } +static void jbd2_revoke_csum_set(journal_t *j, + struct journal_head *descriptor) +{ + struct jbd2_journal_revoke_tail *tail; + __u32 csum; + + if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) + return; + + tail = (struct jbd2_journal_revoke_tail *) + (jh2bh(descriptor)->b_data + j->j_blocksize - + sizeof(struct jbd2_journal_revoke_tail)); + tail->r_checksum = 0; + csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data, + j->j_blocksize); + tail->r_checksum = cpu_to_be32(csum); +} + /* * Flush a revoke descriptor out to the journal. 
If we are aborting, * this is a noop; otherwise we are generating a buffer which needs to @@ -652,6 +675,8 @@ static void flush_descriptor(journal_t *journal, header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data; header->r_count = cpu_to_be32(offset); + jbd2_revoke_csum_set(journal, descriptor); + set_buffer_jwrite(bh); BUFFER_TRACE(bh, "write"); set_buffer_dirty(bh); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ddcd3549c6c2..fb1ab9533b67 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -162,8 +162,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle, alloc_transaction: if (!journal->j_running_transaction) { - new_transaction = kmem_cache_alloc(transaction_cache, - gfp_mask | __GFP_ZERO); + new_transaction = kmem_cache_zalloc(transaction_cache, + gfp_mask); if (!new_transaction) { /* * If __GFP_FS is not present, then we may be diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index 55a0c1dceadf..413ef89c2d1b 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h @@ -32,6 +32,13 @@ struct jffs2_inodirty; struct jffs2_mount_opts { bool override_compr; unsigned int compr; + + /* The size of the reserved pool. The reserved pool is the JFFS2 flash + * space which may only be used by root and cannot be used by the other + * users. This is implemented simply by means of not allowing the + * latter users to write to the file system if the amount of the + * available space is less than 'rp_size'. */ + unsigned int rp_size; }; /* A struct for the overall file system control. Pointers to @@ -126,6 +133,10 @@ struct jffs2_sb_info { struct jffs2_inodirty *wbuf_inodes; struct rw_semaphore wbuf_sem; /* Protects the write buffer */ + struct delayed_work wbuf_dwork; /* write-buffer write-out work */ + int wbuf_queued; /* non-zero if delayed work is queued */ + spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and wbuf_queued */ + unsigned char *oobbuf; int oobavail; /* How many bytes are available for JFFS2 in OOB */ #endif diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 6784d1e7a7eb..0c96eb52c797 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -18,6 +18,37 @@ #include "nodelist.h" #include "debug.h" +/* + * Check whether the user is allowed to write. + */ +static int jffs2_rp_can_write(struct jffs2_sb_info *c) +{ + uint32_t avail; + struct jffs2_mount_opts *opts = &c->mount_opts; + + avail = c->dirty_size + c->free_size + c->unchecked_size + + c->erasing_size - c->resv_blocks_write * c->sector_size + - c->nospc_dirty_size; + + if (avail < 2 * opts->rp_size) + jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, " + "erasing_size %u, unchecked_size %u, " + "nr_erasing_blocks %u, avail %u, resrv %u\n", + opts->rp_size, c->dirty_size, c->free_size, + c->erasing_size, c->unchecked_size, + c->nr_erasing_blocks, avail, c->nospc_dirty_size); + + if (avail > opts->rp_size) + return 1; + + /* Always allow root */ + if (capable(CAP_SYS_RESOURCE)) + return 1; + + jffs2_dbg(1, "forbid writing\n"); + return 0; +} + /** * jffs2_reserve_space - request physical space to write nodes to flash * @c: superblock info @@ -55,6 +86,15 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, spin_lock(&c->erase_completion_lock); + /* + * Check if the free space is greater than the size of the reserved pool. + * If not, only allow root to proceed with writing.
+ */ + if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) { + ret = -ENOSPC; + goto out; + } + /* this needs a little more thought (true <tglx> :)) */ while(ret == -EAGAIN) { while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { @@ -158,6 +198,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); } } + +out: spin_unlock(&c->erase_completion_lock); if (!ret) ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index 1cd3aec9d9ae..bcd983d7e7f9 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) #define jffs2_ubivol(c) (0) #define jffs2_ubivol_setup(c) (0) #define jffs2_ubivol_cleanup(c) do {} while (0) +#define jffs2_dirty_trigger(c) do {} while (0) #else /* NAND and/or ECC'd NOR support present */ @@ -135,14 +136,10 @@ void jffs2_ubivol_cleanup(struct jffs2_sb_info *c); #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE)) int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); +void jffs2_dirty_trigger(struct jffs2_sb_info *c); #endif /* WRITEBUFFER */ -static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c) -{ - OFNI_BS_2SFFJ(c)->s_dirt = 1; -} - /* background.c */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c); void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index dc0437e84763..1ea349fff68b 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1266,19 +1266,25 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, /* Symlink's inode data is the target path. Read it and * keep in RAM to facilitate quick follow symlink * operation. 
*/ - f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); + uint32_t csize = je32_to_cpu(latest_node->csize); + if (csize > JFFS2_MAX_NAME_LEN) { + mutex_unlock(&f->sem); + jffs2_do_clear_inode(c, f); + return -ENAMETOOLONG; + } + f->target = kmalloc(csize + 1, GFP_KERNEL); if (!f->target) { - JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize)); + JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize); mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -ENOMEM; } ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), - je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); + csize, &retlen, (char *)f->target); - if (ret || retlen != je32_to_cpu(latest_node->csize)) { - if (retlen != je32_to_cpu(latest_node->csize)) + if (ret || retlen != csize) { + if (retlen != csize) ret = -EIO; kfree(f->target); f->target = NULL; @@ -1287,7 +1293,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, return ret; } - f->target[je32_to_cpu(latest_node->csize)] = '\0'; + f->target[csize] = '\0'; dbg_readinode("symlink's target '%s' cached\n", f->target); } @@ -1415,6 +1421,7 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); } + jffs2_xattr_do_crccheck_inode(c, ic); kfree (f); return ret; } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f9916f312bd8..61ea41389f90 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -63,21 +63,6 @@ static void jffs2_i_init_once(void *foo) inode_init_once(&f->vfs_inode); } -static void jffs2_write_super(struct super_block *sb) -{ - struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - - lock_super(sb); - sb->s_dirt = 0; - - if (!(sb->s_flags & MS_RDONLY)) { - jffs2_dbg(1, "%s()\n", __func__); - jffs2_flush_wbuf_gc(c, 0); - } - - unlock_super(sb); -} - static const char *jffs2_compr_name(unsigned int compr) { switch (compr) { @@ -105,6 +90,8 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root) if (opts->override_compr) seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); + if (opts->rp_size) + seq_printf(s, ",rp_size=%u", opts->rp_size / 1024); return 0; } @@ -113,8 +100,6 @@ static int jffs2_sync_fs(struct super_block *sb, int wait) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - jffs2_write_super(sb); - mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); mutex_unlock(&c->alloc_sem); @@ -171,15 +156,18 @@ static const struct export_operations jffs2_export_ops = { * JFFS2 mount options. 
* * Opt_override_compr: override default compressor + * Opt_rp_size: size of reserved pool in KiB * Opt_err: just end of array marker */ enum { Opt_override_compr, + Opt_rp_size, Opt_err, }; static const match_table_t tokens = { {Opt_override_compr, "compr=%s"}, + {Opt_rp_size, "rp_size=%u"}, {Opt_err, NULL}, }; @@ -187,6 +175,7 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) { substring_t args[MAX_OPT_ARGS]; char *p, *name; + unsigned int opt; if (!data) return 0; @@ -224,6 +213,17 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) kfree(name); c->mount_opts.override_compr = true; break; + case Opt_rp_size: + if (match_int(&args[0], &opt)) + return -EINVAL; + opt *= 1024; + if (opt > c->mtd->size) { + pr_warn("Too large reserve pool specified, max " + "is %llu KB\n", c->mtd->size / 1024); + return -EINVAL; + } + c->mount_opts.rp_size = opt; + break; default: pr_err("Error: unrecognized mount option '%s' or missing value\n", p); @@ -251,7 +251,6 @@ static const struct super_operations jffs2_super_operations = .alloc_inode = jffs2_alloc_inode, .destroy_inode =jffs2_destroy_inode, .put_super = jffs2_put_super, - .write_super = jffs2_write_super, .statfs = jffs2_statfs, .remount_fs = jffs2_remount_fs, .evict_inode = jffs2_evict_inode, @@ -319,9 +318,6 @@ static void jffs2_put_super (struct super_block *sb) jffs2_dbg(2, "%s()\n", __func__); - if (sb->s_dirt) - jffs2_write_super(sb); - mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); mutex_unlock(&c->alloc_sem); diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 74d9be19df3f..6f4529d3697f 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -20,6 +20,7 @@ #include <linux/mtd/nand.h> #include <linux/jiffies.h> #include <linux/sched.h> +#include <linux/writeback.h> #include "nodelist.h" @@ -85,7 +86,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *new; - /* Mark the superblock dirty so that kupdated will flush... 
*/ + /* Schedule delayed write-buffer write-out */ jffs2_dirty_trigger(c); if (jffs2_wbuf_pending_for_ino(c, ino)) @@ -1148,6 +1149,47 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * return 1; } +static struct jffs2_sb_info *work_to_sb(struct work_struct *work) +{ + struct delayed_work *dwork; + + dwork = container_of(work, struct delayed_work, work); + return container_of(dwork, struct jffs2_sb_info, wbuf_dwork); +} + +static void delayed_wbuf_sync(struct work_struct *work) +{ + struct jffs2_sb_info *c = work_to_sb(work); + struct super_block *sb = OFNI_BS_2SFFJ(c); + + spin_lock(&c->wbuf_dwork_lock); + c->wbuf_queued = 0; + spin_unlock(&c->wbuf_dwork_lock); + + if (!(sb->s_flags & MS_RDONLY)) { + jffs2_dbg(1, "%s()\n", __func__); + jffs2_flush_wbuf_gc(c, 0); + } +} + +void jffs2_dirty_trigger(struct jffs2_sb_info *c) +{ + struct super_block *sb = OFNI_BS_2SFFJ(c); + unsigned long delay; + + if (sb->s_flags & MS_RDONLY) + return; + + spin_lock(&c->wbuf_dwork_lock); + if (!c->wbuf_queued) { + jffs2_dbg(1, "%s()\n", __func__); + delay = msecs_to_jiffies(dirty_writeback_interval * 10); + queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay); + c->wbuf_queued = 1; + } + spin_unlock(&c->wbuf_dwork_lock); +} + int jffs2_nand_flash_setup(struct jffs2_sb_info *c) { struct nand_ecclayout *oinfo = c->mtd->ecclayout; @@ -1169,6 +1211,8 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) /* Initialise write buffer */ init_rwsem(&c->wbuf_sem); + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; @@ -1207,8 +1251,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); - - + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->erasesize; /* Find a suitable c->sector_size @@ -1267,6 +1311,9 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; @@ -1299,6 +1346,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) { return 0; init_rwsem(&c->wbuf_sem); + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index b55b803eddcb..3034e970eb9a 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -11,6 +11,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define JFFS2_XATTR_IS_CORRUPTED 1 + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> @@ -153,7 +155,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rx.hdr_crc), crc); xd->flags |= JFFS2_XFLAGS_INVALID; - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; } totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK @@ -169,7 +171,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat je32_to_cpu(rx.xid), xd->xid, je32_to_cpu(rx.version), xd->version); xd->flags |= JFFS2_XFLAGS_INVALID; - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; } xd->xprefix = rx.xprefix; xd->name_len = 
rx.name_len; @@ -227,12 +229,12 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum data[xd->name_len] = '\0'; crc = crc32(0, data, length); if (crc != xd->data_crc) { - JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)" + JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)" " at %#08x, read: 0x%08x calculated: 0x%08x\n", ref_offset(xd->node), xd->data_crc, crc); kfree(data); xd->flags |= JFFS2_XFLAGS_INVALID; - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; } xd->flags |= JFFS2_XFLAGS_HOT; @@ -270,7 +272,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x if (xd->xname) return 0; if (xd->flags & JFFS2_XFLAGS_INVALID) - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; if (unlikely(is_xattr_datum_unchecked(c, xd))) rc = do_verify_xattr_datum(c, xd); if (!rc) @@ -435,6 +437,8 @@ static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datu * is called to release xattr related objects when unmounting. * check_xattr_ref_inode(c, ic) * is used to confirm inode does not have duplicate xattr name/value pair. + * jffs2_xattr_do_crccheck_inode(c, ic) + * is used to force xattr data integrity check during the initial gc scan. * -------------------------------------------------- */ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) { @@ -462,7 +466,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref if (crc != je32_to_cpu(rr.node_crc)) { JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rr.node_crc), crc); - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; } if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF @@ -472,7 +476,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, je32_to_cpu(rr.totlen), PAD(sizeof(rr))); - return -EIO; + return JFFS2_XATTR_IS_CORRUPTED; } ref->ino = je32_to_cpu(rr.ino); ref->xid = je32_to_cpu(rr.xid); @@ -682,6 +686,11 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac return rc; } +void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +{ + check_xattr_ref_inode(c, ic); +} + /* -------- xattr subsystem functions --------------- * jffs2_init_xattr_subsystem(c) * is used to initialize semaphore and list_head, and some variables. 
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h index 7be4beb306f3..467ff376ee26 100644 --- a/fs/jffs2/xattr.h +++ b/fs/jffs2/xattr.h @@ -77,6 +77,7 @@ extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c); extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, uint32_t xid, uint32_t version); +extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); @@ -108,6 +109,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t); #define jffs2_build_xattr_subsystem(c) #define jffs2_clear_xattr_subsystem(c) +#define jffs2_xattr_do_crccheck_inode(c, ic) #define jffs2_xattr_delete_inode(c, ic) #define jffs2_xattr_free_inode(c, ic) #define jffs2_verify_xattr(c) (1) diff --git a/fs/locks.c b/fs/locks.c index 4f441e46cef4..814c51d0de47 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1636,12 +1636,13 @@ EXPORT_SYMBOL(flock_lock_file_wait); SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) { struct file *filp; + int fput_needed; struct file_lock *lock; int can_sleep, unlock; int error; error = -EBADF; - filp = fget(fd); + filp = fget_light(fd, &fput_needed); if (!filp) goto out; @@ -1674,7 +1675,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) locks_free_lock(lock); out_putf: - fput(filp); + fput_light(filp, fput_needed); out: return error; } diff --git a/fs/namei.c b/fs/namei.c index c651f02c9fec..7d694194024a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -449,7 +449,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) mntget(nd->path.mnt); rcu_read_unlock(); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; @@ -507,14 +507,14 @@ static int complete_walk(struct nameidata *nd) if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { spin_unlock(&dentry->d_lock); rcu_read_unlock(); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return -ECHILD; } BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); mntget(nd->path.mnt); rcu_read_unlock(); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); } if (likely(!(nd->flags & LOOKUP_JUMPED))) @@ -681,15 +681,15 @@ int follow_up(struct path *path) struct mount *parent; struct dentry *mountpoint; - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); parent = mnt->mnt_parent; if (&parent->mnt == path->mnt) { - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); @@ -947,7 +947,7 @@ failed: if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return -ECHILD; } @@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name, * small and for now I'd prefer to have fast path as straight as possible. * It _is_ time-critical. 
*/ -static int do_lookup(struct nameidata *nd, struct qstr *name, - struct path *path, struct inode **inode) +static int lookup_fast(struct nameidata *nd, struct qstr *name, + struct path *path, struct inode **inode) { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; @@ -1208,7 +1208,7 @@ unlazy: goto need_lookup; } } -done: + path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd->flags); @@ -1222,6 +1222,17 @@ done: return 0; need_lookup: + return 1; +} + +/* Fast lookup failed, do it the slow way */ +static int lookup_slow(struct nameidata *nd, struct qstr *name, + struct path *path) +{ + struct dentry *dentry, *parent; + int err; + + parent = nd->path.dentry; BUG_ON(nd->inode != parent->d_inode); mutex_lock(&parent->d_inode->i_mutex); @@ -1229,7 +1240,16 @@ need_lookup: mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(dentry)) return PTR_ERR(dentry); - goto done; + path->mnt = nd->path.mnt; + path->dentry = dentry; + err = follow_managed(path, nd->flags); + if (unlikely(err < 0)) { + path_put_conditional(path, nd); + return err; + } + if (err) + nd->flags |= LOOKUP_JUMPED; + return 0; } static inline int may_lookup(struct nameidata *nd) @@ -1265,7 +1285,7 @@ static void terminate_walk(struct nameidata *nd) if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); } } @@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path, */ if (unlikely(type != LAST_NORM)) return handle_dots(nd, type); - err = do_lookup(nd, name, path, &inode); + err = lookup_fast(nd, name, path, &inode); if (unlikely(err)) { - terminate_walk(nd); - return err; - } - if (!inode) { - path_to_nameidata(path, nd); - terminate_walk(nd); - return -ENOENT; + if (err < 0) + goto out_err; + + err = lookup_slow(nd, name, path); + if (err < 0) + goto out_err; + + inode = path->dentry->d_inode; } + err = -ENOENT; + if (!inode) + goto out_path_put; + if (should_follow_link(inode, follow)) { if (nd->flags & LOOKUP_RCU) { if (unlikely(unlazy_walk(nd, path->dentry))) { - terminate_walk(nd); - return -ECHILD; + err = -ECHILD; + goto out_err; } } BUG_ON(inode != path->dentry->d_inode); @@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path, path_to_nameidata(path, nd); nd->inode = inode; return 0; + +out_path_put: + path_to_nameidata(path, nd); +out_err: + terminate_walk(nd); + return err; } /* @@ -1620,7 +1651,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } else { @@ -1633,7 +1664,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (*name=='/') { if (flags & LOOKUP_RCU) { - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); rcu_read_lock(); set_root_rcu(nd); } else { @@ -1646,7 +1677,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, struct fs_struct *fs = current->fs; unsigned seq; - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); rcu_read_lock(); do { @@ -1682,7 +1713,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (fput_needed) *fp = file; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); rcu_read_lock(); } else { 
path_get(&file->f_path); @@ -2169,6 +2200,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path, int want_write = 0; int acc_mode = op->acc_mode; struct file *filp; + struct inode *inode; + int symlink_ok = 0; + struct path save_parent = { .dentry = NULL, .mnt = NULL }; + bool retried = false; int error; nd->flags &= ~LOOKUP_PARENT; @@ -2200,30 +2235,23 @@ static struct file *do_last(struct nameidata *nd, struct path *path, } if (!(open_flag & O_CREAT)) { - int symlink_ok = 0; if (nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) symlink_ok = 1; /* we _can_ be in RCU mode here */ - error = walk_component(nd, path, &nd->last, LAST_NORM, - !symlink_ok); - if (error < 0) - return ERR_PTR(error); - if (error) /* symlink */ - return NULL; - /* sayonara */ - error = complete_walk(nd); - if (error) - return ERR_PTR(error); + error = lookup_fast(nd, &nd->last, path, &inode); + if (unlikely(error)) { + if (error < 0) + goto exit; - error = -ENOTDIR; - if (nd->flags & LOOKUP_DIRECTORY) { - if (!nd->inode->i_op->lookup) + error = lookup_slow(nd, &nd->last, path); + if (error < 0) goto exit; + + inode = path->dentry->d_inode; } - audit_inode(pathname, nd->path.dentry); - goto ok; + goto finish_lookup; } /* create side of things */ @@ -2241,6 +2269,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, if (nd->last.name[nd->last.len]) goto exit; +retry_lookup: mutex_lock(&dir->d_inode->i_mutex); dentry = lookup_hash(nd); @@ -2302,22 +2331,49 @@ static struct file *do_last(struct nameidata *nd, struct path *path, if (error) nd->flags |= LOOKUP_JUMPED; + BUG_ON(nd->flags & LOOKUP_RCU); + inode = path->dentry->d_inode; +finish_lookup: + /* we _can_ be in RCU mode here */ error = -ENOENT; - if (!path->dentry->d_inode) - goto exit_dput; + if (!inode) { + path_to_nameidata(path, nd); + goto exit; + } - if (path->dentry->d_inode->i_op->follow_link) + if (should_follow_link(inode, !symlink_ok)) { + if (nd->flags & LOOKUP_RCU) { + if (unlikely(unlazy_walk(nd, path->dentry))) { + error = -ECHILD; + goto exit; + } + } + BUG_ON(inode != path->dentry->d_inode); return NULL; + } - path_to_nameidata(path, nd); - nd->inode = path->dentry->d_inode; + if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) { + path_to_nameidata(path, nd); + } else { + save_parent.dentry = nd->path.dentry; + save_parent.mnt = mntget(path->mnt); + nd->path.dentry = path->dentry; + + } + nd->inode = inode; /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... 
*/ error = complete_walk(nd); - if (error) + if (error) { + path_put(&save_parent); return ERR_PTR(error); + } error = -EISDIR; - if (S_ISDIR(nd->inode->i_mode)) + if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode)) + goto exit; + error = -ENOTDIR; + if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup) goto exit; + audit_inode(pathname, nd->path.dentry); ok: if (!S_ISREG(nd->inode->i_mode)) will_truncate = 0; @@ -2333,6 +2389,20 @@ common: if (error) goto exit; filp = nameidata_to_filp(nd); + if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) { + BUG_ON(save_parent.dentry != dir); + path_put(&nd->path); + nd->path = save_parent; + nd->inode = dir->d_inode; + save_parent.mnt = NULL; + save_parent.dentry = NULL; + if (want_write) { + mnt_drop_write(nd->path.mnt); + want_write = 0; + } + retried = true; + goto retry_lookup; + } if (!IS_ERR(filp)) { error = ima_file_check(filp, op->acc_mode); if (error) { @@ -2352,7 +2422,8 @@ common: out: if (want_write) mnt_drop_write(nd->path.mnt); - path_put(&nd->path); + path_put(&save_parent); + terminate_walk(nd); return filp; exit_mutex_unlock: @@ -2415,6 +2486,12 @@ out: if (base) fput(base); release_open_intent(nd); + if (filp == ERR_PTR(-EOPENSTALE)) { + if (flags & LOOKUP_RCU) + filp = ERR_PTR(-ECHILD); + else + filp = ERR_PTR(-ESTALE); + } return filp; out_filp: diff --git a/fs/namespace.c b/fs/namespace.c index e6081996c9a2..1e4a5fe3d7b7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -397,7 +397,7 @@ static int mnt_make_readonly(struct mount *mnt) { int ret = 0; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; /* * After storing MNT_WRITE_HOLD, we'll read the counters. This store @@ -431,15 +431,15 @@ static int mnt_make_readonly(struct mount *mnt) */ smp_wmb(); mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return ret; } static void __mnt_unmake_readonly(struct mount *mnt) { - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); mnt->mnt.mnt_flags &= ~MNT_READONLY; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } int sb_prepare_remount_readonly(struct super_block *sb) @@ -451,7 +451,7 @@ int sb_prepare_remount_readonly(struct super_block *sb) if (atomic_long_read(&sb->s_remove_count)) return -EBUSY; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; @@ -473,7 +473,7 @@ int sb_prepare_remount_readonly(struct super_block *sb) if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return err; } @@ -522,14 +522,14 @@ struct vfsmount *lookup_mnt(struct path *path) { struct mount *child_mnt; - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); if (child_mnt) { mnt_add_count(child_mnt, 1); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return &child_mnt->mnt; } else { - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return NULL; } } @@ -714,9 +714,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void mnt->mnt.mnt_sb = root->d_sb; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, 
&root->d_sb->s_mounts); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return &mnt->mnt; } EXPORT_SYMBOL_GPL(vfs_kern_mount); @@ -745,9 +745,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, mnt->mnt.mnt_root = dget(root); mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_add_tail(&mnt->mnt_instance, &sb->s_mounts); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); if (flag & CL_SLAVE) { list_add(&mnt->mnt_slave, &old->mnt_slave_list); @@ -803,35 +803,36 @@ static void mntput_no_expire(struct mount *mnt) { put_again: #ifdef CONFIG_SMP - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); if (likely(atomic_read(&mnt->mnt_longterm))) { mnt_add_count(mnt, -1); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return; } - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); mnt_add_count(mnt, -1); if (mnt_get_count(mnt)) { - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return; } #else mnt_add_count(mnt, -1); if (likely(mnt_get_count(mnt))) return; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); #endif if (unlikely(mnt->mnt_pinned)) { mnt_add_count(mnt, mnt->mnt_pinned + 1); mnt->mnt_pinned = 0; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); acct_auto_close_mnt(&mnt->mnt); goto put_again; } + list_del(&mnt->mnt_instance); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); mntfree(mnt); } @@ -857,21 +858,21 @@ EXPORT_SYMBOL(mntget); void mnt_pin(struct vfsmount *mnt) { - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); real_mount(mnt)->mnt_pinned++; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_pin); void mnt_unpin(struct vfsmount *m) { struct mount *mnt = real_mount(m); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); if (mnt->mnt_pinned) { mnt_add_count(mnt, 1); mnt->mnt_pinned--; } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } EXPORT_SYMBOL(mnt_unpin); @@ -988,12 +989,12 @@ int may_umount_tree(struct vfsmount *m) BUG_ON(!m); /* write lock needed for mnt_get_count */ - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); if (actual_refs > minimum_refs) return 0; @@ -1020,10 +1021,10 @@ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_read(&namespace_sem); return ret; } @@ -1040,13 +1041,13 @@ void release_mounts(struct list_head *head) struct dentry *dentry; struct mount *m; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); dentry = mnt->mnt_mountpoint; m = mnt->mnt_parent; mnt->mnt_mountpoint = mnt->mnt.mnt_root; mnt->mnt_parent = mnt; m->mnt_ghosts--; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); dput(dentry); mntput(&m->mnt); } @@ -1073,8 +1074,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill) list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); + if (p->mnt_ns) + 
__mnt_make_shortterm(p); p->mnt_ns = NULL; - __mnt_make_shortterm(p); list_del_init(&p->mnt_child); if (mnt_has_parent(p)) { p->mnt_parent->mnt_ghosts++; @@ -1112,12 +1114,12 @@ static int do_umount(struct mount *mnt, int flags) * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); if (mnt_get_count(mnt) != 2) { - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return -EBUSY; } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; @@ -1159,7 +1161,7 @@ static int do_umount(struct mount *mnt, int flags) } down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); event++; if (!(flags & MNT_DETACH)) @@ -1171,7 +1173,7 @@ static int do_umount(struct mount *mnt, int flags) umount_tree(mnt, 1, &umount_list); retval = 0; } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); return retval; @@ -1286,19 +1288,19 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, q = clone_mnt(p, p->mnt.mnt_root, flag); if (!q) goto Enomem; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_add_tail(&q->mnt_list, &res->mnt_list); attach_mnt(q, &path); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } } return res; Enomem: if (res) { LIST_HEAD(umount_list); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); umount_tree(res, 0, &umount_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); release_mounts(&umount_list); } return NULL; @@ -1318,9 +1320,9 @@ void drop_collected_mounts(struct vfsmount *mnt) { LIST_HEAD(umount_list); down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); umount_tree(real_mount(mnt), 0, &umount_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); } @@ -1448,7 +1450,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, if (err) goto out_cleanup_ids; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); if (IS_MNT_SHARED(dest_mnt)) { for (p = source_mnt; p; p = next_mnt(p, source_mnt)) @@ -1467,7 +1469,7 @@ static int attach_recursive_mnt(struct mount *source_mnt, list_del_init(&child->mnt_hash); commit_tree(child); } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); return 0; @@ -1565,10 +1567,10 @@ static int do_change_type(struct path *path, int flag) goto out_unlock; } - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); for (m = mnt; m; m = (recurse ? 
next_mnt(m, mnt) : NULL)) change_mnt_propagation(m, type); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); out_unlock: up_write(&namespace_sem); @@ -1617,9 +1619,9 @@ static int do_loopback(struct path *path, char *old_name, err = graft_tree(mnt, path); if (err) { - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); umount_tree(mnt, 0, &umount_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } out2: unlock_mount(path); @@ -1677,16 +1679,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags, else err = do_remount_sb(sb, flags, data, 0); if (!err) { - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; mnt->mnt.mnt_flags = mnt_flags; - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } up_write(&sb->s_umount); if (!err) { - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); touch_mnt_namespace(mnt->mnt_ns); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); } return err; } @@ -1893,9 +1895,9 @@ fail: /* remove m from any expiration list it may be on */ if (!list_empty(&mnt->mnt_expire)) { down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_del_init(&mnt->mnt_expire); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } mntput(m); @@ -1911,11 +1913,11 @@ fail: void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) { down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); } EXPORT_SYMBOL(mnt_set_expiry); @@ -1935,7 +1937,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) return; down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); /* extract from the expiration list every vfsmount that matches the * following criteria: @@ -1954,7 +1956,7 @@ void mark_mounts_for_expiry(struct list_head *mounts) touch_mnt_namespace(mnt->mnt_ns); umount_tree(mnt, 1, &umounts); } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umounts); @@ -2218,9 +2220,9 @@ void mnt_make_shortterm(struct vfsmount *m) struct mount *mnt = real_mount(m); if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) return; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); atomic_dec(&mnt->mnt_longterm); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); #endif } @@ -2250,9 +2252,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, return ERR_PTR(-ENOMEM); } new_ns->root = new; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new->mnt_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts @@ -2416,9 +2418,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry, int path_is_under(struct path *path1, struct path *path2) { int res; - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return res; } EXPORT_SYMBOL(path_is_under); @@ -2505,7 +2507,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, /* 
make sure we can reach put_old from new_root */ if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new)) goto out4; - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); detach_mnt(new_mnt, &parent_path); detach_mnt(root_mnt, &root_parent); /* mount old root on put_old */ @@ -2513,7 +2515,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, /* mount new_root on / */ attach_mnt(new_mnt, &root_parent); touch_mnt_namespace(current->nsproxy->mnt_ns); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); chroot_fs_refs(&root, &new); error = 0; out4: @@ -2576,7 +2578,7 @@ void __init mnt_init(void) for (u = 0; u < HASH_SIZE; u++) INIT_LIST_HEAD(&mount_hashtable[u]); - br_lock_init(vfsmount_lock); + br_lock_init(&vfsmount_lock); err = sysfs_init(); if (err) @@ -2596,9 +2598,9 @@ void put_mnt_ns(struct mnt_namespace *ns) if (!atomic_dec_and_test(&ns->count)) return; down_write(&namespace_sem); - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); umount_tree(ns->root, 0, &umount_list); - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); kfree(ns); diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 3ff5fcc1528f..122e260247f5 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -221,6 +221,10 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t * already_written = 0; + errno = file_update_time(file); + if (errno) + goto outrel; + bouncebuffer = vmalloc(bufsize); if (!bouncebuffer) { errno = -EIO; /* -ENOMEM */ @@ -252,8 +256,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t * } vfree(bouncebuffer); - file_update_time(file); - *ppos = pos; if (pos > i_size_read(inode)) { diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h index 4af803f13516..54cc0cdb3dcb 100644 --- a/fs/ncpfs/ncp_fs_sb.h +++ b/fs/ncpfs/ncp_fs_sb.h @@ -23,17 +23,17 @@ struct ncp_mount_data_kernel { unsigned long flags; /* NCP_MOUNT_* flags */ unsigned int int_flags; /* internal flags */ #define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 - __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ + uid_t mounted_uid; /* Who may umount() this filesystem? */ struct pid *wdog_pid; /* Who cares for our watchdog packets? */ unsigned int ncp_fd; /* The socket to the ncp port */ unsigned int time_out; /* How long should I wait after sending a NCP request? */ unsigned int retry_count; /* And how often should I retry? 
*/ unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_mode_t file_mode; - __kernel_mode_t dir_mode; + uid_t uid; + gid_t gid; + umode_t file_mode; + umode_t dir_mode; int info_fd; }; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 0989a2099688..f430057ff3b3 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1354,10 +1354,10 @@ out: } #ifdef CONFIG_NFS_V4 -static int nfs_open_revalidate(struct dentry *, struct nameidata *); +static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *); const struct dentry_operations nfs4_dentry_operations = { - .d_revalidate = nfs_open_revalidate, + .d_revalidate = nfs4_lookup_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, .d_automount = nfs_d_automount, @@ -1519,13 +1519,11 @@ no_open: return nfs_lookup(dir, dentry, nd); } -static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) +static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *parent = NULL; struct inode *inode; struct inode *dir; - struct nfs_open_context *ctx; - struct iattr attr; int openflags, ret = 0; if (nd->flags & LOOKUP_RCU) @@ -1554,57 +1552,13 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) /* We cannot do exclusive creation on a positive dentry */ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) goto no_open_dput; - /* We can't create new files here */ - openflags &= ~(O_CREAT|O_EXCL); - - ctx = create_nfs_open_context(dentry, openflags); - ret = PTR_ERR(ctx); - if (IS_ERR(ctx)) - goto out; - attr.ia_valid = ATTR_OPEN; - if (openflags & O_TRUNC) { - attr.ia_valid |= ATTR_SIZE; - attr.ia_size = 0; - nfs_wb_all(inode); - } - - /* - * Note: we're not holding inode->i_mutex and so may be racing with - * operations that change the directory. We therefore save the - * change attribute *before* we do the RPC call. - */ - inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); - if (IS_ERR(inode)) { - ret = PTR_ERR(inode); - switch (ret) { - case -EPERM: - case -EACCES: - case -EDQUOT: - case -ENOSPC: - case -EROFS: - goto out_put_ctx; - default: - goto out_drop; - } - } - iput(inode); - if (inode != dentry->d_inode) - goto out_drop; + /* Let f_op->open() actually open (and revalidate) the file */ + ret = 1; - nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - ret = nfs_intent_set_file(nd, ctx); - if (ret >= 0) - ret = 1; out: dput(parent); return ret; -out_drop: - d_drop(dentry); - ret = 0; -out_put_ctx: - put_nfs_open_context(ctx); - goto out; no_open_dput: dput(parent); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 56311ca5f9f8..a6708e6b438d 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -879,12 +879,81 @@ const struct file_operations nfs_file_operations = { static int nfs4_file_open(struct inode *inode, struct file *filp) { + struct nfs_open_context *ctx; + struct dentry *dentry = filp->f_path.dentry; + struct dentry *parent = NULL; + struct inode *dir; + unsigned openflags = filp->f_flags; + struct iattr attr; + int err; + + BUG_ON(inode != dentry->d_inode); /* - * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to - * this point, then something is very wrong + * If no cached dentry exists or if it's negative, NFSv4 handled the + * opens in ->lookup() or ->create(). + * + * We only get this far for a cached positive dentry. We skipped + * revalidation, so handle it here by dropping the dentry and returning + * -EOPENSTALE. The VFS will retry the lookup/create/open. 
*/ - dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp); - return -ENOTDIR; + + dprintk("NFS: open file(%s/%s)\n", + dentry->d_parent->d_name.name, + dentry->d_name.name); + + if ((openflags & O_ACCMODE) == 3) + openflags--; + + /* We can't create new files here */ + openflags &= ~(O_CREAT|O_EXCL); + + parent = dget_parent(dentry); + dir = parent->d_inode; + + ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); + err = PTR_ERR(ctx); + if (IS_ERR(ctx)) + goto out; + + attr.ia_valid = ATTR_OPEN; + if (openflags & O_TRUNC) { + attr.ia_valid |= ATTR_SIZE; + attr.ia_size = 0; + nfs_wb_all(inode); + } + + inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + switch (err) { + case -EPERM: + case -EACCES: + case -EDQUOT: + case -ENOSPC: + case -EROFS: + goto out_put_ctx; + default: + goto out_drop; + } + } + iput(inode); + if (inode != dentry->d_inode) + goto out_drop; + + nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); + nfs_file_set_open_context(filp, ctx); + err = 0; + +out_put_ctx: + put_nfs_open_context(ctx); +out: + dput(parent); + return err; + +out_drop: + d_drop(dentry); + err = -EOPENSTALE; + goto out_put_ctx; } const struct file_operations nfs4_file_operations = { diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 0bb2c2010b95..b72847988b78 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -508,31 +508,29 @@ static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh, return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen); } -static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp, - int connectable) +static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp, + struct inode *parent) { struct nilfs_fid *fid = (struct nilfs_fid *)fh; - struct inode *inode = dentry->d_inode; struct nilfs_root *root = NILFS_I(inode)->i_root; int type; - if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE || - (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE)) + if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) { + *lenp = NILFS_FID_SIZE_CONNECTABLE; + return 255; + } + if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) { + *lenp = NILFS_FID_SIZE_NON_CONNECTABLE; return 255; + } fid->cno = root->cno; fid->ino = inode->i_ino; fid->gen = inode->i_generation; - if (connectable && !S_ISDIR(inode->i_mode)) { - struct inode *parent; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent->d_inode; + if (parent) { fid->parent_ino = parent->i_ino; fid->parent_gen = parent->i_generation; - spin_unlock(&dentry->d_lock); - type = FILEID_NILFS_WITH_PARENT; *lenp = NILFS_FID_SIZE_CONNECTABLE; } else { diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig index b5eac98fd7bd..e2ce79ef48c4 100644 --- a/fs/nls/Kconfig +++ b/fs/nls/Kconfig @@ -452,7 +452,7 @@ config NLS_KOI8_U input/output character sets. Say Y here for the preferred Ukrainian (koi8-u) and Belarusian (koi8-ru) character sets. -config NLS_CODEPAGE_MACROMAN +config NLS_MAC_ROMAN tristate "Codepage macroman" ---help--- The Apple HFS file system family can deal with filenames in @@ -467,7 +467,7 @@ config NLS_CODEPAGE_MACROMAN If unsure, say Y. -config NLS_CODEPAGE_MACCELTIC +config NLS_MAC_CELTIC tristate "Codepage macceltic" ---help--- The Apple HFS file system family can deal with filenames in @@ -481,7 +481,7 @@ config NLS_CODEPAGE_MACCELTIC If unsure, say Y. 
-config NLS_CODEPAGE_MACCENTEURO +config NLS_MAC_CENTEURO tristate "Codepage maccenteuro" ---help--- The Apple HFS file system family can deal with filenames in @@ -495,7 +495,7 @@ config NLS_CODEPAGE_MACCENTEURO If unsure, say Y. -config NLS_CODEPAGE_MACCROATIAN +config NLS_MAC_CROATIAN tristate "Codepage maccroatian" ---help--- The Apple HFS file system family can deal with filenames in @@ -509,7 +509,7 @@ config NLS_CODEPAGE_MACCROATIAN If unsure, say Y. -config NLS_CODEPAGE_MACCYRILLIC +config NLS_MAC_CYRILLIC tristate "Codepage maccyrillic" ---help--- The Apple HFS file system family can deal with filenames in @@ -523,7 +523,7 @@ config NLS_CODEPAGE_MACCYRILLIC If unsure, say Y. -config NLS_CODEPAGE_MACGAELIC +config NLS_MAC_GAELIC tristate "Codepage macgaelic" ---help--- The Apple HFS file system family can deal with filenames in @@ -537,7 +537,7 @@ config NLS_CODEPAGE_MACGAELIC If unsure, say Y. -config NLS_CODEPAGE_MACGREEK +config NLS_MAC_GREEK tristate "Codepage macgreek" ---help--- The Apple HFS file system family can deal with filenames in @@ -551,7 +551,7 @@ config NLS_CODEPAGE_MACGREEK If unsure, say Y. -config NLS_CODEPAGE_MACICELAND +config NLS_MAC_ICELAND tristate "Codepage maciceland" ---help--- The Apple HFS file system family can deal with filenames in @@ -565,7 +565,7 @@ config NLS_CODEPAGE_MACICELAND If unsure, say Y. -config NLS_CODEPAGE_MACINUIT +config NLS_MAC_INUIT tristate "Codepage macinuit" ---help--- The Apple HFS file system family can deal with filenames in @@ -579,7 +579,7 @@ config NLS_CODEPAGE_MACINUIT If unsure, say Y. -config NLS_CODEPAGE_MACROMANIAN +config NLS_MAC_ROMANIAN tristate "Codepage macromanian" ---help--- The Apple HFS file system family can deal with filenames in @@ -593,7 +593,7 @@ config NLS_CODEPAGE_MACROMANIAN If unsure, say Y. 
-config NLS_CODEPAGE_MACTURKISH +config NLS_MAC_TURKISH tristate "Codepage macturkish" ---help--- The Apple HFS file system family can deal with filenames in diff --git a/fs/nls/Makefile b/fs/nls/Makefile index b6b0550a7c80..8ae37c1b5249 100644 --- a/fs/nls/Makefile +++ b/fs/nls/Makefile @@ -2,18 +2,6 @@ # Makefile for native language support # -CONFIG_NLS_MACCELTIC=m -CONFIG_NLS_MACCENTEURO=m -CONFIG_NLS_MACCROATIAN=m -CONFIG_NLS_MACCYRILLIC=m -CONFIG_NLS_MACGAELIC=m -CONFIG_NLS_MACGREEK=m -CONFIG_NLS_MACICELAND=m -CONFIG_NLS_MACINUIT=m -CONFIG_NLS_MACROMANIAN=m -CONFIG_NLS_MACROMAN=m -CONFIG_NLS_MACTURKISH=m - obj-$(CONFIG_NLS) += nls_base.o obj-$(CONFIG_NLS_CODEPAGE_437) += nls_cp437.o @@ -54,14 +42,14 @@ obj-$(CONFIG_NLS_ISO8859_15) += nls_iso8859-15.o obj-$(CONFIG_NLS_KOI8_R) += nls_koi8-r.o obj-$(CONFIG_NLS_KOI8_U) += nls_koi8-u.o nls_koi8-ru.o obj-$(CONFIG_NLS_UTF8) += nls_utf8.o -obj-$(CONFIG_NLS_MACCELTIC) += nls_macceltic.o -obj-$(CONFIG_NLS_MACCENTEURO) += nls_maccenteuro.o -obj-$(CONFIG_NLS_MACCROATIAN) += nls_maccroatian.o -obj-$(CONFIG_NLS_MACCYRILLIC) += nls_maccyrillic.o -obj-$(CONFIG_NLS_MACGAELIC) += nls_macgaelic.o -obj-$(CONFIG_NLS_MACGREEK) += nls_macgreek.o -obj-$(CONFIG_NLS_MACICELAND) += nls_maciceland.o -obj-$(CONFIG_NLS_MACINUIT) += nls_macinuit.o -obj-$(CONFIG_NLS_MACROMANIAN) += nls_macromanian.o -obj-$(CONFIG_NLS_MACROMAN) += nls_macroman.o -obj-$(CONFIG_NLS_MACTURKISH) += nls_macturkish.o +obj-$(CONFIG_NLS_MAC_CELTIC) += mac-celtic.o +obj-$(CONFIG_NLS_MAC_CENTEURO) += mac-centeuro.o +obj-$(CONFIG_NLS_MAC_CROATIAN) += mac-croatian.o +obj-$(CONFIG_NLS_MAC_CYRILLIC) += mac-cyrillic.o +obj-$(CONFIG_NLS_MAC_GAELIC) += mac-gaelic.o +obj-$(CONFIG_NLS_MAC_GREEK) += mac-greek.o +obj-$(CONFIG_NLS_MAC_ICELAND) += mac-iceland.o +obj-$(CONFIG_NLS_MAC_INUIT) += mac-inuit.o +obj-$(CONFIG_NLS_MAC_ROMANIAN) += mac-romanian.o +obj-$(CONFIG_NLS_MAC_ROMAN) += mac-roman.o +obj-$(CONFIG_NLS_MAC_TURKISH) += mac-turkish.o diff --git a/fs/nls/nls_macceltic.c b/fs/nls/mac-celtic.c index 95ac5b41ad1c..634a8b717b02 100644 --- a/fs/nls/nls_macceltic.c +++ b/fs/nls/mac-celtic.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macceltic.c + * linux/fs/nls/mac-celtic.c * * Charset macceltic translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_maccenteuro.c b/fs/nls/mac-centeuro.c index ce0d57ef39f6..979e6265ac5e 100644 --- a/fs/nls/nls_maccenteuro.c +++ b/fs/nls/mac-centeuro.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_maccenteuro.c + * linux/fs/nls/mac-centeuro.c * * Charset maccenteuro translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_maccroatian.c b/fs/nls/mac-croatian.c index 10b01c3eed66..dd3f675911ee 100644 --- a/fs/nls/nls_maccroatian.c +++ b/fs/nls/mac-croatian.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_maccroatian.c + * linux/fs/nls/mac-croatian.c * * Charset maccroatian translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_maccyrillic.c b/fs/nls/mac-cyrillic.c index 318473fbb26c..1112c84dd8bb 100644 --- a/fs/nls/nls_maccyrillic.c +++ b/fs/nls/mac-cyrillic.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_maccyrillic.c + * linux/fs/nls/mac-cyrillic.c * * Charset maccyrillic translation tables. 
* Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macgaelic.c b/fs/nls/mac-gaelic.c index 615d8e128f14..2de9158409c8 100644 --- a/fs/nls/nls_macgaelic.c +++ b/fs/nls/mac-gaelic.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macgaelic.c + * linux/fs/nls/mac-gaelic.c * * Charset macgaelic translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macgreek.c b/fs/nls/mac-greek.c index 79880f30494f..a86310082802 100644 --- a/fs/nls/nls_macgreek.c +++ b/fs/nls/mac-greek.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macgreek.c + * linux/fs/nls/mac-greek.c * * Charset macgreek translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_maciceland.c b/fs/nls/mac-iceland.c index 1e688c59b252..babe2998d5ce 100644 --- a/fs/nls/nls_maciceland.c +++ b/fs/nls/mac-iceland.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_maciceland.c + * linux/fs/nls/mac-iceland.c * * Charset maciceland translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macinuit.c b/fs/nls/mac-inuit.c index f333d98941d6..312364f010dc 100644 --- a/fs/nls/nls_macinuit.c +++ b/fs/nls/mac-inuit.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macinuit.c + * linux/fs/nls/mac-inuit.c * * Charset macinuit translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macroman.c b/fs/nls/mac-roman.c index 6315a857ab68..53ce0809cbd2 100644 --- a/fs/nls/nls_macroman.c +++ b/fs/nls/mac-roman.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macroman.c + * linux/fs/nls/mac-roman.c * * Charset macroman translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macromanian.c b/fs/nls/mac-romanian.c index b83c07a57d25..add6f7a0c666 100644 --- a/fs/nls/nls_macromanian.c +++ b/fs/nls/mac-romanian.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macromanian.c + * linux/fs/nls/mac-romanian.c * * Charset macromanian translation tables. * Generated automatically from the Unicode and charset diff --git a/fs/nls/nls_macturkish.c b/fs/nls/mac-turkish.c index 0cc2c6572826..dffa96d5de00 100644 --- a/fs/nls/nls_macturkish.c +++ b/fs/nls/mac-turkish.c @@ -1,5 +1,5 @@ /* - * linux/fs/nls/nls_macturkish.c + * linux/fs/nls/mac-turkish.c * * Charset macturkish translation tables. 
* Generated automatically from the Unicode and charset diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ccb14d3fc0de..b39c5c161adb 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -123,7 +123,7 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) } EXPORT_SYMBOL_GPL(__fsnotify_parent); -static int send_to_group(struct inode *to_tell, struct vfsmount *mnt, +static int send_to_group(struct inode *to_tell, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, @@ -168,10 +168,10 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt, vfsmount_test_mask &= ~inode_mark->ignored_mask; } - pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p" + pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" " data=%p data_is=%d cookie=%d event=%p\n", - __func__, group, to_tell, mnt, mask, inode_mark, + __func__, group, to_tell, mask, inode_mark, inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, data_is, cookie, *event); @@ -258,16 +258,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, if (inode_group > vfsmount_group) { /* handle inode */ - ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data, + ret = send_to_group(to_tell, inode_mark, NULL, mask, data, data_is, cookie, file_name, &event); /* we didn't use the vfsmount_mark */ vfsmount_group = NULL; } else if (vfsmount_group > inode_group) { - ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data, + ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, data, data_is, cookie, file_name, &event); inode_group = NULL; } else { - ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark, + ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask, data, data_is, cookie, file_name, &event); } diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 8639169221c7..7389d2d5e51d 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2096,7 +2096,9 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, err = file_remove_suid(file); if (err) goto out; - file_update_time(file); + err = file_update_time(file); + if (err) + goto out; written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count); out: diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index c7ee03c22226..0725e6054650 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -422,45 +422,46 @@ int ocfs2_block_check_validate(void *data, size_t blocksize, struct ocfs2_blockcheck_stats *stats) { int rc = 0; - struct ocfs2_block_check check; + u32 bc_crc32e; + u16 bc_ecc; u32 crc, ecc; ocfs2_blockcheck_inc_check(stats); - check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); - check.bc_ecc = le16_to_cpu(bc->bc_ecc); + bc_crc32e = le32_to_cpu(bc->bc_crc32e); + bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ crc = crc32_le(~0, data, blocksize); - if (crc == check.bc_crc32e) + if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: 0x%x, computed 0x%x. 
Applying ECC.\n", - (unsigned int)check.bc_crc32e, (unsigned int)crc); + (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ ecc = ocfs2_hamming_encode_block(data, blocksize); - ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc); + ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc); /* And check the crc32 again */ crc = crc32_le(~0, data, blocksize); - if (crc == check.bc_crc32e) { + if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", - (unsigned int)check.bc_crc32e, (unsigned int)crc); + (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: - bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); - bc->bc_ecc = cpu_to_le16(check.bc_ecc); + bc->bc_crc32e = cpu_to_le32(bc_crc32e); + bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } @@ -528,7 +529,8 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, struct ocfs2_blockcheck_stats *stats) { int i, rc = 0; - struct ocfs2_block_check check; + u32 bc_crc32e; + u16 bc_ecc; u32 crc, ecc, fix; BUG_ON(nr < 0); @@ -538,21 +540,21 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, ocfs2_blockcheck_inc_check(stats); - check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); - check.bc_ecc = le16_to_cpu(bc->bc_ecc); + bc_crc32e = le32_to_cpu(bc->bc_crc32e); + bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); - if (crc == check.bc_crc32e) + if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", - (unsigned int)check.bc_crc32e, (unsigned int)crc); + (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ for (i = 0, ecc = 0; i < nr; i++) { @@ -565,7 +567,7 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } - fix = ecc ^ check.bc_ecc; + fix = ecc ^ bc_ecc; for (i = 0; i < nr; i++) { /* * Try the fix against each buffer. 
It will only affect @@ -578,19 +580,19 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, /* And check the crc32 again */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); - if (crc == check.bc_crc32e) { + if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", - (unsigned int)check.bc_crc32e, (unsigned int)crc); + (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: - bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); - bc->bc_ecc = cpu_to_le16(check.bc_ecc); + bc->bc_crc32e = cpu_to_le32(bc_crc32e); + bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 3a3ed4bb794b..fbec0be62326 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -293,7 +293,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; char *name; struct list_head *iter, *head=NULL; - u64 cookie; + __be64 cookie; u32 flags; u8 node; diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index a5952ceecba5..de854cca12a2 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -679,7 +679,7 @@ struct dlm_query_join_packet { }; union dlm_query_join_response { - u32 intval; + __be32 intval; struct dlm_query_join_packet packet; }; @@ -755,8 +755,8 @@ struct dlm_query_region { struct dlm_node_info { u8 ni_nodenum; u8 pad1; - u16 ni_ipv4_port; - u32 ni_ipv4_address; + __be16 ni_ipv4_port; + __be32 ni_ipv4_address; }; struct dlm_query_nodeinfo { diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 92f2ead0fab6..9e89d70df337 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -818,7 +818,7 @@ static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet, union dlm_query_join_response response; response.packet = *packet; - *wire = cpu_to_be32(response.intval); + *wire = be32_to_cpu(response.intval); } static void dlm_query_join_wire_to_packet(u32 wire, diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 745db42528d5..322216a5f0dd 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -177,21 +177,23 @@ bail: return parent; } -static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, - int connectable) +static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len, + struct inode *parent) { - struct inode *inode = dentry->d_inode; int len = *max_len; int type = 1; u64 blkno; u32 generation; __le32 *fh = (__force __le32 *) fh_in; +#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION +#error "You go ahead and fix that mess, then. 
Somehow" trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, dentry->d_name.name, fh, len, connectable); +#endif - if (connectable && (len < 6)) { + if (parent && (len < 6)) { *max_len = 6; type = 255; goto bail; @@ -211,12 +213,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[2] = cpu_to_le32(generation); - if (connectable && !S_ISDIR(inode->i_mode)) { - struct inode *parent; - - spin_lock(&dentry->d_lock); - - parent = dentry->d_parent->d_inode; + if (parent) { blkno = OCFS2_I(parent)->ip_blkno; generation = parent->i_generation; @@ -224,8 +221,6 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); fh[5] = cpu_to_le32(generation); - spin_unlock(&dentry->d_lock); - len = 6; type = 2; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 735514ca400f..d89e08a81eda 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -273,11 +273,13 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, inode->i_gid = le32_to_cpu(fe->i_gid); /* Fast symlinks will have i_size but no allocated clusters. */ - if (S_ISLNK(inode->i_mode) && !fe->i_clusters) + if (S_ISLNK(inode->i_mode) && !fe->i_clusters) { inode->i_blocks = 0; - else + inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops; + } else { inode->i_blocks = ocfs2_inode_sector_count(inode); - inode->i_mapping->a_ops = &ocfs2_aops; + inode->i_mapping->a_ops = &ocfs2_aops; + } inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); @@ -331,10 +333,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, OCFS2_I(inode)->ip_dir_lock_gen = 1; break; case S_IFLNK: - if (ocfs2_inode_is_fast_symlink(inode)) - inode->i_op = &ocfs2_fast_symlink_inode_operations; - else - inode->i_op = &ocfs2_symlink_inode_operations; + inode->i_op = &ocfs2_symlink_inode_operations; i_size_write(inode, le64_to_cpu(fe->i_size)); break; default: diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index a1a1bfd652c9..d96f7f81d8dd 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -864,7 +864,7 @@ int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info, if (status) break; - reqp = (struct ocfs2_info_request *)(unsigned long)req_addr; + reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr; if (!reqp) { status = -EINVAL; goto bail; @@ -888,9 +888,11 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct ocfs2_space_resv sr; struct ocfs2_new_group_input input; struct reflink_arguments args; - const char *old_path, *new_path; + const char __user *old_path; + const char __user *new_path; bool preserve; struct ocfs2_info info; + void __user *argp = (void __user *)arg; switch (cmd) { case OCFS2_IOC_GETFLAGS: @@ -937,17 +939,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return ocfs2_group_add(inode, &input); case OCFS2_IOC_REFLINK: - if (copy_from_user(&args, (struct reflink_arguments *)arg, - sizeof(args))) + if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; - old_path = (const char *)(unsigned long)args.old_path; - new_path = (const char *)(unsigned long)args.new_path; + old_path = (const char __user *)(unsigned long)args.old_path; + new_path = (const char __user *)(unsigned long)args.new_path; preserve = (args.preserve != 0); return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve); 
case OCFS2_IOC_INFO: - if (copy_from_user(&info, (struct ocfs2_info __user *)arg, - sizeof(struct ocfs2_info))) + if (copy_from_user(&info, argp, sizeof(struct ocfs2_info))) return -EFAULT; return ocfs2_info_handle(inode, &info, 0); @@ -960,22 +960,20 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&range, (struct fstrim_range *)arg, - sizeof(range))) + if (copy_from_user(&range, argp, sizeof(range))) return -EFAULT; ret = ocfs2_trim_fs(sb, &range); if (ret < 0) return ret; - if (copy_to_user((struct fstrim_range *)arg, &range, - sizeof(range))) + if (copy_to_user(argp, &range, sizeof(range))) return -EFAULT; return 0; } case OCFS2_IOC_MOVE_EXT: - return ocfs2_ioctl_move_extents(filp, (void __user *)arg); + return ocfs2_ioctl_move_extents(filp, argp); default: return -ENOTTY; } @@ -988,6 +986,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) struct reflink_arguments args; struct inode *inode = file->f_path.dentry->d_inode; struct ocfs2_info info; + void __user *argp = (void __user *)arg; switch (cmd) { case OCFS2_IOC32_GETFLAGS: @@ -1006,16 +1005,14 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case FITRIM: break; case OCFS2_IOC_REFLINK: - if (copy_from_user(&args, (struct reflink_arguments *)arg, - sizeof(args))) + if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; preserve = (args.preserve != 0); return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), compat_ptr(args.new_path), preserve); case OCFS2_IOC_INFO: - if (copy_from_user(&info, (struct ocfs2_info __user *)arg, - sizeof(struct ocfs2_info))) + if (copy_from_user(&info, argp, sizeof(struct ocfs2_info))) return -EFAULT; return ocfs2_info_handle(inode, &info, 1); diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index b1e3fce72ea4..6083432f667e 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -1082,8 +1082,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->file = filp; if (argp) { - if (copy_from_user(&range, (struct ocfs2_move_extents *)argp, - sizeof(range))) { + if (copy_from_user(&range, argp, sizeof(range))) { status = -EFAULT; goto out; } @@ -1138,8 +1137,7 @@ out: * length and new_offset even if failure happens somewhere. 
*/ if (argp) { - if (copy_to_user((struct ocfs2_move_extents *)argp, &range, - sizeof(range))) + if (copy_to_user(argp, &range, sizeof(range))) status = -EFAULT; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index a9856e3eaaf0..9f39c640cddf 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -1724,15 +1724,16 @@ static int ocfs2_symlink(struct inode *dir, fe = (struct ocfs2_dinode *) new_fe_bh->b_data; inode->i_rdev = 0; newsize = l - 1; + inode->i_op = &ocfs2_symlink_inode_operations; if (l > ocfs2_fast_symlink_chars(sb)) { u32 offset = 0; - inode->i_op = &ocfs2_symlink_inode_operations; status = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 1)); if (status) goto bail; did_quota = 1; + inode->i_mapping->a_ops = &ocfs2_aops; status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, new_fe_bh, handle, data_ac, NULL, @@ -1750,7 +1751,7 @@ static int ocfs2_symlink(struct inode *dir, i_size_write(inode, newsize); inode->i_blocks = ocfs2_inode_sector_count(inode); } else { - inode->i_op = &ocfs2_fast_symlink_inode_operations; + inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops; memcpy((char *) fe->id2.i_symlink, symname, l); i_size_write(inode, newsize); inode->i_blocks = 0; diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 5d22872e2bb3..f1fbb4b552ad 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -54,101 +54,40 @@ #include "buffer_head_io.h" -static char *ocfs2_fast_symlink_getlink(struct inode *inode, - struct buffer_head **bh) +static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page) { - int status; - char *link = NULL; + struct inode *inode = page->mapping->host; + struct buffer_head *bh; + int status = ocfs2_read_inode_block(inode, &bh); struct ocfs2_dinode *fe; + const char *link; + void *kaddr; + size_t len; - status = ocfs2_read_inode_block(inode, bh); if (status < 0) { mlog_errno(status); - link = ERR_PTR(status); - goto bail; + return status; } - fe = (struct ocfs2_dinode *) (*bh)->b_data; + fe = (struct ocfs2_dinode *) bh->b_data; link = (char *) fe->id2.i_symlink; -bail: - - return link; -} - -static int ocfs2_readlink(struct dentry *dentry, - char __user *buffer, - int buflen) -{ - int ret; - char *link; - struct buffer_head *bh = NULL; - struct inode *inode = dentry->d_inode; - - link = ocfs2_fast_symlink_getlink(inode, &bh); - if (IS_ERR(link)) { - ret = PTR_ERR(link); - goto out; - } - - /* - * Without vfsmount we can't update atime now, - * but we will update atime here ultimately. 
- */ - ret = vfs_readlink(dentry, buffer, buflen, link); - + /* will be less than a page size */ + len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb)); + kaddr = kmap_atomic(page); + memcpy(kaddr, link, len + 1); + kunmap_atomic(kaddr); + SetPageUptodate(page); + unlock_page(page); brelse(bh); -out: - if (ret < 0) - mlog_errno(ret); - return ret; + return 0; } -static void *ocfs2_fast_follow_link(struct dentry *dentry, - struct nameidata *nd) -{ - int status = 0; - int len; - char *target, *link = ERR_PTR(-ENOMEM); - struct inode *inode = dentry->d_inode; - struct buffer_head *bh = NULL; - - BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); - target = ocfs2_fast_symlink_getlink(inode, &bh); - if (IS_ERR(target)) { - status = PTR_ERR(target); - mlog_errno(status); - goto bail; - } - - /* Fast symlinks can't be large */ - len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); - link = kzalloc(len + 1, GFP_NOFS); - if (!link) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - - memcpy(link, target, len); - -bail: - nd_set_link(nd, status ? ERR_PTR(status) : link); - brelse(bh); - - if (status) - mlog_errno(status); - return NULL; -} - -static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) -{ - char *link = nd_get_link(nd); - if (!IS_ERR(link)) - kfree(link); -} +const struct address_space_operations ocfs2_fast_symlink_aops = { + .readpage = ocfs2_fast_symlink_readpage, +}; const struct inode_operations ocfs2_symlink_inode_operations = { - .readlink = page_readlink, + .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .getattr = ocfs2_getattr, @@ -159,15 +98,3 @@ const struct inode_operations ocfs2_symlink_inode_operations = { .removexattr = generic_removexattr, .fiemap = ocfs2_fiemap, }; -const struct inode_operations ocfs2_fast_symlink_inode_operations = { - .readlink = ocfs2_readlink, - .follow_link = ocfs2_fast_follow_link, - .put_link = ocfs2_fast_put_link, - .getattr = ocfs2_getattr, - .setattr = ocfs2_setattr, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr = ocfs2_listxattr, - .removexattr = generic_removexattr, - .fiemap = ocfs2_fiemap, -}; diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h index 65a6c9c6ad51..71ee4245e919 100644 --- a/fs/ocfs2/symlink.h +++ b/fs/ocfs2/symlink.h @@ -27,7 +27,7 @@ #define OCFS2_SYMLINK_H extern const struct inode_operations ocfs2_symlink_inode_operations; -extern const struct inode_operations ocfs2_fast_symlink_inode_operations; +extern const struct address_space_operations ocfs2_fast_symlink_aops; /* * Test whether an inode is a fast symlink. 
diff --git a/fs/open.c b/fs/open.c index d54301219d04..d6c79a0dffc7 100644 --- a/fs/open.c +++ b/fs/open.c @@ -654,10 +654,23 @@ static inline int __get_file_write_access(struct inode *inode, return error; } -static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, - struct file *f, - int (*open)(struct inode *, struct file *), - const struct cred *cred) +int open_check_o_direct(struct file *f) +{ + /* NB: we're sure to have correct a_ops only after f_op->open */ + if (f->f_flags & O_DIRECT) { + if (!f->f_mapping->a_ops || + ((!f->f_mapping->a_ops->direct_IO) && + (!f->f_mapping->a_ops->get_xip_mem))) { + return -EINVAL; + } + } + return 0; +} + +static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt, + struct file *f, + int (*open)(struct inode *, struct file *), + const struct cred *cred) { static const struct file_operations empty_fops = {}; struct inode *inode; @@ -713,16 +726,6 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); - /* NB: we're sure to have correct a_ops only after f_op->open */ - if (f->f_flags & O_DIRECT) { - if (!f->f_mapping->a_ops || - ((!f->f_mapping->a_ops->direct_IO) && - (!f->f_mapping->a_ops->get_xip_mem))) { - fput(f); - f = ERR_PTR(-EINVAL); - } - } - return f; cleanup_all: @@ -744,12 +747,29 @@ cleanup_all: f->f_path.dentry = NULL; f->f_path.mnt = NULL; cleanup_file: - put_filp(f); dput(dentry); mntput(mnt); return ERR_PTR(error); } +static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, + struct file *f, + int (*open)(struct inode *, struct file *), + const struct cred *cred) +{ + struct file *res = do_dentry_open(dentry, mnt, f, open, cred); + if (!IS_ERR(res)) { + int error = open_check_o_direct(f); + if (error) { + fput(res); + res = ERR_PTR(error); + } + } else { + put_filp(f); + } + return res; +} + /** * lookup_instantiate_filp - instantiates the open intent filp * @nd: pointer to nameidata @@ -804,13 +824,31 @@ struct file *nameidata_to_filp(struct nameidata *nd) /* Pick up the filp from the open intent */ filp = nd->intent.open.file; - nd->intent.open.file = NULL; /* Has the filesystem initialised the file for us? 
*/ - if (filp->f_path.dentry == NULL) { + if (filp->f_path.dentry != NULL) { + nd->intent.open.file = NULL; + } else { + struct file *res; + path_get(&nd->path); - filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp, - NULL, cred); + res = do_dentry_open(nd->path.dentry, nd->path.mnt, + filp, NULL, cred); + if (!IS_ERR(res)) { + int error; + + nd->intent.open.file = NULL; + BUG_ON(res != filp); + + error = open_check_o_direct(filp); + if (error) { + fput(filp); + filp = ERR_PTR(error); + } + } else { + /* Allow nd->intent.open.file to be recycled */ + filp = res; + } } return filp; } diff --git a/fs/pipe.c b/fs/pipe.c index 95ebb56de494..49c1065256fd 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -654,8 +654,11 @@ out: wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } - if (ret > 0) - file_update_time(filp); + if (ret > 0) { + int err = file_update_time(filp); + if (err) + ret = err; + } return ret; } diff --git a/fs/pnode.c b/fs/pnode.c index ab5fa9e1a79a..bed378db0758 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -257,12 +257,12 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, prev_src_mnt = child; } out: - br_write_lock(vfsmount_lock); + br_write_lock(&vfsmount_lock); while (!list_empty(&tmp_list)) { child = list_first_entry(&tmp_list, struct mount, mnt_hash); umount_tree(child, 0, &umount_list); } - br_write_unlock(vfsmount_lock); + br_write_unlock(&vfsmount_lock); release_mounts(&umount_list); return ret; } diff --git a/fs/proc/base.c b/fs/proc/base.c index 616f41a7cde6..437195f204e1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) rcu_read_lock(); file = fcheck_files(files, fd); if (file) { - unsigned i_mode, f_mode = file->f_mode; + unsigned f_mode = file->f_mode; rcu_read_unlock(); put_files_struct(files); @@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) inode->i_gid = GLOBAL_ROOT_GID; } - i_mode = S_IFLNK; - if (f_mode & FMODE_READ) - i_mode |= S_IRUSR | S_IXUSR; - if (f_mode & FMODE_WRITE) - i_mode |= S_IWUSR | S_IXUSR; - inode->i_mode = i_mode; + if (S_ISLNK(inode->i_mode)) { + unsigned i_mode = S_IFLNK; + if (f_mode & FMODE_READ) + i_mode |= S_IRUSR | S_IXUSR; + if (f_mode & FMODE_WRITE) + i_mode |= S_IWUSR | S_IXUSR; + inode->i_mode = i_mode; + } security_task_to_inode(task, inode); put_task_struct(task); @@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, ei = PROC_I(inode); ei->fd = fd; + inode->i_mode = S_IFLNK; inode->i_op = &proc_pid_link_inode_operations; inode->i_size = 64; ei->op.proc_get_link = proc_fd_link; diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 12412852d88a..5e289a7cbad1 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -23,12 +23,12 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) poll_wait(file, &p->ns->poll, wait); - br_read_lock(vfsmount_lock); + br_read_lock(&vfsmount_lock); if (p->m.poll_event != ns->event) { p->m.poll_event = ns->event; res |= POLLERR | POLLPRI; } - br_read_unlock(vfsmount_lock); + br_read_unlock(&vfsmount_lock); return res; } diff --git a/fs/readdir.c b/fs/readdir.c index cc0a8227cddf..39e3370d79cf 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -108,11 +108,11 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, int error; struct file * file; struct readdir_callback buf; + int fput_needed; - error = -EBADF; - file = fget(fd); + 
file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.result = 0; buf.dirent = dirent; @@ -121,8 +121,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, if (buf.result) error = buf.result; - fput(file); -out: + fput_light(file, fput_needed); return error; } @@ -195,16 +194,15 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, struct file * file; struct linux_dirent __user * lastdirent; struct getdents_callback buf; + int fput_needed; int error; - error = -EFAULT; if (!access_ok(VERIFY_WRITE, dirent, count)) - goto out; + return -EFAULT; - error = -EBADF; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.current_dir = dirent; buf.previous = NULL; @@ -221,8 +219,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, else error = count - buf.count; } - fput(file); -out: + fput_light(file, fput_needed); return error; } @@ -278,16 +275,15 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, struct file * file; struct linux_dirent64 __user * lastdirent; struct getdents_callback64 buf; + int fput_needed; int error; - error = -EFAULT; if (!access_ok(VERIFY_WRITE, dirent, count)) - goto out; + return -EFAULT; - error = -EBADF; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (!file) - goto out; + return -EBADF; buf.current_dir = dirent; buf.previous = NULL; @@ -305,7 +301,6 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, else error = count - buf.count; } - fput(file); -out: + fput_light(file, fput_needed); return error; } diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 59d06871a850..a6d4268fb6c1 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1592,13 +1592,12 @@ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, (fh_type == 6) ? fid->raw[5] : 0); } -int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, - int need_parent) +int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp, + struct inode *parent) { - struct inode *inode = dentry->d_inode; int maxlen = *lenp; - if (need_parent && (maxlen < 5)) { + if (parent && (maxlen < 5)) { *lenp = 5; return 255; } else if (maxlen < 3) { @@ -1610,20 +1609,15 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); data[2] = inode->i_generation; *lenp = 3; - /* no room for directory info? 
return what we've stored so far */ - if (maxlen < 5 || !need_parent) - return 3; - - spin_lock(&dentry->d_lock); - inode = dentry->d_parent->d_inode; - data[3] = inode->i_ino; - data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); - *lenp = 5; - if (maxlen >= 6) { - data[5] = inode->i_generation; - *lenp = 6; - } - spin_unlock(&dentry->d_lock); + if (parent) { + data[3] = parent->i_ino; + data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id); + *lenp = 5; + if (maxlen >= 6) { + data[5] = parent->i_generation; + *lenp = 6; + } + } return *lenp; } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index b1a08573fe14..afcadcc03e8a 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1923,6 +1923,8 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, * the workqueue job (flush_async_commit) needs this lock */ reiserfs_write_unlock(sb); + + cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); flush_workqueue(commit_wq); if (!reiserfs_mounted_fs_count) { @@ -3231,8 +3233,6 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, th->t_trans_id, journal->j_trans_id); } - sb->s_dirt = 1; - prepared = test_clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); /* already in this transaction, we are done */ @@ -3316,6 +3316,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, journal->j_first = cn; journal->j_last = cn; } + reiserfs_schedule_old_flush(sb); return 0; } @@ -3492,7 +3493,7 @@ static void flush_async_commits(struct work_struct *work) ** flushes any old transactions to disk ** ends the current transaction if it is too old */ -int reiserfs_flush_old_commits(struct super_block *sb) +void reiserfs_flush_old_commits(struct super_block *sb) { time_t now; struct reiserfs_transaction_handle th; @@ -3502,9 +3503,8 @@ int reiserfs_flush_old_commits(struct super_block *sb) /* safety check so we don't flush while we are replaying the log during * mount */ - if (list_empty(&journal->j_journal_list)) { - return 0; - } + if (list_empty(&journal->j_journal_list)) + return; /* check the current transaction. 
If there are no writers, and it is * too old, finish it, and force the commit blocks to disk @@ -3526,7 +3526,6 @@ int reiserfs_flush_old_commits(struct super_block *sb) do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT); } } - return sb->s_dirt; } /* @@ -3955,7 +3954,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, ** it tells us if we should continue with the journal_end, or just return */ if (!check_journal_end(th, sb, nblocks, flags)) { - sb->s_dirt = 1; + reiserfs_schedule_old_flush(sb); wake_queued_writers(sb); reiserfs_async_progress_wait(sb); goto out; diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index a59d27126338..33215f57ea06 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -480,6 +480,11 @@ struct reiserfs_sb_info { struct dentry *priv_root; /* root of /.reiserfs_priv */ struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */ int j_errno; + + int work_queued; /* non-zero delayed work is queued */ + struct delayed_work old_work; /* old transactions flush delayed work */ + spinlock_t old_work_lock; /* protects old_work and work_queued */ + #ifdef CONFIG_QUOTA char *s_qf_names[MAXQUOTAS]; int s_jquota_fmt; @@ -2452,7 +2457,7 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *); int reiserfs_commit_page(struct inode *inode, struct page *page, unsigned from, unsigned to); -int reiserfs_flush_old_commits(struct super_block *); +void reiserfs_flush_old_commits(struct super_block *); int reiserfs_commit_for_inode(struct inode *); int reiserfs_inode_needs_commit(struct inode *); void reiserfs_update_inode_transaction(struct inode *); @@ -2487,6 +2492,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); int reiserfs_allocate_list_bitmaps(struct super_block *s, struct reiserfs_list_bitmap *, unsigned int); +void reiserfs_schedule_old_flush(struct super_block *s); void add_save_link(struct reiserfs_transaction_handle *th, struct inode *inode, int truncate); int remove_save_link(struct inode *inode, int truncate); @@ -2611,8 +2617,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); -int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, - int connectable); +int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp, + struct inode *parent); int reiserfs_truncate_file(struct inode *, int update_timestamps); void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset, diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c index 9a17f63c3fd7..3ce02cff5e90 100644 --- a/fs/reiserfs/resize.c +++ b/fs/reiserfs/resize.c @@ -200,7 +200,6 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new) (bmap_nr_new - bmap_nr))); PUT_SB_BLOCK_COUNT(s, block_count_new); PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new); - s->s_dirt = 1; journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index c07b7d709447..651ce767b55d 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -72,20 +72,58 @@ static int reiserfs_sync_fs(struct super_block *s, int wait) if (!journal_begin(&th, s, 1)) if (!journal_end_sync(&th, s, 1)) reiserfs_flush_old_commits(s); - s->s_dirt = 0; /* Even if it's not true. 
- * We'll loop forever in sync_supers otherwise */ reiserfs_write_unlock(s); return 0; } -static void reiserfs_write_super(struct super_block *s) +static void flush_old_commits(struct work_struct *work) { + struct reiserfs_sb_info *sbi; + struct super_block *s; + + sbi = container_of(work, struct reiserfs_sb_info, old_work.work); + s = sbi->s_journal->j_work_sb; + + spin_lock(&sbi->old_work_lock); + sbi->work_queued = 0; + spin_unlock(&sbi->old_work_lock); + reiserfs_sync_fs(s, 1); } +void reiserfs_schedule_old_flush(struct super_block *s) +{ + struct reiserfs_sb_info *sbi = REISERFS_SB(s); + unsigned long delay; + + if (s->s_flags & MS_RDONLY) + return; + + spin_lock(&sbi->old_work_lock); + if (!sbi->work_queued) { + delay = msecs_to_jiffies(dirty_writeback_interval * 10); + queue_delayed_work(system_long_wq, &sbi->old_work, delay); + sbi->work_queued = 1; + } + spin_unlock(&sbi->old_work_lock); +} + +static void cancel_old_flush(struct super_block *s) +{ + struct reiserfs_sb_info *sbi = REISERFS_SB(s); + + cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); + spin_lock(&sbi->old_work_lock); + sbi->work_queued = 0; + spin_unlock(&sbi->old_work_lock); +} + static int reiserfs_freeze(struct super_block *s) { struct reiserfs_transaction_handle th; + + cancel_old_flush(s); + reiserfs_write_lock(s); if (!(s->s_flags & MS_RDONLY)) { int err = journal_begin(&th, s, 1); @@ -99,7 +137,6 @@ static int reiserfs_freeze(struct super_block *s) journal_end_sync(&th, s, 1); } } - s->s_dirt = 0; reiserfs_write_unlock(s); return 0; } @@ -483,9 +520,6 @@ static void reiserfs_put_super(struct super_block *s) reiserfs_write_lock(s); - if (s->s_dirt) - reiserfs_write_super(s); - /* change file system state to current state if it was mounted with read-write permissions */ if (!(s->s_flags & MS_RDONLY)) { if (!journal_begin(&th, s, 10)) { @@ -692,7 +726,6 @@ static const struct super_operations reiserfs_sops = { .dirty_inode = reiserfs_dirty_inode, .evict_inode = reiserfs_evict_inode, .put_super = reiserfs_put_super, - .write_super = reiserfs_write_super, .sync_fs = reiserfs_sync_fs, .freeze_fs = reiserfs_freeze, .unfreeze_fs = reiserfs_unfreeze, @@ -1400,7 +1433,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) err = journal_end(&th, s, 10); if (err) goto out_err; - s->s_dirt = 0; if (!(*mount_flags & MS_RDONLY)) { dquot_resume(s, -1); @@ -1730,19 +1762,21 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) return -ENOMEM; s->s_fs_info = sbi; /* Set default values for options: non-aggressive tails, RO on errors */ - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); + sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL); + sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO); + sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); /* no preallocation minimum, be smart in reiserfs_file_write instead */ - REISERFS_SB(s)->s_alloc_options.preallocmin = 0; + sbi->s_alloc_options.preallocmin = 0; /* Preallocate by 16 blocks (17-1) at once */ - REISERFS_SB(s)->s_alloc_options.preallocsize = 17; + sbi->s_alloc_options.preallocsize = 17; /* setup default block allocator options */ reiserfs_init_alloc_options(s); - mutex_init(&REISERFS_SB(s)->lock); - REISERFS_SB(s)->lock_depth = -1; + spin_lock_init(&sbi->old_work_lock); + INIT_DELAYED_WORK(&sbi->old_work, flush_old_commits); + mutex_init(&sbi->lock); + sbi->lock_depth = -1; jdev_name = NULL; if 
(reiserfs_parse_options @@ -1751,8 +1785,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) goto error_unlocked; } if (jdev_name && jdev_name[0]) { - REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL); - if (!REISERFS_SB(s)->s_jdev) { + sbi->s_jdev = kstrdup(jdev_name, GFP_KERNEL); + if (!sbi->s_jdev) { SWARN(silent, s, "", "Cannot allocate memory for " "journal device name"); goto error; @@ -1810,7 +1844,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) /* make data=ordered the default */ if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) && !reiserfs_data_writeback(s)) { - REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED); + sbi->s_mount_opt |= (1 << REISERFS_DATA_ORDERED); } if (reiserfs_data_log(s)) { @@ -2003,6 +2037,8 @@ error_unlocked: reiserfs_write_unlock(s); } + cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); + reiserfs_free_bitmap_cache(s); if (SB_BUFFER_WITH_SB(s)) brelse(SB_BUFFER_WITH_SB(s)); diff --git a/fs/select.c b/fs/select.c index 17d33d09fc16..bae321569dfa 100644 --- a/fs/select.c +++ b/fs/select.c @@ -614,7 +614,6 @@ SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, return ret; } -#ifdef HAVE_SET_RESTORE_SIGMASK static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timespec __user *tsp, const sigset_t __user *sigmask, size_t sigsetsize) @@ -686,7 +685,6 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp, return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize); } -#endif /* HAVE_SET_RESTORE_SIGMASK */ #ifdef __ARCH_WANT_SYS_OLD_SELECT struct sel_arg_struct { @@ -941,7 +939,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, return ret; } -#ifdef HAVE_SET_RESTORE_SIGMASK SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, struct timespec __user *, tsp, const sigset_t __user *, sigmask, size_t, sigsetsize) @@ -992,4 +989,3 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, return ret; } -#endif /* HAVE_SET_RESTORE_SIGMASK */ diff --git a/fs/signalfd.c b/fs/signalfd.c index 7ae2a574cb25..9f35a37173de 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -269,12 +269,13 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, if (ufd < 0) kfree(ctx); } else { - struct file *file = fget(ufd); + int fput_needed; + struct file *file = fget_light(ufd, &fput_needed); if (!file) return -EBADF; ctx = file->private_data; if (file->f_op != &signalfd_fops) { - fput(file); + fput_light(file, fput_needed); return -EINVAL; } spin_lock_irq(&current->sighand->siglock); @@ -282,7 +283,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, spin_unlock_irq(&current->sighand->siglock); wake_up(&current->sighand->signalfd_wqh); - fput(file); + fput_light(file, fput_needed); } return ufd; diff --git a/fs/splice.c b/fs/splice.c index 406ef2b792c2..c9f1318a3b82 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1003,8 +1003,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); ret = file_remove_suid(out); if (!ret) { - file_update_time(out); - ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file); + ret = file_update_time(out); + if (!ret) + ret = splice_from_pipe_feed(pipe, &sd, + pipe_to_file); } mutex_unlock(&inode->i_mutex); } while (ret > 0); diff --git a/fs/statfs.c b/fs/statfs.c index 43e6b6fe4e85..95ad5c0e586c 100644 --- a/fs/statfs.c +++
b/fs/statfs.c @@ -87,11 +87,12 @@ int user_statfs(const char __user *pathname, struct kstatfs *st) int fd_statfs(int fd, struct kstatfs *st) { - struct file *file = fget(fd); + int fput_needed; + struct file *file = fget_light(fd, &fput_needed); int error = -EBADF; if (file) { error = vfs_statfs(&file->f_path, st); - fput(file); + fput_light(file, fput_needed); } return error; } diff --git a/fs/sync.c b/fs/sync.c index 0e8db939d96f..11e3d1c44901 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -188,11 +188,12 @@ static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; + int fput_needed; - file = fget(fd); + file = fget_light(fd, &fput_needed); if (file) { ret = vfs_fsync(file, datasync); - fput(file); + fput_light(file, fput_needed); } return ret; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 62a2727f4ecf..a6d42efc76d2 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1127,16 +1127,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct ubifs_inode *ui = ubifs_inode(inode); mutex_lock(&ui->ui_mutex); - stat->dev = inode->i_sb->s_dev; - stat->ino = inode->i_ino; - stat->mode = inode->i_mode; - stat->nlink = inode->i_nlink; - stat->uid = inode->i_uid; - stat->gid = inode->i_gid; - stat->rdev = inode->i_rdev; - stat->atime = inode->i_atime; - stat->mtime = inode->i_mtime; - stat->ctime = inode->i_ctime; + generic_fillattr(inode, stat); stat->blksize = UBIFS_BLOCK_SIZE; stat->size = ui->ui_size; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index a165c66e3eef..18024178ac4c 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -1260,16 +1260,15 @@ static struct dentry *udf_fh_to_parent(struct super_block *sb, fid->udf.parent_partref, fid->udf.parent_generation); } -static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp, - int connectable) +static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp, + struct inode *parent) { int len = *lenp; - struct inode *inode = de->d_inode; struct kernel_lb_addr location = UDF_I(inode)->i_location; struct fid *fid = (struct fid *)fh; int type = FILEID_UDF_WITHOUT_PARENT; - if (connectable && (len < 5)) { + if (parent && (len < 5)) { *lenp = 5; return 255; } else if (len < 3) { @@ -1282,14 +1281,11 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp, fid->udf.partref = location.partitionReferenceNum; fid->udf.generation = inode->i_generation; - if (connectable && !S_ISDIR(inode->i_mode)) { - spin_lock(&de->d_lock); - inode = de->d_parent->d_inode; - location = UDF_I(inode)->i_location; + if (parent) { + location = UDF_I(parent)->i_location; fid->udf.parent_block = location.logicalBlockNum; fid->udf.parent_partref = location.partitionReferenceNum; fid->udf.parent_generation = inode->i_generation; - spin_unlock(&de->d_lock); *lenp = 5; type = FILEID_UDF_WITH_PARENT; } diff --git a/fs/utimes.c b/fs/utimes.c index ba653f3dc1bc..fa4dbe451e27 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -140,18 +140,19 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times, goto out; if (filename == NULL && dfd != AT_FDCWD) { + int fput_needed; struct file *file; if (flags & AT_SYMLINK_NOFOLLOW) goto out; - file = fget(dfd); + file = fget_light(dfd, &fput_needed); error = -EBADF; if (!file) goto out; error = utimes_common(&file->f_path, times); - fput(file); + fput_light(file, fput_needed); } else { struct path path; int lookup_flags = 0; diff --git a/fs/xattr.c b/fs/xattr.c index 3c8c1cc333c7..1d7ac3790458 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -399,11 +399,12 @@ 
SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname, SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, const void __user *,value, size_t, size, int, flags) { + int fput_needed; struct file *f; struct dentry *dentry; int error = -EBADF; - f = fget(fd); + f = fget_light(fd, &fput_needed); if (!f) return error; dentry = f->f_path.dentry; @@ -413,7 +414,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, error = setxattr(dentry, name, value, size, flags); mnt_drop_write_file(f); } - fput(f); + fput_light(f, fput_needed); return error; } @@ -486,15 +487,16 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname, SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size) { + int fput_needed; struct file *f; ssize_t error = -EBADF; - f = fget(fd); + f = fget_light(fd, &fput_needed); if (!f) return error; audit_inode(NULL, f->f_path.dentry); error = getxattr(f->f_path.dentry, name, value, size); - fput(f); + fput_light(f, fput_needed); return error; } @@ -566,15 +568,16 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list, SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) { + int fput_needed; struct file *f; ssize_t error = -EBADF; - f = fget(fd); + f = fget_light(fd, &fput_needed); if (!f) return error; audit_inode(NULL, f->f_path.dentry); error = listxattr(f->f_path.dentry, list, size); - fput(f); + fput_light(f, fput_needed); return error; } @@ -634,11 +637,12 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname, SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) { + int fput_needed; struct file *f; struct dentry *dentry; int error = -EBADF; - f = fget(fd); + f = fget_light(fd, &fput_needed); if (!f) return error; dentry = f->f_path.dentry; @@ -648,7 +652,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) error = removexattr(dentry, name); mnt_drop_write_file(f); } - fput(f); + fput_light(f, fput_needed); return error; } diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index a907de565db3..4a7286c1dc80 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -46,7 +46,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) } void * -kmem_alloc(size_t size, unsigned int __nocast flags) +kmem_alloc(size_t size, xfs_km_flags_t flags) { int retries = 0; gfp_t lflags = kmem_flags_convert(flags); @@ -65,7 +65,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags) } void * -kmem_zalloc(size_t size, unsigned int __nocast flags) +kmem_zalloc(size_t size, xfs_km_flags_t flags) { void *ptr; @@ -87,7 +87,7 @@ kmem_free(const void *ptr) void * kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, - unsigned int __nocast flags) + xfs_km_flags_t flags) { void *new; @@ -102,7 +102,7 @@ kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, } void * -kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) +kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags) { int retries = 0; gfp_t lflags = kmem_flags_convert(flags); @@ -121,7 +121,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) } void * -kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) +kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) { void *ptr; diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index ab7c53fe346e..b2f2620f9a87 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -27,10 +27,11 @@ * General memory allocation interfaces */ -#define KM_SLEEP 0x0001u -#define KM_NOSLEEP 0x0002u -#define KM_NOFS 0x0004u 
-#define KM_MAYFAIL 0x0008u +typedef unsigned __bitwise xfs_km_flags_t; +#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u) +#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u) +#define KM_NOFS ((__force xfs_km_flags_t)0x0004u) +#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u) /* * We use a special process flag to avoid recursive callbacks into @@ -38,7 +39,7 @@ * warnings, so we explicitly skip any generic ones (silly of us). */ static inline gfp_t -kmem_flags_convert(unsigned int __nocast flags) +kmem_flags_convert(xfs_km_flags_t flags) { gfp_t lflags; @@ -54,9 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags) return lflags; } -extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); +extern void *kmem_alloc(size_t, xfs_km_flags_t); +extern void *kmem_zalloc(size_t, xfs_km_flags_t); +extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t); extern void kmem_free(const void *); static inline void *kmem_zalloc_large(size_t size) @@ -107,7 +108,7 @@ kmem_zone_destroy(kmem_zone_t *zone) kmem_cache_destroy(zone); } -extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); -extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); +extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t); +extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t); #endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 2d25d19c4ea1..42679223a0fd 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -52,19 +52,18 @@ static int xfs_fileid_length(int fileid_type) STATIC int xfs_fs_encode_fh( - struct dentry *dentry, - __u32 *fh, - int *max_len, - int connectable) + struct inode *inode, + __u32 *fh, + int *max_len, + struct inode *parent) { struct fid *fid = (struct fid *)fh; struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh; - struct inode *inode = dentry->d_inode; int fileid_type; int len; /* Directories don't need their parent encoded, they have ".." */ - if (S_ISDIR(inode->i_mode) || !connectable) + if (!parent) fileid_type = FILEID_INO32_GEN; else fileid_type = FILEID_INO32_GEN_PARENT; @@ -96,20 +95,16 @@ xfs_fs_encode_fh( switch (fileid_type) { case FILEID_INO32_GEN_PARENT: - spin_lock(&dentry->d_lock); - fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; - fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation; - spin_unlock(&dentry->d_lock); + fid->i32.parent_ino = XFS_I(parent)->i_ino; + fid->i32.parent_gen = parent->i_generation; /*FALLTHRU*/ case FILEID_INO32_GEN: fid->i32.ino = XFS_I(inode)->i_ino; fid->i32.gen = inode->i_generation; break; case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: - spin_lock(&dentry->d_lock); - fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; - fid64->parent_gen = dentry->d_parent->d_inode->i_generation; - spin_unlock(&dentry->d_lock); + fid64->parent_ino = XFS_I(parent)->i_ino; + fid64->parent_gen = parent->i_generation; /*FALLTHRU*/ case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: fid64->ino = XFS_I(inode)->i_ino; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 8d214b87f6bb..9f7ec15a6522 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -586,8 +586,11 @@ restart: * lock above. Eventually we should look into a way to avoid * the pointless lock roundtrip. 
*/ - if (likely(!(file->f_mode & FMODE_NOCMTIME))) - file_update_time(file); + if (likely(!(file->f_mode & FMODE_NOCMTIME))) { + error = file_update_time(file); + if (error) + return error; + } /* * If we're writing the file then make sure to clear the setuid and diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 6b965bf450e4..f30d9807dc48 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -3152,7 +3152,7 @@ xlog_ticket_alloc( int cnt, char client, bool permanent, - int alloc_flags) + xfs_km_flags_t alloc_flags) { struct xlog_ticket *tic; uint num_headers; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 735ff1ee53da..5bc33261f5be 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -555,7 +555,7 @@ extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); extern kmem_zone_t *xfs_log_ticket_zone; struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, int count, char client, bool permanent, - int alloc_flags); + xfs_km_flags_t alloc_flags); static inline void diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index cdf896fcbfa4..fdf324508c5e 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -584,7 +584,7 @@ xfs_trans_t * _xfs_trans_alloc( xfs_mount_t *mp, uint type, - uint memflags) + xfs_km_flags_t memflags) { xfs_trans_t *tp; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 7ab99e1898c8..7c37b533aa8e 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -443,7 +443,7 @@ typedef struct xfs_trans { * XFS transaction mechanism exported interfaces. */ xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); -xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t); xfs_trans_t *xfs_trans_dup(xfs_trans_t *); int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, uint, uint); diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h index 91d44bd4dde3..fe74fccf18db 100644 --- a/include/asm-generic/posix_types.h +++ b/include/asm-generic/posix_types.h @@ -23,10 +23,6 @@ typedef __kernel_ulong_t __kernel_ino_t; typedef unsigned int __kernel_mode_t; #endif -#ifndef __kernel_nlink_t -typedef __kernel_ulong_t __kernel_nlink_t; -#endif - #ifndef __kernel_pid_t typedef int __kernel_pid_t; #endif diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 81e803e90aa4..acba894374a1 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); +extern void clockevents_config(struct clock_event_device *dev, u32 freq); extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e988037abd2a..51a90b7f2d60 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -1,8 +1,6 @@ #ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H -#include <linux/node.h> - /* Return values for compact_zone() and try_to_compact_pages() */ /* compaction didn't start as it was not possible or direct reclaim was more suitable */ #define COMPACT_SKIPPED 0 @@ -13,23 +11,6 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 3 -/* - * compaction supports three modes - * - * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans - * MIGRATE_MOVABLE 
pageblocks as migration sources and targets. - * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans - * MIGRATE_MOVABLE pageblocks as migration sources. - * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration - * targets and convers them to MIGRATE_MOVABLE if possible - * COMPACT_SYNC uses synchronous migration and scans all pageblocks - */ -enum compact_mode { - COMPACT_ASYNC_MOVABLE, - COMPACT_ASYNC_UNMOVABLE, - COMPACT_SYNC, -}; - #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, diff --git a/include/linux/errno.h b/include/linux/errno.h index 2d09bfa5c262..e0de516374da 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -17,6 +17,7 @@ #define ENOIOCTLCMD 515 /* No ioctl command */ #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ #define EPROBE_DEFER 517 /* Driver requests probe retry */ +#define EOPENSTALE 518 /* open found a stale dentry */ /* Defined for the NFSv3 protocol */ #define EBADHANDLE 521 /* Illegal NFS file handle */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 3a4cef5322dc..12291a7ee275 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -165,8 +165,8 @@ struct fid { */ struct export_operations { - int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, - int connectable); + int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, + struct inode *parent); struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid, diff --git a/include/linux/fb.h b/include/linux/fb.h index a3229d7ab9f2..ac3f1c605843 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -611,6 +611,7 @@ struct fb_deferred_io { struct mutex lock; /* mutex that protects the page list */ struct list_head pagelist; /* list of touched pages */ /* callback */ + void (*first_io)(struct fb_info *info); void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); }; #endif diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 000000000000..0e4e2eec5c1d --- /dev/null +++ b/include/linux/frontswap.h @@ -0,0 +1,127 @@ +#ifndef _LINUX_FRONTSWAP_H +#define _LINUX_FRONTSWAP_H + +#include <linux/swap.h> +#include <linux/mm.h> +#include <linux/bitops.h> + +struct frontswap_ops { + void (*init)(unsigned); + int (*store)(unsigned, pgoff_t, struct page *); + int (*load)(unsigned, pgoff_t, struct page *); + void (*invalidate_page)(unsigned, pgoff_t); + void (*invalidate_area)(unsigned); +}; + +extern bool frontswap_enabled; +extern struct frontswap_ops + frontswap_register_ops(struct frontswap_ops *ops); +extern void frontswap_shrink(unsigned long); +extern unsigned long frontswap_curr_pages(void); +extern void frontswap_writethrough(bool); + +extern void __frontswap_init(unsigned type); +extern int __frontswap_store(struct page *page); +extern int __frontswap_load(struct page *page); +extern void __frontswap_invalidate_page(unsigned, pgoff_t); +extern void __frontswap_invalidate_area(unsigned); + +#ifdef CONFIG_FRONTSWAP + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + bool ret = false; + + if (frontswap_enabled && sis->frontswap_map) + ret = test_bit(offset, sis->frontswap_map); + return ret; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && 
sis->frontswap_map) + set_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && sis->frontswap_map) + clear_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ + p->frontswap_map = map; +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return p->frontswap_map; +} +#else +/* all inline routines become no-ops and all externs are ignored */ + +#define frontswap_enabled (0) + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + return false; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return NULL; +} +#endif + +static inline int frontswap_store(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_store(page); + return ret; +} + +static inline int frontswap_load(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_load(page); + return ret; +} + +static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + if (frontswap_enabled) + __frontswap_invalidate_page(type, offset); +} + +static inline void frontswap_invalidate_area(unsigned type) +{ + if (frontswap_enabled) + __frontswap_invalidate_area(type); +} + +static inline void frontswap_init(unsigned type) +{ + if (frontswap_enabled) + __frontswap_init(type); +} + +#endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 40887afaaca7..17fd887c798f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -802,13 +802,14 @@ struct inode { unsigned int __i_nlink; }; dev_t i_rdev; + loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; + unsigned int i_blkbits; blkcnt_t i_blocks; - loff_t i_size; #ifdef __NEED_I_SIZE_ORDERED seqcount_t i_size_seqcount; @@ -828,9 +829,8 @@ struct inode { struct list_head i_dentry; struct rcu_head i_rcu; }; - atomic_t i_count; - unsigned int i_blkbits; u64 i_version; + atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ @@ -1692,6 +1692,7 @@ struct inode_operations { int (*removexattr) (struct dentry *, const char *); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); + int (*update_time)(struct inode *, struct timespec *, int); } ____cacheline_aligned; struct seq_file; @@ -1850,6 +1851,13 @@ static inline void inode_inc_iversion(struct inode *inode) spin_unlock(&inode->i_lock); } +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + extern void touch_atime(struct path *); static inline void file_accessed(struct file *file) { @@ -2583,7 +2591,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); extern void setattr_copy(struct inode *inode, const struct iattr *attr); -extern void file_update_time(struct file *file); +extern int file_update_time(struct file *file); extern int generic_show_options(struct 
seq_file *m, struct dentry *root); extern void save_mount_options(struct super_block *sb, char *options); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 91d0e0a34ef3..63d966d5c2ea 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -60,7 +60,7 @@ #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ - FS_DELETE) + FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM) #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 912c30a8ddb1..f334c7fab967 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -31,6 +31,7 @@ #include <linux/mutex.h> #include <linux/timer.h> #include <linux/slab.h> +#include <crypto/hash.h> #endif #define journal_oom_retry 1 @@ -147,12 +148,24 @@ typedef struct journal_header_s #define JBD2_CRC32_CHKSUM 1 #define JBD2_MD5_CHKSUM 2 #define JBD2_SHA1_CHKSUM 3 +#define JBD2_CRC32C_CHKSUM 4 #define JBD2_CRC32_CHKSUM_SIZE 4 #define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) /* * Commit block header for storing transactional checksums: + * + * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum* + * fields are used to store a checksum of the descriptor and data blocks. + * + * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum + * field is used to store crc32c(uuid+commit_block). Each journal metadata + * block gets its own checksum, and data block checksums are stored in + * journal_block_tag (in the descriptor). The other h_chksum* fields are + * not used. + * + * Checksum v1 and v2 are mutually exclusive features. */ struct commit_header { __be32 h_magic; @@ -175,13 +188,19 @@ struct commit_header { typedef struct journal_block_tag_s { __be32 t_blocknr; /* The on-disk block number */ - __be32 t_flags; /* See below */ + __be16 t_checksum; /* truncated crc32c(uuid+seq+block) */ + __be16 t_flags; /* See below */ __be32 t_blocknr_high; /* most-significant high 32bits. */ } journal_block_tag_t; #define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high)) #define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t)) +/* Tail of descriptor block, for checksumming */ +struct jbd2_journal_block_tail { + __be32 t_checksum; /* crc32c(uuid+descr_block) */ +}; + /* * The revoke descriptor: used on disk to describe a series of blocks to * be revoked from the log @@ -192,6 +211,10 @@ typedef struct jbd2_journal_revoke_header_s __be32 r_count; /* Count of bytes used in the block */ } jbd2_journal_revoke_header_t; +/* Tail of revoke block, for checksumming */ +struct jbd2_journal_revoke_tail { + __be32 r_checksum; /* crc32c(uuid+revoke_block) */ +}; /* Definitions for the journal tag flags word: */ #define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ @@ -241,7 +264,10 @@ typedef struct journal_superblock_s __be32 s_max_trans_data; /* Limit of data blocks per trans. 
*/ /* 0x0050 */ - __u32 s_padding[44]; + __u8 s_checksum_type; /* checksum type */ + __u8 s_padding2[3]; + __u32 s_padding[42]; + __be32 s_checksum; /* crc32c(superblock) */ /* 0x0100 */ __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ @@ -263,13 +289,15 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 +#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 /* Features known to this kernel version: */ #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ JBD2_FEATURE_INCOMPAT_64BIT | \ - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ + JBD2_FEATURE_INCOMPAT_CSUM_V2) #ifdef __KERNEL__ @@ -939,6 +967,12 @@ struct journal_s * superblock pointer here */ void *j_private; + + /* Reference to checksum algorithm driver via cryptoapi */ + struct crypto_shash *j_chksum_driver; + + /* Precomputed journal UUID checksum for seeding other checksums */ + __u32 j_csum_seed; }; /* @@ -1268,6 +1302,25 @@ static inline int jbd_space_needed(journal_t *journal) extern int jbd_blocks_per_page(struct inode *inode); +static inline u32 jbd2_chksum(journal_t *journal, u32 crc, + const void *address, unsigned int length) +{ + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(journal->j_chksum_driver)]; + } desc; + int err; + + desc.shash.tfm = journal->j_chksum_driver; + desc.shash.flags = 0; + *(u32 *)desc.ctx = crc; + + err = crypto_shash_update(&desc.shash, address, length); + BUG_ON(err); + + return *(u32 *)desc.ctx; +} + #ifdef __KERNEL__ #define buffer_trace_init(bh) do {} while (0) diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h index 6230f8556a4e..6133679bc4c0 100644 --- a/include/linux/jbd_common.h +++ b/include/linux/jbd_common.h @@ -12,6 +12,7 @@ enum jbd_state_bits { BH_State, /* Pins most journal_head state */ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ + BH_Verified, /* Metadata block has been verified ok */ BH_JBDPrivateStart, /* First bit available for private use by FS */ }; @@ -24,6 +25,7 @@ TAS_BUFFER_FNS(Revoked, revoked) BUFFER_FNS(RevokeValid, revokevalid) TAS_BUFFER_FNS(RevokeValid, revokevalid) BUFFER_FNS(Freed, freed) +BUFFER_FNS(Verified, verified) static inline struct buffer_head *jh2bh(struct journal_head *jh) { diff --git a/include/linux/lglock.h b/include/linux/lglock.h index 87f402ccec55..f01e5f6d1f07 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -23,28 +23,17 @@ #include <linux/lockdep.h> #include <linux/percpu.h> #include <linux/cpu.h> +#include <linux/notifier.h> /* can make br locks by using local lock for read side, global lock for write */ -#define br_lock_init(name) name##_lock_init() -#define br_read_lock(name) name##_local_lock() -#define br_read_unlock(name) name##_local_unlock() -#define br_write_lock(name) name##_global_lock_online() -#define br_write_unlock(name) name##_global_unlock_online() +#define br_lock_init(name) lg_lock_init(name, #name) +#define br_read_lock(name) lg_local_lock(name) +#define br_read_unlock(name) lg_local_unlock(name) +#define br_write_lock(name) lg_global_lock(name) +#define br_write_unlock(name) lg_global_unlock(name) -#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name) #define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) - -#define 
lg_lock_init(name) name##_lock_init() -#define lg_local_lock(name) name##_local_lock() -#define lg_local_unlock(name) name##_local_unlock() -#define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu) -#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu) -#define lg_global_lock(name) name##_global_lock() -#define lg_global_unlock(name) name##_global_unlock() -#define lg_global_lock_online(name) name##_global_lock_online() -#define lg_global_unlock_online(name) name##_global_unlock_online() - #ifdef CONFIG_DEBUG_LOCK_ALLOC #define LOCKDEP_INIT_MAP lockdep_init_map @@ -59,142 +48,26 @@ #define DEFINE_LGLOCK_LOCKDEP(name) #endif - -#define DECLARE_LGLOCK(name) \ - extern void name##_lock_init(void); \ - extern void name##_local_lock(void); \ - extern void name##_local_unlock(void); \ - extern void name##_local_lock_cpu(int cpu); \ - extern void name##_local_unlock_cpu(int cpu); \ - extern void name##_global_lock(void); \ - extern void name##_global_unlock(void); \ - extern void name##_global_lock_online(void); \ - extern void name##_global_unlock_online(void); \ +struct lglock { + arch_spinlock_t __percpu *lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lock_key; + struct lockdep_map lock_dep_map; +#endif +}; #define DEFINE_LGLOCK(name) \ - \ - DEFINE_SPINLOCK(name##_cpu_lock); \ - cpumask_t name##_cpus __read_mostly; \ - DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ - DEFINE_LGLOCK_LOCKDEP(name); \ - \ - static int \ - name##_lg_cpu_callback(struct notifier_block *nb, \ - unsigned long action, void *hcpu) \ - { \ - switch (action & ~CPU_TASKS_FROZEN) { \ - case CPU_UP_PREPARE: \ - spin_lock(&name##_cpu_lock); \ - cpu_set((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - break; \ - case CPU_UP_CANCELED: case CPU_DEAD: \ - spin_lock(&name##_cpu_lock); \ - cpu_clear((unsigned long)hcpu, name##_cpus); \ - spin_unlock(&name##_cpu_lock); \ - } \ - return NOTIFY_OK; \ - } \ - static struct notifier_block name##_lg_cpu_notifier = { \ - .notifier_call = name##_lg_cpu_callback, \ - }; \ - void name##_lock_init(void) { \ - int i; \ - LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ - for_each_possible_cpu(i) { \ - arch_spinlock_t *lock; \ - lock = &per_cpu(name##_lock, i); \ - *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ - } \ - register_hotcpu_notifier(&name##_lg_cpu_notifier); \ - get_online_cpus(); \ - for_each_online_cpu(i) \ - cpu_set(i, name##_cpus); \ - put_online_cpus(); \ - } \ - EXPORT_SYMBOL(name##_lock_init); \ - \ - void name##_local_lock(void) { \ - arch_spinlock_t *lock; \ - preempt_disable(); \ - rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ - lock = &__get_cpu_var(name##_lock); \ - arch_spin_lock(lock); \ - } \ - EXPORT_SYMBOL(name##_local_lock); \ - \ - void name##_local_unlock(void) { \ - arch_spinlock_t *lock; \ - rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ - lock = &__get_cpu_var(name##_lock); \ - arch_spin_unlock(lock); \ - preempt_enable(); \ - } \ - EXPORT_SYMBOL(name##_local_unlock); \ - \ - void name##_local_lock_cpu(int cpu) { \ - arch_spinlock_t *lock; \ - preempt_disable(); \ - rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ - lock = &per_cpu(name##_lock, cpu); \ - arch_spin_lock(lock); \ - } \ - EXPORT_SYMBOL(name##_local_lock_cpu); \ - \ - void name##_local_unlock_cpu(int cpu) { \ - arch_spinlock_t *lock; \ - rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ - lock = &per_cpu(name##_lock, cpu); \ - arch_spin_unlock(lock); \ - 
preempt_enable(); \ - } \ - EXPORT_SYMBOL(name##_local_unlock_cpu); \ - \ - void name##_global_lock_online(void) { \ - int i; \ - spin_lock(&name##_cpu_lock); \ - rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ - arch_spinlock_t *lock; \ - lock = &per_cpu(name##_lock, i); \ - arch_spin_lock(lock); \ - } \ - } \ - EXPORT_SYMBOL(name##_global_lock_online); \ - \ - void name##_global_unlock_online(void) { \ - int i; \ - rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_cpu(i, &name##_cpus) { \ - arch_spinlock_t *lock; \ - lock = &per_cpu(name##_lock, i); \ - arch_spin_unlock(lock); \ - } \ - spin_unlock(&name##_cpu_lock); \ - } \ - EXPORT_SYMBOL(name##_global_unlock_online); \ - \ - void name##_global_lock(void) { \ - int i; \ - preempt_disable(); \ - rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_possible_cpu(i) { \ - arch_spinlock_t *lock; \ - lock = &per_cpu(name##_lock, i); \ - arch_spin_lock(lock); \ - } \ - } \ - EXPORT_SYMBOL(name##_global_lock); \ - \ - void name##_global_unlock(void) { \ - int i; \ - rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_possible_cpu(i) { \ - arch_spinlock_t *lock; \ - lock = &per_cpu(name##_lock, i); \ - arch_spin_unlock(lock); \ - } \ - preempt_enable(); \ - } \ - EXPORT_SYMBOL(name##_global_unlock); + DEFINE_LGLOCK_LOCKDEP(name); \ + DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + struct lglock name = { .lock = &name ## _lock } + +void lg_lock_init(struct lglock *lg, char *name); +void lg_local_lock(struct lglock *lg); +void lg_local_unlock(struct lglock *lg); +void lg_local_lock_cpu(struct lglock *lg, int cpu); +void lg_local_unlock_cpu(struct lglock *lg, int cpu); +void lg_global_lock(struct lglock *lg); +void lg_global_unlock(struct lglock *lg); + #endif diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6e27fa99e8b9..6a8f002b8ed3 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -64,6 +64,7 @@ enum { MLX4_MAX_NUM_PF = 16, MLX4_MAX_NUM_VF = 64, MLX4_MFUNC_MAX = 80, + MLX4_MAX_EQ_NUM = 1024, MLX4_MFUNC_EQ_NUM = 4, MLX4_MFUNC_MAX_EQES = 8, MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) @@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) return (major << 32) | (minor << 16) | subminor; } +struct mlx4_phys_caps { + u32 num_phys_eqs; +}; + struct mlx4_caps { u64 fw_ver; u32 function; @@ -499,6 +504,7 @@ struct mlx4_dev { unsigned long flags; unsigned long num_slaves; struct mlx4_caps caps; + struct mlx4_phys_caps phys_caps; struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; diff --git a/include/linux/mm.h b/include/linux/mm.h index ce26716238c3..b36d08ce5c57 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1392,7 +1392,7 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff); -extern unsigned long do_mmap(struct file *, unsigned long, +extern unsigned long do_mmap_pgoff(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); extern int do_munmap(struct mm_struct *, unsigned long, size_t); diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h index 69b6dbf46b5e..ed3c4e09f3d1 100644 --- a/include/linux/mtd/gpmi-nand.h +++ b/include/linux/mtd/gpmi-nand.h @@ -23,12 
+23,12 @@ #define GPMI_NAND_RES_SIZE 6 /* Resource names for the GPMI NAND driver. */ -#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "GPMI NAND GPMI Registers" +#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" -#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "GPMI NAND BCH Registers" -#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "GPMI NAND BCH Interrupt" +#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" +#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" #define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" -#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "GPMI NAND DMA Interrupt" +#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma" /** * struct gpmi_nand_platform_data - GPMI NAND driver platform data. diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index cf5ea8cdcf8e..63dadc0dfb62 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -157,6 +157,15 @@ struct mtd_info { unsigned int erasesize_mask; unsigned int writesize_mask; + /* + * read ops return -EUCLEAN if max number of bitflips corrected on any + * one region comprising an ecc step equals or exceeds this value. + * Settable by driver, else defaults to ecc_strength. User can override + * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed; + * see Documentation/ABI/testing/sysfs-class-mtd for more detail. + */ + unsigned int bitflip_threshold; + // Kernel-only stuff starts here. const char *name; int index; @@ -164,7 +173,7 @@ struct mtd_info { /* ECC layout structure pointer - read only! */ struct nand_ecclayout *ecclayout; - /* max number of correctible bit errors per writesize */ + /* max number of correctible bit errors per ecc step */ unsigned int ecc_strength; /* Data for variable erase regions. If numeraseregions is zero, diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 1482340d3d9f..57977c640529 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -161,8 +161,6 @@ typedef enum { * Option constants for bizarre disfunctionality and real * features. */ -/* Chip can not auto increment pages */ -#define NAND_NO_AUTOINCR 0x00000001 /* Buswidth is 16 bit */ #define NAND_BUSWIDTH_16 0x00000002 /* Device supports partial programming without padding */ @@ -207,7 +205,6 @@ typedef enum { (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) /* Macros to identify the above */ -#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR)) #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) @@ -216,7 +213,7 @@ typedef enum { && (chip->page_shift > 9)) /* Mask to zero out the chip options, which come from the id table */ -#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) +#define NAND_CHIPOPTIONS_MSK 0x0000ffff /* Non chip related options */ /* This option skips the bbt scan during initialization. 
*/ @@ -363,21 +360,20 @@ struct nand_ecc_ctrl { int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc); int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page); + uint8_t *buf, int oob_required, int page); void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf); + const uint8_t *buf, int oob_required); int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int page); + uint8_t *buf, int oob_required, int page); int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offs, uint32_t len, uint8_t *buf); void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf); + const uint8_t *buf, int oob_required); int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, int page); int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, - int page, int sndcmd); - int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page, - int sndcmd); + int page); + int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page); int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page); }; @@ -459,6 +455,8 @@ struct nand_buffers { * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 * @pagebuf: [INTERN] holds the pagenumber which is currently in * data_buf. + * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is + * currently in data_buf. * @subpagesize: [INTERN] holds the subpagesize * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), * non 0 if ONFI supported. @@ -505,7 +503,8 @@ struct nand_chip { int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page); int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int page, int cached, int raw); + const uint8_t *buf, int oob_required, int page, + int cached, int raw); int chip_delay; unsigned int options; @@ -519,6 +518,7 @@ struct nand_chip { uint64_t chipsize; int pagemask; int pagebuf; + unsigned int pagebuf_bitflips; int subpagesize; uint8_t cellinfo; int badblockpos; @@ -654,6 +654,7 @@ struct platform_nand_ctrl { void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + unsigned char (*read_byte)(struct mtd_info *mtd); void *priv; }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 660c8ae93471..f34437e835a7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2207,6 +2207,20 @@ extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); +static inline void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(&current->saved_sigmask); +} + +static inline sigset_t *sigmask_to_save(void) +{ + sigset_t *res = &current->blocked; + if (unlikely(test_restore_sigmask())) + res = &current->saved_sigmask; + return res; +} + static inline int kill_cad_pid(int sig, int priv) { return kill_pid(cad_pid, sig, priv); diff --git a/include/linux/security.h b/include/linux/security.h index ab0e091ce5fa..4e5a73cdbbef 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -86,9 +86,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char
*name, extern int cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); -extern int cap_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only); +extern int cap_mmap_addr(unsigned long addr); +extern int cap_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags); extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -586,15 +586,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * simple integer value. When @arg represents a user space pointer, it * should never be used by the security module. * Return 0 if permission is granted. - * @file_mmap : + * @mmap_addr : + * Check permissions for a mmap operation at @addr. + * @addr contains virtual address that will be used for the operation. + * Return 0 if permission is granted. + * @mmap_file : * Check permissions for a mmap operation. The @file may be NULL, e.g. * if mapping anonymous memory. * @file contains the file structure for file to map (may be NULL). * @reqprot contains the protection requested by the application. * @prot contains the protection that will be applied by the kernel. * @flags contains the operational flags. - * @addr contains virtual address that will be used for the operation. - * @addr_only contains a boolean: 0 if file-backed VMA, otherwise 1. * Return 0 if permission is granted. * @file_mprotect: * Check permissions before changing memory access permissions. 
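To see how the split hooks are meant to fit together, a minimal sketch of a hypothetical caller follows; the helper name is made up, the placement of the address check is an assumption, and only declarations that appear in the surrounding hunks (security_mmap_file, security_mmap_addr, do_mmap_pgoff) are used. The file-based check mirrors the pattern the ipc/shm.c hunk adopts later in this diff.

/* Illustrative sketch only, not part of the patch. */
static unsigned long sketch_map_file(struct file *file, unsigned long addr,
				     unsigned long len, unsigned long prot,
				     unsigned long flags)
{
	unsigned long ret;
	int err;

	/* File-based check: no addr/addr_only arguments any more. */
	err = security_mmap_file(file, prot, flags);
	if (err)
		return err;

	/* Address-based check is now a separate hook (placement assumed here). */
	err = security_mmap_addr(addr);
	if (err)
		return err;

	down_write(&current->mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flags, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}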
@@ -1481,10 +1483,10 @@ struct security_operations { void (*file_free_security) (struct file *file); int (*file_ioctl) (struct file *file, unsigned int cmd, unsigned long arg); - int (*file_mmap) (struct file *file, + int (*mmap_addr) (unsigned long addr); + int (*mmap_file) (struct file *file, unsigned long reqprot, unsigned long prot, - unsigned long flags, unsigned long addr, - unsigned long addr_only); + unsigned long flags); int (*file_mprotect) (struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); @@ -1743,9 +1745,9 @@ int security_file_permission(struct file *file, int mask); int security_file_alloc(struct file *file); void security_file_free(struct file *file); int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int security_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only); +int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags); +int security_mmap_addr(unsigned long addr); int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); int security_file_lock(struct file *file, unsigned int cmd); @@ -2181,13 +2183,15 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd, return 0; } -static inline int security_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, - unsigned long flags, - unsigned long addr, - unsigned long addr_only) +static inline int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags) +{ + return 0; +} + +static inline int security_mmap_addr(unsigned long addr) { - return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); + return cap_mmap_addr(addr); } static inline int security_file_mprotect(struct vm_area_struct *vma, diff --git a/include/linux/signal.h b/include/linux/signal.h index 17046cc484bc..26b424adc842 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -250,12 +250,13 @@ extern long do_sigpending(void __user *, unsigned long); extern int do_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *); extern int sigprocmask(int, sigset_t *, sigset_t *); -extern void set_current_blocked(const sigset_t *); +extern void set_current_blocked(sigset_t *); +extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; extern int sigsuspend(sigset_t *); extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); -extern void block_sigmask(struct k_sigaction *ka, int signr); +extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping); extern void exit_signals(struct task_struct *tsk); extern struct kmem_cache *sighand_cachep; diff --git a/include/linux/swap.h b/include/linux/swap.h index b6661933e252..c84ec68eaec9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -197,6 +197,10 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ +#ifdef CONFIG_FRONTSWAP + unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ + atomic_t frontswap_pages; /* frontswap pages in-use counter */ +#endif }; struct swap_list_t { diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h new file mode 100644 index 000000000000..e282624e8c10 --- /dev/null +++ 
b/include/linux/swapfile.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_SWAPFILE_H +#define _LINUX_SWAPFILE_H + +/* + * these were static in swapfile.c but frontswap.c needs them and we don't + * want to expose them to the dozens of source files that include swap.h + */ +extern spinlock_t swap_lock; +extern struct swap_list_t swap_list; +extern struct swap_info_struct *swap_info[]; +extern int try_to_unuse(unsigned int, bool, unsigned long); + +#endif /* _LINUX_SWAPFILE_H */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index db78775eff3b..ccc1899bd62e 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -8,6 +8,7 @@ #define _LINUX_THREAD_INFO_H #include <linux/types.h> +#include <linux/bug.h> struct timespec; struct compat_timespec; @@ -125,10 +126,26 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); - set_thread_flag(TIF_SIGPENDING); + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); +} +static inline void clear_restore_sigmask(void) +{ + clear_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_restore_sigmask(void) +{ + return test_thread_flag(TIF_RESTORE_SIGMASK); +} +static inline bool test_and_clear_restore_sigmask(void) +{ + return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK); } #endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */ +#ifndef HAVE_SET_RESTORE_SIGMASK +#error "no set_restore_sigmask() provided and default one won't work" +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */ diff --git a/include/linux/tty.h b/include/linux/tty.h index 4990ef2b1fb7..9f47ab540f65 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -268,7 +268,6 @@ struct tty_struct { struct mutex ldisc_mutex; struct tty_ldisc *ldisc; - struct mutex legacy_mutex; struct mutex termios_mutex; spinlock_t ctrl_lock; /* Termios values are protected by the termios mutex */ @@ -606,12 +605,8 @@ extern long vt_compat_ioctl(struct tty_struct *tty, /* tty_mutex.c */ /* functions for preparation of BKL removal */ -extern void __lockfunc tty_lock(struct tty_struct *tty); -extern void __lockfunc tty_unlock(struct tty_struct *tty); -extern void __lockfunc tty_lock_pair(struct tty_struct *tty, - struct tty_struct *tty2); -extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, - struct tty_struct *tty2); +extern void __lockfunc tty_lock(void) __acquires(tty_lock); +extern void __lockfunc tty_unlock(void) __releases(tty_lock); /* * this shall be called only from where BTM is held (like close) @@ -626,9 +621,9 @@ extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, long timeout) { - tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ + tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */ tty_wait_until_sent(tty, timeout); - tty_lock(tty); + tty_lock(); } /* @@ -643,16 +638,16 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, * * Do not use in new code. 
*/ -#define wait_event_interruptible_tty(tty, wq, condition) \ +#define wait_event_interruptible_tty(wq, condition) \ ({ \ int __ret = 0; \ if (!(condition)) { \ - __wait_event_interruptible_tty(tty, wq, condition, __ret); \ + __wait_event_interruptible_tty(wq, condition, __ret); \ } \ __ret; \ }) -#define __wait_event_interruptible_tty(tty, wq, condition, ret) \ +#define __wait_event_interruptible_tty(wq, condition, ret) \ do { \ DEFINE_WAIT(__wait); \ \ @@ -661,9 +656,9 @@ do { \ if (condition) \ break; \ if (!signal_pending(current)) { \ - tty_unlock(tty); \ + tty_unlock(); \ schedule(); \ - tty_lock(tty); \ + tty_lock(); \ continue; \ } \ ret = -ERESTARTSYS; \ diff --git a/include/linux/types.h b/include/linux/types.h index 7f480db60231..9c1bd539ea70 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -25,7 +25,7 @@ typedef __kernel_dev_t dev_t; typedef __kernel_ino_t ino_t; typedef __kernel_mode_t mode_t; typedef unsigned short umode_t; -typedef __kernel_nlink_t nlink_t; +typedef __u32 nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_daddr_t daddr_t; diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 9808877c2ab9..a7a683e30b64 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -42,6 +42,7 @@ #include <net/netlabel.h> #include <net/request_sock.h> #include <linux/atomic.h> +#include <asm/unaligned.h> /* known doi values */ #define CIPSO_V4_DOI_UNKNOWN 0x00000000 @@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb, static inline int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { - return -ENOSYS; + unsigned char *opt = *option; + unsigned char err_offset = 0; + u8 opt_len = opt[1]; + u8 opt_iter; + + if (opt_len < 8) { + err_offset = 1; + goto out; + } + + if (get_unaligned_be32(&opt[2]) == 0) { + err_offset = 2; + goto out; + } + + for (opt_iter = 6; opt_iter < opt_len;) { + if (opt[opt_iter + 1] > (opt_len - opt_iter)) { + err_offset = opt_iter + 1; + goto out; + } + opt_iter += opt[opt_iter + 1]; + } + +out: + *option = opt + err_offset; + return err_offset; + } #endif /* CONFIG_NETLABEL */ diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h new file mode 100644 index 000000000000..609efe8c686e --- /dev/null +++ b/include/video/auo_k190xfb.h @@ -0,0 +1,106 @@ +/* + * Definitions for AUO-K190X framebuffer drivers + * + * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_VIDEO_AUO_K190XFB_H_ +#define _LINUX_VIDEO_AUO_K190XFB_H_ + +/* Controller standby command needs a param */ +#define AUOK190X_QUIRK_STANDBYPARAM (1 << 0) + +/* Controller standby is completely broken */ +#define AUOK190X_QUIRK_STANDBYBROKEN (1 << 1) + +/* + * Resolutions for the displays + */ +#define AUOK190X_RESOLUTION_800_600 0 +#define AUOK190X_RESOLUTION_1024_768 1 + +/* + * struct used by auok190x. 
board specific stuff comes from *board + */ +struct auok190xfb_par { + struct fb_info *info; + struct auok190x_board *board; + + struct regulator *regulator; + + struct mutex io_lock; + struct delayed_work work; + wait_queue_head_t waitq; + int resolution; + int rotation; + int consecutive_threshold; + int update_cnt; + + /* panel and controller informations */ + int epd_type; + int panel_size_int; + int panel_size_float; + int panel_model; + int tcon_version; + int lut_version; + + /* individual controller callbacks */ + void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2); + void (*update_all)(struct auok190xfb_par *par); + bool (*need_refresh)(struct auok190xfb_par *par); + void (*init)(struct auok190xfb_par *par); + void (*recover)(struct auok190xfb_par *par); + + int update_mode; /* mode to use for updates */ + int last_mode; /* update mode last used */ + int flash; + + /* power management */ + int autosuspend_delay; + bool standby; + bool manual_standby; +}; + +/** + * Board specific platform-data + * @init: initialize the controller interface + * @cleanup: cleanup the controller interface + * @wait_for_rdy: wait until the controller is not busy anymore + * @set_ctl: change an interface control + * @set_hdb: write a value to the data register + * @get_hdb: read a value from the data register + * @setup_irq: method to setup the irq handling on the busy gpio + * @gpio_nsleep: sleep gpio + * @gpio_nrst: reset gpio + * @gpio_nbusy: busy gpio + * @resolution: one of the AUOK190X_RESOLUTION constants + * @rotation: rotation of the framebuffer + * @quirks: controller quirks to honor + * @fps: frames per second for defio + */ +struct auok190x_board { + int (*init)(struct auok190xfb_par *); + void (*cleanup)(struct auok190xfb_par *); + int (*wait_for_rdy)(struct auok190xfb_par *); + + void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8); + void (*set_hdb)(struct auok190xfb_par *, u16); + u16 (*get_hdb)(struct auok190xfb_par *); + + int (*setup_irq)(struct fb_info *); + + int gpio_nsleep; + int gpio_nrst; + int gpio_nbusy; + + int resolution; + int rotation; + int quirks; + int fps; +}; + +#endif diff --git a/include/video/exynos_dp.h b/include/video/exynos_dp.h index 8847a9d6dd42..bd8cabd344db 100644 --- a/include/video/exynos_dp.h +++ b/include/video/exynos_dp.h @@ -14,7 +14,7 @@ #define DP_TIMEOUT_LOOP_COUNT 100 #define MAX_CR_LOOP 5 -#define MAX_EQ_LOOP 4 +#define MAX_EQ_LOOP 5 enum link_rate_type { LINK_RATE_1_62GBPS = 0x06, diff --git a/include/video/exynos_mipi_dsim.h b/include/video/exynos_mipi_dsim.h index 772c770535f1..83ce5e667d47 100644 --- a/include/video/exynos_mipi_dsim.h +++ b/include/video/exynos_mipi_dsim.h @@ -315,6 +315,7 @@ struct mipi_dsim_lcd_device { int id; int bus_id; int irq; + int panel_reverse; struct mipi_dsim_device *master; void *platform_data; diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 1c46a14341dd..c8e59b4a3364 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -51,6 +51,8 @@ struct omap_dss_device; struct omap_overlay_manager; +struct snd_aes_iec958; +struct snd_cea_861_aud_if; enum omap_display_type { OMAP_DISPLAY_TYPE_NONE = 0, @@ -158,6 +160,13 @@ enum omap_dss_display_state { OMAP_DSS_DISPLAY_SUSPENDED, }; +enum omap_dss_audio_state { + OMAP_DSS_AUDIO_DISABLED = 0, + OMAP_DSS_AUDIO_ENABLED, + OMAP_DSS_AUDIO_CONFIGURED, + OMAP_DSS_AUDIO_PLAYING, +}; + /* XXX perhaps this should be removed */ enum omap_dss_overlay_managers { OMAP_DSS_OVL_MGR_LCD, @@ -166,8 +175,9 @@ enum 
omap_dss_overlay_managers { }; enum omap_dss_rotation_type { - OMAP_DSS_ROT_DMA = 0, - OMAP_DSS_ROT_VRFB = 1, + OMAP_DSS_ROT_DMA = 1 << 0, + OMAP_DSS_ROT_VRFB = 1 << 1, + OMAP_DSS_ROT_TILER = 1 << 2, }; /* clockwise rotation angle */ @@ -309,6 +319,7 @@ struct omap_dss_board_info { struct omap_dss_device *default_device; int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask); void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); + int (*set_min_bus_tput)(struct device *dev, unsigned long r); }; /* Init with the board info */ @@ -316,11 +327,6 @@ extern int omap_display_init(struct omap_dss_board_info *board_data); /* HDMI mux init*/ extern int omap_hdmi_init(enum omap_hdmi_flags flags); -struct omap_display_platform_data { - struct omap_dss_board_info *board_data; - /* TODO: Additional members to be added when PM is considered */ -}; - struct omap_video_timings { /* Unit: pixels */ u16 x_res; @@ -587,6 +593,8 @@ struct omap_dss_device { enum omap_dss_display_state state; + enum omap_dss_audio_state audio_state; + /* platform specific */ int (*platform_enable)(struct omap_dss_device *dssdev); void (*platform_disable)(struct omap_dss_device *dssdev); @@ -599,6 +607,11 @@ struct omap_dss_hdmi_data int hpd_gpio; }; +struct omap_dss_audio { + struct snd_aes_iec958 *iec; + struct snd_cea_861_aud_if *cea; +}; + struct omap_dss_driver { struct device_driver driver; @@ -646,6 +659,24 @@ struct omap_dss_driver { int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); bool (*detect)(struct omap_dss_device *dssdev); + + /* + * For display drivers that support audio. This encompasses + * HDMI and DisplayPort at the moment. + */ + /* + * Note: These functions might sleep. Do not call while + * holding a spinlock/readlock. + */ + int (*audio_enable)(struct omap_dss_device *dssdev); + void (*audio_disable)(struct omap_dss_device *dssdev); + bool (*audio_supported)(struct omap_dss_device *dssdev); + int (*audio_config)(struct omap_dss_device *dssdev, + struct omap_dss_audio *audio); + /* Note: These functions may not sleep */ + int (*audio_start)(struct omap_dss_device *dssdev); + void (*audio_stop)(struct omap_dss_device *dssdev); + }; int omap_dss_register_driver(struct omap_dss_driver *); @@ -670,6 +701,8 @@ struct omap_overlay *omap_dss_get_overlay(int num); void omapdss_default_get_resolution(struct omap_dss_device *dssdev, u16 *xres, u16 *yres); int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); diff --git a/include/video/sh_mobile_hdmi.h b/include/video/sh_mobile_hdmi.h index 728f9de9c258..63d20efa254a 100644 --- a/include/video/sh_mobile_hdmi.h +++ b/include/video/sh_mobile_hdmi.h @@ -18,9 +18,11 @@ struct clk; /* * flags format * - * 0x0000000A + * 0x00000CBA * * A: Audio source select + * B: Int output option + * C: Chip specific option */ /* Audio source select */ @@ -30,6 +32,14 @@ struct clk; #define HDMI_SND_SRC_DSD (2 << 0) #define HDMI_SND_SRC_HBR (3 << 0) +/* Int output option */ +#define HDMI_OUTPUT_PUSH_PULL (1 << 4) /* System control : output mode */ +#define HDMI_OUTPUT_POLARITY_HI (1 << 5) /* System control : output polarity */ + +/* Chip specific option */ +#define HDMI_32BIT_REG (1 << 8) +#define HDMI_HAS_HTOP1 (1 << 9) + struct sh_mobile_hdmi_info { unsigned int flags; long (*clk_optimize_parent)(unsigned long 
target, unsigned long *best_freq, diff --git a/ipc/shm.c b/ipc/shm.c index 406c5b208193..5e2cbfdab6fc 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1036,6 +1036,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) sfd->file = shp->shm_file; sfd->vm_ops = NULL; + err = security_mmap_file(file, prot, flags); + if (err) + goto out_fput; + down_write(¤t->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; @@ -1050,7 +1054,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) goto invalid; } - user_addr = do_mmap (file, addr, size, prot, flags, 0); + user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0); *raddr = user_addr; err = 0; if (IS_ERR_VALUE(user_addr)) @@ -1058,6 +1062,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) invalid: up_write(¤t->mm->mmap_sem); +out_fput: fput(file); out_nattch: diff --git a/kernel/Makefile b/kernel/Makefile index 6f3d0ae044b2..c0cc67ad764c 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o cred.o \ - async.o range.o groups.o + async.o range.o groups.o lglock.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index fc275e4f629b..eebd6d5cfb44 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq) kstat_incr_irqs_this_cpu(irq, desc); action = desc->action; - if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) + if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; goto out_unlock; + } irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock_irq(&desc->lock); @@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); kstat_incr_irqs_this_cpu(irq, desc); - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; goto out_unlock; + } handle_irq_event(desc); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 8e5c56b3b7d9..001fa5bab490 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); extern void irq_set_thread_affinity(struct irq_desc *desc); +extern int irq_do_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force); + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ea0c6c2ae6f7..8c548232ba39 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -142,6 +142,25 @@ static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } #endif +int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + struct irq_desc *desc = irq_data_to_desc(data); + struct irq_chip *chip = irq_data_get_irq_chip(data); + int ret; + + ret = chip->irq_set_affinity(data, mask, false); + switch (ret) { + case IRQ_SET_MASK_OK: + cpumask_copy(data->affinity, mask); + case IRQ_SET_MASK_OK_NOCOPY: + irq_set_thread_affinity(desc); + ret = 0; + } + + return ret; +} + int 
__irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) { struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) return -EINVAL; if (irq_can_move_pcntxt(data)) { - ret = chip->irq_set_affinity(data, mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(data->affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - ret = 0; - } + ret = irq_do_set_affinity(data, mask, false); } else { irqd_set_move_pending(data); irq_copy_pending(desc, mask); @@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); static int setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) { - struct irq_chip *chip = irq_desc_get_chip(desc); struct cpumask *set = irq_default_affinity; - int ret, node = desc->irq_data.node; + int node = desc->irq_data.node; /* Excludes PER_CPU and NO_BALANCE interrupts */ if (!irq_can_set_affinity(irq)) @@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) if (cpumask_intersects(mask, nodemask)) cpumask_and(mask, mask, nodemask); } - ret = chip->irq_set_affinity(&desc->irq_data, mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(desc->irq_data.affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - } + irq_do_set_affinity(&desc->irq_data, mask, false); return 0; } #else diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index c3c89751b327..ca3f4aaff707 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata) * For correct operation this depends on the caller * masking the irqs. */ - if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) - < nr_cpu_ids)) { - int ret = chip->irq_set_affinity(&desc->irq_data, - desc->pending_mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(desc->irq_data.affinity, desc->pending_mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - } - } + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) + irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); cpumask_clear(desc->pending_mask); } diff --git a/kernel/lglock.c b/kernel/lglock.c new file mode 100644 index 000000000000..6535a667a5a7 --- /dev/null +++ b/kernel/lglock.c @@ -0,0 +1,89 @@ +/* See include/linux/lglock.h for description */ +#include <linux/module.h> +#include <linux/lglock.h> +#include <linux/cpu.h> +#include <linux/string.h> + +/* + * Note there is no uninit, so lglocks cannot be defined in + * modules (but it's fine to use them from there) + * Could be added though, just undo lg_lock_init + */ + +void lg_lock_init(struct lglock *lg, char *name) +{ + LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); +} +EXPORT_SYMBOL(lg_lock_init); + +void lg_local_lock(struct lglock *lg) +{ + arch_spinlock_t *lock; + + preempt_disable(); + rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock = this_cpu_ptr(lg->lock); + arch_spin_lock(lock); +} +EXPORT_SYMBOL(lg_local_lock); + +void lg_local_unlock(struct lglock *lg) +{ + arch_spinlock_t *lock; + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = this_cpu_ptr(lg->lock); + arch_spin_unlock(lock); + preempt_enable(); +} +EXPORT_SYMBOL(lg_local_unlock); + +void lg_local_lock_cpu(struct lglock *lg, int cpu) +{ + arch_spinlock_t *lock; + + preempt_disable(); + 
rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); + arch_spin_lock(lock); +} +EXPORT_SYMBOL(lg_local_lock_cpu); + +void lg_local_unlock_cpu(struct lglock *lg, int cpu) +{ + arch_spinlock_t *lock; + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); + arch_spin_unlock(lock); + preempt_enable(); +} +EXPORT_SYMBOL(lg_local_unlock_cpu); + +void lg_global_lock(struct lglock *lg) +{ + int i; + + preempt_disable(); + rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); + for_each_possible_cpu(i) { + arch_spinlock_t *lock; + lock = per_cpu_ptr(lg->lock, i); + arch_spin_lock(lock); + } +} +EXPORT_SYMBOL(lg_global_lock); + +void lg_global_unlock(struct lglock *lg) +{ + int i; + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + for_each_possible_cpu(i) { + arch_spinlock_t *lock; + lock = per_cpu_ptr(lg->lock, i); + arch_spin_unlock(lock); + } + preempt_enable(); +} +EXPORT_SYMBOL(lg_global_unlock); diff --git a/kernel/signal.c b/kernel/signal.c index 08dfbd748cd2..677102789cf2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2368,24 +2368,34 @@ relock: } /** - * block_sigmask - add @ka's signal mask to current->blocked - * @ka: action for @signr - * @signr: signal that has been successfully delivered + * signal_delivered - + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler + * @regs: user register state + * @stepping: nonzero if debugger single-step or block-step in use * * This function should be called when a signal has succesfully been - * delivered. It adds the mask of signals for @ka to current->blocked - * so that they are blocked during the execution of the signal - * handler. In addition, @signr will be blocked unless %SA_NODEFER is - * set in @ka->sa.sa_flags. + * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask + * is always blocked, and the signal itself is blocked unless %SA_NODEFER + * is set in @ka->sa.sa_flags. Tracing is notified. */ -void block_sigmask(struct k_sigaction *ka, int signr) +void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, + struct pt_regs *regs, int stepping) { sigset_t blocked; + /* A signal was successfully delivered, and the + saved sigmask was stored on the signal frame, + and will be restored by sigreturn. So we can + simply clear the restore sigmask flag. */ + clear_restore_sigmask(); + sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(&blocked, signr); + sigaddset(&blocked, sig); set_current_blocked(&blocked); + tracehook_signal_handler(sig, info, ka, regs, stepping); } /* @@ -2518,7 +2528,16 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) * It is wrong to change ->blocked directly, this helper should be used * to ensure the process can't miss a shared signal we are going to block. 
*/ -void set_current_blocked(const sigset_t *newset) +void set_current_blocked(sigset_t *newset) +{ + struct task_struct *tsk = current; + sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); + spin_lock_irq(&tsk->sighand->siglock); + __set_task_blocked(tsk, newset); + spin_unlock_irq(&tsk->sighand->siglock); +} + +void __set_current_blocked(const sigset_t *newset) { struct task_struct *tsk = current; @@ -2558,7 +2577,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) return -EINVAL; } - set_current_blocked(&newset); + __set_current_blocked(&newset); return 0; } @@ -3132,7 +3151,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, return -EINVAL; } - set_current_blocked(&new_blocked); + __set_current_blocked(&new_blocked); } if (oset) { @@ -3196,7 +3215,6 @@ SYSCALL_DEFINE1(ssetmask, int, newmask) int old = current->blocked.sig[0]; sigset_t newset; - siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP))); set_current_blocked(&newset); return old; @@ -3235,11 +3253,8 @@ SYSCALL_DEFINE0(pause) #endif -#ifdef HAVE_SET_RESTORE_SIGMASK int sigsuspend(sigset_t *set) { - sigdelsetmask(set, sigmask(SIGKILL)|sigmask(SIGSTOP)); - current->saved_sigmask = current->blocked; set_current_blocked(set); @@ -3248,7 +3263,6 @@ int sigsuspend(sigset_t *set) set_restore_sigmask(); return -ERESTARTNOHAND; } -#endif #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND /** diff --git a/kernel/smpboot.c b/kernel/smpboot.c index e1a797e028a3..98f60c5caa1b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void) per_cpu(idle_threads, smp_processor_id()) = current; } +/** + * idle_init - Initialize the idle thread for a cpu + * @cpu: The cpu for which the idle thread should be initialized + * + * Creates the thread if it does not exist. + */ static inline void idle_init(unsigned int cpu) { struct task_struct *tsk = per_cpu(idle_threads, cpu); @@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu) } /** - * idle_thread_init - Initialize the idle thread for a cpu - * @cpu: The cpu for which the idle thread should be initialized - * - * Creates the thread if it does not exist. 
+ * idle_threads_init - Initialize idle threads for all cpus */ void __init idle_threads_init(void) { - unsigned int cpu; + unsigned int cpu, boot_cpu; + + boot_cpu = smp_processor_id(); for_each_possible_cpu(cpu) { - if (cpu != smp_processor_id()) + if (cpu != boot_cpu) idle_init(cpu); } } diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 9cd928f7a7c6..7e1ce012a851 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev) } EXPORT_SYMBOL_GPL(clockevents_register_device); -static void clockevents_config(struct clock_event_device *dev, - u32 freq) +void clockevents_config(struct clock_event_device *dev, u32 freq) { u64 sec; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6a3a5b9ff561..efd386667536 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -814,6 +814,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) return HRTIMER_RESTART; } +static int sched_skew_tick; + +static int __init skew_tick(char *str) +{ + get_option(&str, &sched_skew_tick); + + return 0; +} +early_param("skew_tick", skew_tick); + /** * tick_setup_sched_timer - setup the tick emulation timer */ @@ -831,6 +841,14 @@ void tick_setup_sched_timer(void) /* Get the next period (per cpu) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); + /* Offset the tick to avert xtime_lock contention. */ + if (sched_skew_tick) { + u64 offset = ktime_to_ns(tick_period) >> 1; + do_div(offset, num_possible_cpus()); + offset *= smp_processor_id(); + hrtimer_add_expires_ns(&ts->sched_timer, offset); + } + for (;;) { hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_start_expires(&ts->sched_timer, diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 6ab4587d052b..0777c5a45fa0 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -10,23 +10,27 @@ #include <linux/jiffies.h> #include <linux/dynamic_queue_limits.h> -#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) +#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? 
(A) - (B) : 0) +#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) /* Records completed count and recalculates the queue limit */ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; - unsigned int ovlimit, all_prev_completed, completed; + unsigned int ovlimit, completed, num_queued; + bool all_prev_completed; + + num_queued = ACCESS_ONCE(dql->num_queued); /* Can't complete more than what's in queue */ - BUG_ON(count > dql->num_queued - dql->num_completed); + BUG_ON(count > num_queued - dql->num_completed); completed = dql->num_completed + count; limit = dql->limit; - ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); - inprogress = dql->num_queued - completed; + ovlimit = POSDIFF(num_queued - dql->num_completed, limit); + inprogress = num_queued - completed; prev_inprogress = dql->prev_num_queued - dql->num_completed; - all_prev_completed = POSDIFF(completed, dql->prev_num_queued); + all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); if ((ovlimit && !inprogress) || (dql->prev_ovlimit && all_prev_completed)) { @@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count) dql->prev_ovlimit = ovlimit; dql->prev_last_obj_cnt = dql->last_obj_cnt; dql->num_completed = completed; - dql->prev_num_queued = dql->num_queued; + dql->prev_num_queued = num_queued; } EXPORT_SYMBOL(dql_completed); diff --git a/mm/Kconfig b/mm/Kconfig index b2176374b98e..82fed4eb2b6f 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -389,3 +389,20 @@ config CLEANCACHE in a negligible performance hit. If unsure, say Y to enable cleancache + +config FRONTSWAP + bool "Enable frontswap to cache swap pages if tmem is present" + depends on SWAP + default n + help + Frontswap is so named because it can be thought of as the opposite + of a "backing" store for a swap device. The data is stored into + "transcendent memory", memory that is not directly accessible or + addressable by the kernel and is of unknown and possibly + time-varying size. When space in transcendent memory is available, + a significant swap I/O reduction may be achieved. When none is + available, all frontswap calls are reduced to a single pointer- + compare-against-NULL resulting in a negligible performance hit + and swap data is stored as normal on the matching swap device. + + If unsure, say Y to enable frontswap. 
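For readers unfamiliar with the interface this option enables: a frontswap backend plugs in by filling a struct frontswap_ops and handing it to frontswap_register_ops(). The sketch below is illustrative only; the ops signatures are inferred from the frontend calls in mm/frontswap.c added later in this series (the authoritative definitions live in include/linux/frontswap.h, which is not reproduced here), and all example_* names are made up for illustration.

#include <linux/module.h>
#include <linux/swap.h>
#include <linux/frontswap.h>

/* Per-swap-device setup, called at swapon time for device 'type'. */
static void example_init(unsigned type)
{
}

/* Copy 'page' into transcendent memory; return 0 on success, -1 on failure.
 * This stub always fails, so the page is written to the swap device as usual. */
static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;
}

/* Fill 'page' from the backend copy; return 0 on success, -1 if not present. */
static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;
}

/* Drop any backend copy of this (type, offset) pair. */
static void example_invalidate_page(unsigned type, pgoff_t offset)
{
}

/* Drop everything held for this swap device (e.g. at swapoff). */
static void example_invalidate_area(unsigned type)
{
}

static struct frontswap_ops example_ops = {
	.init			= example_init,
	.store			= example_store,
	.load			= example_load,
	.invalidate_page	= example_invalidate_page,
	.invalidate_area	= example_invalidate_area,
};

static int __init example_frontswap_init(void)
{
	/* The previous ops are returned, allowing backends to detect nesting. */
	struct frontswap_ops old = frontswap_register_ops(&example_ops);

	(void)old;
	return 0;
}
module_init(example_frontswap_init);

A real backend would copy page contents into its own pool in store() and back out in load(); failing the store, as the stub does, simply makes swap fall back to the normal device write described in the help text above.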
diff --git a/mm/Makefile b/mm/Makefile index a156285ce88d..2e2fbbefb99f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o +obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o diff --git a/mm/cleancache.c b/mm/cleancache.c index 5646c740f613..32e6f4136fa2 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c @@ -80,7 +80,7 @@ EXPORT_SYMBOL(__cleancache_init_shared_fs); static int cleancache_get_key(struct inode *inode, struct cleancache_filekey *key) { - int (*fhfn)(struct dentry *, __u32 *fh, int *, int); + int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *); int len = 0, maxlen = CLEANCACHE_KEY_MAX; struct super_block *sb = inode->i_sb; @@ -88,9 +88,7 @@ static int cleancache_get_key(struct inode *inode, if (sb->s_export_op != NULL) { fhfn = sb->s_export_op->encode_fh; if (fhfn) { - struct dentry d; - d.d_inode = inode; - len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0); + len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL); if (len <= 0 || len == 255) return -1; if (maxlen > CLEANCACHE_KEY_MAX) diff --git a/mm/compaction.c b/mm/compaction.c index 4ac338af5120..7ea259d82a99 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, */ while (unlikely(too_many_isolated(zone))) { /* async migration should just abort */ - if (cc->mode != COMPACT_SYNC) + if (!cc->sync) return 0; congestion_wait(BLK_RW_ASYNC, HZ/10); @@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, * satisfies the allocation */ pageblock_nr = low_pfn >> pageblock_order; - if (cc->mode != COMPACT_SYNC && - last_pageblock_nr != pageblock_nr && + if (!cc->sync && last_pageblock_nr != pageblock_nr && !migrate_async_suitable(get_pageblock_migratetype(page))) { low_pfn += pageblock_nr_pages; low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; @@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, continue; } - if (cc->mode != COMPACT_SYNC) + if (!cc->sync) mode |= ISOLATE_ASYNC_MIGRATE; lruvec = mem_cgroup_page_lruvec(page, zone); @@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION -/* - * Returns true if MIGRATE_UNMOVABLE pageblock was successfully - * converted to MIGRATE_MOVABLE type, false otherwise. 
- */ -static bool rescue_unmovable_pageblock(struct page *page) -{ - unsigned long pfn, start_pfn, end_pfn; - struct page *start_page, *end_page; - - pfn = page_to_pfn(page); - start_pfn = pfn & ~(pageblock_nr_pages - 1); - end_pfn = start_pfn + pageblock_nr_pages; - - start_page = pfn_to_page(start_pfn); - end_page = pfn_to_page(end_pfn); - - /* Do not deal with pageblocks that overlap zones */ - if (page_zone(start_page) != page_zone(end_page)) - return false; - - for (page = start_page, pfn = start_pfn; page < end_page; pfn++, - page++) { - if (!pfn_valid_within(pfn)) - continue; - - if (PageBuddy(page)) { - int order = page_order(page); - - pfn += (1 << order) - 1; - page += (1 << order) - 1; - - continue; - } else if (page_count(page) == 0 || PageLRU(page)) - continue; - - return false; - } - - set_pageblock_migratetype(page, MIGRATE_MOVABLE); - move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE); - return true; -} -enum smt_result { - GOOD_AS_MIGRATION_TARGET, - FAIL_UNMOVABLE_TARGET, - FAIL_BAD_TARGET, -}; - -/* - * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block - * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page - * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise. - */ -static enum smt_result suitable_migration_target(struct page *page, - struct compact_control *cc) +/* Returns true if the page is within a block suitable for migration to */ +static bool suitable_migration_target(struct page *page) { int migratetype = get_pageblock_migratetype(page); /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) - return FAIL_BAD_TARGET; + return false; /* If the page is a large free page, then allow migration */ if (PageBuddy(page) && page_order(page) >= pageblock_order) - return GOOD_AS_MIGRATION_TARGET; + return true; /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ - if (cc->mode != COMPACT_ASYNC_UNMOVABLE && - migrate_async_suitable(migratetype)) - return GOOD_AS_MIGRATION_TARGET; - - if (cc->mode == COMPACT_ASYNC_MOVABLE && - migratetype == MIGRATE_UNMOVABLE) - return FAIL_UNMOVABLE_TARGET; - - if (cc->mode != COMPACT_ASYNC_MOVABLE && - migratetype == MIGRATE_UNMOVABLE && - rescue_unmovable_pageblock(page)) - return GOOD_AS_MIGRATION_TARGET; + if (migrate_async_suitable(migratetype)) + return true; /* Otherwise skip the block */ - return FAIL_BAD_TARGET; + return false; } /* @@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone, zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; /* - * isolate_freepages() may be called more than once during - * compact_zone_order() run and we want only the most recent - * count. - */ - cc->nr_pageblocks_skipped = 0; - - /* * Isolate free pages until enough are available to migrate the * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. 
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone, for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; pfn -= pageblock_nr_pages) { unsigned long isolated; - enum smt_result ret; if (!pfn_valid(pfn)) continue; @@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone, continue; /* Check the block is suitable for migration */ - ret = suitable_migration_target(page, cc); - if (ret != GOOD_AS_MIGRATION_TARGET) { - if (ret == FAIL_UNMOVABLE_TARGET) - cc->nr_pageblocks_skipped++; + if (!suitable_migration_target(page)) continue; - } + /* * Found a block suitable for isolating free pages from. Now * we disabled interrupts, double check things are ok and @@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone, */ isolated = 0; spin_lock_irqsave(&zone->lock, flags); - ret = suitable_migration_target(page, cc); - if (ret == GOOD_AS_MIGRATION_TARGET) { + if (suitable_migration_target(page)) { end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); isolated = isolate_freepages_block(pfn, end_pfn, freelist, false); nr_freepages += isolated; - } else if (ret == FAIL_UNMOVABLE_TARGET) - cc->nr_pageblocks_skipped++; + } spin_unlock_irqrestore(&zone->lock, flags); /* @@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) nr_migrate = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, - (unsigned long)&cc->freepages, false, - (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT - : MIGRATE_ASYNC); + (unsigned long)cc, false, + cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); update_nr_listpages(cc); nr_remaining = cc->nr_migratepages; @@ -793,8 +715,7 @@ out: static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - enum compact_mode mode, - unsigned long *nr_pageblocks_skipped) + bool sync) { struct compact_control cc = { .nr_freepages = 0, @@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone, .order = order, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, - .mode = mode, + .sync = sync, }; - unsigned long rc; - INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); - rc = compact_zone(zone, &cc); - *nr_pageblocks_skipped = cc.nr_pageblocks_skipped; - - return rc; + return compact_zone(zone, &cc); } int sysctl_extfrag_threshold = 500; @@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; - unsigned long nr_pageblocks_skipped; - enum compact_mode mode; /* * Check whether it is worth even starting compaction. The order check is @@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; -retry: - status = compact_zone_order(zone, order, gfp_mask, mode, - &nr_pageblocks_skipped); + status = compact_zone_order(zone, order, gfp_mask, sync); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) break; - - if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) { - if (nr_pageblocks_skipped) { - mode = COMPACT_ASYNC_UNMOVABLE; - goto retry; - } - } } return rc; @@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) if (ok && cc->order > zone->compact_order_failed) zone->compact_order_failed = cc->order + 1; /* Currently async compaction is never deferred. 
*/ - else if (!ok && cc->mode == COMPACT_SYNC) + else if (!ok && cc->sync) defer_compaction(zone, cc->order); } @@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order) { struct compact_control cc = { .order = order, - .mode = COMPACT_ASYNC_MOVABLE, + .sync = false, }; return __compact_pgdat(pgdat, &cc); @@ -929,7 +833,7 @@ static int compact_node(int nid) { struct compact_control cc = { .order = -1, - .mode = COMPACT_SYNC, + .sync = true, }; return __compact_pgdat(NODE_DATA(nid), &cc); diff --git a/mm/filemap.c b/mm/filemap.c index 64b48f934b89..a4a5260b0279 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1899,71 +1899,6 @@ struct page *read_cache_page(struct address_space *mapping, } EXPORT_SYMBOL(read_cache_page); -/* - * The logic we want is - * - * if suid or (sgid and xgrp) - * remove privs - */ -int should_remove_suid(struct dentry *dentry) -{ - umode_t mode = dentry->d_inode->i_mode; - int kill = 0; - - /* suid always must be killed */ - if (unlikely(mode & S_ISUID)) - kill = ATTR_KILL_SUID; - - /* - * sgid without any exec bits is just a mandatory locking mark; leave - * it alone. If some exec bits are set, it's a real sgid; kill it. - */ - if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) - kill |= ATTR_KILL_SGID; - - if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) - return kill; - - return 0; -} -EXPORT_SYMBOL(should_remove_suid); - -static int __remove_suid(struct dentry *dentry, int kill) -{ - struct iattr newattrs; - - newattrs.ia_valid = ATTR_FORCE | kill; - return notify_change(dentry, &newattrs); -} - -int file_remove_suid(struct file *file) -{ - struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; - int killsuid; - int killpriv; - int error = 0; - - /* Fast path for nothing security related */ - if (IS_NOSEC(inode)) - return 0; - - killsuid = should_remove_suid(dentry); - killpriv = security_inode_need_killpriv(dentry); - - if (killpriv < 0) - return killpriv; - if (killpriv) - error = security_inode_killpriv(dentry); - if (!error && killsuid) - error = __remove_suid(dentry, killsuid); - if (!error && (inode->i_sb->s_flags & MS_NOSEC)) - inode->i_flags |= S_NOSEC; - - return error; -} -EXPORT_SYMBOL(file_remove_suid); - static size_t __iovec_copy_from_user_inatomic(char *vaddr, const struct iovec *iov, size_t base, size_t bytes) { @@ -2489,7 +2424,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err) goto out; - file_update_time(file); + err = file_update_time(file); + if (err) + goto out; /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (unlikely(file->f_flags & O_DIRECT)) { diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index a4eb31132229..213ca1f53409 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -426,7 +426,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, if (ret) goto out_backing; - file_update_time(filp); + ret = file_update_time(filp); + if (ret) + goto out_backing; ret = __xip_file_write (filp, buf, count, pos, ppos); diff --git a/mm/frontswap.c b/mm/frontswap.c new file mode 100644 index 000000000000..e25025574a02 --- /dev/null +++ b/mm/frontswap.c @@ -0,0 +1,314 @@ +/* + * Frontswap frontend + * + * This code provides the generic "frontend" layer to call a matching + * "backend" driver implementation of frontswap. See + * Documentation/vm/frontswap.txt for more information. + * + * Copyright (C) 2009-2012 Oracle Corp. All rights reserved. 
+ * Author: Dan Magenheimer + * + * This work is licensed under the terms of the GNU GPL, version 2. + */ + +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/proc_fs.h> +#include <linux/security.h> +#include <linux/capability.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/frontswap.h> +#include <linux/swapfile.h> + +/* + * frontswap_ops is set by frontswap_register_ops to contain the pointers + * to the frontswap "backend" implementation functions. + */ +static struct frontswap_ops frontswap_ops __read_mostly; + +/* + * This global enablement flag reduces overhead on systems where frontswap_ops + * has not been registered, so is preferred to the slower alternative: a + * function call that checks a non-global. + */ +bool frontswap_enabled __read_mostly; +EXPORT_SYMBOL(frontswap_enabled); + +/* + * If enabled, frontswap_store will return failure even on success. As + * a result, the swap subsystem will always write the page to swap, in + * effect converting frontswap into a writethrough cache. In this mode, + * there is no direct reduction in swap writes, but a frontswap backend + * can unilaterally "reclaim" any pages in use with no data loss, thus + * providing increases control over maximum memory usage due to frontswap. + */ +static bool frontswap_writethrough_enabled __read_mostly; + +#ifdef CONFIG_DEBUG_FS +/* + * Counters available via /sys/kernel/debug/frontswap (if debugfs is + * properly configured). These are for information only so are not protected + * against increment races. + */ +static u64 frontswap_loads; +static u64 frontswap_succ_stores; +static u64 frontswap_failed_stores; +static u64 frontswap_invalidates; + +static inline void inc_frontswap_loads(void) { + frontswap_loads++; +} +static inline void inc_frontswap_succ_stores(void) { + frontswap_succ_stores++; +} +static inline void inc_frontswap_failed_stores(void) { + frontswap_failed_stores++; +} +static inline void inc_frontswap_invalidates(void) { + frontswap_invalidates++; +} +#else +static inline void inc_frontswap_loads(void) { } +static inline void inc_frontswap_succ_stores(void) { } +static inline void inc_frontswap_failed_stores(void) { } +static inline void inc_frontswap_invalidates(void) { } +#endif +/* + * Register operations for frontswap, returning previous thus allowing + * detection of multiple backends and possible nesting. + */ +struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops) +{ + struct frontswap_ops old = frontswap_ops; + + frontswap_ops = *ops; + frontswap_enabled = true; + return old; +} +EXPORT_SYMBOL(frontswap_register_ops); + +/* + * Enable/disable frontswap writethrough (see above). + */ +void frontswap_writethrough(bool enable) +{ + frontswap_writethrough_enabled = enable; +} +EXPORT_SYMBOL(frontswap_writethrough); + +/* + * Called when a swap device is swapon'd. + */ +void __frontswap_init(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + if (frontswap_enabled) + (*frontswap_ops.init)(type); +} +EXPORT_SYMBOL(__frontswap_init); + +/* + * "Store" data from a page to frontswap and associate it with the page's + * swaptype and offset. Page must be locked and in the swap cache. 
+ * If frontswap already contains a page with matching swaptype and + * offset, the frontswap implmentation may either overwrite the data and + * return success or invalidate the page from frontswap and return failure. + */ +int __frontswap_store(struct page *page) +{ + int ret = -1, dup = 0; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + dup = 1; + ret = (*frontswap_ops.store)(type, offset, page); + if (ret == 0) { + frontswap_set(sis, offset); + inc_frontswap_succ_stores(); + if (!dup) + atomic_inc(&sis->frontswap_pages); + } else if (dup) { + /* + failed dup always results in automatic invalidate of + the (older) page from frontswap + */ + frontswap_clear(sis, offset); + atomic_dec(&sis->frontswap_pages); + inc_frontswap_failed_stores(); + } else + inc_frontswap_failed_stores(); + if (frontswap_writethrough_enabled) + /* report failure so swap also writes to swap device */ + ret = -1; + return ret; +} +EXPORT_SYMBOL(__frontswap_store); + +/* + * "Get" data from frontswap associated with swaptype and offset that were + * specified when the data was put to frontswap and use it to fill the + * specified page with data. Page must be locked and in the swap cache. + */ +int __frontswap_load(struct page *page) +{ + int ret = -1; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + ret = (*frontswap_ops.load)(type, offset, page); + if (ret == 0) + inc_frontswap_loads(); + return ret; +} +EXPORT_SYMBOL(__frontswap_load); + +/* + * Invalidate any data from frontswap associated with the specified swaptype + * and offset so that a subsequent "get" will fail. + */ +void __frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) { + (*frontswap_ops.invalidate_page)(type, offset); + atomic_dec(&sis->frontswap_pages); + frontswap_clear(sis, offset); + inc_frontswap_invalidates(); + } +} +EXPORT_SYMBOL(__frontswap_invalidate_page); + +/* + * Invalidate all data from frontswap associated with all offsets for the + * specified swaptype. + */ +void __frontswap_invalidate_area(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + (*frontswap_ops.invalidate_area)(type); + atomic_set(&sis->frontswap_pages, 0); + memset(sis->frontswap_map, 0, sis->max / sizeof(long)); +} +EXPORT_SYMBOL(__frontswap_invalidate_area); + +/* + * Frontswap, like a true swap device, may unnecessarily retain pages + * under certain circumstances; "shrink" frontswap is essentially a + * "partial swapoff" and works by calling try_to_unuse to attempt to + * unuse enough frontswap pages to attempt to -- subject to memory + * constraints -- reduce the number of pages in frontswap to the + * number given in the parameter target_pages. 
+ */ +void frontswap_shrink(unsigned long target_pages) +{ + struct swap_info_struct *si = NULL; + int si_frontswap_pages; + unsigned long total_pages = 0, total_pages_to_unuse; + unsigned long pages = 0, pages_to_unuse = 0; + int type; + bool locked = false; + + /* + * we don't want to hold swap_lock while doing a very + * lengthy try_to_unuse, but swap_list may change + * so restart scan from swap_list.head each time + */ + spin_lock(&swap_lock); + locked = true; + total_pages = 0; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + total_pages += atomic_read(&si->frontswap_pages); + } + if (total_pages <= target_pages) + goto out; + total_pages_to_unuse = total_pages - target_pages; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + si_frontswap_pages = atomic_read(&si->frontswap_pages); + if (total_pages_to_unuse < si_frontswap_pages) + pages = pages_to_unuse = total_pages_to_unuse; + else { + pages = si_frontswap_pages; + pages_to_unuse = 0; /* unuse all */ + } + /* ensure there is enough RAM to fetch pages from frontswap */ + if (security_vm_enough_memory_mm(current->mm, pages)) + continue; + vm_unacct_memory(pages); + break; + } + if (type < 0) + goto out; + locked = false; + spin_unlock(&swap_lock); + try_to_unuse(type, true, pages_to_unuse); +out: + if (locked) + spin_unlock(&swap_lock); + return; +} +EXPORT_SYMBOL(frontswap_shrink); + +/* + * Count and return the number of frontswap pages across all + * swap devices. This is exported so that backend drivers can + * determine current usage without reading debugfs. + */ +unsigned long frontswap_curr_pages(void) +{ + int type; + unsigned long totalpages = 0; + struct swap_info_struct *si = NULL; + + spin_lock(&swap_lock); + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + totalpages += atomic_read(&si->frontswap_pages); + } + spin_unlock(&swap_lock); + return totalpages; +} +EXPORT_SYMBOL(frontswap_curr_pages); + +static int __init init_frontswap(void) +{ +#ifdef CONFIG_DEBUG_FS + struct dentry *root = debugfs_create_dir("frontswap", NULL); + if (root == NULL) + return -ENXIO; + debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); + debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores); + debugfs_create_u64("failed_stores", S_IRUGO, root, + &frontswap_failed_stores); + debugfs_create_u64("invalidates", S_IRUGO, + root, &frontswap_invalidates); +#endif + return 0; +} + +module_init(init_frontswap); diff --git a/mm/internal.h b/mm/internal.h index 4194ab9dc19b..2ba87fbfb75b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page); /* * in mm/page_alloc.c */ -extern void set_pageblock_migratetype(struct page *page, int migratetype); -extern int move_freepages_block(struct zone *zone, struct page *page, - int migratetype); extern void __free_pages_bootmem(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE @@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page); #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA -#include <linux/compaction.h> /* * in mm/compaction.c @@ -123,14 +119,11 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ - enum compact_mode mode; /* Compaction mode 
*/ + bool sync; /* Synchronous migration */ int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - - /* Number of UNMOVABLE destination pageblocks skipped during scan */ - unsigned long nr_pageblocks_skipped; }; unsigned long @@ -350,3 +343,7 @@ extern u64 hwpoison_filter_flags_mask; extern u64 hwpoison_filter_flags_value; extern u64 hwpoison_filter_memcg; extern u32 hwpoison_filter_enable; + +extern unsigned long vm_mmap_pgoff(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); diff --git a/mm/migrate.c b/mm/migrate.c index ab81d482ae6f..be26d5cbe56b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page) * is actually a signal that all of the page has become dirty. * Whereas only part of our page may be dirty. */ - __set_page_dirty_nobuffers(newpage); + if (PageSwapBacked(page)) + SetPageDirty(newpage); + else + __set_page_dirty_nobuffers(newpage); } mlock_migrate_page(newpage, page); diff --git a/mm/mmap.c b/mm/mmap.c index 4a9c2a391e28..3edfcdfa42d9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -971,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint) * The caller must hold down_write(¤t->mm->mmap_sem). */ -static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, +unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff) { struct mm_struct * mm = current->mm; struct inode *inode; vm_flags_t vm_flags; - int error; - unsigned long reqprot = prot; /* * Does the application expect PROT_READ to imply PROT_EXEC? @@ -1101,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, } } - error = security_file_mmap(file, reqprot, prot, flags, addr, 0); - if (error) - return error; - return mmap_region(file, addr, len, flags, vm_flags, pgoff); } -unsigned long do_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long offset) -{ - if (unlikely(offset + PAGE_ALIGN(len) < offset)) - return -EINVAL; - if (unlikely(offset & ~PAGE_MASK)) - return -EINVAL; - return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); -} -EXPORT_SYMBOL(do_mmap); - -unsigned long vm_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long offset) -{ - unsigned long ret; - struct mm_struct *mm = current->mm; - - down_write(&mm->mmap_sem); - ret = do_mmap(file, addr, len, prot, flag, offset); - up_write(&mm->mmap_sem); - return ret; -} -EXPORT_SYMBOL(vm_mmap); - SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) @@ -1165,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - down_write(¤t->mm->mmap_sem); - retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - + retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); if (file) fput(file); out: @@ -1629,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (addr & ~PAGE_MASK) return -EINVAL; - return arch_rebalance_pgtables(addr, len); + addr = arch_rebalance_pgtables(addr, len); + error = security_mmap_addr(addr); + return error ? 
error : addr; } EXPORT_SYMBOL(get_unmapped_area); @@ -1819,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma, return -ENOMEM; address &= PAGE_MASK; - error = security_file_mmap(NULL, 0, 0, 0, address, 1); + error = security_mmap_addr(address); if (error) return error; @@ -2159,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) return 0; } -EXPORT_SYMBOL(do_munmap); int vm_munmap(unsigned long start, size_t len) { @@ -2207,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) if (!len) return addr; - error = security_file_mmap(NULL, 0, 0, 0, addr, 1); - if (error) - return error; - flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); @@ -2563,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm, vma->vm_ops = &special_mapping_vmops; vma->vm_private_data = pages; - ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); - if (ret) - goto out; - ret = insert_vm_struct(mm, vma); if (ret) goto out; diff --git a/mm/mremap.c b/mm/mremap.c index db8d983b5a7d..21fed202ddad 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -371,10 +371,6 @@ static unsigned long mremap_to(unsigned long addr, if ((addr <= new_addr) && (addr+old_len) > new_addr) goto out; - ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); - if (ret) - goto out; - ret = do_munmap(mm, new_addr, new_len); if (ret) goto out; @@ -432,15 +428,17 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise * This option implies MREMAP_MAYMOVE. */ -unsigned long do_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr) +SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long charged = 0; + down_write(¤t->mm->mmap_sem); + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) goto out; @@ -530,25 +528,11 @@ unsigned long do_mremap(unsigned long addr, goto out; } - ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); - if (ret) - goto out; ret = move_vma(vma, addr, old_len, new_len, new_addr); } out: if (ret & ~PAGE_MASK) vm_unacct_memory(charged); - return ret; -} - -SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, - unsigned long, new_len, unsigned long, flags, - unsigned long, new_addr) -{ - unsigned long ret; - - down_write(¤t->mm->mmap_sem); - ret = do_mremap(addr, old_len, new_len, flags, new_addr); up_write(¤t->mm->mmap_sem); return ret; } diff --git a/mm/nommu.c b/mm/nommu.c index bb8f4f004a82..c4acfbc09972 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -889,7 +889,6 @@ static int validate_mmap_request(struct file *file, unsigned long *_capabilities) { unsigned long capabilities, rlen; - unsigned long reqprot = prot; int ret; /* do the simple checks first */ @@ -1047,7 +1046,7 @@ static int validate_mmap_request(struct file *file, } /* allow the security API to have its say */ - ret = security_file_mmap(file, reqprot, prot, flags, addr, 0); + ret = security_mmap_addr(addr); if (ret < 0) return ret; @@ -1233,7 +1232,7 @@ enomem: /* * handle mapping creation for uClinux */ -static unsigned long do_mmap_pgoff(struct file *file, +unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -1471,32 +1470,6 
@@ error_getting_region: return -ENOMEM; } -unsigned long do_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long offset) -{ - if (unlikely(offset + PAGE_ALIGN(len) < offset)) - return -EINVAL; - if (unlikely(offset & ~PAGE_MASK)) - return -EINVAL; - return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); -} -EXPORT_SYMBOL(do_mmap); - -unsigned long vm_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long offset) -{ - unsigned long ret; - struct mm_struct *mm = current->mm; - - down_write(&mm->mmap_sem); - ret = do_mmap(file, addr, len, prot, flag, offset); - up_write(&mm->mmap_sem); - return ret; -} -EXPORT_SYMBOL(vm_mmap); - SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) @@ -1513,9 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - down_write(¤t->mm->mmap_sem); - retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); + ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); if (file) fput(file); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6092f331b32e..44030096da63 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; -void set_pageblock_migratetype(struct page *page, int migratetype) +static void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled)) @@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone, return pages_moved; } -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype) +static int move_freepages_block(struct zone *zone, struct page *page, + int migratetype) { unsigned long start_pfn, end_pfn; struct page *start_page, *end_page; @@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(start)), - .mode = COMPACT_SYNC, + .sync = true, }; INIT_LIST_HEAD(&cc.migratepages); diff --git a/mm/page_io.c b/mm/page_io.c index dc76b4d0611e..34f02923744c 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -18,6 +18,7 @@ #include <linux/bio.h> #include <linux/swapops.h> #include <linux/writeback.h> +#include <linux/frontswap.h> #include <asm/pgtable.h> static struct bio *get_swap_bio(gfp_t gfp_flags, @@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) unlock_page(page); goto out; } + if (frontswap_store(page) == 0) { + set_page_writeback(page); + unlock_page(page); + end_page_writeback(page); + goto out; + } bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); @@ -122,6 +129,11 @@ int swap_readpage(struct page *page) VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); + if (frontswap_load(page) == 0) { + SetPageUptodate(page); + unlock_page(page); + goto out; + } bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); diff --git a/mm/shmem.c b/mm/shmem.c index d576b84d913c..585bd220a21e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2439,11 +2439,9 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb, return dentry; } -static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int 
*len, - int connectable) +static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, + struct inode *parent) { - struct inode *inode = dentry->d_inode; - if (*len < 3) { *len = 3; return 255; diff --git a/mm/slub.c b/mm/slub.c index 80848cd3901c..8c691fa1cf3c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) inc_slabs_node(s, page_to_nid(page), page->objects); page->slab = s; - page->flags |= 1 << PG_slab; + __SetPageSlab(page); start = page_address(page); @@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s, freelist = page->freelist; counters = page->counters; new.counters = counters; - if (mode) + if (mode) { new.inuse = page->objects; + new.freelist = NULL; + } else { + new.freelist = freelist; + } VM_BUG_ON(new.frozen); new.frozen = 1; } while (!__cmpxchg_double_slab(s, page, freelist, counters, - NULL, new.counters, + new.freelist, new.counters, "lock and freeze")); remove_partial(n, page); @@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s, object = t; available = page->objects - page->inuse; } else { - page->freelist = t; available = put_cpu_partial(s, page, 0); stat(s, CPU_PARTIAL_NODE); } @@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s, /* * Get a page from somewhere. Search in increasing NUMA distances. */ -static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags, +static void *get_any_partial(struct kmem_cache *s, gfp_t flags, struct kmem_cache_cpu *c) { #ifdef CONFIG_NUMA @@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags, } static void -init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) +init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; spin_lock_init(&n->list_lock); @@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif - init_kmem_cache_node(n, kmem_cache_node); + init_kmem_cache_node(n); inc_slabs_node(kmem_cache_node, node, page->objects); add_partial(n, page, DEACTIVATE_TO_HEAD); @@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s) } s->node[node] = n; - init_kmem_cache_node(n, s); + init_kmem_cache_node(n); } return 1; } @@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg) ret = -ENOMEM; goto out; } - init_kmem_cache_node(n, s); + init_kmem_cache_node(n); s->node[nid] = n; } out: @@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, } return s; } - kfree(n); kfree(s); } + kfree(n); err: up_write(&slub_lock); diff --git a/mm/swapfile.c b/mm/swapfile.c index 457b10baef59..de5bc51c4a66 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -31,6 +31,8 @@ #include <linux/memcontrol.h> #include <linux/poll.h> #include <linux/oom.h> +#include <linux/frontswap.h> +#include <linux/swapfile.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> @@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, static void free_swap_count_continuations(struct swap_info_struct *); static sector_t map_swap_entry(swp_entry_t, struct block_device**); -static DEFINE_SPINLOCK(swap_lock); +DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; long nr_swap_pages; long total_swap_pages; @@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const 
char Unused_offset[] = "Unused swap offset entry "; -static struct swap_list_t swap_list = {-1, -1}; +struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct *swap_info[MAX_SWAPFILES]; +struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; + frontswap_invalidate_page(p->type, offset); if ((p->flags & SWP_BLKDEV) && disk->fops->swap_slot_free_notify) disk->fops->swap_slot_free_notify(p->bdev, offset); @@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm, } /* - * Scan swap_map from current position to next entry still in use. + * Scan swap_map (or frontswap_map if frontswap parameter is true) + * from current position to next entry still in use. * Recycle to start on reaching the end, returning 0 when empty. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, - unsigned int prev) + unsigned int prev, bool frontswap) { unsigned int max = si->max; unsigned int i = prev; @@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, prev = 0; i = 1; } + if (frontswap) { + if (frontswap_test(si, i)) + break; + else + continue; + } count = si->swap_map[i]; if (count && swap_count(count) != SWAP_MAP_BAD) break; @@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, * We completely avoid races by reading each swap page in advance, * and then search for the process using it. All the necessary * page table adjustments can then be made atomically. + * + * if the boolean frontswap is true, only unuse pages_to_unuse pages; + * pages_to_unuse==0 means all pages; ignored if frontswap is false */ -static int try_to_unuse(unsigned int type) +int try_to_unuse(unsigned int type, bool frontswap, + unsigned long pages_to_unuse) { struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; @@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type) * one pass through swap_map is enough, but not necessarily: * there are races when an instance of an entry might be missed. */ - while ((i = find_next_to_unuse(si, i)) != 0) { + while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { if (signal_pending(current)) { retval = -EINTR; break; @@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type) * interactive performance. 
*/ cond_resched(); + if (frontswap && pages_to_unuse > 0) { + if (!--pages_to_unuse) + break; + } } mmput(start_mm); @@ -1486,7 +1504,8 @@ bad_bmap: } static void enable_swap_info(struct swap_info_struct *p, int prio, - unsigned char *swap_map) + unsigned char *swap_map, + unsigned long *frontswap_map) { int i, prev; @@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, else p->prio = --least_priority; p->swap_map = swap_map; + frontswap_map_set(p, frontswap_map); p->flags |= SWP_WRITEOK; nr_swap_pages += p->pages; total_swap_pages += p->pages; @@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, swap_list.head = swap_list.next = p->type; else swap_info[prev]->next = p->type; + frontswap_init(p->type); spin_unlock(&swap_lock); } @@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); - err = try_to_unuse(type); + err = try_to_unuse(type, false, 0); /* force all pages to be unused */ compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); if (err) { @@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) * sys_swapoff for this swap_info_struct at this point. */ /* re-insert swap space back into swap_list */ - enable_swap_info(p, p->prio, p->swap_map); + enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); goto out_dput; } @@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) swap_map = p->swap_map; p->swap_map = NULL; p->flags = 0; + frontswap_invalidate_area(type); spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); vfree(swap_map); + vfree(frontswap_map_get(p)); /* Destroy swap account informatin */ swap_cgroup_swapoff(type); @@ -1988,6 +2011,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; + unsigned long *frontswap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; @@ -2071,6 +2095,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = nr_extents; goto bad_swap; } + /* frontswap enabled? set up bit-per-page map for frontswap */ + if (frontswap_enabled) + frontswap_map = vzalloc(maxpages / sizeof(long)); if (p->bdev) { if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { @@ -2086,14 +2113,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) if (swap_flags & SWAP_FLAG_PREFER) prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; - enable_swap_info(p, prio, swap_map); + enable_swap_info(p, prio, swap_map, frontswap_map); printk(KERN_INFO "Adding %uk swap on %s. " - "Priority:%d extents:%d across:%lluk %s%s\n", + "Priority:%d extents:%d across:%lluk %s%s%s\n", p->pages<<(PAGE_SHIFT-10), name, p->prio, nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), (p->flags & SWP_SOLIDSTATE) ? "SS" : "", - (p->flags & SWP_DISCARDABLE) ? "D" : ""); + (p->flags & SWP_DISCARDABLE) ? "D" : "", + (frontswap_map) ? 
"FS" : ""); mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); diff --git a/mm/util.c b/mm/util.c index ae962b31de88..8c7265afa29f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -4,6 +4,7 @@ #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> +#include <linux/security.h> #include <asm/uaccess.h> #include "internal.h" @@ -341,6 +342,35 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start, } EXPORT_SYMBOL_GPL(get_user_pages_fast); +unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long pgoff) +{ + unsigned long ret; + struct mm_struct *mm = current->mm; + + ret = security_mmap_file(file, prot, flag); + if (!ret) { + down_write(&mm->mmap_sem); + ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff); + up_write(&mm->mmap_sem); + } + return ret; +} + +unsigned long vm_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + if (unlikely(offset + PAGE_ALIGN(len) < offset)) + return -EINVAL; + if (unlikely(offset & ~PAGE_MASK)) + return -EINVAL; + + return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); +} +EXPORT_SYMBOL(vm_mmap); + /* Tracepoints definitions. */ EXPORT_TRACEPOINT_SYMBOL(kmalloc); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index aa5d73b786ac..d1820ff14aee 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -710,9 +710,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) break; } - tty_unlock(tty); + tty_unlock(); schedule(); - tty_lock(tty); + tty_lock(); } set_current_state(TASK_RUNNING); remove_wait_queue(&dev->wait, &wait); diff --git a/net/core/sock.c b/net/core/sock.c index 653f8c0aedc5..9e5b71fda6ec 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, gfp_t gfp_mask; long timeo; int err; + int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + + err = -EMSGSIZE; + if (npages > MAX_SKB_FRAGS) + goto failure; gfp_mask = sk->sk_allocation; if (gfp_mask & __GFP_WAIT) @@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { skb = alloc_skb(header_len, gfp_mask); if (skb) { - int npages; int i; /* No pages, we're done... */ if (!data_len) break; - npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; skb->truesize += data_len; skb_shinfo(skb)->nr_frags = npages; for (i = 0; i < npages; i++) { diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 95e61596e605..f9ee7417f6a0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, - sk->sk_protocol, inet_sk_flowi_flags(sk), + sk->sk_protocol, + inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS, (opt && opt->opt.srr) ? 
opt->opt.faddr : ireq->rmt_addr, ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); security_req_classify_flow(req, flowi4_to_flowi(fl4)); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a43b87dfe800..c8d28c433b2b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp) + struct request_values *rvp, + u16 queue_mapping) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; @@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, if (skb) { __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); + skb_set_queue_mapping(skb, queue_mapping); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); @@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); - return tcp_v4_send_synack(sk, NULL, req, rvp); + return tcp_v4_send_synack(sk, NULL, req, rvp, 0); } /* @@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_rsk(req)->snt_synack = tcp_time_stamp; if (tcp_v4_send_synack(sk, dst, req, - (struct request_values *)&tmp_ext) || + (struct request_values *)&tmp_ext, + skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 554d5999abc4..3a9aec29581a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -476,7 +476,8 @@ out: static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, - struct request_values *rvp) + struct request_values *rvp, + u16 queue_mapping) { struct inet6_request_sock *treq = inet6_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); @@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); fl6.daddr = treq->rmt_addr; + skb_set_queue_mapping(skb, queue_mapping); err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); - return tcp_v6_send_synack(sk, req, rvp); + return tcp_v6_send_synack(sk, req, rvp, 0); } static void tcp_v6_reqsk_destructor(struct request_sock *req) @@ -1213,7 +1215,8 @@ have_isn: security_inet_conn_request(sk, skb, req); if (tcp_v6_send_synack(sk, req, - (struct request_values *)&tmp_ext) || + (struct request_values *)&tmp_ext, + skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 8522a4793374..ca8e0a57d945 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -16,8 +16,6 @@ #include <net/netlink.h> #include <net/pkt_sched.h> -extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */ - /* * The ATM queuing discipline provides a framework for invoking classifiers * (aka "filters"), which in turn select classes of this queuing discipline. 
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 032daab449b0..8ea39aabe948 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -490,17 +490,9 @@ static int common_mmap(int op, struct file *file, unsigned long prot, return common_file_perm(op, file, mask); } -static int apparmor_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static int apparmor_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { - int rc = 0; - - /* do DAC check */ - rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); - if (rc || addr_only) - return rc; - return common_mmap(OP_FMMAP, file, prot, flags); } @@ -646,7 +638,8 @@ static struct security_operations apparmor_ops = { .file_permission = apparmor_file_permission, .file_alloc_security = apparmor_file_alloc_security, .file_free_security = apparmor_file_free_security, - .file_mmap = apparmor_file_mmap, + .mmap_file = apparmor_mmap_file, + .mmap_addr = cap_mmap_addr, .file_mprotect = apparmor_file_mprotect, .file_lock = apparmor_file_lock, diff --git a/security/capability.c b/security/capability.c index fca889676c5e..61095df8b89a 100644 --- a/security/capability.c +++ b/security/capability.c @@ -949,7 +949,8 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, file_alloc_security); set_to_cap_if_null(ops, file_free_security); set_to_cap_if_null(ops, file_ioctl); - set_to_cap_if_null(ops, file_mmap); + set_to_cap_if_null(ops, mmap_addr); + set_to_cap_if_null(ops, mmap_file); set_to_cap_if_null(ops, file_mprotect); set_to_cap_if_null(ops, file_lock); set_to_cap_if_null(ops, file_fcntl); diff --git a/security/commoncap.c b/security/commoncap.c index e771cb1b2d79..6dbae4650abe 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -958,22 +958,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages) } /* - * cap_file_mmap - check if able to map given addr - * @file: unused - * @reqprot: unused - * @prot: unused - * @flags: unused + * cap_mmap_addr - check if able to map given addr * @addr: address attempting to be mapped - * @addr_only: unused * * If the process is attempting to map memory below dac_mmap_min_addr they need * CAP_SYS_RAWIO. The other parameters to this function are unused by the * capability security module. Returns 0 if this mapping should be allowed * -EPERM if not. 
*/ -int cap_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +int cap_mmap_addr(unsigned long addr) { int ret = 0; @@ -986,3 +979,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot, } return ret; } + +int cap_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +{ + return 0; +} diff --git a/security/security.c b/security/security.c index 5497a57fba01..3efc9b12aef4 100644 --- a/security/security.c +++ b/security/security.c @@ -20,6 +20,9 @@ #include <linux/ima.h> #include <linux/evm.h> #include <linux/fsnotify.h> +#include <linux/mman.h> +#include <linux/mount.h> +#include <linux/personality.h> #include <net/flow.h> #define MAX_LSM_EVM_XATTR 2 @@ -657,18 +660,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return security_ops->file_ioctl(file, cmd, arg); } -int security_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { - int ret; + /* + * Does we have PROT_READ and does the application expect + * it to imply PROT_EXEC? If not, nothing to talk about... + */ + if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) + return prot; + if (!(current->personality & READ_IMPLIES_EXEC)) + return prot; + /* + * if that's an anonymous mapping, let it. + */ + if (!file) + return prot | PROT_EXEC; + /* + * ditto if it's not on noexec mount, except that on !MMU we need + * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case + */ + if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { +#ifndef CONFIG_MMU + unsigned long caps = 0; + struct address_space *mapping = file->f_mapping; + if (mapping && mapping->backing_dev_info) + caps = mapping->backing_dev_info->capabilities; + if (!(caps & BDI_CAP_EXEC_MAP)) + return prot; +#endif + return prot | PROT_EXEC; + } + /* anything on noexec mount won't get PROT_EXEC */ + return prot; +} - ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); +int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags) +{ + int ret; + ret = security_ops->mmap_file(file, prot, + mmap_prot(file, prot), flags); if (ret) return ret; return ima_file_mmap(file, prot); } +int security_mmap_addr(unsigned long addr) +{ + return security_ops->mmap_addr(addr); +} + int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index fa2341b68331..372ec6502aa8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3083,9 +3083,7 @@ error: return rc; } -static int selinux_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static int selinux_mmap_addr(unsigned long addr) { int rc = 0; u32 sid = current_sid(); @@ -3104,10 +3102,12 @@ static int selinux_file_mmap(struct file *file, unsigned long reqprot, } /* do DAC check on address space usage */ - rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); - if (rc || addr_only) - return rc; + return cap_mmap_addr(addr); +} +static int selinux_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +{ if (selinux_checkreqprot) prot = reqprot; @@ -5570,7 +5570,8 @@ static struct security_operations selinux_ops 
= { .file_alloc_security = selinux_file_alloc_security, .file_free_security = selinux_file_free_security, .file_ioctl = selinux_file_ioctl, - .file_mmap = selinux_file_mmap, + .mmap_file = selinux_mmap_file, + .mmap_addr = selinux_mmap_addr, .file_mprotect = selinux_file_mprotect, .file_lock = selinux_file_lock, .file_fcntl = selinux_file_fcntl, diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 4e93f9ef970b..3ad290251288 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1259,12 +1259,8 @@ static int sel_make_bools(void) if (!inode) goto out; - ret = -EINVAL; - len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]); - if (len < 0) - goto out; - ret = -ENAMETOOLONG; + len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]); if (len >= PAGE_SIZE) goto out; @@ -1557,19 +1553,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino) static ssize_t sel_read_class(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - ssize_t rc, len; - char *page; unsigned long ino = file->f_path.dentry->d_inode->i_ino; - - page = (char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - - len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino)); - rc = simple_read_from_buffer(buf, count, ppos, page, len); - free_page((unsigned long)page); - - return rc; + char res[TMPBUFLEN]; + ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino)); + return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_class_ops = { @@ -1580,19 +1567,10 @@ static const struct file_operations sel_class_ops = { static ssize_t sel_read_perm(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - ssize_t rc, len; - char *page; unsigned long ino = file->f_path.dentry->d_inode->i_ino; - - page = (char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - - len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino)); - rc = simple_read_from_buffer(buf, count, ppos, page, len); - free_page((unsigned long)page); - - return rc; + char res[TMPBUFLEN]; + ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino)); + return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_perm_ops = { diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index d583c0545808..ee0bb5735f35 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1171,7 +1171,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd, } /** - * smack_file_mmap : + * smack_mmap_file : * Check permissions for a mmap operation. The @file may be NULL, e.g. * if mapping anonymous memory. * @file contains the file structure for file to map (may be NULL). @@ -1180,10 +1180,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd, * @flags contains the operational flags. * Return 0 if permission is granted. 
  */
-static int smack_file_mmap(struct file *file,
+static int smack_mmap_file(struct file *file,
 			   unsigned long reqprot, unsigned long prot,
-			   unsigned long flags, unsigned long addr,
-			   unsigned long addr_only)
+			   unsigned long flags)
 {
 	struct smack_known *skp;
 	struct smack_rule *srp;
@@ -1198,11 +1197,6 @@ static int smack_file_mmap(struct file *file,
 	int tmay;
 	int rc;
 
-	/* do DAC check on address space usage */
-	rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
-	if (rc || addr_only)
-		return rc;
-
 	if (file == NULL || file->f_dentry == NULL)
 		return 0;
 
@@ -3482,7 +3476,8 @@ struct security_operations smack_ops = {
 	.file_ioctl = smack_file_ioctl,
 	.file_lock = smack_file_lock,
 	.file_fcntl = smack_file_fcntl,
-	.file_mmap = smack_file_mmap,
+	.mmap_file = smack_mmap_file,
+	.mmap_addr = cap_mmap_addr,
 	.file_set_fowner = smack_file_set_fowner,
 	.file_send_sigiotask = smack_file_send_sigiotask,
 	.file_receive = smack_file_receive,
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0a5027b94714..b8ac8710f47f 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1988,6 +1988,13 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
 
 	period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
 	rate = hdspm_calc_dds_value(hdspm, period);
 
+	if (rate > 207000) {
+		/* Unreasonable high sample rate as seen on PCI MADI cards.
+		 * Use the cached value instead.
+		 */
+		rate = hdspm->system_sample_rate;
+	}
+
 	return rate;
 }
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index cf3ed0362c9c..28dd76c7cb1c 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -543,7 +543,7 @@ static int imx_ssi_probe(struct platform_device *pdev)
 			ret);
 		goto failed_clk;
 	}
-	clk_enable(ssi->clk);
+	clk_prepare_enable(ssi->clk);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -641,7 +641,7 @@ failed_ac97:
 failed_ioremap:
 	release_mem_region(res->start, resource_size(res));
 failed_get_resource:
-	clk_disable(ssi->clk);
+	clk_disable_unprepare(ssi->clk);
 	clk_put(ssi->clk);
 failed_clk:
 	kfree(ssi);
@@ -664,7 +664,7 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
 
 	iounmap(ssi->base);
 	release_mem_region(res->start, resource_size(res));
-	clk_disable(ssi->clk);
+	clk_disable_unprepare(ssi->clk);
 	clk_put(ssi->clk);
 	kfree(ssi);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 7cee22515d9d..2ef98536f1da 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1052,6 +1052,13 @@ static int fsi_dma_quit(struct fsi_priv *fsi, struct fsi_stream *io)
 	return 0;
 }
 
+static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
+{
+	struct snd_pcm_runtime *runtime = io->substream->runtime;
+
+	return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
+}
+
 static void fsi_dma_complete(void *data)
 {
 	struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1061,7 +1068,7 @@ static void fsi_dma_complete(void *data)
 	enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
 		DMA_TO_DEVICE : DMA_FROM_DEVICE;
 
-	dma_sync_single_for_cpu(dai->dev, io->dma,
+	dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
 			samples_to_bytes(runtime, io->period_samples), dir);
 
 	io->buff_sample_pos += io->period_samples;
@@ -1078,13 +1085,6 @@ static void fsi_dma_complete(void *data)
 	snd_pcm_period_elapsed(io->substream);
 }
 
-static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
-{
-	struct snd_pcm_runtime *runtime = io->substream->runtime;
-
-	return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
-}
-
 static void fsi_dma_do_tasklet(unsigned long data)
 {
 	struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1110,7 +1110,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
 	len = samples_to_bytes(runtime, io->period_samples);
 	buf = fsi_dma_get_area(io);
 
-	dma_sync_single_for_device(dai->dev, io->dma, len, dir);
+	dma_sync_single_for_device(dai->dev, buf, len, dir);
 
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
@@ -1172,9 +1172,16 @@ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
 static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
 				    int start)
 {
+	struct fsi_master *master = fsi_get_master(fsi);
+	u32 clk = fsi_is_port_a(fsi) ? CRA : CRB;
 	u32 enable = start ? DMA_ON : 0;
 
 	fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable);
+
+	dmaengine_terminate_all(io->chan);
+
+	if (fsi_is_clk_master(fsi))
+		fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
 }
 
 static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 24839d932648..cdf8b7601973 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -788,6 +788,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
 	int count = 0, needs_knot = 0;
 	int err;
 
+	kfree(subs->rate_list.list);
+	subs->rate_list.list = NULL;
+
 	list_for_each_entry(fp, &subs->fmt_list, list) {
 		if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
 			return 0;