author		David S. Miller <davem@davemloft.net>	2019-01-27 10:43:17 -0800
committer	David S. Miller <davem@davemloft.net>	2019-01-27 10:43:17 -0800
commit		1d68101367a92336e633d0c3681bf8c86644e124
tree		8fdacef561481c25c71641b8fa91a5dfa0ba7f96 /include/linux
parent		085c4c7dd2b6558fb079fad07d6e9064f5e0b4c2
parent		1fc7f56db7a7c467e46a5d2e2a009d2f337e0338
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/backing-dev-defs.h	1
-rw-r--r--	include/linux/blk_types.h	2
-rw-r--r--	include/linux/hid.h	3
-rw-r--r--	include/linux/hyperv.h	5
-rw-r--r--	include/linux/if_arp.h	1
-rw-r--r--	include/linux/libnvdimm.h	1
-rw-r--r--	include/linux/xarray.h	227
7 files changed, 177 insertions, 63 deletions
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c31157135598..07e02d6df5ad 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@ struct backing_dev_info {
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
 	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5c7e7f859a24..d66bf5f32610 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -287,7 +287,7 @@ enum req_opf {
 	REQ_OP_DISCARD		= 3,
 	/* securely erase sectors */
 	REQ_OP_SECURE_ERASE	= 5,
-	/* seset a zone write pointer */
+	/* reset a zone write pointer */
 	REQ_OP_ZONE_RESET	= 6,
 	/* write the same sector many times */
 	REQ_OP_WRITE_SAME	= 7,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d99287327ef2..f9707d1dcb58 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -430,7 +430,7 @@ struct hid_local {
  */
 
 struct hid_collection {
-	struct hid_collection *parent;
+	int parent_idx; /* device->collection */
 	unsigned type;
 	unsigned usage;
 	unsigned level;
@@ -658,7 +658,6 @@ struct hid_parser {
 	unsigned int         *collection_stack;
 	unsigned int          collection_stack_ptr;
 	unsigned int          collection_stack_size;
-	struct hid_collection *active_collection;
 	struct hid_device    *device;
 	unsigned int          scan_flags;
 };
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f0885cc01db6..dcb6977afce9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
 	u32 bytes_avail_towrite;
 };
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info);
 
 /* Vmbus interface */
 #define vmbus_driver_register(driver)	\
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea18b69..e44746de95cd 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
 	case ARPHRD_IPGRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
+	case ARPHRD_RAWIP:
 		return false;
 	default:
 		return true;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 7315977b64da..ad609617aeb8 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -235,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 			cmd_mask, num_flush, flush_wpq, NULL, NULL);
 }
 
-int nvdimm_security_setup_events(struct nvdimm *nvdimm);
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
 u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f492e21c4aa2..5d9d318bcf7a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
  */
 static inline bool xa_is_err(const void *entry)
 {
-	return unlikely(xa_is_internal(entry));
+	return unlikely(xa_is_internal(entry) &&
+			entry >= xa_mk_internal(-MAX_ERRNO));
 }
 
 /**
@@ -286,7 +287,6 @@ struct xarray {
  */
 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
 
-void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_erase(struct xarray *, unsigned long index);
@@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
 void xa_destroy(struct xarray *);
 
 /**
+ * xa_init_flags() - Initialise an empty XArray with flags.
+ * @xa: XArray.
+ * @flags: XA_FLAG values.
+ *
+ * If you need to initialise an XArray with special flags (eg you need
+ * to take the lock from interrupt context), use this function instead
+ * of xa_init().
+ *
+ * Context: Any context.
+ */
+static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
+{
+	spin_lock_init(&xa->xa_lock);
+	xa->xa_flags = flags;
+	xa->xa_head = NULL;
+}
+
+/**
  * xa_init() - Initialise an empty XArray.
  * @xa: XArray.
  *
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
+ * @index: Index of @entry.
  * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you
+ * want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start)			\
+	for (index = start,						\
+	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
+	     entry;							\
+	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
  * @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
  *
- * Initialise @index to the lowest index you want to retrieve from the
- * array.  During the iteration, @entry will have the value of the entry
- * stored in @xa at @index.  The iteration will skip all entries in the
- * array which do not match @filter.  You may modify @index during the
- * iteration if you want to skip or reprocess indices.  It is safe to modify
- * the array during the iteration.  At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you want
+ * to skip or reprocess indices.  It is safe to modify the array during the
+ * iteration.  At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
  *
  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
  * to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context.  Takes and releases the RCU lock.
  */
-#define xa_for_each(xa, entry, index, max, filter) \
-	for (entry = xa_find(xa, &index, max, filter); entry; \
-	     entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+	xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  The iteration will skip all entries in the array
+ * which do not match @filter.  You may modify @index during the iteration
+ * if you want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead.  The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
 
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
@@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
 /**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context.  Expects xa_lock to be held on entry.  May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
-{
-	void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
-}
-
-/**
  * xa_store_bh() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 }
 
 /**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
  *
- * Context: Process context.  Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context.  Takes and releases the xa_lock.  May sleep if
+ * the @gfp flags permit.
  * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
 static inline int xa_insert(struct xarray *xa, unsigned long index,
 		void *entry, gfp_t gfp)
 {
-	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
+	int err;
+
+	xa_lock(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.  May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present.  Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.  May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
 }
 
 /**
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_ZERO_ENTRY		xa_mk_internal(256)
-#define XA_RETRY_ENTRY		xa_mk_internal(257)
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+#define XA_ZERO_ENTRY		xa_mk_internal(257)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry)
 }
 
 /**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+/**
  * typedef xa_update_node_t - A callback function from the XArray.
  * @node: The node which is being processed
  *
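Usage notes (sketches, not part of the commit):

With xa_init_flags() now a static inline and xa_insert() rebuilt on top of __xa_insert(), set-up and insertion after this merge look roughly like the sketch below. The names struct session and session_cache are hypothetical, chosen only for illustration.

	#include <linux/xarray.h>

	struct session;				/* hypothetical payload type */

	static struct xarray session_cache;

	static void session_cache_setup(void)
	{
		/* The xa_lock will also be taken from interrupt context. */
		xa_init_flags(&session_cache, XA_FLAGS_LOCK_IRQ);
	}

	static int session_cache_add(unsigned long id, struct session *s)
	{
		/*
		 * xa_insert_irq() takes the xa_lock with interrupts disabled
		 * and calls __xa_insert(): 0 on success, -EEXIST if @id is
		 * occupied (even by a reserved entry), -ENOMEM on allocation
		 * failure.
		 */
		return xa_insert_irq(&session_cache, id, s, GFP_KERNEL);
	}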
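The xa_for_each() signature change is visible at every call site: the index now comes before the entry, and the old max and filter arguments are gone, replaced by xa_for_each_start() and xa_for_each_marked(). A sketch, reusing the hypothetical session_cache above:

	static void session_cache_dump(void)
	{
		unsigned long index;
		void *entry;

		/* Old form was xa_for_each(&xa, entry, index, max, filter). */
		xa_for_each(&session_cache, index, entry)
			pr_info("session %lu -> %p\n", index, entry);

		/* Resume iteration at index 100 rather than 0. */
		xa_for_each_start(&session_cache, index, entry, 100)
			pr_info("session %lu\n", index);

		/* Visit only entries carrying XA_MARK_0. */
		xa_for_each_marked(&session_cache, index, entry, XA_MARK_0)
			pr_info("marked session %lu\n", index);
	}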
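The tightened xa_is_err() matters to callers of xa_cmpxchg() and friends: only internal entries in the errno range now test as errors, so other internal values such as retry entries no longer do. A sketch of the usual error check, again with hypothetical names:

	static int session_cache_publish(unsigned long id, struct session *s)
	{
		void *curr = xa_cmpxchg_irq(&session_cache, id, NULL, s,
					    GFP_KERNEL);

		if (xa_is_err(curr))		/* e.g. -ENOMEM from allocation */
			return xa_err(curr);
		return curr ? -EEXIST : 0;	/* non-NULL: slot already taken */
	}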
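On the Hyper-V side, hv_ringbuffer_get_debuginfo() now returns an int so it can refuse a ring buffer that was never initialised; callers have to check the result. A hypothetical caller:

	#include <linux/hyperv.h>

	static int dump_ring(const struct hv_ring_buffer_info *ring_info)
	{
		struct hv_ring_buffer_debug_info dbg;
		int ret;

		ret = hv_ringbuffer_get_debuginfo(ring_info, &dbg);
		if (ret)	/* e.g. -EINVAL for an uninitialised ring */
			return ret;

		pr_info("bytes avail towrite: %u\n", dbg.bytes_avail_towrite);
		return 0;
	}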
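In the HID change, struct hid_collection no longer carries a parent pointer; parent_idx indexes into device->collection instead. A sketch of walking a collection's ancestry, assuming (as hid-core does) that a negative parent_idx marks a root collection:

	#include <linux/hid.h>

	static unsigned int collection_depth(struct hid_device *hdev,
					     struct hid_collection *col)
	{
		unsigned int depth = 0;

		while (col->parent_idx >= 0) {	/* -1 assumed: no parent */
			col = &hdev->collection[col->parent_idx];
			depth++;
		}
		return depth;
	}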