Diffstat (limited to 'include')
665 files changed, 26171 insertions, 8499 deletions
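Editor's note (not part of the patch): among the changes below, include/acpi/actbl1.h gains NFIT/NVDIMM device-handle macros. The following minimal user-space sketch shows how those build/extract macros compose a handle; the macro bodies are copied from the hunk further down, and the field values are arbitrary examples chosen for illustration.

/* Sketch only: mirrors the ACPI_NFIT_* handle macros added in actbl1.h. */
#include <stdio.h>
#include <stdint.h>

#define ACPI_NFIT_DIMM_NUMBER_MASK      0x0000000F
#define ACPI_NFIT_NODE_ID_MASK          0x0FFF0000

#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
#define ACPI_NFIT_MEMORY_ID_OFFSET      8
#define ACPI_NFIT_SOCKET_ID_OFFSET      12
#define ACPI_NFIT_NODE_ID_OFFSET        16

/* Pack dimm/channel/memory/socket/node fields into one 32-bit handle. */
#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
	((dimm) | \
	 ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
	 ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
	 ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
	 ((node) << ACPI_NFIT_NODE_ID_OFFSET))

/* Unpack two of the fields again. */
#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
	((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
#define ACPI_NFIT_GET_NODE_ID(handle) \
	(((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)

int main(void)
{
	/* Example values: dimm 2, channel 1, memory controller 0, socket 3, node 5. */
	uint32_t handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(2, 1, 0, 3, 5);

	printf("handle = 0x%08x\n", (unsigned int)handle);                     /* 0x00053012 */
	printf("dimm   = %u\n", (unsigned int)ACPI_NFIT_GET_DIMM_NUMBER(handle)); /* 2 */
	printf("node   = %u\n", (unsigned int)ACPI_NFIT_GET_NODE_ID(handle));     /* 5 */
	return 0;
}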
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 6db3b4668b1a..ffe364fa4040 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -145,9 +145,9 @@ #define ACPI_ADDRESS_RANGE_MAX 2 -/* Maximum number of While() loops before abort */ +/* Maximum time (default 30s) of While() loops before abort */ -#define ACPI_MAX_LOOP_COUNT 0x000FFFFF +#define ACPI_MAX_LOOP_TIMEOUT 30 /****************************************************************************** * diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 17d61b1f2511..3c46f0ef5f7a 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -130,8 +130,9 @@ struct acpi_exception_info { #define AE_HEX_OVERFLOW EXCEP_ENV (0x0020) #define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021) #define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022) +#define AE_END_OF_TABLE EXCEP_ENV (0x0023) -#define AE_CODE_ENV_MAX 0x0022 +#define AE_CODE_ENV_MAX 0x0023 /* * Programmer exceptions @@ -195,7 +196,7 @@ struct acpi_exception_info { #define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E) #define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F) #define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) -#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021) +#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021) #define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) #define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) @@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = { EXCEP_TXT("AE_DECIMAL_OVERFLOW", "Overflow during ASCII decimal-to-binary conversion"), EXCEP_TXT("AE_OCTAL_OVERFLOW", - "Overflow during ASCII octal-to-binary conversion") + "Overflow during ASCII octal-to-binary conversion"), + EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table") }; static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = { @@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { "The length of a Resource Descriptor in the AML is incorrect"), EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS", "A memory, I/O, or PCI configuration address is invalid"), - EXCEP_TXT("AE_AML_INFINITE_LOOP", - "An apparent infinite AML While loop, method was aborted"), + EXCEP_TXT("AE_AML_LOOP_TIMEOUT", + "An AML While loop exceeded the maximum execution time"), EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", "A namespace node is uninitialized or unresolved"), EXCEP_TXT("AE_AML_TARGET_TYPE", diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 79287629c888..c9608b0b80c6 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, bool acpi_dev_found(const char *hid); bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); +const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv); + #ifdef CONFIG_ACPI #include <linux/proc_fs.h> diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index e1dd1a8d42b6..c589c3e12d90 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20170831 +#define ACPI_CA_VERSION 0x20171215 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0); ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE); /* - * Maximum number of While() loop iterations before forced method abort. + * Maximum timeout for While() loop iterations before forced method abort. 
* This mechanism is intended to prevent infinite loops during interpreter * execution within a host kernel. */ -ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT); +ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT); /* * This mechanism is used to trace a specified AML method. The method is diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 7a89e6de94da..4c304bf4d591 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -69,9 +69,10 @@ #define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */ #define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ #define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ -#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */ +#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ +#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ #define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ #define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ #define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ @@ -1149,7 +1150,8 @@ enum acpi_nfit_type { ACPI_NFIT_TYPE_CONTROL_REGION = 4, ACPI_NFIT_TYPE_DATA_REGION = 5, ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6, - ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */ + ACPI_NFIT_TYPE_CAPABILITIES = 7, + ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */ }; /* @@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address { struct acpi_nfit_header header; u16 range_index; u16 flags; - u32 reserved; /* Reseved, must be zero */ + u32 reserved; /* Reserved, must be zero */ u32 proximity_domain; u8 range_guid[16]; u64 address; @@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address { u64 hint_address[1]; /* Variable length */ }; +/* 7: Platform Capabilities Structure */ + +struct acpi_nfit_capabilities { + struct acpi_nfit_header header; + u8 highest_capability; + u8 reserved[3]; /* Reserved, must be zero */ + u32 capabilities; + u32 reserved2; +}; + +/* Capabilities Flags */ + +#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */ +#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */ + +/* + * NFIT/DVDIMM device handle support - used as the _ADR for each NVDIMM + */ +struct nfit_device_handle { + u32 handle; +}; + +/* Device handle construction and extraction macros */ + +#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F +#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0 +#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00 +#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000 +#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000 + +#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0 +#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4 +#define ACPI_NFIT_MEMORY_ID_OFFSET 8 +#define ACPI_NFIT_SOCKET_ID_OFFSET 12 +#define ACPI_NFIT_NODE_ID_OFFSET 16 + +/* Macro to construct a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \ + ((dimm) | \ + ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \ + ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \ + ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \ + ((node) << ACPI_NFIT_NODE_ID_OFFSET)) + +/* Macros to extract individual fields from a NFIT/NVDIMM device handle */ + +#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \ + ((handle) & 
ACPI_NFIT_DIMM_NUMBER_MASK) + +#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \ + (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET) + +#define ACPI_NFIT_GET_MEMORY_ID(handle) \ + (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET) + +#define ACPI_NFIT_GET_SOCKET_ID(handle) \ + (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET) + +#define ACPI_NFIT_GET_NODE_ID(handle) \ + (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET) + /******************************************************************************* * - * PDTT - Processor Debug Trigger Table (ACPI 6.2) + * PDTT - Platform Debug Trigger Table (ACPI 6.2) * Version 0 * ******************************************************************************/ @@ -1301,14 +1366,14 @@ struct acpi_table_pdtt { * starting at array_offset. */ struct acpi_pdtt_channel { - u16 sub_channel_id; + u8 subchannel_id; + u8 flags; }; -/* Mask and Flags for above */ +/* Flags for above */ -#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF -#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8) -#define ACPI_PPTT_WAIT_COMPLETION (1<<9) +#define ACPI_PDTT_RUNTIME_TRIGGER (1) +#define ACPI_PDTT_WAIT_COMPLETION (1<<1) /******************************************************************************* * @@ -1376,6 +1441,20 @@ struct acpi_pptt_cache { #define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */ #define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */ +/* Attributes describing cache */ +#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */ +#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */ +#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */ +#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */ +#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */ + +#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */ +#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */ + /* 2: ID Structure */ struct acpi_pptt_id { @@ -1405,6 +1484,68 @@ struct acpi_table_sbst { /******************************************************************************* * + * SDEV - Secure Devices Table (ACPI 6.2) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_sdev { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +struct acpi_sdev_header { + u8 type; + u8 flags; + u16 length; +}; + +/* Values for subtable type above */ + +enum acpi_sdev_type { + ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0, + ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1, + ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */ +}; + +/* Values for flags above */ + +#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) + +/* + * SDEV subtables + */ + +/* 0: Namespace Device Based Secure Device Structure */ + +struct acpi_sdev_namespace { + struct acpi_sdev_header header; + u16 device_id_offset; + u16 device_id_length; + u16 vendor_data_offset; + u16 vendor_data_length; +}; + +/* 1: PCIe Endpoint Device Based Device Structure */ + +struct acpi_sdev_pcie { + struct acpi_sdev_header header; + u16 segment; + u16 start_bus; + u16 path_offset; + u16 path_length; + u16 vendor_data_offset; + u16 
vendor_data_length; +}; + +/* 1a: PCIe Endpoint path entry */ + +struct acpi_sdev_pcie_path { + u8 device; + u8 function; +}; + +/******************************************************************************* + * * SLIT - System Locality Distance Information Table * Version 1 * diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 686b6f8c09dc..0d60d5df14f8 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 { u8 pxm; u8 reserved1; u16 reserved2; + u32 id_mapping_index; }; /* Values for Model field above */ @@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types { * TCPA - Trusted Computing Platform Alliance table * Version 2 * + * TCG Hardware Interface Table for TPM 1.2 Clients and Servers + * * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", * Version 1.2, Revision 8 * February 27, 2017 @@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server { * TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table * Version 4 * + * TCG Hardware Interface Table for TPM 2.0 Clients and Servers + * * Conforms to "TCG ACPI Specification, Family 1.2 and 2.0", * Version 1.2, Revision 8 * February 27, 2017 @@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 { /* Values for start_method above */ #define ACPI_TPM2_NOT_ALLOWED 0 +#define ACPI_TPM2_RESERVED1 1 #define ACPI_TPM2_START_METHOD 2 +#define ACPI_TPM2_RESERVED3 3 +#define ACPI_TPM2_RESERVED4 4 +#define ACPI_TPM2_RESERVED5 5 #define ACPI_TPM2_MEMORY_MAPPED 6 #define ACPI_TPM2_COMMAND_BUFFER 7 #define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8 +#define ACPI_TPM2_RESERVED9 9 +#define ACPI_TPM2_RESERVED10 10 #define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */ +#define ACPI_TPM2_RESERVED 12 -/* Trailer appears after any start_method subtables */ +/* Optional trailer appears after any start_method subtables */ struct acpi_tpm2_trailer { + u8 method_parameters[12]; u32 minimum_log_length; /* Minimum length for the event log area */ u64 log_address; /* Address of the event log area */ }; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 4f077edb9b81..31f1be74dd16 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -468,6 +468,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */ #define ACPI_NSEC_PER_MSEC 1000000L #define ACPI_NSEC_PER_SEC 1000000000L +#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0) + /* Owner IDs are used to track namespace nodes for selective deletion */ typedef u8 acpi_owner_id; @@ -1299,6 +1301,8 @@ typedef enum { #define ACPI_OSI_WIN_7 0x0B #define ACPI_OSI_WIN_8 0x0C #define ACPI_OSI_WIN_10 0x0D +#define ACPI_OSI_WIN_10_RS1 0x0E +#define ACPI_OSI_WIN_10_RS2 0x0F /* Definitions of getopt */ diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h index da09fb986459..dd5a9dd7a102 100644 --- a/include/asm-generic/audit_dir_write.h +++ b/include/asm-generic/audit_dir_write.h @@ -27,7 +27,9 @@ __NR_mknod, __NR_mkdirat, __NR_mknodat, __NR_unlinkat, +#ifdef __NR_renameat __NR_renameat, +#endif __NR_linkat, __NR_symlinkat, #endif diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 1ba611e16fa0..8a1ee10014de 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); #endif +#ifndef find_next_and_bit +/** + * find_next_and_bit - find the next set bit in both memory regions + * @addr1: The 
first address to base the search on + * @addr2: The second address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + * + * Returns the bit number for the next set bit + * If no bits are set, returns @size. + */ +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long offset); +#endif + #ifndef find_next_zero_bit /** * find_next_zero_bit - find the next cleared bit in a memory region @@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifndef find_first_bit #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) +#endif +#ifndef find_first_zero_bit #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) +#endif #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h deleted file mode 100644 index 4ff334749ed5..000000000000 --- a/include/asm-generic/clkdev.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * include/asm-generic/clkdev.h - * - * Based on the ARM clkdev.h: - * Copyright (C) 2008 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Helper for the clk API to assist looking up a struct clk. - */ -#ifndef __ASM_CLKDEV_H -#define __ASM_CLKDEV_H - -#include <linux/slab.h> - -#ifndef CONFIG_COMMON_CLK -struct clk; - -static inline int __clk_get(struct clk *clk) { return 1; } -static inline void __clk_put(struct clk *clk) { } -#endif - -static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) -{ - return kzalloc(size, GFP_KERNEL); -} - -#endif diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h new file mode 100644 index 000000000000..880a292d792f --- /dev/null +++ b/include/asm-generic/dma-mapping.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return &dma_direct_ops; +} + +#endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h new file mode 100644 index 000000000000..296c65442f00 --- /dev/null +++ b/include/asm-generic/error-injection.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_ERROR_INJECTION_H +#define _ASM_GENERIC_ERROR_INJECTION_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +enum { + EI_ETYPE_NONE, /* Dummy value for undefined case */ + EI_ETYPE_NULL, /* Return NULL if failure */ + EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ + EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ +}; + +struct error_injection_entry { + unsigned long addr; + int etype; +}; + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +/* + * Whitelist ganerating macro. Specify functions which can be + * error-injectable using this macro. 
+ */ +#define ALLOW_ERROR_INJECTION(fname, _etype) \ +static struct error_injection_entry __used \ + __attribute__((__section__("_error_injection_whitelist"))) \ + _eil_addr_##fname = { \ + .addr = (unsigned long)fname, \ + .etype = EI_ETYPE_##_etype, \ + }; +#else +#define ALLOW_ERROR_INJECTION(fname, _etype) +#endif +#endif + +#endif /* _ASM_GENERIC_ERROR_INJECTION_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index b1e17fcee2d0..854f96ad5ccb 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -1,12 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* Generic I/O port emulation, based on MN10300 code * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. */ #ifndef __ASM_GENERIC_PCI_IOMAP_H #define __ASM_GENERIC_PCI_IOMAP_H diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 868e68561f91..2cfa3075d148 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -309,19 +309,26 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif -#ifndef __HAVE_ARCH_PMDP_INVALIDATE -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmdp); -#endif - -#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE -static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with CPU which sets these bits and non-atomic aproach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) { - + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; } #endif +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index 137ecdd16daa..c36f1d5a2572 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -3,6 +3,7 @@ #define __ASM_GENERIC_QRWLOCK_TYPES_H #include <linux/types.h> +#include <asm/byteorder.h> #include <asm/spinlock_types.h> /* diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 03cc5f9bba71..849cd8eb5ca0 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -30,6 +30,7 @@ * __ctors_start, __ctors_end * __irqentry_text_start, __irqentry_text_end * __softirqentry_text_start, __softirqentry_text_end + * __start_opd, __end_opd */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; @@ -49,12 +50,15 @@ extern char __start_once[], __end_once[]; /* Start and end of .ctors section - used for constructor calls. 
*/ extern char __ctors_start[], __ctors_end[]; +/* Start and end of .opd section - used for function descriptors. */ +extern char __start_opd[], __end_opd[]; + extern __visible const void __nosave_begin, __nosave_end; -/* function descriptor handling (if any). Override - * in asm/sections.h */ +/* Function descriptor handling (if any). Override in asm/sections.h */ #ifndef dereference_function_descriptor #define dereference_function_descriptor(p) (p) +#define dereference_kernel_function_descriptor(p) (p) #endif /* random extra sections (if any). Override diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index ee8b707d9fa9..1ab0e520d6fc 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -136,6 +136,15 @@ #define KPROBE_BLACKLIST() #endif +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\ + KEEP(*(_error_injection_whitelist)) \ + VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .; +#else +#define ERROR_INJECT_WHITELIST() +#endif + #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . = ALIGN(8); \ VMLINUX_SYMBOL(__start_ftrace_events) = .; \ @@ -268,7 +277,11 @@ #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ VMLINUX_SYMBOL(__start_init_task) = .; \ + VMLINUX_SYMBOL(init_thread_union) = .; \ + VMLINUX_SYMBOL(init_stack) = .; \ *(.data..init_task) \ + *(.data..init_thread_info) \ + . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \ VMLINUX_SYMBOL(__end_init_task) = .; /* @@ -564,6 +577,7 @@ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ KPROBE_BLACKLIST() \ + ERROR_INJECT_WHITELIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 03b97629442c..1e26f790b03f 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) */ static inline int crypto_aead_encrypt(struct aead_request *req) { - return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_aead_alg(aead)->encrypt(req); } /** @@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (req->cryptlen < crypto_aead_authsize(aead)) return -EINVAL; diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index caaa470389e0..b83d66073db0 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h @@ -13,12 +13,13 @@ #define CHACHA20_IV_SIZE 16 #define CHACHA20_KEY_SIZE 32 #define CHACHA20_BLOCK_SIZE 64 +#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32)) struct chacha20_ctx { u32 key[8]; }; -void chacha20_block(u32 *state, void *stream); +void chacha20_block(u32 *state, u32 *stream); void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 0ed31fd80242..2d1849dffb80 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -71,12 +71,11 @@ struct ahash_request { /** * struct ahash_alg - asynchronous message digest definition - * @init: Initialize the transformation 
context. Intended only to initialize the + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the * state of the HASH transformation at the beginning. This shall fill in * the internal structures used during the entire duration of the whole * transformation. No data processing happens at this point. - * Note: mandatory. - * @update: Push a chunk of data into the driver for transformation. This + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This * function actually pushes blocks of data from upper layers into the * driver, which then passes those to the hardware as seen fit. This * function must not finalize the HASH transformation by calculating the @@ -85,20 +84,17 @@ struct ahash_request { * context, as this function may be called in parallel with the same * transformation object. Data processing can happen synchronously * [SHASH] or asynchronously [AHASH] at this point. - * Note: mandatory. - * @final: Retrieve result from the driver. This function finalizes the + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the * transformation and retrieves the resulting hash from the driver and * pushes it back to upper layers. No data processing happens at this * point unless hardware requires it to finish the transformation * (then the data buffered by the device driver is processed). - * Note: mandatory. - * @finup: Combination of @update and @final. This function is effectively a + * @finup: **[optional]** Combination of @update and @final. This function is effectively a * combination of @update and @final calls issued in sequence. As some * hardware cannot do @update and @final separately, this callback was * added to allow such hardware to be used at least by IPsec. Data * processing can happen synchronously [SHASH] or asynchronously [AHASH] * at this point. - * Note: optional. * @digest: Combination of @init and @update and @final. This function * effectively behaves as the entire chain of operations, @init, * @update and @final issued in sequence. 
Just like @finup, this was @@ -210,7 +206,6 @@ struct crypto_ahash { unsigned int keylen); unsigned int reqsize; - bool has_setkey; struct crypto_tfm base; }; @@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); -static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) -{ - return tfm->has_setkey; -} - /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { - return crypto_ahash_reqtfm(req)->import(req, in); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); } /** @@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) */ static inline int crypto_ahash_init(struct ahash_request *req) { - return crypto_ahash_reqtfm(req)->init(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); } /** @@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { - return crypto_shash_alg(desc->tfm)->import(desc, in); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); } /** @@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) */ static inline int crypto_shash_init(struct shash_desc *desc) { - return crypto_shash_alg(desc->tfm)->init(desc); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); } /** diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index f38227a78eae..482461d8931d 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(struct crypto_async_request *_req, int err); -unsigned int af_alg_poll(struct file *file, struct socket *sock, +__poll_t af_alg_poll(struct file *file, struct socket *sock, poll_table *wait); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c2bae8da642c..27040a46d50a 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index ccad9b2c9bd6..0f6ddac1acfc 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -28,17 +28,6 @@ struct crypto_scomp { 
* @free_ctx: Function frees context allocated with alloc_ctx * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @init: Initialize the cryptographic transformation object. - * This function is used to initialize the cryptographic - * transformation object. This function is called only once at - * the instantiation time, right after the transformation context - * was allocated. In case the cryptographic hardware has some - * special requirements which need to be handled by software, this - * function shall check for the precise requirement of the - * transformation and put any software fallbacks in place. - * @exit: Deinitialize the cryptographic transformation object. This is a - * counterpart to @init, used to remove various changes set in - * @init. * @base: Common crypto API algorithm data structure */ struct scomp_alg { diff --git a/include/crypto/null.h b/include/crypto/null.h index 5757c0a4b321..15aeef6e30ef 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -12,14 +12,4 @@ struct crypto_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); -static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) -{ - return crypto_get_default_null_skcipher(); -} - -static inline void crypto_put_default_null_skcipher2(void) -{ - crypto_put_default_null_skcipher(); -} - #endif diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index c65567d01e8e..f718a19da82f 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -31,8 +31,6 @@ struct poly1305_desc_ctx { }; int crypto_poly1305_init(struct shash_desc *desc); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen); unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen); int crypto_poly1305_update(struct shash_desc *desc, diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h new file mode 100644 index 000000000000..19ed48aefc86 --- /dev/null +++ b/include/crypto/salsa20.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Salsa20 algorithm + */ + +#ifndef _CRYPTO_SALSA20_H +#define _CRYPTO_SALSA20_H + +#include <linux/types.h> + +#define SALSA20_IV_SIZE 8 +#define SALSA20_MIN_KEY_SIZE 16 +#define SALSA20_MAX_KEY_SIZE 32 +#define SALSA20_BLOCK_SIZE 64 + +struct crypto_skcipher; + +struct salsa20_ctx { + u32 initial_state[16]; +}; + +void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, + const u8 *iv); +int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); + +#endif /* _CRYPTO_SALSA20_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index b9d9bd553b48..080f60c2e6b1 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -19,7 +19,6 @@ struct sha3_state { u64 st[25]; - unsigned int md_len; unsigned int rsiz; unsigned int rsizw; @@ -27,4 +26,9 @@ struct sha3_state { u8 buf[SHA3_224_BLOCK_SIZE]; }; +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + #endif diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 562001cb412b..2f327f090c3e 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, return tfm->setkey(tfm, 
key, keylen); } -static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) -{ - return tfm->keysize; -} - static inline unsigned int crypto_skcipher_default_keysize( struct crypto_skcipher *tfm) { @@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return tfm->encrypt(req); } @@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return tfm->decrypt(req); } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 59be1232d005..c6666cd09347 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -75,6 +75,7 @@ #include <drm/drm_sarea.h> #include <drm/drm_drv.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_pci.h> #include <drm/drm_file.h> #include <drm/drm_debugfs.h> @@ -94,212 +95,16 @@ struct dma_buf_attachment; struct pci_dev; struct pci_controller; -/* - * The following categories are defined: - * - * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... - * This is the category used by the DRM_DEBUG() macro. - * - * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... - * This is the category used by the DRM_DEBUG_DRIVER() macro. - * - * KMS: used in the modesetting code. - * This is the category used by the DRM_DEBUG_KMS() macro. - * - * PRIME: used in the prime code. - * This is the category used by the DRM_DEBUG_PRIME() macro. - * - * ATOMIC: used in the atomic code. - * This is the category used by the DRM_DEBUG_ATOMIC() macro. - * - * VBL: used for verbose debug message in the vblank code - * This is the category used by the DRM_DEBUG_VBL() macro. - * - * Enabling verbose debug messages is done through the drm.debug parameter, - * each category being enabled by a bit. - * - * drm.debug=0x1 will enable CORE messages - * drm.debug=0x2 will enable DRIVER messages - * drm.debug=0x3 will enable CORE and DRIVER messages - * ... - * drm.debug=0x3f will enable all messages - * - * An interesting feature is that it's possible to enable verbose logging at - * run-time by echoing the debug value in its sysfs node: - * # echo 0xf > /sys/module/drm/parameters/debug - */ -#define DRM_UT_NONE 0x00 -#define DRM_UT_CORE 0x01 -#define DRM_UT_DRIVER 0x02 -#define DRM_UT_KMS 0x04 -#define DRM_UT_PRIME 0x08 -#define DRM_UT_ATOMIC 0x10 -#define DRM_UT_VBL 0x20 -#define DRM_UT_STATE 0x40 -#define DRM_UT_LEASE 0x80 - /***********************************************************************/ /** \name DRM template customization defaults */ /*@{*/ /***********************************************************************/ -/** \name Macros to make printk easier */ -/*@{*/ - -#define _DRM_PRINTK(once, level, fmt, ...) \ - do { \ - printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ - ##__VA_ARGS__); \ - } while (0) - -#define DRM_INFO(fmt, ...) \ - _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE(fmt, ...) \ - _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN(fmt, ...) \ - _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) - -#define DRM_INFO_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN_ONCE(fmt, ...) 
\ - _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) - -/** - * Error output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ - fmt, ##__VA_ARGS__) -#define DRM_ERROR(fmt, ...) \ - drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) - -/** - * Rate limited error output. Like DRM_ERROR() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - \ - if (__ratelimit(&_rs)) \ - DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ -}) -#define DRM_ERROR_RATELIMITED(fmt, ...) \ - DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) - -#define DRM_DEV_INFO(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ - ##__VA_ARGS__) - -#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ -({ \ - static bool __print_once __read_mostly; \ - if (!__print_once) { \ - __print_once = true; \ - DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ - } \ -}) - -/** - * Debug output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_DRIVER(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_KMS(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_PRIME(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_ATOMIC(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_VBL(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) - -#define DRM_DEBUG_LEASE(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) - -#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - if (__ratelimit(&_rs)) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ - __func__, "", fmt, ##args); \ -}) - -/** - * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ - DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) -#define DRM_DEBUG_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) 
\ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) -#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) -#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) -#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) - -/* Format strings and argument splitters to simplify printing - * various "complex" objects - */ - -/*@}*/ - -/***********************************************************************/ /** \name Internal types and structures */ /*@{*/ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) - /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 5afd6e364fb6..1c27526c499e 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -189,12 +189,40 @@ struct drm_private_state_funcs { struct drm_private_state *state); }; +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + */ struct drm_private_obj { + /** + * @state: Current atomic state for this driver private object. + */ struct drm_private_state *state; + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. + */ const struct drm_private_state_funcs *funcs; }; +/** + * struct drm_private_state - base struct for driver private object state + * @state: backpointer to global drm_atomic_state + * + * Currently only contains a backpointer to the overall atomic update, but in + * the future also might hold synchronization information similar to e.g. + * &drm_crtc.commit. + */ struct drm_private_state { struct drm_atomic_state *state; }; @@ -218,6 +246,10 @@ struct __drm_private_objs_state { * @num_private_objs: size of the @private_objs array * @private_objs: pointer to array of private object pointers * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). 
*/ struct drm_atomic_state { struct kref ref; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index d2b56cc657e9..4842ee9485ce 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -38,6 +38,13 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + const struct drm_rect *clip, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 5971577016a2..ed38df4ac204 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -177,6 +177,35 @@ enum drm_link_status { }; /** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! + * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. + * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** * struct drm_display_info - runtime data about the connected sink * * Describes a given display (e.g. CRT or flat panel) and its limitations. For @@ -224,6 +253,15 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_YCRCB420 (1<<3) /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. + */ + int panel_orientation; + + /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones * as used to describe the pixel format in framebuffers, and also don't @@ -271,6 +309,11 @@ struct drm_display_info { bool dvi_dual; /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even * more stuff redundant with @bus_formats. */ @@ -705,7 +748,6 @@ struct drm_cmdline_mode { * @force: a DRM_FORCE_<foo> state for forced mode sets * @override_edid: has the EDID been overwritten through debugfs for testing? 
* @encoder_ids: valid encoders for this connector - * @encoder: encoder driving this connector, if any * @eld: EDID-like data, if present * @latency_present: AV delay info from ELD, if found * @video_latency: video latency info from ELD, if found @@ -875,7 +917,13 @@ struct drm_connector { #define DRM_CONNECTOR_MAX_ENCODER 3 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; - struct drm_encoder *encoder; /* currently active encoder */ + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. + */ + struct drm_encoder *encoder; #define MAX_ELD_BYTES 128 /* EDID bits */ @@ -1035,6 +1083,8 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, const struct edid *edid); void drm_mode_connector_set_link_status_property(struct drm_connector *connector, uint64_t link_status); +int drm_connector_init_panel_orientation_property( + struct drm_connector *connector, int width, int height); /** * struct drm_tile_group - Tile group metadata diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index e21af87a2f3c..7c4fa32f3fc6 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -17,6 +17,7 @@ struct drm_vblank_crtc; struct drm_sg_mem; struct drm_local_map; struct drm_vma_offset_manager; +struct drm_fb_helper; struct inode; @@ -185,6 +186,14 @@ struct drm_device { struct drm_vma_offset_manager *vma_offset_manager; /*@} */ int switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; }; #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2623a1255481..da58a428c8d7 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -635,6 +635,7 @@ # define DP_SET_POWER_D0 0x1 # define DP_SET_POWER_D3 0x2 # define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 #define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ # define DP_EDP_11 0x00 diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 412e83a4d3db..d32b688eb346 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -39,6 +39,7 @@ struct drm_minor; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; +struct drm_printer; /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 @@ -429,6 +430,20 @@ struct drm_driver { void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); /** + * @gem_print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). 
+ */ + void (*gem_print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by core @@ -592,13 +607,6 @@ struct drm_driver { int dev_priv_size; }; -__printf(6, 7) -void drm_dev_printk(const struct device *dev, const char *level, - unsigned int category, const char *function_name, - const char *prefix, const char *format, ...); -__printf(3, 4) -void drm_printk(const char *level, unsigned int category, - const char *format, ...); extern unsigned int drm_debug; int drm_dev_init(struct drm_device *dev, diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index efe6d5a8e834..8d89a9c3748d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -333,7 +333,6 @@ struct drm_encoder; struct drm_connector; struct drm_display_mode; -void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, @@ -357,6 +356,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, bool is_hdmi2_sink); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + struct drm_connector *connector, const struct drm_display_mode *mode); void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index ee4cfbe63c52..fb299696c7c4 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -88,7 +88,6 @@ struct drm_encoder_funcs { * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver - * @crtc: currently bound CRTC * @bridge: bridge associated to the encoder * @funcs: control functions * @helper_private: mid-layer private data @@ -166,6 +165,11 @@ struct drm_encoder { */ uint32_t possible_clones; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check + * &drm_connector_state.crtc. 
+ */ struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index faf56c53df28..d532f88a8d55 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,6 +16,13 @@ struct drm_mode_fb_cmd2; struct drm_plane; struct drm_plane_state; +int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int max_conn_count, + const struct drm_framebuffer_funcs *funcs); +int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_cma_fbdev_fini(struct drm_device *dev); + struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, unsigned int preferred_bpp, unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs); @@ -36,11 +43,5 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); -#ifdef CONFIG_DEBUG_FS -struct seq_file; - -int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); -#endif - #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 33fe95927742..b069433e7fc1 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -33,6 +33,7 @@ struct drm_fb_helper; #include <drm/drm_crtc.h> +#include <drm/drm_device.h> #include <linux/kgdb.h> enum mode_set_atomic { @@ -48,6 +49,7 @@ struct drm_fb_helper_crtc { struct drm_mode_set mode_set; struct drm_display_mode *desired_mode; int x, y; + int rotation; }; /** @@ -159,6 +161,13 @@ struct drm_fb_helper { int connector_count; int connector_info_alloc_count; /** + * @sw_rotations: + * Bitmask of all rotations requested for panel-orientation which + * could not be handled in hardware. If only one bit is set + * fbdev->fbcon_rotate_hint gets set to the requested rotation. + */ + int sw_rotations; + /** * @connector_info: * * Array of per-connector information. 
Do not iterate directly, but use @@ -267,6 +276,7 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist); +int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper); ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); @@ -310,6 +320,16 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); + +int drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_helper_fbdev_teardown(struct drm_device *dev); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -321,11 +341,17 @@ static inline int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper, int max_conn) { + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + return 0; } static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) { + if (helper && helper->dev) + helper->dev->fb_helper = NULL; } static inline int drm_fb_helper_blank(int blank, struct fb_info *info) @@ -398,6 +424,11 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info, { } +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) @@ -507,6 +538,32 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, return 0; } +static inline int +drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count) +{ + /* So drivers can use it to free the struct */ + dev->fb_helper = fb_helper; + + return 0; +} + +static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev) +{ + dev->fb_helper = NULL; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + #endif static inline int diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 0e0c868451a5..5176c3797680 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp); ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); int drm_release(struct inode *inode, struct file *filp); -unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); int drm_event_reserve_init_locked(struct drm_device *dev, struct drm_file *file_priv, struct drm_pending_event *p, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 4c5ee4ae54df..c50502c656e5 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -121,6 +121,12 @@ struct drm_framebuffer { * @base: base modeset object structure, contains the reference count. 
*/ struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + /** * @format: framebuffer format information */ @@ -264,7 +270,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) * * This functions returns the framebuffer's reference count. */ -static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) { return kref_read(&fb->base.refcount); } diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 520e3feb502c..19777145cf8e 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -9,7 +9,9 @@ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations * @base: base GEM object * @paddr: physical address of the backing memory - * @sgt: scatter/gather table for imported PRIME buffers + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. * @vaddr: kernel virtual address of the backing memory */ struct drm_gem_cma_object { @@ -21,11 +23,8 @@ struct drm_gem_cma_object { void *vaddr; }; -static inline struct drm_gem_cma_object * -to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) -{ - return container_of(gem_obj, struct drm_gem_cma_object, base); -} +#define to_drm_gem_cma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_cma_object, base) #ifndef CONFIG_MMU #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ @@ -91,9 +90,8 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, unsigned long flags); #endif -#ifdef CONFIG_DEBUG_FS -void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); -#endif +void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 8d10fc97801c..101f566ae43d 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -386,7 +386,7 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, * @color: opaque tag value to use for this node * @mode: fine-tune the allocation search and placement * - * This is a simplified version of drm_mm_insert_node_in_range_generic() with no + * This is a simplified version of drm_mm_insert_node_in_range() with no * range restrictions applied. * * The preallocated node must be cleared to 0. diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b0ce26d71296..2cb6f02df64a 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -269,6 +269,9 @@ struct drm_mode_config_funcs { * state easily. If this hook is implemented, drivers must also * implement @atomic_state_clear and @atomic_state_free. * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * * RETURNS: * * A new &drm_atomic_state on success or NULL on failure. @@ -290,6 +293,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call drm_atomic_state_default_clear() * to clear common state. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. 
*/ void (*atomic_state_clear)(struct drm_atomic_state *state); @@ -302,6 +308,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call * drm_atomic_state_default_release() to release common resources. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. */ void (*atomic_state_free)(struct drm_atomic_state *state); }; @@ -751,6 +760,13 @@ struct drm_mode_config { */ struct drm_property *non_desktop_property; + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; @@ -768,7 +784,7 @@ struct drm_mode_config { bool allow_fb_modifiers; /** - * @modifiers: Plane property to list support modifier/format + * @modifiers_property: Plane property to list support modifier/format * combination. */ struct drm_property *modifiers_property; @@ -776,6 +792,15 @@ struct drm_mode_config { /* cursor size */ uint32_t cursor_width, cursor_height; + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). + */ + struct drm_atomic_state *suspend_state; + const struct drm_mode_config_helper_funcs *helper_private; }; diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index cb0ec92e11e6..efa337f03129 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -34,4 +34,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_device *dev, int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + #endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 16646c44b7df..3e76ca805b0f 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -801,9 +801,6 @@ struct drm_connector_helper_funcs { * resolution can call drm_add_modes_noedid(), and mark the preferred * one using drm_set_preferred_mode(). * - * Finally drivers that support audio probably want to update the ELD - * data, too, using drm_edid_to_eld(). - * * This function is only called after the @detect hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 571615079230..8185e3468a23 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -474,8 +474,8 @@ enum drm_plane_type { * @format_types: array of formats supported by this plane * @format_count: number of formats supported * @format_default: driver hasn't supplied supported formats for the plane - * @crtc: currently bound CRTC - * @fb: currently bound fb + * @modifiers: array of modifiers supported by this plane + * @modifier_count: number of modifiers supported * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by * drm_mode_set_config_internal() to implement correct refcounting. * @funcs: helper functions @@ -512,7 +512,17 @@ struct drm_plane { uint64_t *modifiers; unsigned int modifier_count; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. 
Atomic drivers should instead check &drm_plane_state.crtc. + */ struct drm_crtc *crtc; + + /** + * @fb: Currently bound framebuffer, only really meaningful for + * non-atomic drivers. Atomic drivers should instead check + * &drm_plane_state.fb. + */ struct drm_framebuffer *fb; struct drm_framebuffer *old_fb; diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 7c8a00ceadb7..8aa49c0ecd4d 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -38,11 +38,6 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -int drm_plane_helper_check_state(struct drm_plane_state *state, - const struct drm_rect *clip, - int min_scale, int max_scale, - bool can_position, - bool can_update_disabled); int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index ca4d7c6321f2..2a4a42e59a47 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -80,6 +80,29 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) \ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file @@ -128,4 +151,200 @@ static inline struct drm_printer drm_debug_printer(const char *prefix) }; return p; } + +/* + * The following categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... 
+ * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_NONE 0x00 +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 +#define DRM_UT_STATE 0x40 +#define DRM_UT_LEASE 0x80 + +__printf(6, 7) +void drm_dev_printk(const struct device *dev, const char *level, + unsigned int category, const char *function_name, + const char *prefix, const char *format, ...); +__printf(3, 4) +void drm_printk(const char *level, unsigned int category, + const char *format, ...); + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + do { \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ + fmt, ##__VA_ARGS__) +#define DRM_ERROR(fmt, ...) \ + drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ + ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * Debug output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_DRIVER(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_KMS(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_PRIME(fmt, ...) 
\ + drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_VBL(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_LEASE(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ + __func__, "", fmt, ##args); \ +}) + +/** + * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) +#define DRM_DEBUG_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) +#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) +#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) + #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 43e2f382d2f0..3980602472c0 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -33,36 +33,31 @@ struct drm_syncobj_cb; /** * struct drm_syncobj - sync object. * - * This structure defines a generic sync object which wraps a dma fence. + * This structure defines a generic sync object which wraps a &dma_fence. */ struct drm_syncobj { /** - * @refcount: - * - * Reference count of this object. + * @refcount: Reference count of this object. */ struct kref refcount; /** * @fence: * NULL or a pointer to the fence bound to this object. * - * This field should not be used directly. Use drm_syncobj_fence_get - * and drm_syncobj_replace_fence instead. + * This field should not be used directly. Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. */ - struct dma_fence *fence; + struct dma_fence __rcu *fence; /** - * @cb_list: - * List of callbacks to call when the fence gets replaced + * @cb_list: List of callbacks to call when the &fence gets replaced. */ struct list_head cb_list; /** - * @lock: - * locks cb_list and write-locks fence. + * @lock: Protects &cb_list and write-locks &fence. */ spinlock_t lock; /** - * @file: - * a file backing for this syncobj. + * @file: A file backing for this syncobj. 
*/ struct file *file; }; @@ -73,7 +68,7 @@ typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, /** * struct drm_syncobj_cb - callback for drm_syncobj_add_callback * @node: used by drm_syncob_add_callback to append this struct to - * syncobj::cb_list + * &drm_syncobj.cb_list * @func: drm_syncobj_func_t to call * * This struct will be initialized by drm_syncobj_add_callback, additional @@ -92,7 +87,7 @@ void drm_syncobj_free(struct kref *kref); * drm_syncobj_get - acquire a syncobj reference * @obj: sync object * - * This acquires additional reference to @obj. It is illegal to call this + * This acquires an additional reference to @obj. It is illegal to call this * without already holding a reference. No locks required. */ static inline void @@ -111,6 +106,17 @@ drm_syncobj_put(struct drm_syncobj *obj) kref_put(&obj->refcount, drm_syncobj_free); } +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ static inline struct dma_fence * drm_syncobj_fence_get(struct drm_syncobj *syncobj) { diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000000..a803988d8579 --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +int drm_get_panel_orientation_quirk(int width, int height); + +#endif diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index d84d52f6d2b1..8758df94e9a0 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -152,7 +152,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) * Start address of @node for page-based addressing. 0 if the node does not * have an offset allocated. */ -static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) { return node->vm_node.start; } diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000000..dfd54fb94e10 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,173 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include <drm/spsc_queue.h> +#include <linux/dma-fence.h> + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to corresponding hardware ring based on scheduling + * policy. +*/ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + struct drm_gpu_scheduler *sched; + + spinlock_t queue_lock; + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; /* points to ctx's guilty */ +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct drm_sched_rq { + spinlock_t lock; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +struct drm_sched_fence { + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; + struct drm_gpu_scheduler *sched; + spinlock_t lock; + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side +*/ +struct drm_sched_backend_ops { + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + void (*timedout_job)(struct drm_sched_job *sched_job); + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head 
ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty); +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h new file mode 100644 index 000000000000..0789e8d0a0e1 --- /dev/null +++ b/include/drm/gpu_scheduler_trace.h @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_scheduler +#define TRACE_INCLUDE_FILE gpu_scheduler_trace + +TRACE_EVENT(drm_sched_job, + TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), + TP_ARGS(sched_job, entity), + TP_STRUCT__entry( + __field(struct drm_sched_entity *, entity) + __field(struct dma_fence *, fence) + __field(const char *, name) + __field(uint64_t, id) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = entity; + __entry->id = sched_job->id; + __entry->fence = &sched_job->s_fence->finished; + __entry->name = sched_job->sched->name; + __entry->job_count = spsc_queue_count(&entity->job_queue); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) +); + +TRACE_EVENT(drm_sched_process_job, + TP_PROTO(struct drm_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct dma_fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->finished; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 4e1b274e1164..c9e5a6621b95 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -36,6 +36,9 @@ extern bool i915_gpu_lower(void); extern bool i915_gpu_busy(void); extern bool i915_gpu_turbo_disable(void); +/* Exported from arch/x86/kernel/early-quirks.c */ +extern struct resource intel_graphics_stolen_res; + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. 
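The new <drm/gpu_scheduler.h> header above is a driver-facing API: a driver fills in a struct drm_sched_backend_ops, creates one drm_gpu_scheduler per hardware ring with drm_sched_init(), attaches entities to a run queue with drm_sched_entity_init(), and then submits work via drm_sched_job_init() and drm_sched_entity_push_job(). The following is only a minimal sketch of that wiring; the my_* names, queue depth, hang limit and timeout are illustrative assumptions, not part of the patch.

/* Hypothetical driver glue for the new drm_gpu_scheduler API (sketch only). */
#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *s_entity)
{
	/* No dependencies beyond the job's own fences in this sketch. */
	return NULL;
}

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Submit the job to the hardware ring and return its hardware fence. */
	return NULL; /* placeholder for the real HW fence */
}

static void my_timedout_job(struct drm_sched_job *sched_job)
{
	/* A job exceeded the timeout passed to drm_sched_init(): reset the ring. */
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	/* Release the driver's job wrapper once the scheduler is done with it. */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

static int my_ring_init(struct drm_gpu_scheduler *sched,
			struct drm_sched_entity *entity)
{
	int ret;

	/* One scheduler instance per hardware ring. */
	ret = drm_sched_init(sched, &my_sched_ops, 16 /* hw_submission */,
			     2 /* hang_limit */, msecs_to_jiffies(500),
			     "my-ring");
	if (ret)
		return ret;

	/* Attach the entity to this ring's NORMAL-priority run queue. */
	return drm_sched_entity_init(sched, entity,
				     &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				     32 /* job queue size */, NULL);
}

Per the header, each prepared drm_sched_job is then handed to drm_sched_entity_push_job(), and the scheduler's kernel thread picks entities from the per-priority run queues and invokes run_job() for them.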
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 972a25633525..5db0458dd832 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -373,24 +373,46 @@ /* CFL S */ #define INTEL_CFL_S_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ #define INTEL_CFL_S_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ #define INTEL_CFL_H_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ -/* CFL U */ +/* CFL U GT1 */ +#define INTEL_CFL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info), \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ #define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT1_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info) /* CNL U 2+2 */ #define INTEL_CNL_U_GT2_IDS(info) \ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index c5db7975c640..2324c84a25c0 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -5,9 +5,8 @@ #define _DRM_INTEL_GTT_H void intel_gtt_get(u64 *gtt_total, - u32 *stolen_size, phys_addr_t *mappable_base, - u64 *mappable_end); + resource_size_t *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000000..125f096c88cb --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h index 83346ddb9dba..5d0e82b36eaf 100644 --- a/include/drm/tinydrm/mipi-dbi.h +++ b/include/drm/tinydrm/mipi-dbi.h @@ -72,10 +72,12 @@ void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); void mipi_dbi_hw_reset(struct mipi_dbi *mipi); bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); - +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) * @mipi: MIPI structure diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index 4774fe3d4273..07a9a11fe19d 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -19,16 +19,12 @@ * @drm: DRM device * @pipe: Display pipe structure * @dirty_lock: Serializes framebuffer flushing - * @fbdev_cma: CMA fbdev 
structure - * @suspend_state: Atomic state when suspended * @fb_funcs: Framebuffer functions used when creating framebuffers */ struct tinydrm_device { struct drm_device *drm; struct drm_simple_display_pipe pipe; struct mutex dirty_lock; - struct drm_fbdev_cma *fbdev_cma; - struct drm_atomic_state *suspend_state; const struct drm_framebuffer_funcs *fb_funcs; }; @@ -46,6 +42,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) */ #define TINYDRM_GEM_DRIVER_OPS \ .gem_free_object = tinydrm_gem_cma_free_object, \ + .gem_print_info = drm_gem_cma_print_info, \ .gem_vm_ops = &drm_gem_cma_vm_ops, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ @@ -81,7 +78,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) .type = DRM_MODE_TYPE_DRIVER, \ .clock = 1 /* pass validation */ -void tinydrm_lastclose(struct drm_device *drm); void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); struct drm_gem_object * tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, @@ -92,8 +88,6 @@ int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, struct drm_driver *driver); int devm_tinydrm_register(struct tinydrm_device *tdev); void tinydrm_shutdown(struct tinydrm_device *tdev); -int tinydrm_suspend(struct tinydrm_device *tdev); -int tinydrm_resume(struct tinydrm_device *tdev); void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index fa07be197945..2cd025c2abe7 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -224,7 +224,6 @@ struct ttm_buffer_object { */ uint64_t offset; /* GPU address space is independent of CPU word size */ - uint32_t cur_placement; struct sg_table *sg; @@ -260,6 +259,25 @@ struct ttm_bo_kmap_obj { }; /** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @allow_reserved_eviction: Allow eviction of reserved BOs. + * @resv: Reservation object to allow reserved evictions with. + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + bool allow_reserved_eviction; + struct reservation_object *resv; + uint64_t bytes_moved; +}; + +/** * ttm_bo_reference - reference a struct ttm_buffer_object * * @bo: The buffer object. @@ -288,8 +306,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) * Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_wait(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait); +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); /** * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo @@ -300,17 +317,15 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, * * Returns true if the placement is compatible */ -extern bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags); +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, + uint32_t *new_flags); /** * ttm_bo_validate * * @bo: The buffer object. * @placement: Proposed placement for the buffer object. - * @interruptible: Sleep interruptible if sleeping. - * @no_wait_gpu: Return immediately if the GPU is busy. + * @ctx: validation parameters. 
* * Changes placement and caching policy of the buffer object * according proposed placement. @@ -320,10 +335,9 @@ extern bool ttm_bo_mem_compat(struct ttm_placement *placement, * -EBUSY if no_wait is true and buffer busy. * -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); /** * ttm_bo_unref @@ -332,7 +346,7 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, * * Unreference and clear a pointer to a buffer object. */ -extern void ttm_bo_unref(struct ttm_buffer_object **bo); +void ttm_bo_unref(struct ttm_buffer_object **bo); /** * ttm_bo_add_to_lru @@ -344,7 +358,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); * This function must be called with struct ttm_bo_global::lru_lock held, and * is typically called immediately prior to unreserving a bo. */ -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * ttm_bo_del_from_lru @@ -356,7 +370,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); * and is usually called just immediately after the bo has been reserved to * avoid recursive reservation from lru lists. */ -extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); /** * ttm_bo_move_to_lru_tail @@ -367,7 +381,7 @@ extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ -extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue @@ -376,15 +390,14 @@ extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); * Returns * True if the workqueue was queued at the time */ -extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, - int resched); +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); /** * ttm_bo_eviction_valuable @@ -411,8 +424,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * -EBUSY if the buffer is busy and no_wait is true. * -ERESTARTSYS if interrupted by a signal. */ -extern int -ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); /** * ttm_bo_synccpu_write_release: @@ -421,7 +433,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); * * Releases a synccpu lock. */ -extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); /** * ttm_bo_acc_size @@ -448,8 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. - * @interruptible: If needing to sleep to wait for GPU resources, - * sleep interruptible. + * @ctx: TTM operation context for memory allocation. 
* @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would @@ -480,18 +491,18 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init_reserved(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + struct ttm_operation_ctx *ctx, + struct file *persistent_swap_storage, + size_t acc_size, + struct sg_table *sg, + struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_init @@ -531,19 +542,13 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ - -extern int ttm_bo_init(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, + unsigned long size, enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, bool interrubtible, + struct file *persistent_swap_storage, size_t acc_size, + struct sg_table *sg, struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_create @@ -569,15 +574,11 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while waiting for resources. */ - -extern int ttm_bo_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interruptible, - struct file *persistent_swap_storage, - struct ttm_buffer_object **p_bo); +int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t page_alignment, bool interruptible, + struct file *persistent_swap_storage, + struct ttm_buffer_object **p_bo); /** * ttm_bo_init_mm @@ -594,9 +595,9 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, * -ENOMEM: Not enough memory. * May also return driver-specified errors. */ +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_size); -extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size); /** * ttm_bo_clean_mm * @@ -623,8 +624,7 @@ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, * -EINVAL: invalid or uninitialized memory type. * -EBUSY: There are still buffers left in this memory type. 
*/ - -extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_bo_evict_mm @@ -644,8 +644,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); * -ERESTARTSYS: The call was interrupted by a signal while waiting to * evict a buffer. */ - -extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_kmap_obj_virtual @@ -658,7 +657,6 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); * If *is_iomem is 1 on return, the virtual address points to an io memory area, * that should strictly be accessed by the iowriteXX() and similar functions. */ - static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, bool *is_iomem) { @@ -682,9 +680,8 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ - -extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, - unsigned long num_pages, struct ttm_bo_kmap_obj *map); +int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); /** * ttm_bo_kunmap @@ -693,8 +690,7 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, * * Unmaps a kernel map set up by ttm_bo_kmap. */ - -extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); +void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); /** * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. @@ -706,20 +702,7 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * This function is intended to be called by the fbdev mmap method * if the fbdev address space is to be backed by a bo. */ - -extern int ttm_fbdev_mmap(struct vm_area_struct *vma, - struct ttm_buffer_object *bo); - -/** - * ttm_bo_default_iomem_pfn - get a pfn for a page offset - * - * @bo: the BO we need to look up the pfn for - * @page_offset: offset inside the BO to look up. - * - * Calculate the PFN for iomem based mappings during page fault - */ -unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, - unsigned long page_offset); +int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); /** * ttm_bo_mmap - mmap out of the ttm device address space. @@ -731,9 +714,8 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, * This function is intended to be called by the device mmap method. * if the device address space is to be backed by the bo manager. */ - -extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev); +int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); /** * ttm_bo_io @@ -755,11 +737,12 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * the function may return -ERESTARTSYS if * interrupted by a signal. 
*/ +ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); -extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, - const char __user *wbuf, char __user *rbuf, - size_t count, loff_t *f_pos, bool write); - -extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); +int ttm_bo_swapout(struct ttm_bo_global *glob, + struct ttm_operation_ctx *ctx); +void ttm_bo_swapout_all(struct ttm_bo_device *bdev); +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 5f821a9b3a1f..94064b126e8e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -352,7 +352,8 @@ struct ttm_bo_driver { * Returns: * -ENOMEM: Out of memory. */ - int (*ttm_tt_populate)(struct ttm_tt *ttm); + int (*ttm_tt_populate)(struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate @@ -409,15 +410,13 @@ struct ttm_bo_driver { * @bo: the buffer to move * @evict: whether this motion is evicting the buffer from * the graphics address space - * @interruptible: Use interruptible sleeps if possible when sleeping. - * @no_wait: whether this should give up and return -EBUSY - * if this move would require sleeping + * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer * * Move a buffer between two memory regions. */ int (*move)(struct ttm_buffer_object *bo, bool evict, - bool interruptible, bool no_wait_gpu, + struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem); /** @@ -524,7 +523,6 @@ struct ttm_bo_global { struct kobject kobj; struct ttm_mem_global *mem_glob; struct page *dummy_read_page; - struct ttm_mem_shrink shrink; struct mutex device_list_mutex; spinlock_t lru_lock; @@ -627,12 +625,12 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) * Returns: * NULL: Out of memory. */ -extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); /** * ttm_tt_fini @@ -641,8 +639,8 @@ extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bde * * Free memory of ttm_tt structure */ -extern void ttm_tt_fini(struct ttm_tt *ttm); -extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); +void ttm_tt_fini(struct ttm_tt *ttm); +void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); /** * ttm_ttm_bind: @@ -652,7 +650,8 @@ extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); * * Bind the pages of @ttm to an aperture location identified by @bo_mem */ -extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, + struct ttm_operation_ctx *ctx); /** * ttm_ttm_destroy: @@ -661,7 +660,7 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); * * Unbind, unpopulate and destroy common struct ttm_tt. 
*/ -extern void ttm_tt_destroy(struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_tt *ttm); /** * ttm_ttm_unbind: @@ -670,7 +669,7 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm); * * Unbind a struct ttm_tt. */ -extern void ttm_tt_unbind(struct ttm_tt *ttm); +void ttm_tt_unbind(struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -679,7 +678,7 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm); * * Swap in a previously swap out ttm_tt. */ -extern int ttm_tt_swapin(struct ttm_tt *ttm); +int ttm_tt_swapin(struct ttm_tt *ttm); /** * ttm_tt_set_placement_caching: @@ -694,9 +693,8 @@ extern int ttm_tt_swapin(struct ttm_tt *ttm); * hit RAM. This function may be very costly as it involves global TLB * and cache flushes and potential page splitting / combining. */ -extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); -extern int ttm_tt_swapout(struct ttm_tt *ttm, - struct file *persistent_swap_storage); +int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); +int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); /** * ttm_tt_unpopulate - free pages from a ttm @@ -705,7 +703,7 @@ extern int ttm_tt_swapout(struct ttm_tt *ttm, * * Calls the driver method to free all pages from a ttm */ -extern void ttm_tt_unpopulate(struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_tt *ttm); /* * ttm_bo.c @@ -720,8 +718,7 @@ extern void ttm_tt_unpopulate(struct ttm_tt *ttm); * Returns true if the memory described by @mem is PCI memory, * false otherwise. */ -extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); +bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); /** * ttm_bo_mem_space @@ -742,21 +739,19 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, * fragmentation or concurrent allocators. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. */ -extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + struct ttm_operation_ctx *ctx); -extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -extern void ttm_bo_global_release(struct drm_global_reference *ref); -extern int ttm_bo_global_init(struct drm_global_reference *ref); +void ttm_bo_global_release(struct drm_global_reference *ref); +int ttm_bo_global_init(struct drm_global_reference *ref); -extern int ttm_bo_device_release(struct ttm_bo_device *bdev); +int ttm_bo_device_release(struct ttm_bo_device *bdev); /** * ttm_bo_device_init @@ -773,18 +768,17 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); * Returns: * !0: Failure. 
*/ -extern int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_global *glob, - struct ttm_bo_driver *driver, - struct address_space *mapping, - uint64_t file_page_offset, bool need_dma32); +int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, + struct ttm_bo_driver *driver, + struct address_space *mapping, + uint64_t file_page_offset, bool need_dma32); /** * ttm_bo_unmap_virtual * * @bo: tear down the virtual mappings for this BO */ -extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); /** * ttm_bo_unmap_virtual @@ -793,16 +787,15 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * * The caller must take ttm_mem_io_lock before calling this function. */ -extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); -extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); -extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, - bool interruptible); -extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); -extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * __ttm_bo_reserve: @@ -836,14 +829,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, if (WARN_ON(ticket)) return -EBUSY; - success = ww_mutex_trylock(&bo->resv->lock); + success = reservation_object_trylock(bo->resv); return success ? 0 : -EBUSY; } if (interruptible) - ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); + ret = reservation_object_lock_interruptible(bo->resv, ticket); else - ret = ww_mutex_lock(&bo->resv->lock, ticket); + ret = reservation_object_lock(bo->resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -941,18 +934,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, } /** - * __ttm_bo_unreserve - * @bo: A pointer to a struct ttm_buffer_object. - * - * Unreserve a previous reservation of @bo where the buffer object is - * already on lru lists. - */ -static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) -{ - ww_mutex_unlock(&bo->resv->lock); -} - -/** * ttm_bo_unreserve * * @bo: A pointer to a struct ttm_buffer_object. @@ -966,20 +947,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); spin_unlock(&bo->glob->lru_lock); } - __ttm_bo_unreserve(bo); -} - -/** - * ttm_bo_unreserve_ticket - * @bo: A pointer to a struct ttm_buffer_object. - * @ticket: ww_acquire_ctx used for reserving - * - * Unreserve a previous reservation of @bo made with @ticket. - */ -static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, - struct ww_acquire_ctx *t) -{ - ttm_bo_unreserve(bo); + reservation_object_unlock(bo->resv); } /* @@ -1008,9 +976,9 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * !0: Failure. 
*/ -extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_move_memcpy @@ -1030,9 +998,9 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * !0: Failure. */ -extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_free_old_node @@ -1041,7 +1009,7 @@ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * * Utility function to free an old placement after a successful move. */ -extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); +void ttm_bo_free_old_node(struct ttm_buffer_object *bo); /** * ttm_bo_move_accel_cleanup. @@ -1058,10 +1026,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * destroyed when the move is complete. This will help pipeline * buffer moves. */ - -extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + struct ttm_mem_reg *new_mem); /** * ttm_bo_pipeline_move. @@ -1087,7 +1054,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * Utility function that returns the pgprot_t that should be used for * setting up a PTE with the caching model indicated by @c_state. */ -extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); +pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; @@ -1108,11 +1075,11 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; * for TT memory. This function uses the linux agpgart interface to * bind and unbind memory backing a ttm_tt. */ -extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, - struct agp_bridge_data *bridge, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -int ttm_agp_tt_populate(struct ttm_tt *ttm); +struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); #endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 2c1e3598effe..8936285b6543 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -35,20 +35,7 @@ #include <linux/errno.h> #include <linux/kobject.h> #include <linux/mm.h> - -/** - * struct ttm_mem_shrink - callback to shrink TTM memory usage. - * - * @do_shrink: The callback function. - * - * Arguments to the do_shrink functions are intended to be passed using - * inheritance. That is, the argument class derives from struct ttm_mem_shrink, - * and can be accessed using container_of(). - */ - -struct ttm_mem_shrink { - int (*do_shrink) (struct ttm_mem_shrink *); -}; +#include "ttm_bo_api.h" /** * struct ttm_mem_global - Global memory accounting structure. 
@@ -76,7 +63,7 @@ struct ttm_mem_shrink { struct ttm_mem_zone; struct ttm_mem_global { struct kobject kobj; - struct ttm_mem_shrink *shrink; + struct ttm_bo_global *bo_glob; struct workqueue_struct *swap_queue; struct work_struct work; spinlock_t lock; @@ -90,67 +77,15 @@ struct ttm_mem_global { #endif }; -/** - * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object - * - * @shrink: The object to initialize. - * @func: The callback function. - */ - -static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, - int (*func) (struct ttm_mem_shrink *)) -{ - shrink->do_shrink = func; -} - -/** - * ttm_mem_register_shrink - register a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to register with. - * @shrink: An initialized struct ttm_mem_shrink object to register. - * - * Returns: - * -EBUSY: There's already a callback registered. (May change). - */ - -static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - if (glob->shrink != NULL) { - spin_unlock(&glob->lock); - return -EBUSY; - } - glob->shrink = shrink; - spin_unlock(&glob->lock); - return 0; -} - -/** - * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to unregister from. - * @shrink: A previously registert struct ttm_mem_shrink object. - * - */ - -static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - BUG_ON(glob->shrink != shrink); - glob->shrink = NULL; - spin_unlock(&glob->lock); -} - extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob); extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - bool no_wait, bool interruptible); + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, uint64_t size); extern size_t ttm_round_pot(size_t size); diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 593811362a91..4d9b019d253c 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void); * * Add backing pages to all of @ttm */ -int ttm_pool_populate(struct ttm_tt *ttm); +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_pool_unpopulate: @@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Populates and DMA maps pages to fullfil a ttm_dma_populate() request */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, + struct ttm_operation_ctx *ctx); /** * Unpopulates and DMA unmaps pages as part of a @@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void); */ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, + struct ttm_operation_ctx *ctx); void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); #else @@ -106,7 +108,8 @@ static inline int 
ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, - struct device *dev) + struct device *dev, + struct ttm_operation_ctx *ctx) { return -ENOMEM; } diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h new file mode 100644 index 000000000000..2c005376ac0e --- /dev/null +++ b/include/dt-bindings/bus/ti-sysc.h @@ -0,0 +1,22 @@ +/* TI sysc interconnect target module defines */ + +/* Generic sysc found on omap2 and later, also known as type1 */ +#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8) +#define SYSC_OMAP2_EMUFREE (1 << 5) +#define SYSC_OMAP2_ENAWAKEUP (1 << 2) +#define SYSC_OMAP2_SOFTRESET (1 << 1) +#define SYSC_OMAP2_AUTOIDLE (1 << 0) + +/* Generic sysc found on omap4 and later, also known as type2 */ +#define SYSC_OMAP4_DMADISABLE (1 << 16) +#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */ +#define SYSC_OMAP4_SOFTRESET (1 << 0) + +/* SmartReflex sysc found on 36xx and later */ +#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26) + +/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ +#define SYSC_IDLE_FORCE 0 +#define SYSC_IDLE_NO 1 +#define SYSC_IDLE_SMART 2 +#define SYSC_IDLE_SMART_WKUP 3 diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h new file mode 100644 index 000000000000..b396f00e481d --- /dev/null +++ b/include/dt-bindings/clock/am3.h @@ -0,0 +1,108 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM3_H +#define __DT_BINDINGS_CLK_AM3_H + +#define AM3_CLKCTRL_OFFSET 0x0 +#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) + +/* l4_per clocks */ +#define AM3_L4_PER_CLKCTRL_OFFSET 0x14 +#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) +#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14) +#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18) +#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c) +#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24) +#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28) +#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c) +#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30) +#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34) +#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38) +#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c) +#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40) +#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44) +#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48) +#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c) +#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50) +#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60) +#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68) +#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c) +#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70) +#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74) +#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78) +#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c) +#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80) +#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84) +#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88) +#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90) +#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94) +#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0) +#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac) +#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0) +#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4) +#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc) +#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0) +#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4) +#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc) +#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4) +#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8) +#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc) +#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0) +#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8) +#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec) +#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0) +#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4) +#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8) +#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc) +#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100) +#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c) +#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110) +#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120) +#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130) +#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4 +#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET) +#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4) +#define AM3_GPIO1_CLKCTRL 
AM3_L4_WKUP_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc) +#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14) +#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0) +#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4) +#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8) +#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc) +#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0) +#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4) +#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8) +#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4) + +/* mpu clocks */ +#define AM3_MPU_CLKCTRL_OFFSET 0x4 +#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET) +#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4 +#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET) +#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20 +#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) +#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h new file mode 100644 index 000000000000..d21df00b3270 --- /dev/null +++ b/include/dt-bindings/clock/am4.h @@ -0,0 +1,113 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM4_H +#define __DT_BINDINGS_CLK_AM4_H + +#define AM4_CLKCTRL_OFFSET 0x20 +#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET) + +/* l4_wkup clocks */ +#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120) +#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228) +#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230) +#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328) +#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338) +#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340) +#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348) +#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350) +#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358) +#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360) +#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_per clocks */ +#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68) +#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70) +#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) +#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238) +#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240) +#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248) +#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258) +#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260) +#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268) +#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320) +#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420) +#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428) +#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430) +#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438) +#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440) +#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448) +#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450) +#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458) +#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460) +#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468) +#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478) +#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480) +#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488) +#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490) +#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498) +#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0) +#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8) +#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0) +#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8) +#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0) +#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8) +#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0) +#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500) +#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508) +#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510) +#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518) +#define 
AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520) +#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528) +#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530) +#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538) +#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540) +#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548) +#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550) +#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558) +#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560) +#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568) +#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570) +#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578) +#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580) +#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588) +#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590) +#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598) +#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0) +#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8) +#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0) +#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720) +#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20) +#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20) + +#endif diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h new file mode 100644 index 000000000000..d3558d897a4d --- /dev/null +++ b/include/dt-bindings/clock/aspeed-clock.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_ASPEED_CLOCK_H +#define DT_BINDINGS_ASPEED_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 +#define ASPEED_CLK_GATE_MCLK 2 +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 +#define ASPEED_CLK_GATE_REFCLK 6 +#define ASPEED_CLK_GATE_USBPORT2CLK 7 +#define ASPEED_CLK_GATE_LCLK 8 +#define ASPEED_CLK_GATE_USBUHCICLK 9 +#define ASPEED_CLK_GATE_D1CLK 10 +#define ASPEED_CLK_GATE_YCLK 11 +#define ASPEED_CLK_GATE_USBPORT1CLK 12 +#define ASPEED_CLK_GATE_UART1CLK 13 +#define ASPEED_CLK_GATE_UART2CLK 14 +#define ASPEED_CLK_GATE_UART5CLK 15 +#define ASPEED_CLK_GATE_ESPICLK 16 +#define ASPEED_CLK_GATE_MAC1CLK 17 +#define ASPEED_CLK_GATE_MAC2CLK 18 +#define ASPEED_CLK_GATE_RSACLK 19 +#define ASPEED_CLK_GATE_UART3CLK 20 +#define ASPEED_CLK_GATE_UART4CLK 21 +#define ASPEED_CLK_GATE_SDCLKCLK 22 +#define ASPEED_CLK_GATE_LHCCLK 23 +#define ASPEED_CLK_HPLL 24 +#define ASPEED_CLK_AHB 25 +#define ASPEED_CLK_APB 26 +#define ASPEED_CLK_UART 27 +#define ASPEED_CLK_SDIO 28 +#define ASPEED_CLK_ECLK 29 +#define ASPEED_CLK_ECLK_MUX 30 +#define ASPEED_CLK_LHCLK 31 +#define ASPEED_CLK_MAC 32 +#define ASPEED_CLK_BCLK 33 +#define ASPEED_CLK_MPLL 34 + +#define ASPEED_RESET_XDMA 0 +#define ASPEED_RESET_MCTP 1 +#define ASPEED_RESET_ADC 2 +#define ASPEED_RESET_JTAG_MASTER 3 +#define ASPEED_RESET_MIC 4 +#define ASPEED_RESET_PWM 5 +#define ASPEED_RESET_PCIVGA 6 +#define ASPEED_RESET_I2C 7 +#define ASPEED_RESET_AHB 8 + +#endif diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h new file mode 100644 index 000000000000..941ac70e7f30 --- /dev/null +++ b/include/dt-bindings/clock/axg-clkc.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Meson-AXG clock tree IDs + * + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
+ */ + +#ifndef __AXG_CLKC_H +#define __AXG_CLKC_H + +#define CLKID_SYS_PLL 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2 2 +#define CLKID_FCLK_DIV3 3 +#define CLKID_FCLK_DIV4 4 +#define CLKID_FCLK_DIV5 5 +#define CLKID_FCLK_DIV7 6 +#define CLKID_GP0_PLL 7 +#define CLKID_CLK81 10 +#define CLKID_MPLL0 11 +#define CLKID_MPLL1 12 +#define CLKID_MPLL2 13 +#define CLKID_MPLL3 14 +#define CLKID_DDR 15 +#define CLKID_AUDIO_LOCKER 16 +#define CLKID_MIPI_DSI_HOST 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC0 21 +#define CLKID_I2C 22 +#define CLKID_RNG0 23 +#define CLKID_UART0 24 +#define CLKID_MIPI_DSI_PHY 25 +#define CLKID_SPICC1 26 +#define CLKID_PCIE_A 27 +#define CLKID_PCIE_B 28 +#define CLKID_HIU_IFACE 29 +#define CLKID_ASSIST_MISC 30 +#define CLKID_SD_EMMC_B 31 +#define CLKID_SD_EMMC_C 32 +#define CLKID_DMA 33 +#define CLKID_SPI 34 +#define CLKID_AUDIO 35 +#define CLKID_ETH 36 +#define CLKID_UART1 37 +#define CLKID_G2D 38 +#define CLKID_USB0 39 +#define CLKID_USB1 40 +#define CLKID_RESET 41 +#define CLKID_USB 42 +#define CLKID_AHB_ARB0 43 +#define CLKID_EFUSE 44 +#define CLKID_BOOT_ROM 45 +#define CLKID_AHB_DATA_BUS 46 +#define CLKID_AHB_CTRL_BUS 47 +#define CLKID_USB1_DDR_BRIDGE 48 +#define CLKID_USB0_DDR_BRIDGE 49 +#define CLKID_MMC_PCLK 50 +#define CLKID_VPU_INTR 51 +#define CLKID_SEC_AHB_AHB3_BRIDGE 52 +#define CLKID_GIC 53 +#define CLKID_AO_MEDIA_CPU 54 +#define CLKID_AO_AHB_SRAM 55 +#define CLKID_AO_AHB_BUS 56 +#define CLKID_AO_IFACE 57 +#define CLKID_AO_I2C 58 +#define CLKID_SD_EMMC_B_CLK0 59 +#define CLKID_SD_EMMC_C_CLK0 60 + +#endif /* __AXG_CLKC_H */ diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h new file mode 100644 index 000000000000..0e7099a344e1 --- /dev/null +++ b/include/dt-bindings/clock/dm814.h @@ -0,0 +1,45 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM814_H +#define __DT_BINDINGS_CLK_DM814_H + +#define DM814_CLKCTRL_OFFSET 0x0 +#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150) +#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154) +#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158) +#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c) +#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160) +#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164) +#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168) +#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c) +#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190) +#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0) +#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4) +#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc) +#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0) +#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4) +#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8) +#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc) +#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200) +#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204) +#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c) +#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220) +#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224) + +#endif diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h new file mode 100644 index 000000000000..69e8a36d783e --- /dev/null +++ b/include/dt-bindings/clock/dm816.h @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM816_H +#define __DT_BINDINGS_CLK_DM816_H + +#define DM816_CLKCTRL_OFFSET 0x0 +#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150) +#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154) +#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158) +#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c) +#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160) +#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164) +#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168) +#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170) +#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174) +#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178) +#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c) +#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180) +#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184) +#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188) +#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c) +#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190) +#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194) +#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198) +#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0) +#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0) +#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4) +#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8) +#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc) +#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0) +#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4) +#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8) +#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc) +#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200) +#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204) + +#endif diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h new file mode 100644 index 000000000000..5e1061b15aed --- /dev/null +++ b/include/dt-bindings/clock/dra7.h @@ -0,0 +1,172 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DRA7_H +#define __DT_BINDINGS_CLK_DRA7_H + +#define DRA7_CLKCTRL_OFFSET 0x20 +#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define DRA7_IPU_CLKCTRL_OFFSET 0x40 +#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* rtc clocks */ +#define DRA7_RTC_CLKCTRL_OFFSET 0x40 +#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET) +#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* dma clocks */ +#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8) +#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0) +#define DRA7_OCP2SCP1_CLKCTRL 
DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* l4per clocks */ +#define DRA7_L4PER_CLKCTRL_OFFSET 0x0 +#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc) +#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14) +#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90) +#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98) +#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4) +#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8) +#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0) +#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8) +#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130) +#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138) +#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160) +#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168) +#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) +#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178) +#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190) +#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198) +#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0) +#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8) +#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0) +#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0) +#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8) +#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0) +#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0) +#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8) +#define DRA7_DCAN2_CLKCTRL 
DRA7_L4PER_CLKCTRL_INDEX(0x1f0) +#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204) +#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208) + +/* wkupaon clocks */ +#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) + +#endif diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h index adb768d447a5..75d583eb84dd 100644 --- a/include/dt-bindings/clock/hi3660-clock.h +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -208,4 +208,11 @@ #define HI3660_CLK_I2C6_IOMCU 3 #define HI3660_CLK_IOMCU_PERI0 4 +/* clk in stub clock */ +#define HI3660_CLK_STUB_CLUSTER0 0 +#define HI3660_CLK_STUB_CLUSTER1 1 +#define HI3660_CLK_STUB_GPU 2 +#define HI3660_CLK_STUB_DDR 3 +#define HI3660_CLK_STUB_NUM 4 + #endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h new file mode 100644 index 000000000000..d68a7695a1f8 --- /dev/null +++ b/include/dt-bindings/clock/jz4770-cgu.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,jz4770-cgu DT binding. + */ + +#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ +#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ + +#define JZ4770_CLK_EXT 0 +#define JZ4770_CLK_OSC32K 1 +#define JZ4770_CLK_PLL0 2 +#define JZ4770_CLK_PLL1 3 +#define JZ4770_CLK_CCLK 4 +#define JZ4770_CLK_H0CLK 5 +#define JZ4770_CLK_H1CLK 6 +#define JZ4770_CLK_H2CLK 7 +#define JZ4770_CLK_C1CLK 8 +#define JZ4770_CLK_PCLK 9 +#define JZ4770_CLK_MMC0_MUX 10 +#define JZ4770_CLK_MMC0 11 +#define JZ4770_CLK_MMC1_MUX 12 +#define JZ4770_CLK_MMC1 13 +#define JZ4770_CLK_MMC2_MUX 14 +#define JZ4770_CLK_MMC2 15 +#define JZ4770_CLK_CIM 16 +#define JZ4770_CLK_UHC 17 +#define JZ4770_CLK_GPU 18 +#define JZ4770_CLK_BCH 19 +#define JZ4770_CLK_LPCLK_MUX 20 +#define JZ4770_CLK_GPS 21 +#define JZ4770_CLK_SSI_MUX 22 +#define JZ4770_CLK_PCM_MUX 23 +#define JZ4770_CLK_I2S 24 +#define JZ4770_CLK_OTG 25 +#define JZ4770_CLK_SSI0 26 +#define JZ4770_CLK_SSI1 27 +#define JZ4770_CLK_SSI2 28 +#define JZ4770_CLK_PCM0 29 +#define JZ4770_CLK_PCM1 30 +#define JZ4770_CLK_DMA 31 +#define JZ4770_CLK_I2C0 32 +#define JZ4770_CLK_I2C1 33 +#define JZ4770_CLK_I2C2 34 +#define JZ4770_CLK_UART0 35 +#define JZ4770_CLK_UART1 36 +#define JZ4770_CLK_UART2 37 +#define JZ4770_CLK_UART3 38 +#define JZ4770_CLK_IPU 39 +#define JZ4770_CLK_ADC 40 +#define JZ4770_CLK_AIC 41 +#define JZ4770_CLK_AUX 42 +#define JZ4770_CLK_VPU 43 +#define JZ4770_CLK_UHC_PHY 44 +#define JZ4770_CLK_OTG_PHY 45 +#define JZ4770_CLK_EXT512 46 +#define JZ4770_CLK_RTC 47 + +#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */ diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h new file mode 100644 index 000000000000..f51821a91216 --- /dev/null +++ b/include/dt-bindings/clock/omap5.h @@ -0,0 +1,118 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_CLK_OMAP5_H +#define __DT_BINDINGS_CLK_OMAP5_H + +#define OMAP5_CLKCTRL_OFFSET 0x20 +#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dsp clocks */ +#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* abe clocks */ +#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) + +/* l3main1 clocks */ +#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3main2 clocks */ +#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dma clocks */ +#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) + +/* l4cfg clocks */ +#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) + +/* l3instr clocks */ +#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) + +/* l4per clocks */ +#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60) +#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) +#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0) +#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8) +#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0) +#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8) +#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0) +#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) +#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8) +#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100) +#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108) +#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110) +#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118) +#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120) +#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128) +#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140) +#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148) +#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150) +#define 
OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158) +#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160) +#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168) +#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170) +#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178) + +/* dss clocks */ +#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3init clocks */ +#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88) +#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0) +#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8) +#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) + +/* wkupaon clocks */ +#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 370c83c3bccc..238f872e52f4 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -58,6 +58,186 @@ #define GCC_QPIC_AHB_CLK 41 #define GCC_QPIC_CLK 42 #define PCNOC_BFDCD_CLK_SRC 43 +#define GPLL2_MAIN 44 +#define GPLL2 45 +#define GPLL4_MAIN 46 +#define GPLL4 47 +#define GPLL6_MAIN 48 +#define GPLL6 49 +#define UBI32_PLL_MAIN 50 +#define UBI32_PLL 51 +#define NSS_CRYPTO_PLL_MAIN 52 +#define NSS_CRYPTO_PLL 53 +#define PCIE0_AXI_CLK_SRC 54 +#define PCIE0_AUX_CLK_SRC 55 +#define PCIE0_PIPE_CLK_SRC 56 +#define PCIE1_AXI_CLK_SRC 57 +#define PCIE1_AUX_CLK_SRC 58 +#define PCIE1_PIPE_CLK_SRC 59 +#define SDCC1_APPS_CLK_SRC 60 +#define SDCC1_ICE_CORE_CLK_SRC 61 +#define SDCC2_APPS_CLK_SRC 62 +#define USB0_MASTER_CLK_SRC 63 +#define USB0_AUX_CLK_SRC 64 +#define USB0_MOCK_UTMI_CLK_SRC 65 +#define USB0_PIPE_CLK_SRC 66 +#define USB1_MASTER_CLK_SRC 67 +#define USB1_AUX_CLK_SRC 68 +#define USB1_MOCK_UTMI_CLK_SRC 69 +#define USB1_PIPE_CLK_SRC 70 +#define GCC_XO_CLK_SRC 71 +#define SYSTEM_NOC_BFDCD_CLK_SRC 72 +#define NSS_CE_CLK_SRC 73 +#define NSS_NOC_BFDCD_CLK_SRC 74 +#define NSS_CRYPTO_CLK_SRC 75 +#define NSS_UBI0_CLK_SRC 76 +#define NSS_UBI0_DIV_CLK_SRC 77 +#define NSS_UBI1_CLK_SRC 78 +#define NSS_UBI1_DIV_CLK_SRC 79 +#define UBI_MPT_CLK_SRC 80 +#define NSS_IMEM_CLK_SRC 81 +#define NSS_PPE_CLK_SRC 82 +#define NSS_PORT1_RX_CLK_SRC 83 +#define NSS_PORT1_RX_DIV_CLK_SRC 84 +#define NSS_PORT1_TX_CLK_SRC 85 +#define NSS_PORT1_TX_DIV_CLK_SRC 86 +#define NSS_PORT2_RX_CLK_SRC 87 +#define NSS_PORT2_RX_DIV_CLK_SRC 88 +#define NSS_PORT2_TX_CLK_SRC 89 +#define NSS_PORT2_TX_DIV_CLK_SRC 90 +#define NSS_PORT3_RX_CLK_SRC 91 +#define NSS_PORT3_RX_DIV_CLK_SRC 92 +#define NSS_PORT3_TX_CLK_SRC 93 +#define NSS_PORT3_TX_DIV_CLK_SRC 94 +#define NSS_PORT4_RX_CLK_SRC 95 +#define NSS_PORT4_RX_DIV_CLK_SRC 96 +#define NSS_PORT4_TX_CLK_SRC 97 +#define NSS_PORT4_TX_DIV_CLK_SRC 98 +#define NSS_PORT5_RX_CLK_SRC 99 +#define NSS_PORT5_RX_DIV_CLK_SRC 100 +#define NSS_PORT5_TX_CLK_SRC 101 +#define NSS_PORT5_TX_DIV_CLK_SRC 102 +#define NSS_PORT6_RX_CLK_SRC 103 +#define NSS_PORT6_RX_DIV_CLK_SRC 104 +#define NSS_PORT6_TX_CLK_SRC 105 +#define NSS_PORT6_TX_DIV_CLK_SRC 106 +#define CRYPTO_CLK_SRC 
107 +#define GP1_CLK_SRC 108 +#define GP2_CLK_SRC 109 +#define GP3_CLK_SRC 110 +#define GCC_PCIE0_AHB_CLK 111 +#define GCC_PCIE0_AUX_CLK 112 +#define GCC_PCIE0_AXI_M_CLK 113 +#define GCC_PCIE0_AXI_S_CLK 114 +#define GCC_PCIE0_PIPE_CLK 115 +#define GCC_SYS_NOC_PCIE0_AXI_CLK 116 +#define GCC_PCIE1_AHB_CLK 117 +#define GCC_PCIE1_AUX_CLK 118 +#define GCC_PCIE1_AXI_M_CLK 119 +#define GCC_PCIE1_AXI_S_CLK 120 +#define GCC_PCIE1_PIPE_CLK 121 +#define GCC_SYS_NOC_PCIE1_AXI_CLK 122 +#define GCC_USB0_AUX_CLK 123 +#define GCC_SYS_NOC_USB0_AXI_CLK 124 +#define GCC_USB0_MASTER_CLK 125 +#define GCC_USB0_MOCK_UTMI_CLK 126 +#define GCC_USB0_PHY_CFG_AHB_CLK 127 +#define GCC_USB0_PIPE_CLK 128 +#define GCC_USB0_SLEEP_CLK 129 +#define GCC_USB1_AUX_CLK 130 +#define GCC_SYS_NOC_USB1_AXI_CLK 131 +#define GCC_USB1_MASTER_CLK 132 +#define GCC_USB1_MOCK_UTMI_CLK 133 +#define GCC_USB1_PHY_CFG_AHB_CLK 134 +#define GCC_USB1_PIPE_CLK 135 +#define GCC_USB1_SLEEP_CLK 136 +#define GCC_SDCC1_AHB_CLK 137 +#define GCC_SDCC1_APPS_CLK 138 +#define GCC_SDCC1_ICE_CORE_CLK 139 +#define GCC_SDCC2_AHB_CLK 140 +#define GCC_SDCC2_APPS_CLK 141 +#define GCC_MEM_NOC_NSS_AXI_CLK 142 +#define GCC_NSS_CE_APB_CLK 143 +#define GCC_NSS_CE_AXI_CLK 144 +#define GCC_NSS_CFG_CLK 145 +#define GCC_NSS_CRYPTO_CLK 146 +#define GCC_NSS_CSR_CLK 147 +#define GCC_NSS_EDMA_CFG_CLK 148 +#define GCC_NSS_EDMA_CLK 149 +#define GCC_NSS_IMEM_CLK 150 +#define GCC_NSS_NOC_CLK 151 +#define GCC_NSS_PPE_BTQ_CLK 152 +#define GCC_NSS_PPE_CFG_CLK 153 +#define GCC_NSS_PPE_CLK 154 +#define GCC_NSS_PPE_IPE_CLK 155 +#define GCC_NSS_PTP_REF_CLK 156 +#define GCC_NSSNOC_CE_APB_CLK 157 +#define GCC_NSSNOC_CE_AXI_CLK 158 +#define GCC_NSSNOC_CRYPTO_CLK 159 +#define GCC_NSSNOC_PPE_CFG_CLK 160 +#define GCC_NSSNOC_PPE_CLK 161 +#define GCC_NSSNOC_QOSGEN_REF_CLK 162 +#define GCC_NSSNOC_SNOC_CLK 163 +#define GCC_NSSNOC_TIMEOUT_REF_CLK 164 +#define GCC_NSSNOC_UBI0_AHB_CLK 165 +#define GCC_NSSNOC_UBI1_AHB_CLK 166 +#define GCC_UBI0_AHB_CLK 167 +#define GCC_UBI0_AXI_CLK 168 +#define GCC_UBI0_NC_AXI_CLK 169 +#define GCC_UBI0_CORE_CLK 170 +#define GCC_UBI0_MPT_CLK 171 +#define GCC_UBI1_AHB_CLK 172 +#define GCC_UBI1_AXI_CLK 173 +#define GCC_UBI1_NC_AXI_CLK 174 +#define GCC_UBI1_CORE_CLK 175 +#define GCC_UBI1_MPT_CLK 176 +#define GCC_CMN_12GPLL_AHB_CLK 177 +#define GCC_CMN_12GPLL_SYS_CLK 178 +#define GCC_MDIO_AHB_CLK 179 +#define GCC_UNIPHY0_AHB_CLK 180 +#define GCC_UNIPHY0_SYS_CLK 181 +#define GCC_UNIPHY1_AHB_CLK 182 +#define GCC_UNIPHY1_SYS_CLK 183 +#define GCC_UNIPHY2_AHB_CLK 184 +#define GCC_UNIPHY2_SYS_CLK 185 +#define GCC_NSS_PORT1_RX_CLK 186 +#define GCC_NSS_PORT1_TX_CLK 187 +#define GCC_NSS_PORT2_RX_CLK 188 +#define GCC_NSS_PORT2_TX_CLK 189 +#define GCC_NSS_PORT3_RX_CLK 190 +#define GCC_NSS_PORT3_TX_CLK 191 +#define GCC_NSS_PORT4_RX_CLK 192 +#define GCC_NSS_PORT4_TX_CLK 193 +#define GCC_NSS_PORT5_RX_CLK 194 +#define GCC_NSS_PORT5_TX_CLK 195 +#define GCC_NSS_PORT6_RX_CLK 196 +#define GCC_NSS_PORT6_TX_CLK 197 +#define GCC_PORT1_MAC_CLK 198 +#define GCC_PORT2_MAC_CLK 199 +#define GCC_PORT3_MAC_CLK 200 +#define GCC_PORT4_MAC_CLK 201 +#define GCC_PORT5_MAC_CLK 202 +#define GCC_PORT6_MAC_CLK 203 +#define GCC_UNIPHY0_PORT1_RX_CLK 204 +#define GCC_UNIPHY0_PORT1_TX_CLK 205 +#define GCC_UNIPHY0_PORT2_RX_CLK 206 +#define GCC_UNIPHY0_PORT2_TX_CLK 207 +#define GCC_UNIPHY0_PORT3_RX_CLK 208 +#define GCC_UNIPHY0_PORT3_TX_CLK 209 +#define GCC_UNIPHY0_PORT4_RX_CLK 210 +#define GCC_UNIPHY0_PORT4_TX_CLK 211 +#define GCC_UNIPHY0_PORT5_RX_CLK 212 +#define GCC_UNIPHY0_PORT5_TX_CLK 213 +#define 
GCC_UNIPHY1_PORT5_RX_CLK 214 +#define GCC_UNIPHY1_PORT5_TX_CLK 215 +#define GCC_UNIPHY2_PORT6_RX_CLK 216 +#define GCC_UNIPHY2_PORT6_TX_CLK 217 +#define GCC_CRYPTO_AHB_CLK 218 +#define GCC_CRYPTO_AXI_CLK 219 +#define GCC_CRYPTO_CLK 220 +#define GCC_GP1_CLK 221 +#define GCC_GP2_CLK 222 +#define GCC_GP3_CLK 223 #define GCC_BLSP1_BCR 0 #define GCC_BLSP1_QUP1_BCR 1 @@ -148,5 +328,47 @@ #define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86 #define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87 #define GCC_SMMU_CATS_BCR 88 +#define GCC_UBI0_AXI_ARES 89 +#define GCC_UBI0_AHB_ARES 90 +#define GCC_UBI0_NC_AXI_ARES 91 +#define GCC_UBI0_DBG_ARES 92 +#define GCC_UBI0_CORE_CLAMP_ENABLE 93 +#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94 +#define GCC_UBI1_AXI_ARES 95 +#define GCC_UBI1_AHB_ARES 96 +#define GCC_UBI1_NC_AXI_ARES 97 +#define GCC_UBI1_DBG_ARES 98 +#define GCC_UBI1_CORE_CLAMP_ENABLE 99 +#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100 +#define GCC_NSS_CFG_ARES 101 +#define GCC_NSS_IMEM_ARES 102 +#define GCC_NSS_NOC_ARES 103 +#define GCC_NSS_CRYPTO_ARES 104 +#define GCC_NSS_CSR_ARES 105 +#define GCC_NSS_CE_APB_ARES 106 +#define GCC_NSS_CE_AXI_ARES 107 +#define GCC_NSSNOC_CE_APB_ARES 108 +#define GCC_NSSNOC_CE_AXI_ARES 109 +#define GCC_NSSNOC_UBI0_AHB_ARES 110 +#define GCC_NSSNOC_UBI1_AHB_ARES 111 +#define GCC_NSSNOC_SNOC_ARES 112 +#define GCC_NSSNOC_CRYPTO_ARES 113 +#define GCC_NSSNOC_ATB_ARES 114 +#define GCC_NSSNOC_QOSGEN_REF_ARES 115 +#define GCC_NSSNOC_TIMEOUT_REF_ARES 116 +#define GCC_PCIE0_PIPE_ARES 117 +#define GCC_PCIE0_SLEEP_ARES 118 +#define GCC_PCIE0_CORE_STICKY_ARES 119 +#define GCC_PCIE0_AXI_MASTER_ARES 120 +#define GCC_PCIE0_AXI_SLAVE_ARES 121 +#define GCC_PCIE0_AHB_ARES 122 +#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123 +#define GCC_PCIE1_PIPE_ARES 124 +#define GCC_PCIE1_SLEEP_ARES 125 +#define GCC_PCIE1_CORE_STICKY_ARES 126 +#define GCC_PCIE1_AXI_MASTER_ARES 127 +#define GCC_PCIE1_AXI_SLAVE_ARES 128 +#define GCC_PCIE1_AHB_ARES 129 +#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 #endif diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h new file mode 100644 index 000000000000..4cb202f090c2 --- /dev/null +++ b/include/dt-bindings/clock/sprd,sc9860-clk.h @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +// +// Spreadtrum SC9860 platform clocks +// +// Copyright (C) 2017, Spreadtrum Communications Inc. 
+ +#ifndef _DT_BINDINGS_CLK_SC9860_H_ +#define _DT_BINDINGS_CLK_SC9860_H_ + +#define CLK_FAC_4M 0 +#define CLK_FAC_2M 1 +#define CLK_FAC_1M 2 +#define CLK_FAC_250K 3 +#define CLK_FAC_RPLL0_26M 4 +#define CLK_FAC_RPLL1_26M 5 +#define CLK_FAC_RCO25M 6 +#define CLK_FAC_RCO4M 7 +#define CLK_FAC_RCO2M 8 +#define CLK_FAC_3K2 9 +#define CLK_FAC_1K 10 +#define CLK_MPLL0_GATE 11 +#define CLK_MPLL1_GATE 12 +#define CLK_DPLL0_GATE 13 +#define CLK_DPLL1_GATE 14 +#define CLK_LTEPLL0_GATE 15 +#define CLK_TWPLL_GATE 16 +#define CLK_LTEPLL1_GATE 17 +#define CLK_RPLL0_GATE 18 +#define CLK_RPLL1_GATE 19 +#define CLK_CPPLL_GATE 20 +#define CLK_GPLL_GATE 21 +#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1) + +#define CLK_MPLL0 0 +#define CLK_MPLL1 1 +#define CLK_DPLL0 2 +#define CLK_DPLL1 3 +#define CLK_RPLL0 4 +#define CLK_RPLL1 5 +#define CLK_TWPLL 6 +#define CLK_LTEPLL0 7 +#define CLK_LTEPLL1 8 +#define CLK_GPLL 9 +#define CLK_CPPLL 10 +#define CLK_GPLL_42M5 11 +#define CLK_TWPLL_768M 12 +#define CLK_TWPLL_384M 13 +#define CLK_TWPLL_192M 14 +#define CLK_TWPLL_96M 15 +#define CLK_TWPLL_48M 16 +#define CLK_TWPLL_24M 17 +#define CLK_TWPLL_12M 18 +#define CLK_TWPLL_512M 19 +#define CLK_TWPLL_256M 20 +#define CLK_TWPLL_128M 21 +#define CLK_TWPLL_64M 22 +#define CLK_TWPLL_307M2 23 +#define CLK_TWPLL_153M6 24 +#define CLK_TWPLL_76M8 25 +#define CLK_TWPLL_51M2 26 +#define CLK_TWPLL_38M4 27 +#define CLK_TWPLL_19M2 28 +#define CLK_L0_614M4 29 +#define CLK_L0_409M6 30 +#define CLK_L0_38M 31 +#define CLK_L1_38M 32 +#define CLK_RPLL0_192M 33 +#define CLK_RPLL0_96M 34 +#define CLK_RPLL0_48M 35 +#define CLK_RPLL1_468M 36 +#define CLK_RPLL1_192M 37 +#define CLK_RPLL1_96M 38 +#define CLK_RPLL1_64M 39 +#define CLK_RPLL1_48M 40 +#define CLK_DPLL0_50M 41 +#define CLK_DPLL1_50M 42 +#define CLK_CPPLL_50M 43 +#define CLK_M0_39M 44 +#define CLK_M1_63M 45 +#define CLK_PLL_NUM (CLK_M1_63M + 1) + + +#define CLK_AP_APB 0 +#define CLK_AP_USB3 1 +#define CLK_UART0 2 +#define CLK_UART1 3 +#define CLK_UART2 4 +#define CLK_UART3 5 +#define CLK_UART4 6 +#define CLK_I2C0 7 +#define CLK_I2C1 8 +#define CLK_I2C2 9 +#define CLK_I2C3 10 +#define CLK_I2C4 11 +#define CLK_I2C5 12 +#define CLK_SPI0 13 +#define CLK_SPI1 14 +#define CLK_SPI2 15 +#define CLK_SPI3 16 +#define CLK_IIS0 17 +#define CLK_IIS1 18 +#define CLK_IIS2 19 +#define CLK_IIS3 20 +#define CLK_AP_CLK_NUM (CLK_IIS3 + 1) + +#define CLK_AON_APB 0 +#define CLK_AUX0 1 +#define CLK_AUX1 2 +#define CLK_AUX2 3 +#define CLK_PROBE 4 +#define CLK_SP_AHB 5 +#define CLK_CCI 6 +#define CLK_GIC 7 +#define CLK_CSSYS 8 +#define CLK_SDIO0_2X 9 +#define CLK_SDIO1_2X 10 +#define CLK_SDIO2_2X 11 +#define CLK_EMMC_2X 12 +#define CLK_SDIO0_1X 13 +#define CLK_SDIO1_1X 14 +#define CLK_SDIO2_1X 15 +#define CLK_EMMC_1X 16 +#define CLK_ADI 17 +#define CLK_PWM0 18 +#define CLK_PWM1 19 +#define CLK_PWM2 20 +#define CLK_PWM3 21 +#define CLK_EFUSE 22 +#define CLK_CM3_UART0 23 +#define CLK_CM3_UART1 24 +#define CLK_THM 25 +#define CLK_CM3_I2C0 26 +#define CLK_CM3_I2C1 27 +#define CLK_CM4_SPI 28 +#define CLK_AON_I2C 29 +#define CLK_AVS 30 +#define CLK_CA53_DAP 31 +#define CLK_CA53_TS 32 +#define CLK_DJTAG_TCK 33 +#define CLK_PMU 34 +#define CLK_PMU_26M 35 +#define CLK_DEBOUNCE 36 +#define CLK_OTG2_REF 37 +#define CLK_USB3_REF 38 +#define CLK_AP_AXI 39 +#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1) + +#define CLK_USB3_EB 0 +#define CLK_USB3_SUSPEND_EB 1 +#define CLK_USB3_REF_EB 2 +#define CLK_DMA_EB 3 +#define CLK_SDIO0_EB 4 +#define CLK_SDIO1_EB 5 +#define CLK_SDIO2_EB 6 +#define CLK_EMMC_EB 7 +#define CLK_ROM_EB 8 
+#define CLK_BUSMON_EB 9 +#define CLK_CC63S_EB 10 +#define CLK_CC63P_EB 11 +#define CLK_CE0_EB 12 +#define CLK_CE1_EB 13 +#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1) + +#define CLK_AVS_LIT_EB 0 +#define CLK_AVS_BIG_EB 1 +#define CLK_AP_INTC5_EB 2 +#define CLK_GPIO_EB 3 +#define CLK_PWM0_EB 4 +#define CLK_PWM1_EB 5 +#define CLK_PWM2_EB 6 +#define CLK_PWM3_EB 7 +#define CLK_KPD_EB 8 +#define CLK_AON_SYS_EB 9 +#define CLK_AP_SYS_EB 10 +#define CLK_AON_TMR_EB 11 +#define CLK_AP_TMR0_EB 12 +#define CLK_EFUSE_EB 13 +#define CLK_EIC_EB 14 +#define CLK_PUB1_REG_EB 15 +#define CLK_ADI_EB 16 +#define CLK_AP_INTC0_EB 17 +#define CLK_AP_INTC1_EB 18 +#define CLK_AP_INTC2_EB 19 +#define CLK_AP_INTC3_EB 20 +#define CLK_AP_INTC4_EB 21 +#define CLK_SPLK_EB 22 +#define CLK_MSPI_EB 23 +#define CLK_PUB0_REG_EB 24 +#define CLK_PIN_EB 25 +#define CLK_AON_CKG_EB 26 +#define CLK_GPU_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APCPU_TS1_EB 29 +#define CLK_DAP_EB 30 +#define CLK_I2C_EB 31 +#define CLK_PMU_EB 32 +#define CLK_THM_EB 33 +#define CLK_AUX0_EB 34 +#define CLK_AUX1_EB 35 +#define CLK_AUX2_EB 36 +#define CLK_PROBE_EB 37 +#define CLK_GPU0_AVS_EB 38 +#define CLK_GPU1_AVS_EB 39 +#define CLK_APCPU_WDG_EB 40 +#define CLK_AP_TMR1_EB 41 +#define CLK_AP_TMR2_EB 42 +#define CLK_DISP_EMC_EB 43 +#define CLK_ZIP_EMC_EB 44 +#define CLK_GSP_EMC_EB 45 +#define CLK_OSC_AON_EB 46 +#define CLK_LVDS_TRX_EB 47 +#define CLK_LVDS_TCXO_EB 48 +#define CLK_MDAR_EB 49 +#define CLK_RTC4M0_CAL_EB 50 +#define CLK_RCT100M_CAL_EB 51 +#define CLK_DJTAG_EB 52 +#define CLK_MBOX_EB 53 +#define CLK_AON_DMA_EB 54 +#define CLK_DBG_EMC_EB 55 +#define CLK_LVDS_PLL_DIV_EN 56 +#define CLK_DEF_EB 57 +#define CLK_AON_APB_RSV0 58 +#define CLK_ORP_JTAG_EB 59 +#define CLK_VSP_EB 60 +#define CLK_CAM_EB 61 +#define CLK_DISP_EB 62 +#define CLK_DBG_AXI_IF_EB 63 +#define CLK_SDIO0_2X_EN 64 +#define CLK_SDIO1_2X_EN 65 +#define CLK_SDIO2_2X_EN 66 +#define CLK_EMMC_2X_EN 67 +#define CLK_AON_GATE_NUM (CLK_EMMC_2X_EN + 1) + +#define CLK_LIT_MCU 0 +#define CLK_BIG_MCU 1 +#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1) + +#define CLK_AGCP_IIS0_EB 0 +#define CLK_AGCP_IIS1_EB 1 +#define CLK_AGCP_IIS2_EB 2 +#define CLK_AGCP_IIS3_EB 3 +#define CLK_AGCP_UART_EB 4 +#define CLK_AGCP_DMACP_EB 5 +#define CLK_AGCP_DMAAP_EB 6 +#define CLK_AGCP_ARC48K_EB 7 +#define CLK_AGCP_SRC44P1K_EB 8 +#define CLK_AGCP_MCDT_EB 9 +#define CLK_AGCP_VBCIFD_EB 10 +#define CLK_AGCP_VBC_EB 11 +#define CLK_AGCP_SPINLOCK_EB 12 +#define CLK_AGCP_ICU_EB 13 +#define CLK_AGCP_AP_ASHB_EB 14 +#define CLK_AGCP_CP_ASHB_EB 15 +#define CLK_AGCP_AUD_EB 16 +#define CLK_AGCP_AUDIF_EB 17 +#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1) + +#define CLK_GPU 0 +#define CLK_GPU_NUM (CLK_GPU + 1) + +#define CLK_AHB_VSP 0 +#define CLK_VSP 1 +#define CLK_VSP_ENC 2 +#define CLK_VPP 3 +#define CLK_VSP_26M 4 +#define CLK_VSP_NUM (CLK_VSP_26M + 1) + +#define CLK_VSP_DEC_EB 0 +#define CLK_VSP_CKG_EB 1 +#define CLK_VSP_MMU_EB 2 +#define CLK_VSP_ENC_EB 3 +#define CLK_VPP_EB 4 +#define CLK_VSP_26M_EB 5 +#define CLK_VSP_AXI_GATE 6 +#define CLK_VSP_ENC_GATE 7 +#define CLK_VPP_AXI_GATE 8 +#define CLK_VSP_BM_GATE 9 +#define CLK_VSP_ENC_BM_GATE 10 +#define CLK_VPP_BM_GATE 11 +#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1) + +#define CLK_AHB_CAM 0 +#define CLK_SENSOR0 1 +#define CLK_SENSOR1 2 +#define CLK_SENSOR2 3 +#define CLK_MIPI_CSI0_EB 4 +#define CLK_MIPI_CSI1_EB 5 +#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1) + +#define CLK_DCAM0_EB 0 +#define CLK_DCAM1_EB 1 +#define CLK_ISP0_EB 2 +#define CLK_CSI0_EB 3 +#define CLK_CSI1_EB 4 
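/*
 * A minimal sketch, again an assumption rather than anything taken from
 * this patch: each *_NUM macro above is simply "last ID + 1", so a clock
 * provider driver could size its lookup table directly from it, e.g.
 *
 *	static struct clk_hw *apahb_hws[CLK_APAHB_GATE_NUM];
 *
 * where CLK_APAHB_GATE_NUM expands to (CLK_CE1_EB + 1), i.e. 13 + 1 = 14,
 * one slot per gate ID listed in that block.
 */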
+#define CLK_JPG0_EB 5 +#define CLK_JPG1_EB 6 +#define CLK_CAM_CKG_EB 7 +#define CLK_CAM_MMU_EB 8 +#define CLK_ISP1_EB 9 +#define CLK_CPP_EB 10 +#define CLK_MMU_PF_EB 11 +#define CLK_ISP2_EB 12 +#define CLK_DCAM2ISP_IF_EB 13 +#define CLK_ISP2DCAM_IF_EB 14 +#define CLK_ISP_LCLK_EB 15 +#define CLK_ISP_ICLK_EB 16 +#define CLK_ISP_MCLK_EB 17 +#define CLK_ISP_PCLK_EB 18 +#define CLK_ISP_ISP2DCAM_EB 19 +#define CLK_DCAM0_IF_EB 20 +#define CLK_CLK26M_IF_EB 21 +#define CLK_CPHY0_GATE 22 +#define CLK_MIPI_CSI0_GATE 23 +#define CLK_CPHY1_GATE 24 +#define CLK_MIPI_CSI1 25 +#define CLK_DCAM0_AXI_GATE 26 +#define CLK_DCAM1_AXI_GATE 27 +#define CLK_SENSOR0_GATE 28 +#define CLK_SENSOR1_GATE 29 +#define CLK_JPG0_AXI_GATE 30 +#define CLK_GPG1_AXI_GATE 31 +#define CLK_ISP0_AXI_GATE 32 +#define CLK_ISP1_AXI_GATE 33 +#define CLK_ISP2_AXI_GATE 34 +#define CLK_CPP_AXI_GATE 35 +#define CLK_D0_IF_AXI_GATE 36 +#define CLK_D2I_IF_AXI_GATE 37 +#define CLK_I2D_IF_AXI_GATE 38 +#define CLK_SPARE_AXI_GATE 39 +#define CLK_SENSOR2_GATE 40 +#define CLK_D0IF_IN_D_EN 41 +#define CLK_D1IF_IN_D_EN 42 +#define CLK_D0IF_IN_D2I_EN 43 +#define CLK_D1IF_IN_D2I_EN 44 +#define CLK_IA_IN_D2I_EN 45 +#define CLK_IB_IN_D2I_EN 46 +#define CLK_IC_IN_D2I_EN 47 +#define CLK_IA_IN_I_EN 48 +#define CLK_IB_IN_I_EN 49 +#define CLK_IC_IN_I_EN 50 +#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1) + +#define CLK_AHB_DISP 0 +#define CLK_DISPC0_DPI 1 +#define CLK_DISPC1_DPI 2 +#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1) + +#define CLK_DISPC0_EB 0 +#define CLK_DISPC1_EB 1 +#define CLK_DISPC_MMU_EB 2 +#define CLK_GSP0_EB 3 +#define CLK_GSP1_EB 4 +#define CLK_GSP0_MMU_EB 5 +#define CLK_GSP1_MMU_EB 6 +#define CLK_DSI0_EB 7 +#define CLK_DSI1_EB 8 +#define CLK_DISP_CKG_EB 9 +#define CLK_DISP_GPU_EB 10 +#define CLK_GPU_MTX_EB 11 +#define CLK_GSP_MTX_EB 12 +#define CLK_TMC_MTX_EB 13 +#define CLK_DISPC_MTX_EB 14 +#define CLK_DPHY0_GATE 15 +#define CLK_DPHY1_GATE 16 +#define CLK_GSP0_A_GATE 17 +#define CLK_GSP1_A_GATE 18 +#define CLK_GSP0_F_GATE 19 +#define CLK_GSP1_F_GATE 20 +#define CLK_D_MTX_F_GATE 21 +#define CLK_D_MTX_A_GATE 22 +#define CLK_D_NOC_F_GATE 23 +#define CLK_D_NOC_A_GATE 24 +#define CLK_GSP_MTX_F_GATE 25 +#define CLK_GSP_MTX_A_GATE 26 +#define CLK_GSP_NOC_F_GATE 27 +#define CLK_GSP_NOC_A_GATE 28 +#define CLK_DISPM0IDLE_GATE 29 +#define CLK_GSPM0IDLE_GATE 30 +#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_IIS3_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_I2C0_EB 8 +#define CLK_I2C1_EB 9 +#define CLK_I2C2_EB 10 +#define CLK_I2C3_EB 11 +#define CLK_I2C4_EB 12 +#define CLK_I2C5_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_UART3_EB 17 +#define CLK_UART4_EB 18 +#define CLK_AP_CKG_EB 19 +#define CLK_SPI3_EB 20 +#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1) + +#endif /* _DT_BINDINGS_CLK_SC9860_H_ */ diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h new file mode 100644 index 000000000000..56fc4889b2c4 --- /dev/null +++ b/include/dt-bindings/gpio/aspeed-gpio.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * This header provides constants for binding aspeed,*-gpio. + * + * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. 
+ */ + +#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H +#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H + +#include <dt-bindings/gpio/gpio.h> + +#define ASPEED_GPIO_PORT_A 0 +#define ASPEED_GPIO_PORT_B 1 +#define ASPEED_GPIO_PORT_C 2 +#define ASPEED_GPIO_PORT_D 3 +#define ASPEED_GPIO_PORT_E 4 +#define ASPEED_GPIO_PORT_F 5 +#define ASPEED_GPIO_PORT_G 6 +#define ASPEED_GPIO_PORT_H 7 +#define ASPEED_GPIO_PORT_I 8 +#define ASPEED_GPIO_PORT_J 9 +#define ASPEED_GPIO_PORT_K 10 +#define ASPEED_GPIO_PORT_L 11 +#define ASPEED_GPIO_PORT_M 12 +#define ASPEED_GPIO_PORT_N 13 +#define ASPEED_GPIO_PORT_O 14 +#define ASPEED_GPIO_PORT_P 15 +#define ASPEED_GPIO_PORT_Q 16 +#define ASPEED_GPIO_PORT_R 17 +#define ASPEED_GPIO_PORT_S 18 +#define ASPEED_GPIO_PORT_T 19 +#define ASPEED_GPIO_PORT_U 20 +#define ASPEED_GPIO_PORT_V 21 +#define ASPEED_GPIO_PORT_W 22 +#define ASPEED_GPIO_PORT_X 23 +#define ASPEED_GPIO_PORT_Y 24 +#define ASPEED_GPIO_PORT_Z 25 +#define ASPEED_GPIO_PORT_AA 26 +#define ASPEED_GPIO_PORT_AB 27 +#define ASPEED_GPIO_PORT_AC 28 + +#define ASPEED_GPIO(port, offset) \ + ((ASPEED_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h index dd549ff04295..2cc10ae4bbb7 100644 --- a/include/dt-bindings/gpio/gpio.h +++ b/include/dt-bindings/gpio/gpio.h @@ -29,8 +29,8 @@ #define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN) #define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE) -/* Bit 3 express GPIO suspend/resume persistence */ -#define GPIO_SLEEP_MAINTAIN_VALUE 0 -#define GPIO_SLEEP_MAY_LOSE_VALUE 8 +/* Bit 3 express GPIO suspend/resume and reset persistence */ +#define GPIO_PERSISTENT 0 +#define GPIO_TRANSITORY 8 #endif diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h new file mode 100644 index 000000000000..25bb1fffa97a --- /dev/null +++ b/include/dt-bindings/gpio/meson-axg-gpio.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
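(Editorial note, not part of the patch above.) A minimal sketch of how the ASPEED_GPIO() helper from the new aspeed-gpio.h header composes a line number: the port letter selects a bank of eight lines, so the result is port index * 8 + offset. The checks below are illustrative only.

    #include <dt-bindings/gpio/aspeed-gpio.h>

    /* Port A is bank 0, so line A0 is 0. */
    _Static_assert(ASPEED_GPIO(A, 0) == 0, "A0 is line 0");
    /* Port D is bank 3, so line D2 is 3 * 8 + 2 = 26. */
    _Static_assert(ASPEED_GPIO(D, 2) == 26, "D2 is line 26");

In a board .dts, a value like this would typically sit in the first cell of a two-cell GPIO specifier, with the second cell carrying a flag from gpio.h, as the header comment describes.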
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com> + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H +#define _DT_BINDINGS_MESON_AXG_GPIO_H + +/* First GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_TEST_N 14 + +/* Second GPIO chip */ +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define BOOT_0 11 +#define BOOT_1 12 +#define BOOT_2 13 +#define BOOT_3 14 +#define BOOT_4 15 +#define BOOT_5 16 +#define BOOT_6 17 +#define BOOT_7 18 +#define BOOT_8 19 +#define BOOT_9 20 +#define BOOT_10 21 +#define BOOT_11 22 +#define BOOT_12 23 +#define BOOT_13 24 +#define BOOT_14 25 +#define GPIOA_0 26 +#define GPIOA_1 27 +#define GPIOA_2 28 +#define GPIOA_3 29 +#define GPIOA_4 30 +#define GPIOA_5 31 +#define GPIOA_6 32 +#define GPIOA_7 33 +#define GPIOA_8 34 +#define GPIOA_9 35 +#define GPIOA_10 36 +#define GPIOA_11 37 +#define GPIOA_12 38 +#define GPIOA_13 39 +#define GPIOA_14 40 +#define GPIOA_15 41 +#define GPIOA_16 42 +#define GPIOA_17 43 +#define GPIOA_18 44 +#define GPIOA_19 45 +#define GPIOA_20 46 +#define GPIOX_0 47 +#define GPIOX_1 48 +#define GPIOX_2 49 +#define GPIOX_3 50 +#define GPIOX_4 51 +#define GPIOX_5 52 +#define GPIOX_6 53 +#define GPIOX_7 54 +#define GPIOX_8 55 +#define GPIOX_9 56 +#define GPIOX_10 57 +#define GPIOX_11 58 +#define GPIOX_12 59 +#define GPIOX_13 60 +#define GPIOX_14 61 +#define GPIOX_15 62 +#define GPIOX_16 63 +#define GPIOX_17 64 +#define GPIOX_18 65 +#define GPIOX_19 66 +#define GPIOX_20 67 +#define GPIOX_21 68 +#define GPIOX_22 69 +#define GPIOY_0 70 +#define GPIOY_1 71 +#define GPIOY_2 72 +#define GPIOY_3 73 +#define GPIOY_4 74 +#define GPIOY_5 75 +#define GPIOY_6 76 +#define GPIOY_7 77 +#define GPIOY_8 78 +#define GPIOY_9 79 +#define GPIOY_10 80 +#define GPIOY_11 81 +#define GPIOY_12 82 +#define GPIOY_13 83 +#define GPIOY_14 84 +#define GPIOY_15 85 + +#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */ diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h new file mode 100644 index 000000000000..64813536aec9 --- /dev/null +++ b/include/dt-bindings/memory/tegra186-mc.h @@ -0,0 +1,111 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H +#define DT_BINDINGS_MEMORY_TEGRA186_MC_H + +/* special clients */ +#define TEGRA186_SID_INVALID 0x00 +#define TEGRA186_SID_PASSTHROUGH 0x7f + +/* host1x clients */ +#define TEGRA186_SID_HOST1X 0x01 +#define TEGRA186_SID_CSI 0x02 +#define TEGRA186_SID_VIC 0x03 +#define TEGRA186_SID_VI 0x04 +#define TEGRA186_SID_ISP 0x05 +#define TEGRA186_SID_NVDEC 0x06 +#define TEGRA186_SID_NVENC 0x07 +#define TEGRA186_SID_NVJPG 0x08 +#define TEGRA186_SID_NVDISPLAY 0x09 +#define TEGRA186_SID_TSEC 0x0a +#define TEGRA186_SID_TSECB 0x0b +#define TEGRA186_SID_SE 0x0c +#define TEGRA186_SID_SE1 0x0d +#define TEGRA186_SID_SE2 0x0e +#define TEGRA186_SID_SE3 0x0f + +/* GPU clients */ +#define TEGRA186_SID_GPU 0x10 + +/* other SoC clients */ +#define TEGRA186_SID_AFI 0x11 +#define TEGRA186_SID_HDA 0x12 +#define TEGRA186_SID_ETR 0x13 +#define TEGRA186_SID_EQOS 0x14 +#define TEGRA186_SID_UFSHC 0x15 +#define TEGRA186_SID_AON 0x16 +#define TEGRA186_SID_SDMMC4 0x17 +#define 
TEGRA186_SID_SDMMC3 0x18 +#define TEGRA186_SID_SDMMC2 0x19 +#define TEGRA186_SID_SDMMC1 0x1a +#define TEGRA186_SID_XUSB_HOST 0x1b +#define TEGRA186_SID_XUSB_DEV 0x1c +#define TEGRA186_SID_SATA 0x1d +#define TEGRA186_SID_APE 0x1e +#define TEGRA186_SID_SCE 0x1f + +/* GPC DMA clients */ +#define TEGRA186_SID_GPCDMA_0 0x20 +#define TEGRA186_SID_GPCDMA_1 0x21 +#define TEGRA186_SID_GPCDMA_2 0x22 +#define TEGRA186_SID_GPCDMA_3 0x23 +#define TEGRA186_SID_GPCDMA_4 0x24 +#define TEGRA186_SID_GPCDMA_5 0x25 +#define TEGRA186_SID_GPCDMA_6 0x26 +#define TEGRA186_SID_GPCDMA_7 0x27 + +/* APE DMA clients */ +#define TEGRA186_SID_APE_1 0x28 +#define TEGRA186_SID_APE_2 0x29 + +/* camera RTCPU */ +#define TEGRA186_SID_RCE 0x2a + +/* camera RTCPU on host1x address space */ +#define TEGRA186_SID_RCE_1X 0x2b + +/* APE DMA clients */ +#define TEGRA186_SID_APE_3 0x2c + +/* camera RTCPU running on APE */ +#define TEGRA186_SID_APE_CAM 0x2d +#define TEGRA186_SID_APE_CAM_1X 0x2e + +/* + * The BPMP has its SID value hardcoded in the firmware. Changing it requires + * considerable effort. + */ +#define TEGRA186_SID_BPMP 0x32 + +/* for SMMU tests */ +#define TEGRA186_SID_SMMU_TEST 0x33 + +/* host1x virtualization channels */ +#define TEGRA186_SID_HOST1X_CTX0 0x38 +#define TEGRA186_SID_HOST1X_CTX1 0x39 +#define TEGRA186_SID_HOST1X_CTX2 0x3a +#define TEGRA186_SID_HOST1X_CTX3 0x3b +#define TEGRA186_SID_HOST1X_CTX4 0x3c +#define TEGRA186_SID_HOST1X_CTX5 0x3d +#define TEGRA186_SID_HOST1X_CTX6 0x3e +#define TEGRA186_SID_HOST1X_CTX7 0x3f + +/* host1x command buffers */ +#define TEGRA186_SID_HOST1X_VM0 0x40 +#define TEGRA186_SID_HOST1X_VM1 0x41 +#define TEGRA186_SID_HOST1X_VM2 0x42 +#define TEGRA186_SID_HOST1X_VM3 0x43 +#define TEGRA186_SID_HOST1X_VM4 0x44 +#define TEGRA186_SID_HOST1X_VM5 0x45 +#define TEGRA186_SID_HOST1X_VM6 0x46 +#define TEGRA186_SID_HOST1X_VM7 0x47 + +/* SE data buffers */ +#define TEGRA186_SID_SE_VM0 0x48 +#define TEGRA186_SID_SE_VM1 0x49 +#define TEGRA186_SID_SE_VM2 0x4a +#define TEGRA186_SID_SE_VM3 0x4b +#define TEGRA186_SID_SE_VM4 0x4c +#define TEGRA186_SID_SE_VM5 0x4d +#define TEGRA186_SID_SE_VM6 0x4e +#define TEGRA186_SID_SE_VM7 0x4f + +#endif diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index a69e310789c5..6ce4a32f77d4 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -25,7 +25,8 @@ #define DS0_FORCE_OFF_MODE (1 << 24) #define DS0_INPUT (1 << 25) #define DS0_FORCE_OUT_HIGH (1 << 26) -#define DS0_PULL_UP_DOWN_EN (1 << 27) +#define DS0_PULL_UP_DOWN_EN (0 << 27) +#define DS0_PULL_UP_DOWN_DIS (1 << 27) #define DS0_PULL_UP_SEL (1 << 28) #define WAKEUP_ENABLE (1 << 29) diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h index b8dfe31821e6..b5a2174a6386 100644 --- a/include/dt-bindings/pinctrl/stm32-pinfunc.h +++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (C) STMicroelectronics 2017 - All Rights Reserved + * Author: Torgue Alexandre <alexandre.torgue@st.com> for STMicroelectronics. + */ + #ifndef _DT_BINDINGS_STM32_PINFUNC_H #define _DT_BINDINGS_STM32_PINFUNC_H diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h new file mode 100644 index 000000000000..92b46d772fae --- /dev/null +++ b/include/dt-bindings/power/mt2712-power.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 MediaTek Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. + */ + +#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H +#define _DT_BINDINGS_POWER_MT2712_POWER_H + +#define MT2712_POWER_DOMAIN_MM 0 +#define MT2712_POWER_DOMAIN_VDEC 1 +#define MT2712_POWER_DOMAIN_VENC 2 +#define MT2712_POWER_DOMAIN_ISP 3 +#define MT2712_POWER_DOMAIN_AUDIO 4 +#define MT2712_POWER_DOMAIN_USB 5 +#define MT2712_POWER_DOMAIN_USB2 6 +#define MT2712_POWER_DOMAIN_MFG 7 + +#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */ diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h new file mode 100644 index 000000000000..4cf1aefbf09c --- /dev/null +++ b/include/dt-bindings/power/owl-s700-powergate.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Actions Semi S700 SPS + * + * Copyright (c) 2017 Andreas Färber + */ +#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H + +#define S700_PD_VDE 0 +#define S700_PD_VCE_SI 1 +#define S700_PD_USB2_1 2 +#define S700_PD_HDE 3 +#define S700_PD_DMA 4 +#define S700_PD_DS 5 +#define S700_PD_USB3 6 +#define S700_PD_USB2_0 7 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h new file mode 100644 index 000000000000..ad6f55dabd6d --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -0,0 +1,124 @@ +/* + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Copyright (c) 2017 Amlogic, inc. 
+ * Author: Yixun Lan <yixun.lan@amlogic.com> + * + * SPDX-License-Identifier: (GPL-2.0+ OR BSD) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_PCIE_A 1 +#define RESET_PCIE_B 2 +#define RESET_DDR_TOP 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_PCIE_PHY 6 +#define RESET_PCIE_APB 7 +/* 8 */ +/* 9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +/* 12 */ +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18-21 */ +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +/* 28-31 */ +/* RESET1 */ +/* 32 */ +/* 33 */ +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +/* 37 */ +#define RESET_AHB_SRAM 38 +/* 39 */ +/* 40 */ +#define RESET_DMA 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +/* 44 */ +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61-63 */ +/* RESET2 */ +/* 64 */ +/* 65 */ +#define RESET_AUDIO 66 +/* 67 */ +#define RESET_MIPI_HOST 68 +#define RESET_AUDIO_LOCKER 69 +#define RESET_GE2D 70 +/* 71-76 */ +#define RESET_AO_CPU_RESET 77 +/* 78-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +/* 97-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +#define RESET_MIPI_PHY 130 +/* 131-140 */ +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +/* 194 */ +/* 195 */ +#define RESET_PERIPHS_I2C_MASTER_0 196 +/* 197-200 */ +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +/* 203-204 */ +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_I2C_MASTER_3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +#define RESET_DMC_VPU_PIPEL 233 +/* 234-255 */ + +#endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h new file mode 100644 index 000000000000..e518e4e3dfb5 --- /dev/null +++ b/include/kvm/arm_psci.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __KVM_ARM_PSCI_H__ +#define __KVM_ARM_PSCI_H__ + +#include <linux/kvm_host.h> +#include <uapi/linux/psci.h> + +#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) +#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) +#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) + +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 + +/* + * We need the KVM pointer independently from the vcpu as we can call + * this from HYP, and need to apply kern_hyp_va on it... + */ +static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +{ + /* + * Our PSCI implementation stays the same across versions from + * v0.2 onward, only adding the few mandatory functions (such + * as FEATURES with 1.0) that are required by newer + * revisions. It is thus safe to return the latest. + */ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + return KVM_ARM_PSCI_LATEST; + + return KVM_ARM_PSCI_0_1; +} + + +int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); + +#endif /* __KVM_ARM_PSCI_H__ */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index dc1ebfeeb5ec..e6d41b65d396 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +#define ACPI_HANDLE_FWNODE(fwnode) \ + acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { @@ -451,6 +453,7 @@ void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); void __init acpi_nvs_nosave_s3(void); +void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ struct acpi_osc_context { @@ -584,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); +void *acpi_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); @@ -626,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count) #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +#define ACPI_HANDLE_FWNODE(fwnode) (NULL) #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), struct fwnode_handle; @@ -640,6 +645,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) return false; } +static inline const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +{ + return NULL; +} + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; @@ -755,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device( return NULL; } +static inline void *acpi_get_match_data(const struct device *dev) +{ + return NULL; +} + static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { @@ -978,6 +994,11 @@ struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; + +/* Ignore IoRestriction field */ +#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) + + unsigned int quirks; }; #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) diff --git a/include/linux/arch_topology.h 
b/include/linux/arch_topology.h index 304511267c82..2b709416de05 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); DECLARE_PER_CPU(unsigned long, freq_scale); static inline -unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu) +unsigned long topology_get_freq_scale(int cpu) { return per_cpu(freq_scale, cpu); } diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38c653..a031897fca76 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -14,14 +14,16 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H +#include <uapi/linux/const.h> + /* * This file provides common defines for ARM SMC Calling Convention as * specified in * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html */ -#define ARM_SMCCC_STD_CALL 0 -#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_STD_CALL _AC(0,U) +#define ARM_SMCCC_FAST_CALL _AC(1,U) #define ARM_SMCCC_TYPE_SHIFT 31 #define ARM_SMCCC_SMC_32 0 @@ -60,6 +62,24 @@ #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ +#define ARM_SMCCC_VERSION_1_0 0x10000 +#define ARM_SMCCC_VERSION_1_1 0x10001 + +#define ARM_SMCCC_VERSION_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0) + +#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 1) + +#define ARM_SMCCC_ARCH_WORKAROUND_1 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x8000) + #ifndef __ASSEMBLY__ #include <linux/linkage.h> @@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) +/* SMCCC v1.1 implementation madness follows */ +#ifdef CONFIG_ARM64 + +#define SMCCC_SMC_INST "smc #0" +#define SMCCC_HVC_INST "hvc #0" + +#elif defined(CONFIG_ARM) +#include <asm/opcodes-sec.h> +#include <asm/opcodes-virt.h> + +#define SMCCC_SMC_INST __SMC(0) +#define SMCCC_HVC_INST __HVC(0) + +#endif + +#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x + +#define __count_args(...) 
\ + ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) + +#define __constraint_write_0 \ + "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_1 \ + "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) +#define __constraint_write_2 \ + "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) +#define __constraint_write_3 \ + "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) +#define __constraint_write_4 __constraint_write_3 +#define __constraint_write_5 __constraint_write_4 +#define __constraint_write_6 __constraint_write_5 +#define __constraint_write_7 __constraint_write_6 + +#define __constraint_read_0 +#define __constraint_read_1 +#define __constraint_read_2 +#define __constraint_read_3 +#define __constraint_read_4 "r" (r4) +#define __constraint_read_5 __constraint_read_4, "r" (r5) +#define __constraint_read_6 __constraint_read_5, "r" (r6) +#define __constraint_read_7 __constraint_read_6, "r" (r7) + +#define __declare_arg_0(a0, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register unsigned long r1 asm("r1"); \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_1(a0, a1, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r2 asm("r2"); \ + register unsigned long r3 asm("r3") + +#define __declare_arg_2(a0, a1, a2, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r3 asm("r3") + +#define __declare_arg_3(a0, a1, a2, a3, res) \ + struct arm_smccc_res *___res = res; \ + register u32 r0 asm("r0") = a0; \ + register typeof(a1) r1 asm("r1") = a1; \ + register typeof(a2) r2 asm("r2") = a2; \ + register typeof(a3) r3 asm("r3") = a3 + +#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + __declare_arg_3(a0, a1, a2, a3, res); \ + register typeof(a4) r4 asm("r4") = a4 + +#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + __declare_arg_4(a0, a1, a2, a3, a4, res); \ + register typeof(a5) r5 asm("r5") = a5 + +#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ + register typeof(a6) r6 asm("r6") = a6 + +#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ + register typeof(a7) r7 asm("r7") = a7 + +#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) +#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) + +#define ___constraints(count) \ + : __constraint_write_ ## count \ + : __constraint_read_ ## count \ + : "memory" +#define __constraints(count) ___constraints(count) + +/* + * We have an output list that is not necessarily used, and GCC feels + * entitled to optimise the whole sequence away. "volatile" is what + * makes it stick. + */ +#define __arm_smccc_1_1(inst, ...) \ + do { \ + __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ + asm volatile(inst "\n" \ + __constraints(__count_args(__VA_ARGS__))); \ + if (___res) \ + *___res = (typeof(*___res)){r0, r1, r2, r3}; \ + } while (0) + +/* + * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. 
+ * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make SMC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction if not NULL. + */ +#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) + +/* + * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call + * + * This is a variadic macro taking one to eight source arguments, and + * an optional return structure. + * + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This macro is used to make HVC calls following SMC Calling Convention v1.1. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the HVC instruction. The return values are updated with the content + * from register 0 to 3 on return from the HVC instruction if not NULL. + */ +#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h new file mode 100644 index 000000000000..942afbd544b7 --- /dev/null +++ b/include/linux/arm_sdei.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 Arm Ltd. +#ifndef __LINUX_ARM_SDEI_H +#define __LINUX_ARM_SDEI_H + +#include <uapi/linux/arm_sdei.h> + +enum sdei_conduit_types { + CONDUIT_INVALID = 0, + CONDUIT_SMC, + CONDUIT_HVC, +}; + +#include <asm/sdei.h> + +/* Arch code should override this to set the entry point from firmware... */ +#ifndef sdei_arch_get_entry_point +#define sdei_arch_get_entry_point(conduit) (0) +#endif + +/* + * When an event occurs sdei_event_handler() will call a user-provided callback + * like this in NMI context on the CPU that received the event. + */ +typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); + +/* + * Register your callback to claim an event. The event must be described + * by firmware. + */ +int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); + +/* + * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling + * it until it succeeds. + */ +int sdei_event_unregister(u32 event_num); + +int sdei_event_enable(u32 event_num); +int sdei_event_disable(u32 event_num); + +#ifdef CONFIG_ARM_SDE_INTERFACE +/* For use by arch code when CPU hotplug notifiers are not appropriate. */ +int sdei_mask_local_cpu(void); +int sdei_unmask_local_cpu(void); +#else +static inline int sdei_mask_local_cpu(void) { return 0; } +static inline int sdei_unmask_local_cpu(void) { return 0; } +#endif /* CONFIG_ARM_SDE_INTERFACE */ + + +/* + * This struct represents an event that has been registered. The driver + * maintains a list of all events, and which ones are registered. (Private + * events have one entry in the list, but are registered on each CPU). + * A pointer to this struct is passed to firmware, and back to the event + * handler. The event handler can then use this to invoke the registered + * callback, without having to walk the list. + * + * For CPU private events, this structure is per-cpu. 
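(Editorial note, not part of the patch.) A sketch of a caller using the arm_smccc_1_1_smc() macro documented above to ask firmware, via the ARCH_FEATURES query, whether ARM_SMCCC_ARCH_WORKAROUND_1 is implemented. The helper name is made up; the calling pattern follows the macro's documentation.

    #include <linux/arm-smccc.h>

    static bool fw_has_workaround_1(void)
    {
    	struct arm_smccc_res res;

    	/* One function ID plus one argument; results come back in res.a0-a3. */
    	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
    			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

    	/* A negative value in a0 means the queried function is not implemented. */
    	return (int)res.a0 >= 0;
    }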
+ */ +struct sdei_registered_event { + /* For use by arch code: */ + struct pt_regs interrupted_regs; + + sdei_event_callback *callback; + void *callback_arg; + u32 event_num; + u8 priority; +}; + +/* The arch code entry point should then call this when an event arrives. */ +int notrace sdei_event_handler(struct pt_regs *regs, + struct sdei_registered_event *arg); + +/* arch code may use this to retrieve the extra registers. */ +int sdei_api_event_context(u32 query, u64 *result); + +#endif /* __LINUX_ARM_SDEI_H */ diff --git a/include/linux/ata.h b/include/linux/ata.h index c7a353825450..40d150ad7e07 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -448,6 +448,8 @@ enum { ATA_SET_MAX_LOCK = 0x02, ATA_SET_MAX_UNLOCK = 0x03, ATA_SET_MAX_FREEZE_LOCK = 0x04, + ATA_SET_MAX_PASSWD_DMA = 0x05, + ATA_SET_MAX_UNLOCK_DMA = 0x06, /* feature values for DEVICE CONFIGURATION OVERLAY */ ATA_DCO_RESTORE = 0xC0, diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e54e7e0033eb..3e4ce54d84ab 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode) * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the * associated wb's list_lock. */ -static inline struct bdi_writeback *inode_to_wb(struct inode *inode) +static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && diff --git a/include/linux/bio.h b/include/linux/bio.h index 23d29b39f71e..d0eb659fa733 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) bv->bv_len = iter.bi_bvec_done; } +static inline unsigned bio_pages_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return bio->bi_vcnt; +} + +static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return bio->bi_io_vec; +} + +static inline struct page *bio_first_page_all(struct bio *bio) +{ + return bio_first_bvec_all(bio)->bv_page; +} + +static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) +{ + WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); + return &bio->bi_io_vec[bio->bi_vcnt - 1]; +} + enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ @@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi) #endif extern void bio_copy_data(struct bio *dst, struct bio *src); -extern int bio_alloc_pages(struct bio *bio, gfp_t gfp); extern void bio_free_pages(struct bio *bio); extern struct bio *bio_copy_user_iov(struct request_queue *, diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 1030651f8309..cf2588d81148 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -16,6 +16,7 @@ #define _LINUX_BITFIELD_H #include <linux/build_bug.h> +#include <asm/byteorder.h> /* * Bitfield access macros @@ -103,4 +104,49 @@ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) +extern void __compiletime_warning("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / 
field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP + #endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3489253e38fc..5f11fbdc27f8 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -64,9 +64,14 @@ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) - * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) + * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst + * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst * + * Note, bitmap_zero() and bitmap_fill() operate over the region of + * unsigned longs, that is, bits behind bitmap till the unsigned long + * boundary will be zeroed or filled as well. Consider to use + * bitmap_clear() or bitmap_set() to make explicit zeroing or filling + * respectively. 
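(Editorial note, not part of the patch.) A short sketch of the typed helpers that the __MAKE_OP()/____MAKE_OP() macros above generate, here the le32 and plain u32 flavours. The 4-bit status field at bits 7:4 is a made-up layout used only for illustration.

    #include <linux/bitfield.h>
    #include <linux/bitops.h>
    #include <linux/types.h>

    #define EXAMPLE_STATUS	GENMASK(7, 4)	/* hypothetical field, bits 7:4 */

    static __le32 pack_status(u32 status)
    {
    	/* Mask, shift into place, and byte-swap for a little-endian register. */
    	return le32_encode_bits(status, EXAMPLE_STATUS);
    }

    static u32 unpack_status(__le32 reg)
    {
    	/* Swap back, mask, and shift the field down to bit 0. */
    	return le32_get_bits(reg, EXAMPLE_STATUS);
    }

With this mask, pack_status(0x5) yields cpu_to_le32(0x50); a constant value that does not fit the mask trips the __field_overflow() compile-time warning added above.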
*/ /** @@ -83,8 +88,12 @@ * test_and_change_bit(bit, addr) Change bit and return old value * find_first_zero_bit(addr, nbits) Position first zero bit in *addr * find_first_bit(addr, nbits) Position first set bit in *addr - * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit + * find_next_zero_bit(addr, nbits, bit) + * Position next zero bit in *addr >= bit * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit + * find_next_and_bit(addr1, addr2, nbits, bit) + * Same as find_next_bit, but in + * (*addr1 & *addr2) * */ @@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); -extern unsigned int bitmap_from_u32array(unsigned long *bitmap, - unsigned int nbits, - const u32 *buf, - unsigned int nwords); -extern unsigned int bitmap_to_u32array(u32 *buf, - unsigned int nwords, - const unsigned long *bitmap, - unsigned int nbits); + #ifdef __BIG_ENDIAN extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else @@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { - unsigned int nlongs = BITS_TO_LONGS(nbits); - if (!small_const_nbits(nbits)) { - unsigned int len = (nlongs - 1) * sizeof(unsigned long); - memset(dst, 0xff, len); + if (small_const_nbits(nbits)) + *dst = ~0UL; + else { + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0xff, len); } - dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); } static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, @@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, } } +/* + * Copy bitmap and clear tail bits in last word. + */ +static inline void bitmap_copy_clear_tail(unsigned long *dst, + const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. 
+ */ +#if BITS_PER_LONG == 64 +extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, + unsigned int nbits); +extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, + unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#define bitmap_to_arr32(buf, bitmap, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (buf), \ + (const unsigned long *) (bitmap), (nbits)) +#endif + static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index e9825ff57b15..69bea82ebeb1 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, struct blkg_rwstat *from) { - struct blkg_rwstat v = blkg_rwstat_read(from); + u64 sum[BLKG_RWSTAT_NR]; int i; for (i = 0; i < BLKG_RWSTAT_NR; i++) - atomic64_add(atomic64_read(&v.aux_cnt[i]) + - atomic64_read(&from->aux_cnt[i]), + sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]); + + for (i = 0; i < BLKG_RWSTAT_NR; i++) + atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]), &to->aux_cnt[i]); } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95c9a5c862e2..8efcf49796a3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -51,6 +51,7 @@ struct blk_mq_hw_ctx { unsigned int queue_num; atomic_t nr_active; + unsigned int nr_expired; struct hlist_node cpuhp_dead; struct kobject kobj; @@ -65,7 +66,7 @@ struct blk_mq_hw_ctx { #endif /* Must be the last member - see also blk_mq_hw_ctx_size(). */ - struct srcu_struct queue_rq_srcu[0]; + struct srcu_struct srcu[0]; }; struct blk_mq_tag_set { diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 9e7d8bd776d2..bf18b95ed92d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -39,6 +39,52 @@ typedef u8 __bitwise blk_status_t; #define BLK_STS_AGAIN ((__force blk_status_t)12) +/* + * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if + * device related resources are unavailable, but the driver can guarantee + * that the queue will be rerun in the future once resources become + * available again. This is typically the case for device specific + * resources that are consumed for IO. If the driver fails allocating these + * resources, we know that inflight (or pending) IO will free these + * resource upon completion. + * + * This is different from BLK_STS_RESOURCE in that it explicitly references + * a device specific resource. For resources of wider scope, allocation + * failure can happen without having pending IO. This means that we can't + * rely on request completions freeing these resources, as IO may not be in + * flight. Examples of that are kernel memory allocations, DMA mappings, or + * any other system wide resources. + */ +#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) + +/** + * blk_path_error - returns true if error may be path related + * @error: status the request was completed with + * + * Description: + * This classifies block error status into non-retryable errors and ones + * that may be successful if retried on a failover path. 
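(Editorial note, not part of the patch.) A brief sketch of the bitmap_from_arr32()/bitmap_to_arr32() helpers introduced above in place of bitmap_from_u32array()/bitmap_to_u32array(). The 68-bit width is arbitrary, chosen so the bitmap ends mid-word; the caller is assumed to supply at least three u32 words in each array.

    #include <linux/bitmap.h>
    #include <linux/types.h>

    #define EXAMPLE_NBITS	68	/* deliberately not a multiple of 32 or 64 */

    static void arr32_roundtrip(const u32 *in, u32 *out)
    {
    	DECLARE_BITMAP(bits, EXAMPLE_NBITS);

    	/* Copy 68 bits in from u32 words; tail bits beyond bit 67 are cleared. */
    	bitmap_from_arr32(bits, in, EXAMPLE_NBITS);
    	set_bit(3, bits);
    	/* And back out again, filling three u32 words. */
    	bitmap_to_arr32(out, bits, EXAMPLE_NBITS);
    }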
+ * + * Return: + * %false - retrying failover path will not help + * %true - may succeed if retried + */ +static inline bool blk_path_error(blk_status_t error) +{ + switch (error) { + case BLK_STS_NOTSUPP: + case BLK_STS_NOSPC: + case BLK_STS_TARGET: + case BLK_STS_NEXUS: + case BLK_STS_MEDIUM: + case BLK_STS_PROTECTION: + return false; + } + + /* Anything else could be a path failure, so should be retried */ + return true; +} + struct blk_issue_stat { u64 stat; }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0ce8a372d506..4f3df807cf8f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,8 @@ #include <linux/percpu-refcount.h> #include <linux/scatterlist.h> #include <linux/blkzoned.h> +#include <linux/seqlock.h> +#include <linux/u64_stats_sync.h> struct module; struct scsi_ioctl_command; @@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t; /* Look at ->special_vec for the actual data payload instead of the bio chain. */ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) +/* The per-zone write lock is held for this request */ +#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* timeout is expired */ +#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20)) +/* already slept for hybrid poll */ +#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ @@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t; * especially blk_mq_rq_ctx_init() to take care of the added fields. */ struct request { - struct list_head queuelist; - union { - struct __call_single_data csd; - u64 fifo_time; - }; - struct request_queue *q; struct blk_mq_ctx *mq_ctx; @@ -148,8 +150,6 @@ struct request { int internal_tag; - unsigned long atomic_flags; - /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ int tag; @@ -158,6 +158,8 @@ struct request { struct bio *bio; struct bio *biotail; + struct list_head queuelist; + /* * The hash is used inside the scheduler, and killed once the * request reaches the dispatch list. The ipi_list is only used @@ -205,19 +207,16 @@ struct request { struct hd_struct *part; unsigned long start_time; struct blk_issue_stat issue_stat; -#ifdef CONFIG_BLK_CGROUP - struct request_list *rl; /* rl this rq is alloced from */ - unsigned long long start_time_ns; - unsigned long long io_start_time_ns; /* when passed to hardware */ -#endif /* Number of scatter-gather DMA addr+len pairs after * physical address coalescing is performed. */ unsigned short nr_phys_segments; + #if defined(CONFIG_BLK_DEV_INTEGRITY) unsigned short nr_integrity_segments; #endif + unsigned short write_hint; unsigned short ioprio; unsigned int timeout; @@ -226,11 +225,37 @@ struct request { unsigned int extra_len; /* length of alignment and padding */ - unsigned short write_hint; + /* + * On blk-mq, the lower bits of ->gstate (generation number and + * state) carry the MQ_RQ_* state value and the upper bits the + * generation number which is monotonically incremented and used to + * distinguish the reuse instances. + * + * ->gstate_seq allows updates to ->gstate and other fields + * (currently ->deadline) during request start to be read + * atomically from the timeout path, so that it can operate on a + * coherent set of information. + */ + seqcount_t gstate_seq; + u64 gstate; + + /* + * ->aborted_gstate is used by the timeout to claim a specific + * recycle instance of this request. 
See blk_mq_timeout_work(). + */ + struct u64_stats_sync aborted_gstate_sync; + u64 aborted_gstate; + + /* access through blk_rq_set_deadline, blk_rq_deadline */ + unsigned long __deadline; - unsigned long deadline; struct list_head timeout_list; + union { + struct __call_single_data csd; + u64 fifo_time; + }; + /* * completion callback. */ @@ -239,6 +264,12 @@ struct request { /* for bidi */ struct request *next_rq; + +#ifdef CONFIG_BLK_CGROUP + struct request_list *rl; /* rl this rq is alloced from */ + unsigned long long start_time_ns; + unsigned long long io_start_time_ns; /* when passed to hardware */ +#endif }; static inline bool blk_op_is_scsi(unsigned int op) @@ -564,6 +595,22 @@ struct request_queue { struct queue_limits limits; /* + * Zoned block device information for request dispatch control. + * nr_zones is the total number of zones of the device. This is always + * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones + * bits which indicates if a zone is conventional (bit clear) or + * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones + * bits which indicates if a zone is write locked, that is, if a write + * request targeting the zone was dispatched. All three fields are + * initialized by the low level device driver (e.g. scsi/sd.c). + * Stacking drivers (device mappers) may or may not initialize + * these fields. + */ + unsigned int nr_zones; + unsigned long *seq_zones_bitmap; + unsigned long *seq_zones_wlock; + + /* * sg stuff */ unsigned int sg_timeout; @@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; } +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return q->nr_zones; +} + +static inline unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q)) + return 0; + return sector >> ilog2(q->limits.chunk_sectors); +} + +static inline bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) + return false; + return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); +} + static inline bool rq_is_sync(struct request *rq) { return op_is_sync(rq->cmd_flags); @@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) return blk_rq_cur_bytes(rq) >> 9; } +static inline unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} + /* * Some commands like WRITE SAME have a payload or data transfer size which * is different from the size of the request. 
Any driver that supports such @@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev) if (q) return blk_queue_zone_sectors(q); + return 0; +} + +static inline unsigned int bdev_nr_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + if (q) + return blk_queue_nr_zones(q); return 0; } @@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio) int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_work_on(int cpu, struct work_struct *work); -int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); -int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP @@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); + +#ifdef CONFIG_BLK_DEV_ZONED +bool blk_req_needs_zone_write_lock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) + __blk_req_zone_write_unlock(rq); +} + +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +#else +static inline bool blk_req_needs_zone_write_lock(struct request *rq) +{ + return false; +} + +static inline void blk_req_zone_write_lock(struct request *rq) +{ +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ +} +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return false; +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + return true; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + #else /* CONFIG_BLOCK */ struct block_device; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0b25cf87b6d6..66df387106de 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -17,6 +17,7 @@ #include <linux/numa.h> #include <linux/wait.h> +struct bpf_verifier_env; struct perf_event; struct bpf_prog; struct bpf_map; @@ -24,6 +25,7 @@ struct bpf_map; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ + int (*map_alloc_check)(union bpf_attr *attr); struct bpf_map *(*map_alloc)(union bpf_attr *attr); void (*map_release)(struct bpf_map *map, struct file *map_file); void (*map_free)(struct bpf_map *map); @@ -72,6 +74,33 @@ struct bpf_map { char name[BPF_OBJ_NAME_LEN]; }; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void 
*value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return container_of(map, struct bpf_offloaded_map, map); +} + +extern const struct bpf_map_ops bpf_map_offload_ops; + /* function argument constraints */ enum bpf_arg_type { ARG_DONTCARE = 0, /* unused argument in helper function */ @@ -193,14 +222,20 @@ struct bpf_verifier_ops { struct bpf_prog *prog, u32 *target_size); }; -struct bpf_dev_offload { +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); +}; + +struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; void *dev_priv; struct list_head offloads; bool dev_state; - bool verifier_running; - wait_queue_head_t verifier_done; + const struct bpf_prog_offload_ops *dev_ops; + void *jited_image; + u32 jited_len; }; struct bpf_prog_aux { @@ -209,6 +244,10 @@ struct bpf_prog_aux { u32 max_ctx_offset; u32 stack_depth; u32 id; + u32 func_cnt; + bool offload_requested; + struct bpf_prog **func; + void *jit_data; /* JIT specific data. arch dependent */ struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_prog_ops *ops; @@ -220,7 +259,7 @@ struct bpf_prog_aux { #ifdef CONFIG_SECURITY void *security; #endif - struct bpf_dev_offload *offload; + struct bpf_prog_offload *offload; union { struct work_struct work; struct rcu_head rcu; @@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, + __u32 __user *prog_ids, u32 request_cnt, + __u32 __user *prog_cnt); int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, @@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); +void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); + struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); @@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); +void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); extern int sysctl_unprivileged_bpf_disabled; @@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); @@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); int bpf_prog_offload_compile(struct bpf_prog *prog); void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); + +int 
bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { - return aux->offload; + return aux->offload_requested; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return unlikely(map->ops == &bpf_map_offload_ops); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { return false; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return false; +} + +static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_map_offload_map_free(struct bpf_map *map) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); #else diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 978c1d9c9383..19b8349a3809 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#ifdef CONFIG_STREAM_PARSER +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1632bb13ad8a..6b66cd1aa0b9 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -76,6 +76,14 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* Inside the callee two registers can be both PTR_TO_STACK like + * R1=fp-8 and R2=fp-8, but one of them points to this function stack + * while another to the caller's stack. To differentiate them 'frameno' + * is used which is an index in bpf_verifier_state->frame[] array + * pointing to bpf_func_state. + * This field must be second to last, for states_equal() reasons. + */ + u32 frameno; /* This field must be last, for states_equal() reasons. 
*/ enum bpf_reg_liveness live; }; @@ -83,7 +91,8 @@ struct bpf_reg_state { enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ STACK_SPILL, /* register spilled into stack */ - STACK_MISC /* BPF program wrote some data into this slot */ + STACK_MISC, /* BPF program wrote some data into this slot */ + STACK_ZERO, /* BPF program wrote constant zero */ }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ @@ -96,13 +105,34 @@ struct bpf_stack_state { /* state of the program: * type of all registers and stack info */ -struct bpf_verifier_state { +struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; struct bpf_verifier_state *parent; + /* index of call instruction that called into this func */ + int callsite; + /* stack frame number of this function state from pov of + * enclosing bpf_verifier_state. + * 0 = main function, 1 = first callee. + */ + u32 frameno; + /* subprog number == index within subprog_stack_depth + * zero == main subprog + */ + u32 subprogno; + + /* should be second to last. See copy_func_state() */ int allocated_stack; struct bpf_stack_state *stack; }; +#define MAX_CALL_FRAMES 8 +struct bpf_verifier_state { + /* call stack tracking */ + struct bpf_func_state *frame[MAX_CALL_FRAMES]; + struct bpf_verifier_state *parent; + u32 curframe; +}; + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -113,6 +143,7 @@ struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ + s32 call_imm; /* saved imm field of call insn */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ bool seen; /* this insn was processed by the verifier */ @@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) return log->len_used >= log->len_total - 1; } -struct bpf_verifier_env; -struct bpf_ext_analyzer_ops { - int (*insn_hook)(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); -}; +#define BPF_MAX_SUBPROGS 256 /* single container for all structs * one verifier_env per bpf_check() call @@ -152,29 +179,31 @@ struct bpf_verifier_env { bool strict_alignment; /* perform strict pointer alignment checks */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ - const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ - struct bpf_verifer_log log; + u32 subprog_starts[BPF_MAX_SUBPROGS]; + /* computes the stack depth of each bpf function */ + u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; + u32 subprog_cnt; }; +__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, + const char *fmt, ...); + static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) { - return env->cur_state->regs; + struct bpf_verifier_state *cur = env->cur_state; + + return cur->frame[cur->curframe]->regs; } -#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); -#else -static inline int bpf_prog_offload_verifier_prep(struct 
bpf_verifier_env *env) -{ - return -EOPNOTSUPP; -} -#endif +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 8ff86b4c1b8a..d3339dd48b1a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -14,6 +14,7 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5395 0x0143bcf0 #define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 8b1bf8d3d4a2..894e5d125de6 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -81,11 +81,14 @@ struct buffer_head { /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. + * To avoid reset buffer flags that are already set, because that causes + * a costly cache line transition, check the flag first. */ #define BUFFER_FNS(bit, name) \ static __always_inline void set_buffer_##name(struct buffer_head *bh) \ { \ - set_bit(BH_##bit, &(bh)->b_state); \ + if (!test_bit(BH_##bit, &(bh)->b_state)) \ + set_bit(BH_##bit, &(bh)->b_state); \ } \ static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ { \ @@ -151,7 +154,6 @@ void buffer_check_dirty_writeback(struct page *page, void mark_buffer_dirty(struct buffer_head *bh); void mark_buffer_write_io_error(struct buffer_head *bh); -void init_buffer(struct buffer_head *, bh_end_io_t *, void *); void touch_buffer(struct buffer_head *bh); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 3efed0d742a0..43d1fd50d433 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -8,7 +8,6 @@ #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) #define BUILD_BUG_ON_ZERO(e) (0) -#define BUILD_BUG_ON_NULL(e) ((void *)0) #define BUILD_BUG_ON_INVALID(e) (0) #define BUILD_BUG_ON_MSG(cond, msg) (0) #define BUILD_BUG_ON(condition) (0) @@ -28,7 +27,6 @@ * aren't permitted). 
*/ #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) -#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); })) /* * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ec8a4d7af6bd..fe7a22dd133b 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv, ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) +/* for iterating one bio from start to end */ +#define BVEC_ITER_ALL_INIT (struct bvec_iter) \ +{ \ + .bi_sector = 0, \ + .bi_size = UINT_MAX, \ + .bi_idx = 0, \ + .bi_bvec_done = 0, \ +} + #endif /* __LINUX_BVEC_ITER_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 61f1cf2d9f44..055aaf5ed9af 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -46,6 +46,7 @@ struct can_priv { unsigned int bitrate_const_cnt; const u32 *data_bitrate_const; unsigned int data_bitrate_const_cnt; + u32 bitrate_max; struct can_clock clock; enum can_state state; @@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); +#ifdef CONFIG_OF +void of_can_transceiver(struct net_device *dev); +#else +static inline void of_can_transceiver(struct net_device *dev) { } +#endif + struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); struct sk_buff *alloc_canfd_skb(struct net_device *dev, struct canfd_frame **cfd); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8b7fd8eeccee..9f242b876fde 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -561,7 +561,7 @@ struct cftype { /* * Control Group subsystem type. - * See Documentation/cgroups/cgroups.txt for details + * See Documentation/cgroup-v1/cgroups.txt for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7c925e6211f1..f711be6e8c44 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -20,6 +20,8 @@ * flags used across common struct clk. these flags should only affect the * top-level framework. custom flags for dealing with hardware specifics * belong in struct clk_foo + * + * Please update clk_flags[] in drivers/clk/clk.c when making changes here! 
*/ #define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ @@ -412,7 +414,7 @@ extern const struct clk_ops clk_divider_ro_ops; unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, - unsigned long flags); + unsigned long flags, unsigned long width); long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, unsigned long rate, unsigned long *prate, const struct clk_div_table *table, @@ -744,6 +746,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); @@ -806,6 +809,44 @@ extern struct of_device_id __clk_of_table; } \ OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) +#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = (const char *[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = NULL, \ + .num_parents = 0, \ + .ops = _ops, \ + }) + +#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, diff --git a/include/linux/clk.h b/include/linux/clk.h index 12c96d94d1fa..4c4ef9f34db3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer from executing, even indirectly, + * an operation which could alter the rate of the provider or cause glitches. + * + * If exclusivity is claimed more than once on a clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity they previously got + * from clk_rate_exclusive_get(). + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context.
+ */ +void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. @@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate); int clk_set_rate(struct clk *clk, unsigned long rate); /** + * clk_set_rate_exclusive - set the clock rate and claim exclusivity over + * clock source + * @clk: clock source + * @rate: desired clock rate in Hz + * + * This helper function allows drivers to atomically set the rate of a producer + * and claim exclusivity over the rate control of the producer. + * + * It is essentially a combination of clk_set_rate() and + * clk_rate_exclusive_get(). Caller must balance this call with a call to + * clk_rate_exclusive_put(). + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); + +/** * clk_has_parent - check if a clock is a possible parent for another * @clk: clock source * @parent: parent clock source @@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} + +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + static inline int clk_enable(struct clk *clk) { return 0; @@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate) return 0; } +static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + return 0; +} + static inline long clk_round_rate(struct clk *clk, unsigned long rate) { return 0; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 2eabc862abdb..4890ff033220 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -12,7 +12,7 @@ #ifndef __CLKDEV_H #define __CLKDEV_H -#include <asm/clkdev.h> +#include <linux/slab.h> struct clk; struct clk_hw; @@ -52,9 +52,4 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); -#ifdef CONFIG_COMMON_CLK -int __clk_get(struct clk *clk); -void __clk_put(struct clk *clk); -#endif - #endif diff --git a/include/linux/compat.h b/include/linux/compat.h index 0fc36406f32c..8a9643857c4a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -157,6 +157,104 @@ struct compat_sigaction { compat_sigset_t sa_mask __packed; }; +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +typedef struct compat_siginfo { + int si_signo; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO + int si_errno; + int si_code; +#else + int si_code; + int si_errno; +#endif + + union { + int _pad[128/sizeof(int) - 3]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + compat_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval; /* same as below */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + +#ifdef CONFIG_X86_X32_ABI + /* SIGCHLD (x32 version) */ +
struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_s64 _utime; + compat_s64 _stime; + } _sigchld_x32; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + compat_uptr_t _addr; /* faulting insn/memory ref. */ +#ifdef __ARCH_SI_TRAPNO + int _trapno; /* TRAP # which caused the signal */ +#endif + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short int _addr_lsb; /* Valid LSB of the reported address. */ + /* used when si_code=SEGV_BNDERR */ + struct { + short _dummy_bnd; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + short _dummy_pkey; + u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + struct { + compat_uptr_t _call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; + } _sifields; +} compat_siginfo_t; + /* * These functions operate on 32- or 64-bit specs depending on * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. @@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); +int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3b609edffa8f..d02a4df3f473 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -19,3 +19,11 @@ #define randomized_struct_fields_start struct { #define randomized_struct_fields_end }; + +/* all clang versions usable with the kernel support KASAN ABI version 5 */ +#define KASAN_ABI_VERSION 5 + +/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +#if __has_feature(address_sanitizer) +#define __SANITIZE_ADDRESS__ +#endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 2272ded07496..631354acfa72 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -219,7 +219,7 @@ /* Mark a function definition as prohibited from being cloned. */ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) -#ifdef RANDSTRUCT_PLUGIN +#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) #define __randomize_layout __attribute__((randomize_layout)) #define __no_randomize_layout __attribute__((no_randomize_layout)) #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 52e611ab9a6c..c2cc57a2f508 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -185,23 +185,21 @@ void __read_once_size(const volatile void *p, void *res, int size) #ifdef CONFIG_KASAN /* - * This function is not 'inline' because __no_sanitize_address confilcts + * We can't declare function 'inline' because __no_sanitize_address confilcts * with inlining. Attempt to inline it may cause a build failure. 
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. */ -static __no_sanitize_address __maybe_unused -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} +# define __no_kasan_or_inline __no_sanitize_address __maybe_unused #else -static __always_inline +# define __no_kasan_or_inline __always_inline +#endif + +static __no_kasan_or_inline void __read_once_size_nocheck(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; } -#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -240,6 +238,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ #include <asm/barrier.h> +#include <linux/kasan-checks.h> #define __READ_ONCE(x, check) \ ({ \ @@ -259,6 +258,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ diff --git a/include/linux/cper.h b/include/linux/cper.h index 723e952fde0d..d14ef4e77c8a 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -275,6 +275,50 @@ enum { #define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) #define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) +#define CPER_ARM_CACHE_ERROR 0 +#define CPER_ARM_TLB_ERROR 1 +#define CPER_ARM_BUS_ERROR 2 +#define CPER_ARM_VENDOR_ERROR 3 +#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR + +#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) +#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) +#define CPER_ARM_ERR_VALID_LEVEL BIT(2) +#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3) +#define CPER_ARM_ERR_VALID_CORRECTED BIT(4) +#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5) +#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6) +#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7) +#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8) +#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9) +#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10) +#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11) + +#define CPER_ARM_ERR_TRANSACTION_SHIFT 16 +#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0) +#define CPER_ARM_ERR_OPERATION_SHIFT 18 +#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0) +#define CPER_ARM_ERR_LEVEL_SHIFT 22 +#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0) +#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25 +#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0) +#define CPER_ARM_ERR_CORRECTED_SHIFT 26 +#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0) +#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27 +#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0) +#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28 +#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0) +#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29 +#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0) +#define CPER_ARM_ERR_TIME_OUT_SHIFT 31 +#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0) +#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32 +#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0) +#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34 +#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0) +#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43 +#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0) + /* * All tables and structs must be byte-packed to match CPER * specification, since the tables are provided by the system 
BIOS @@ -494,6 +538,8 @@ struct cper_sec_pcie { /* Reset to default packing */ #pragma pack() +extern const char * const cper_proc_error_type_strs[4]; + u64 cper_next_record_id(void); const char *cper_severity_str(unsigned int); const char *cper_mem_err_type_str(unsigned int); @@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *, struct cper_mem_err_compact *); const char *cper_mem_err_unpack(struct trace_seq *, struct cper_mem_err_compact *); +void cper_print_proc_arm(const char *pfx, + const struct cper_sec_proc_arm *proc); #endif diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index d4292ebc5c8b..de0dafb9399d 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -30,9 +30,6 @@ struct cpufreq_policy; -typedef int (*get_static_t)(cpumask_t *cpumask, int interval, - unsigned long voltage, u32 *power); - #ifdef CONFIG_CPU_THERMAL /** * cpufreq_cooling_register - function to create cpufreq cooling device. @@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval, struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy); -struct thermal_cooling_device * -cpufreq_power_cooling_register(struct cpufreq_policy *policy, - u32 capacitance, get_static_t plat_static_func); - -/** - * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. - * @np: a valid struct device_node to the cooling device device tree node. - * @policy: cpufreq policy. - */ -#ifdef CONFIG_THERMAL_OF -struct thermal_cooling_device * -of_cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy); - -struct thermal_cooling_device * -of_cpufreq_power_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, - u32 capacitance, - get_static_t plat_static_func); -#else -static inline struct thermal_cooling_device * -of_cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy) -{ - return ERR_PTR(-ENOSYS); -} - -static inline struct thermal_cooling_device * -of_cpufreq_power_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, - u32 capacitance, - get_static_t plat_static_func) -{ - return NULL; -} -#endif - /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. @@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy) { return ERR_PTR(-ENOSYS); } -static inline struct thermal_cooling_device * -cpufreq_power_cooling_register(struct cpufreq_policy *policy, - u32 capacitance, get_static_t plat_static_func) -{ - return NULL; -} -static inline struct thermal_cooling_device * -of_cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy) +static inline +void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { - return ERR_PTR(-ENOSYS); + return; } +#endif /* CONFIG_CPU_THERMAL */ +#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) +/** + * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. + * @policy: cpufreq policy. 
+ */ +struct thermal_cooling_device * +of_cpufreq_cooling_register(struct cpufreq_policy *policy); +#else static inline struct thermal_cooling_device * -of_cpufreq_power_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, - u32 capacitance, - get_static_t plat_static_func) +of_cpufreq_cooling_register(struct cpufreq_policy *policy) { return NULL; } - -static inline -void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) -{ - return; -} -#endif /* CONFIG_CPU_THERMAL */ +#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */ #endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 1a32e558eb11..5172ad0daa7c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -59,6 +59,7 @@ enum cpuhp_state { CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_INTEL_DEAD, CPUHP_LUSTRE_CFS_DEAD, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -109,6 +110,7 @@ enum cpuhp_state { CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + CPUHP_AP_ARM_SDEI_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, @@ -137,6 +139,7 @@ enum cpuhp_state { CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 8f7788d23b57..871f9e21810c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif -#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ -({ \ - int __ret; \ - \ - if (!idx) { \ - cpu_do_idle(); \ - return idx; \ - } \ - \ - __ret = cpu_pm_enter(); \ - if (!__ret) { \ - __ret = low_level_idle_enter(idx); \ - cpu_pm_exit(); \ - } \ - \ - __ret ? -1 : idx; \ +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +({ \ + int __ret = 0; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + if (!is_retention) \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + if (!is_retention) \ + cpu_pm_exit(); \ + } \ + \ + __ret ? 
-1 : idx; \ }) +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 75b565194437..d4a2a7dcd72d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -640,7 +640,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) /** * cpumask_size - size to allocate for a 'struct cpumask' in bytes */ -static inline size_t cpumask_size(void) +static inline unsigned int cpumask_size(void) { return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); } diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b8e41597ef5..934633a05d20 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void) return task_spread_slab(current); } -extern int current_cpuset_is_being_rebound(void); +extern bool current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); @@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void) return 0; } -static inline int current_cpuset_is_being_rebound(void) +static inline bool current_cpuset_is_being_rebound(void) { - return 0; + return false; } static inline void rebuild_sched_domains(void) diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index a992e6ca2f1c..f7ac2aa93269 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -2,13 +2,13 @@ #ifndef LINUX_CRASH_DUMP_H #define LINUX_CRASH_DUMP_H -#ifdef CONFIG_CRASH_DUMP #include <linux/kexec.h> #include <linux/proc_fs.h> #include <linux/elf.h> #include <asm/pgtable.h> /* for pgprot_t */ +#ifdef CONFIG_CRASH_DUMP #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) @@ -52,13 +52,13 @@ void vmcore_cleanup(void); * has passed the elf core header address on command line. * * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will - * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of - * previous kernel. + * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic + * of previous kernel. */ -static inline int is_kdump_kernel(void) +static inline bool is_kdump_kernel(void) { - return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 
1 : 0; + return elfcorehdr_addr != ELFCORE_ADDR_MAX; } /* is_vmcore_usable() checks if the kernel is booting after a panic and @@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); extern void unregister_oldmem_pfn_is_ram(void); #else /* !CONFIG_CRASH_DUMP */ -static inline int is_kdump_kernel(void) { return 0; } +static inline bool is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ extern unsigned long saved_max_pfn; diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h index cd4f420231ba..72c92c396bb8 100644 --- a/include/linux/crc-ccitt.h +++ b/include/linux/crc-ccitt.h @@ -5,12 +5,19 @@ #include <linux/types.h> extern u16 const crc_ccitt_table[256]; +extern u16 const crc_ccitt_false_table[256]; extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); +extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); static inline u16 crc_ccitt_byte(u16 crc, const u8 c) { return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; } +static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) +{ + return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; +} + #endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 78508ca4b108..7e6e84cf6383 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -107,8 +107,16 @@ #define CRYPTO_ALG_INTERNAL 0x00002000 /* + * Set if the algorithm has a ->setkey() method but can be used without + * calling it first, i.e. there is a default key. + */ +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 + +/* * Transform masks and values (for crt_flags). */ +#define CRYPTO_TFM_NEED_KEY 0x00000001 + #define CRYPTO_TFM_REQ_MASK 0x000fff00 #define CRYPTO_TFM_RES_MASK 0xfff00000 @@ -447,7 +455,7 @@ struct crypto_alg { unsigned int cra_alignmask; int cra_priority; - atomic_t cra_refcnt; + refcount_t cra_refcnt; char cra_name[CRYPTO_MAX_ALG_NAME]; char cra_driver_name[CRYPTO_MAX_ALG_NAME]; diff --git a/include/linux/dax.h b/include/linux/dax.h index 5258346c558c..0185ecdae135 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, - pfn_t *pfnp, const struct iomap_ops *ops); + pfn_t *pfnp, int *errp, const struct iomap_ops *ops); int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 65cd8ab60b7a..82a99d366aec 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -227,6 +227,7 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); @@ -235,6 +236,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); +extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct 
dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 4178d2493547..5e335b6203f4 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -71,7 +71,7 @@ extern void delayacct_init(void); extern void __delayacct_tsk_init(struct task_struct *); extern void __delayacct_tsk_exit(struct task_struct *); extern void __delayacct_blkio_start(void); -extern void __delayacct_blkio_end(void); +extern void __delayacct_blkio_end(struct task_struct *); extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); @@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void) __delayacct_blkio_start(); } -static inline void delayacct_blkio_end(void) +static inline void delayacct_blkio_end(struct task_struct *p) { if (current->delays) - __delayacct_blkio_end(); + __delayacct_blkio_end(p); delayacct_clear_flag(DELAYACCT_PF_BLKIO); } @@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) {} static inline void delayacct_blkio_start(void) {} -static inline void delayacct_blkio_end(void) +static inline void delayacct_blkio_end(struct task_struct *p) {} static inline int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a5538433c927..da83f64952e7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -28,6 +28,7 @@ enum dm_queue_mode { DM_TYPE_REQUEST_BASED = 2, DM_TYPE_MQ_REQUEST_BASED = 3, DM_TYPE_DAX_BIO_BASED = 4, + DM_TYPE_NVME_BIO_BASED = 5, }; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; @@ -221,14 +222,6 @@ struct target_type { #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) /* - * Some targets need to be sent the same WRITE bio severals times so - * that they can send copies of it to different devices. This function - * examines any supplied bio and returns the number of copies of it the - * target requires. - */ -typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio); - -/* * A target implements own bio data integrity. */ #define DM_TARGET_INTEGRITY 0x00000010 @@ -291,13 +284,6 @@ struct dm_target { */ unsigned per_io_data_size; - /* - * If defined, this function is called to find out how many - * duplicate bios should be sent to the target when writing - * data. - */ - dm_num_write_bios_fn num_write_bios; - /* target specific data */ void *private; @@ -329,35 +315,9 @@ struct dm_target_callbacks { int (*congested_fn) (struct dm_target_callbacks *, int); }; -/* - * For bio-based dm. - * One of these is allocated for each bio. - * This structure shouldn't be touched directly by target drivers. 
- * It is here so that we can inline dm_per_bio_data and - * dm_bio_from_per_bio_data - */ -struct dm_target_io { - struct dm_io *io; - struct dm_target *ti; - unsigned target_bio_nr; - unsigned *len_ptr; - struct bio clone; -}; - -static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) -{ - return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; -} - -static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) -{ - return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); -} - -static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) -{ - return container_of(bio, struct dm_target_io, clone)->target_bio_nr; -} +void *dm_per_bio_data(struct bio *bio, size_t data_size); +struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); +unsigned dm_bio_get_target_bio_nr(const struct bio *bio); int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); int dm_table_complete(struct dm_table *t); /* + * Destroy the table when finished. + */ +void dm_table_destroy(struct dm_table *t); + +/* * Target may require that it is never sent I/O larger than len. */ int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); @@ -585,6 +550,7 @@ do { \ #define DM_ENDIO_DONE 0 #define DM_ENDIO_INCOMPLETE 1 #define DM_ENDIO_REQUEUE 2 +#define DM_ENDIO_DELAY_REQUEUE 3 /* * Definitions of return values from target map function. @@ -592,7 +558,7 @@ do { \ #define DM_MAPIO_SUBMITTED 0 #define DM_MAPIO_REMAPPED 1 #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE -#define DM_MAPIO_DELAY_REQUEUE 3 +#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE #define DM_MAPIO_KILL 4 #define dm_sector_div64(x, y)( \ diff --git a/include/linux/device.h b/include/linux/device.h index 9d32000725da..b093405ed525 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * device.h - generic, centralized driver model * @@ -5,8 +6,6 @@ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2008-2009 Novell Inc. * - * This file is released under the GPLv2 - * * See Documentation/driver-model/ for more information. 
*/ @@ -21,7 +20,6 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/mutex.h> -#include <linux/pinctrl/devinfo.h> #include <linux/pm.h> #include <linux/atomic.h> #include <linux/ratelimit.h> @@ -42,6 +40,7 @@ struct fwnode_handle; struct iommu_ops; struct iommu_group; struct iommu_fwspec; +struct dev_pin_info; struct bus_attribute { struct attribute attr; @@ -288,6 +287,7 @@ struct device_driver { const struct attribute_group **groups; const struct dev_pm_ops *pm; + int (*coredump) (struct device *dev); struct driver_private *p; }; @@ -301,7 +301,6 @@ extern struct device_driver *driver_find(const char *name, extern int driver_probe_done(void); extern void wait_for_device_probe(void); - /* sysfs interface for exporting driver attributes */ struct driver_attribute { @@ -575,6 +574,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = \ + __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) #define DEVICE_ATTR_RO(_name) \ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 79f27d60ec66..085db2fee2d7 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -301,7 +301,7 @@ struct dma_buf { struct dma_fence_cb cb; wait_queue_head_t *poll; - unsigned long active; + __poll_t active; } cb_excl, cb_shared; }; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h new file mode 100644 index 000000000000..bcdb1a3e4b1f --- /dev/null +++ b/include/linux/dma-direct.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_DIRECT_H +#define _LINUX_DMA_DIRECT_H 1 + +#include <linux/dma-mapping.h> + +#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA +#include <asm/dma-direct.h> +#else +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + dma_addr_t dev_addr = (dma_addr_t)paddr; + + return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + phys_addr_t paddr = (phys_addr_t)dev_addr; + + return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev->dma_mask) + return false; + + return addr + size - 1 <= *dev->dma_mask; +} +#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ + +#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN +void dma_mark_clean(void *addr, size_t size); +#else +static inline void dma_mark_clean(void *addr, size_t size) +{ +} +#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ + +void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); +int dma_direct_supported(struct device *dev, u64 mask); + +#endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 332a5420243c..bc8940ca280d 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -21,6 +21,7 @@ #define __LINUX_DMA_FENCE_ARRAY_H #include <linux/dma-fence.h> +#include <linux/irq_work.h> /** * struct dma_fence_array_cb - callback helper for fence array @@ -47,6 +48,8 @@ 
struct dma_fence_array { unsigned num_fences; atomic_t num_pending; struct dma_fence **fences; + + struct irq_work work; }; extern const struct dma_fence_ops dma_fence_array_ops; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index efdabbb64e3c..4c008170fe65 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * The caller is required to hold the RCU read lock. */ static inline struct dma_fence * -dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) +dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) { do { struct dma_fence *fence; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 81ed9b2d84dc..34fe8463d10e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -136,7 +136,7 @@ struct dma_map_ops { int is_phys; }; -extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_direct_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, void *cpu_addr; BUG_ON(!ops); + WARN_ON_ONCE(dev && !dev->coherent_dma_mask); if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) return cpu_addr; + /* + * Let the implementation decide on the zone to allocate from, and + * decide on the way of zeroing the memory given that the memory + * returned should always be zeroed. + */ + flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO); + if (!arch_dma_alloc_attrs(&dev, &flag)) return NULL; if (!ops->alloc) @@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } +/* + * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please + * don't use this in new code.
+ */ +#ifndef arch_dma_supported +#define arch_dma_supported(dev, mask) (1) +#endif + static inline void dma_check_mask(struct device *dev, u64 mask) { if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) @@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask) if (!ops) return 0; + if (!arch_dma_supported(dev, mask)) + return 0; + if (!ops->dma_supported) return 1; return ops->dma_supported(dev, mask); @@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) #ifndef dma_max_pfn static inline unsigned long dma_max_pfn(struct device *dev) { - return *dev->dma_mask >> PAGE_SHIFT; + return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; } #endif diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index f48a85c377de..b4f22112ba75 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -23,9 +23,10 @@ struct lan9303 { struct regmap_irq_chip_data *irq_data; struct gpio_desc *reset_gpio; u32 reset_duration; /* in [ms] */ - bool phy_addr_sel_strap; + int phy_addr_base; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + struct mutex alr_mutex; /* protect ALR access */ const struct lan9303_phy_ops *ops; bool is_bridged; /* true if port 1 and 2 are bridged */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 29fdf8029cf6..f5083aa72eae 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -475,6 +475,39 @@ typedef struct { u64 get_all; } apple_properties_protocol_64_t; +typedef struct { + u32 get_capability; + u32 get_event_log; + u32 hash_log_extend_event; + u32 submit_command; + u32 get_active_pcr_banks; + u32 set_active_pcr_banks; + u32 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_32_t; + +typedef struct { + u64 get_capability; + u64 get_event_log; + u64 hash_log_extend_event; + u64 submit_command; + u64 get_active_pcr_banks; + u64 set_active_pcr_banks; + u64 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_64_t; + +typedef u32 efi_tcg2_event_log_format; + +typedef struct { + void *get_capability; + efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, + efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); + void *hash_log_extend_event; + void *submit_command; + void *get_active_pcr_banks; + void *set_active_pcr_banks; + void *get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_t; + /* * Types and defines for EFI ResetSystem */ @@ -625,6 +658,7 @@ void efi_native_runtime_setup(void); #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) +#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) @@ -637,6 +671,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 
0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) +#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) typedef struct { efi_guid_t guid; @@ -911,6 +946,7 @@ extern struct efi { unsigned long properties_table; /* properties table */ unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ + unsigned long tpm_log; /* TPM2 Event Log table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1536,6 +1572,8 @@ static inline void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } #endif +void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); + /* * Arch code can implement the following three template macros, avoiding * reptition for the void/non-void return cases of {__,}efi_call_virt(): @@ -1603,4 +1641,12 @@ struct linux_efi_random_seed { u8 bits[]; }; +struct linux_efi_tpm_eventlog { + u32 size; + u8 version; + u8 log[]; +}; + +extern int efi_tpm_eventlog_init(void); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 3d794b3dc532..6d9e230dffd2 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); extern void elv_requeue_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); -extern int elv_register_queue(struct request_queue *q); -extern void elv_unregister_queue(struct request_queue *q); extern int elv_may_queue(struct request_queue *, unsigned int); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *q, struct request *rq, diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h new file mode 100644 index 000000000000..280c61ecbf20 --- /dev/null +++ b/include/linux/error-injection.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERROR_INJECTION_H +#define _LINUX_ERROR_INJECTION_H + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + +#include <asm/error-injection.h> + +extern bool within_error_injection_list(unsigned long addr); +extern int get_injectable_error_type(unsigned long addr); + +#else /* !CONFIG_FUNCTION_ERROR_INJECTION */ + +#include <asm-generic/error-injection.h> +static inline bool within_error_injection_list(unsigned long addr) +{ + return false; +} + +static inline int get_injectable_error_type(unsigned long addr) +{ + return EI_ETYPE_NONE; +} + +#endif + +#endif /* _LINUX_ERROR_INJECTION_H */ diff --git a/include/linux/errseq.h b/include/linux/errseq.h index 6ffae9c5052d..fc2777770768 100644 --- a/include/linux/errseq.h +++ b/include/linux/errseq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * See Documentation/errseq.rst and lib/errseq.c + * See Documentation/core-api/errseq.rst and lib/errseq.c */ #ifndef _LINUX_ERRSEQ_H #define _LINUX_ERRSEQ_H diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 60b2985e8a18..7094718b653b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -26,18 +26,16 @@ #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define EFD_FLAGS_SET 
(EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) +struct eventfd_ctx; struct file; #ifdef CONFIG_EVENTFD -struct file *eventfd_file_create(unsigned int count, int flags); -struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); -ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); @@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w * Ugly ugly ugly error layer to support modules that uses eventfd but * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. */ -static inline struct file *eventfd_file_create(unsigned int count, int flags) -{ - return ERR_PTR(-ENOSYS); -} static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) { @@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) } -static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, - __u64 *cnt) -{ - return -ENOSYS; -} - static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt) { diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 43e98d30d2df..58aecb60ea51 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -117,6 +117,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 @@ -212,6 +213,7 @@ struct f2fs_extent { #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ +#define F2FS_PIN_FILE 0x40 /* file should not be gced */ struct f2fs_inode { __le16 i_mode; /* file mode */ @@ -229,7 +231,13 @@ struct f2fs_inode { __le32 i_ctime_nsec; /* change time in nano scale */ __le32 i_mtime_nsec; /* modification time in nano scale */ __le32 i_generation; /* file version (for NFS) */ - __le32 i_current_depth; /* only for directory depth */ + union { + __le32 i_current_depth; /* only for directory depth */ + __le16 i_gc_failures; /* + * # of gc failures on pinned file. + * only for regular files. + */ + }; __le32 i_xattr_nid; /* nid to save xattr */ __le32 i_flags; /* file attributes */ __le32 i_pino; /* parent inode number */ @@ -245,8 +253,10 @@ struct f2fs_inode { __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ __le32 i_projid; /* project id */ __le32 i_inode_checksum;/* inode meta checksum */ + __le64 i_crtime; /* creation time */ + __le32 i_crtime_nsec; /* creation time in nano scale */ __le32 i_extra_end[0]; /* for attribute size calculation */ - }; + } __packed; __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ }; __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), diff --git a/include/linux/fb.h b/include/linux/fb.h index bc24e48e396d..f577d3c89618 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -465,6 +465,11 @@ struct fb_info { atomic_t count; int node; int flags; + /* + * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows + * a lcd is not mounted upright and fbcon should rotate to compensate. 
+ */ + int fbcon_rotate_hint; struct mutex lock; /* Lock for open/release/ioctl funcs */ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ struct fb_var_screeninfo var; /* Current var */ @@ -564,7 +569,10 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { #define fb_memcpy_fromfb sbus_memcpy_fromio #define fb_memcpy_tofb sbus_memcpy_toio -#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__) +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \ + defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \ + defined(__avr32__) || defined(__bfin__) || defined(__arm__) || \ + defined(__aarch64__) #define fb_readb __raw_readb #define fb_readw __raw_readw diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 1c65817673db..41615f38bcff 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <linux/nospec.h> #include <linux/types.h> #include <linux/init.h> #include <linux/fs.h> @@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i { struct fdtable *fdt = rcu_dereference_raw(files->fdt); - if (fd < fdt->max_fds) + if (fd < fdt->max_fds) { + fd = array_index_nospec(fd, fdt->max_fds); return rcu_dereference_raw(fdt->fd[fd]); + } return NULL; } diff --git a/include/linux/filter.h b/include/linux/filter.h index 80b5b482cb46..276932d75975 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -18,7 +18,9 @@ #include <linux/capability.h> #include <linux/cryptohash.h> #include <linux/set_memory.h> +#include <linux/kallsyms.h> +#include <net/xdp.h> #include <net/sch_generic.h> #include <uapi/linux/filter.h> @@ -58,6 +60,9 @@ struct bpf_prog_aux; /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 +/* unused opcode to mark call to interpreter with arguments */ +#define BPF_CALL_ARGS 0xe0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -455,10 +460,14 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1; /* Do we need dst entry? */ + dst_needed:1, /* Do we need dst entry? */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ + kprobe_override:1; /* Do we override a kprobe? 
*/ enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ @@ -495,6 +504,7 @@ struct xdp_buff { void *data_end; void *data_meta; void *data_hard_start; + struct xdp_rxq_info *rxq; }; /* Compute the linear packet data range [data, data_end) which @@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb) struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); +bool bpf_opcode_in_insntable(u8 code); + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); @@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#define __bpf_call_base_args \ + ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ + __bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); +static inline bool bpf_dump_raw_ok(void) +{ + /* Reconstruction of call-sites is dependent on kallsyms, + * thus make dump the same restriction. + */ + return kallsyms_show_value() == 1; +} + struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); @@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) return fp->jited && bpf_jit_is_ebpf(); } -static inline bool bpf_jit_blinding_enabled(void) +static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { /* These are the prerequisites, should someone ever have the * idea to call blinding outside of them, we make sure to @@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void) */ if (!bpf_jit_is_ebpf()) return false; - if (!bpf_jit_enable) + if (!prog->jit_requested) return false; if (!bpf_jit_harden) return false; @@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern { struct sock *sk; u32 op; union { + u32 args[4]; u32 reply; u32 replylong[4]; }; + u32 is_fullsock; + u64 temp; /* temp and everything after is not + * initialized to 0 before calling + * the BPF program. New fields that + * should be initialized to 0 should + * be inserted before temp. + * temp is scratch storage used by + * sock_ops_convert_ctx_access + * as temporary storage of a register. + */ }; #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index aa66c87c120b..3694821a6d2d 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -1,10 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/device.h> -#include <linux/fpga/fpga-mgr.h> #ifndef _LINUX_FPGA_BRIDGE_H #define _LINUX_FPGA_BRIDGE_H +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> + struct fpga_bridge; /** @@ -12,11 +13,13 @@ struct fpga_bridge; * @enable_show: returns the FPGA bridge's status * @enable_set: set a FPGA bridge as enabled or disabled * @fpga_bridge_remove: set FPGA into a specific state during driver remove + * @groups: optional attribute groups. 
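Back in the filter.h hunks above, struct xdp_buff gains an rxq pointer (struct xdp_rxq_info, from the newly included <net/xdp.h>). A hedged sketch of how a driver RX path might fill the structure before running an XDP program; the ring structure, buffer pointers and lengths are assumed driver-local names, and the ring's xdp_rxq is assumed to have been registered at setup time.

#include <linux/filter.h>
#include <net/xdp.h>

struct my_rx_ring {
        struct xdp_rxq_info xdp_rxq;    /* registered with xdp_rxq_info_reg() at ring setup */
};

static u32 my_run_xdp(struct bpf_prog *prog, struct my_rx_ring *ring,
                      void *hard_start, unsigned int headroom,
                      unsigned int len)
{
        struct xdp_buff xdp;

        xdp.data_hard_start = hard_start;
        xdp.data      = hard_start + headroom;
        xdp.data_meta = xdp.data;               /* no metadata pushed yet */
        xdp.data_end  = xdp.data + len;
        xdp.rxq       = &ring->xdp_rxq;         /* the new member */

        return bpf_prog_run_xdp(prog, &xdp);
}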
*/ struct fpga_bridge_ops { int (*enable_show)(struct fpga_bridge *bridge); int (*enable_set)(struct fpga_bridge *bridge, bool enable); void (*fpga_bridge_remove)(struct fpga_bridge *bridge); + const struct attribute_group **groups; }; /** @@ -43,6 +46,8 @@ struct fpga_bridge { struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, struct fpga_image_info *info); +struct fpga_bridge *fpga_bridge_get(struct device *dev, + struct fpga_image_info *info); void fpga_bridge_put(struct fpga_bridge *bridge); int fpga_bridge_enable(struct fpga_bridge *bridge); int fpga_bridge_disable(struct fpga_bridge *bridge); @@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge); int fpga_bridges_enable(struct list_head *bridge_list); int fpga_bridges_disable(struct list_head *bridge_list); void fpga_bridges_put(struct list_head *bridge_list); -int fpga_bridge_get_to_list(struct device_node *np, +int fpga_bridge_get_to_list(struct device *dev, struct fpga_image_info *info, struct list_head *bridge_list); +int of_fpga_bridge_get_to_list(struct device_node *np, + struct fpga_image_info *info, + struct list_head *bridge_list); int fpga_bridge_register(struct device *dev, const char *name, const struct fpga_bridge_ops *br_ops, void *priv); diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index bfa14bc023fb..3c6de23aabdf 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -1,7 +1,8 @@ /* * FPGA Framework * - * Copyright (C) 2013-2015 Altera Corporation + * Copyright (C) 2013-2016 Altera Corporation + * Copyright (C) 2017 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -15,12 +16,12 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ -#include <linux/mutex.h> -#include <linux/platform_device.h> - #ifndef _LINUX_FPGA_MGR_H #define _LINUX_FPGA_MGR_H +#include <linux/mutex.h> +#include <linux/platform_device.h> + struct fpga_manager; struct sg_table; @@ -83,12 +84,26 @@ enum fpga_mgr_states { * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) * @config_complete_timeout_us: maximum time for FPGA to switch to operating * status in the write_complete op. + * @firmware_name: name of FPGA image firmware file + * @sgt: scatter/gather table containing FPGA image + * @buf: contiguous buffer containing FPGA image + * @count: size of buf + * @dev: device that owns this + * @overlay: Device Tree overlay */ struct fpga_image_info { u32 flags; u32 enable_timeout_us; u32 disable_timeout_us; u32 config_complete_timeout_us; + char *firmware_name; + struct sg_table *sgt; + const char *buf; + size_t count; + struct device *dev; +#ifdef CONFIG_OF + struct device_node *overlay; +#endif }; /** @@ -100,6 +115,7 @@ struct fpga_image_info { * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done * @fpga_remove: optional: Set FPGA into a specific state during driver remove + * @groups: optional attribute groups. * * fpga_manager_ops are the low level functions implemented by a specific * fpga manager driver. 
The optional ones are tested for NULL before being @@ -116,6 +132,7 @@ struct fpga_manager_ops { int (*write_complete)(struct fpga_manager *mgr, struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); + const struct attribute_group **groups; }; /** @@ -138,14 +155,14 @@ struct fpga_manager { #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) -int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, - const char *buf, size_t count); -int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info, - struct sg_table *sgt); +struct fpga_image_info *fpga_image_info_alloc(struct device *dev); + +void fpga_image_info_free(struct fpga_image_info *info); + +int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); -int fpga_mgr_firmware_load(struct fpga_manager *mgr, - struct fpga_image_info *info, - const char *image_name); +int fpga_mgr_lock(struct fpga_manager *mgr); +void fpga_mgr_unlock(struct fpga_manager *mgr); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h new file mode 100644 index 000000000000..b6520318ab9c --- /dev/null +++ b/include/linux/fpga/fpga-region.h @@ -0,0 +1,40 @@ +#ifndef _FPGA_REGION_H +#define _FPGA_REGION_H + +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/fpga/fpga-bridge.h> + +/** + * struct fpga_region - FPGA Region structure + * @dev: FPGA Region device + * @mutex: enforces exclusive reference to region + * @bridge_list: list of FPGA bridges specified in region + * @mgr: FPGA manager + * @info: FPGA image info + * @priv: private data + * @get_bridges: optional function to get bridges to a list + * @groups: optional attribute groups. 
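Taken together, the fpga-mgr.h changes above replace fpga_mgr_buf_load()/fpga_mgr_buf_load_sg()/fpga_mgr_firmware_load() with an info-based flow: allocate a struct fpga_image_info, describe the image, then lock, load and unlock the manager. A hedged sketch of the new sequence as a caller might use it; the firmware file name is made up and error unwinding is trimmed.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fpga/fpga-mgr.h>

static int my_program_region(struct device *dev, struct fpga_manager *mgr)
{
        struct fpga_image_info *info;
        int ret;

        info = fpga_image_info_alloc(dev);
        if (!info)
                return -ENOMEM;

        /* Ask the core to fetch the image via request_firmware(). */
        info->firmware_name = devm_kstrdup(dev, "soc_overlay.rbf", GFP_KERNEL);

        ret = fpga_mgr_lock(mgr);
        if (!ret) {
                ret = fpga_mgr_load(mgr, info);
                fpga_mgr_unlock(mgr);
        }

        fpga_image_info_free(info);
        return ret;
}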
+ */ +struct fpga_region { + struct device dev; + struct mutex mutex; /* for exclusive reference to region */ + struct list_head bridge_list; + struct fpga_manager *mgr; + struct fpga_image_info *info; + void *priv; + int (*get_bridges)(struct fpga_region *region); + const struct attribute_group **groups; +}; + +#define to_fpga_region(d) container_of(d, struct fpga_region, dev) + +struct fpga_region *fpga_region_class_find( + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + +int fpga_region_program_fpga(struct fpga_region *region); +int fpga_region_register(struct device *dev, struct fpga_region *region); +int fpga_region_unregister(struct fpga_region *region); + +#endif /* _FPGA_REGION_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 511fbaabf624..2a815560fda0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -639,7 +639,7 @@ struct inode { struct hlist_head i_dentry; struct rcu_head i_rcu; }; - u64 i_version; + atomic64_t i_version; atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; @@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass) down_write_nested(&inode->i_rwsem, subclass); } +static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) +{ + down_read_nested(&inode->i_rwsem, subclass); +} + void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); @@ -1359,7 +1364,7 @@ struct super_block { const struct fscrypt_operations *s_cop; - struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ + struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; struct backing_dev_info *s_bdi; @@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *); extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag); +int vfs_mkobj(struct dentry *, umode_t, + int (*f)(struct dentry *, umode_t, void *), + void *); + /* * VFS file helper functions. */ @@ -1698,7 +1707,7 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); - unsigned int (*poll) (struct file *, struct poll_table_struct *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode) mark_inode_dirty(inode); } -/** - * inode_inc_iversion - increments i_version - * @inode: inode that need to be updated - * - * Every time the inode is modified, the i_version field will be incremented. 
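With i_version now an atomic64_t and the open-coded inode_inc_iversion() removed from fs.h (see the hunk above and below), filesystems are expected to go through the accessors that now live in <linux/iversion.h>, introduced alongside this change. A hedged sketch, assuming the helper names as merged in this cycle:

#include <linux/fs.h>
#include <linux/iversion.h>

/* Sketch: bump and query i_version through the new helpers instead of
 * touching inode->i_version directly (it is an atomic64_t now). */
static void my_fs_data_changed(struct inode *inode)
{
        inode_inc_iversion(inode);              /* mark the inode as changed */
}

static u64 my_fs_change_cookie(struct inode *inode)
{
        return inode_query_iversion(inode);     /* value reported to e.g. NFS */
}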
- * The filesystem has to be mounted with i_version flag - */ - -static inline void inode_inc_iversion(struct inode *inode) -{ - spin_lock(&inode->i_lock); - inode->i_version++; - spin_unlock(&inode->i_lock); -} - enum file_time_flags { S_ATIME = 1, S_MTIME = 2, @@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *, struct inode **); extern int inode_permission(struct inode *, int); -extern int __inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int); extern int __check_sticky(struct inode *dir, struct inode *inode); @@ -2992,6 +2985,7 @@ enum { }; void dio_end_io(struct bio *bio); +void dio_warn_stale_pagecache(struct file *filp); ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, @@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) ki->ki_flags |= IOCB_DSYNC; if (flags & RWF_SYNC) ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); + if (flags & RWF_APPEND) + ki->ki_flags |= IOCB_APPEND; return 0; } diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 08b4b40c5aa8..952ab97af325 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -14,42 +14,13 @@ #ifndef _LINUX_FSCRYPT_H #define _LINUX_FSCRYPT_H -#include <linux/key.h> #include <linux/fs.h> -#include <linux/mm.h> -#include <linux/bio.h> -#include <linux/dcache.h> -#include <crypto/skcipher.h> -#include <uapi/linux/fs.h> #define FS_CRYPTO_BLOCK_SIZE 16 +struct fscrypt_ctx; struct fscrypt_info; -struct fscrypt_ctx { - union { - struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { - struct bio *bio; - struct work_struct work; - } r; - struct list_head free_list; /* Free list */ - }; - u8 flags; /* Flags */ -}; - -/** - * For encrypted symlinks, the ciphertext length is stored at the beginning - * of the string in little-endian format. - */ -struct fscrypt_symlink_data { - __le16 len; - char encrypted_path[1]; -} __packed; - struct fscrypt_str { unsigned char *name; u32 len; @@ -68,89 +39,14 @@ struct fscrypt_name { #define fname_name(p) ((p)->disk_name.name) #define fname_len(p) ((p)->disk_name.len) -/* - * fscrypt superblock flags - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - -/* - * crypto opertions for filesystems - */ -struct fscrypt_operations { - unsigned int flags; - const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); - unsigned (*max_namelen)(struct inode *); -}; - /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
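The kiocb_set_rw_flags() hunk above wires the new RWF_APPEND flag through to IOCB_APPEND, so a single write can be forced to append regardless of how the file was opened. A hedged userspace sketch using pwritev2(); it assumes a libc and kernel headers new enough to expose RWF_APPEND.

#define _GNU_SOURCE
#include <sys/uio.h>
#include <string.h>

/* Append one line with a single call, without O_APPEND on the fd. */
static ssize_t append_line(int fd, const char *line)
{
        struct iovec iov = {
                .iov_base = (void *)line,
                .iov_len  = strlen(line),
        };

        /* offset -1: use the file position; RWF_APPEND makes just this
         * call behave as if O_APPEND were set. */
        return pwritev2(fd, &iov, 1, -1, RWF_APPEND);
}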
*/ #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) -{ - if (inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode)) - return true; - return false; -} - -static inline bool fscrypt_valid_enc_modes(u32 contents_mode, - u32 filenames_mode) -{ - if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && - filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) - return true; - - if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && - filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) - return true; - - return false; -} - -static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) -{ - if (str->len == 1 && str->name[0] == '.') - return true; - - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') - return true; - - return false; -} - #if __FS_HAS_ENCRYPTION - -static inline struct page *fscrypt_control_page(struct page *page) -{ - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return (inode->i_crypt_info != NULL); -} - #include <linux/fscrypt_supp.h> - -#else /* !__FS_HAS_ENCRYPTION */ - -static inline struct page *fscrypt_control_page(struct page *page) -{ - WARN_ON_ONCE(1); - return ERR_PTR(-EINVAL); -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return 0; -} - +#else #include <linux/fscrypt_notsupp.h> -#endif /* __FS_HAS_ENCRYPTION */ +#endif /** * fscrypt_require_key - require an inode's encryption key @@ -291,4 +187,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, return 0; } +/** + * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * @dir: directory in which the symlink is being created + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @max_len: space the filesystem has available to store the symlink target + * @disk_link: (out) the on-disk symlink target being prepared + * + * This function computes the size the symlink target will require on-disk, + * stores it in @disk_link->len, and validates it against @max_len. An + * encrypted symlink may be longer than the original. + * + * Additionally, @disk_link->name is set to @target if the symlink will be + * unencrypted, but left NULL if the symlink will be encrypted. For encrypted + * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the + * on-disk target later. (The reason for the two-step process is that some + * filesystems need to know the size of the symlink target before creating the + * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) + * + * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, + * -ENOKEY if the encryption key is missing, or another -errno code if a problem + * occurred while setting up the encryption key. 
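The new fscrypt_prepare_symlink()/fscrypt_encrypt_symlink() pair documented above splits symlink creation into "size and validate before the inode exists" and "encrypt once it does". A hedged sketch of the call order a filesystem's ->symlink() might follow; my_fs_new_inode(), my_fs_add_link() and MY_FS_MAX_SYMLINK are placeholders and error unwinding is omitted.

#include <linux/string.h>
#include <linux/fscrypt.h>

#define MY_FS_MAX_SYMLINK 256                   /* illustrative limit */

static int my_fs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *target)
{
        unsigned int len = strlen(target);
        struct fscrypt_str disk_link;
        struct inode *inode;
        int err;

        /* Step 1: compute on-disk size, learn whether it will be encrypted. */
        err = fscrypt_prepare_symlink(dir, target, len, MY_FS_MAX_SYMLINK,
                                      &disk_link);
        if (err)
                return err;

        inode = my_fs_new_inode(dir, S_IFLNK | 0777);   /* placeholder helper */
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        /* Step 2: produce the on-disk target (no-op if unencrypted). */
        err = fscrypt_encrypt_symlink(inode, target, len, &disk_link);
        if (err)
                return err;

        return my_fs_add_link(dentry, inode, disk_link.name, disk_link.len);
}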
+ */ +static inline int fscrypt_prepare_symlink(struct inode *dir, + const char *target, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); + + disk_link->name = (unsigned char *)target; + disk_link->len = len + 1; + if (disk_link->len > max_len) + return -ENAMETOOLONG; + return 0; +} + +/** + * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * @inode: symlink inode + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @disk_link: (in/out) the on-disk symlink target being prepared + * + * If the symlink target needs to be encrypted, then this function encrypts it + * into @disk_link->name. fscrypt_prepare_symlink() must have been called + * previously to compute @disk_link->len. If the filesystem did not allocate a + * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one + * will be kmalloc()'ed and the filesystem will be responsible for freeing it. + * + * Return: 0 on success, -errno on failure + */ +static inline int fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(inode)) + return __fscrypt_encrypt_symlink(inode, target, len, disk_link); + return 0; +} + #endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 63e58808519a..44b50c04bae9 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -14,6 +14,16 @@ #ifndef _LINUX_FSCRYPT_NOTSUPP_H #define _LINUX_FSCRYPT_NOTSUPP_H +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return false; +} + /* crypto.c */ static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) @@ -43,6 +53,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode, return -EOPNOTSUPP; } +static inline struct page *fscrypt_control_page(struct page *page) +{ + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +} static inline void fscrypt_restore_control_page(struct page *page) { @@ -90,8 +105,7 @@ static inline int fscrypt_get_encryption_info(struct inode *inode) return -EOPNOTSUPP; } -static inline void fscrypt_put_encryption_info(struct inode *inode, - struct fscrypt_info *ci) +static inline void fscrypt_put_encryption_info(struct inode *inode) { return; } @@ -116,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) return; } -static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode, - u32 ilen) -{ - /* never happens */ - WARN_ON(1); - return 0; -} - static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, - u32 ilen, + u32 max_encrypted_len, struct fscrypt_str *crypto_str) { return -EOPNOTSUPP; @@ -144,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode, return -EOPNOTSUPP; } -static inline int fscrypt_fname_usr_to_disk(struct inode *inode, - const struct qstr *iname, - struct fscrypt_str *oname) -{ - return -EOPNOTSUPP; -} - static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { @@ -208,4 +207,28 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } +static inline int __fscrypt_prepare_symlink(struct inode *dir, + 
unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline const char *fscrypt_get_symlink(struct inode *inode, + const void *caddr, + unsigned int max_size, + struct delayed_call *done) +{ + return ERR_PTR(-EOPNOTSUPP); +} + #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index cf9e9fc02f0a..477a7a6504d2 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -11,8 +11,54 @@ #ifndef _LINUX_FSCRYPT_SUPP_H #define _LINUX_FSCRYPT_SUPP_H +#include <linux/mm.h> +#include <linux/slab.h> + +/* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* + * crypto operations for filesystems + */ +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + bool (*dummy_context)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); +}; + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ +}; + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return (inode->i_crypt_info != NULL); +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode); +} + /* crypto.c */ -extern struct kmem_cache *fscrypt_info_cachep; extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, @@ -20,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, u64, gfp_t); extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, unsigned int, u64); + +static inline struct page *fscrypt_control_page(struct page *page) +{ + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +} + extern void fscrypt_restore_control_page(struct page *); extern const struct dentry_operations fscrypt_d_ops; @@ -44,7 +96,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); /* keyinfo.c */ extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); +extern void fscrypt_put_encryption_info(struct inode *); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, @@ -55,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) kfree(fname->crypto_buf.name); } -extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, const struct fscrypt_str *, struct fscrypt_str *); -extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, - struct fscrypt_str *); #define 
FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 @@ -153,5 +202,14 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *new_dentry, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); +extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, + struct fscrypt_str *disk_link); +extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); #endif /* _LINUX_FSCRYPT_SUPP_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 2bab81951ced..9c3c9a319e48 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); extern int ftrace_nr_registered_ops(void); +struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr); + bool is_ftrace_trampoline(unsigned long addr); /* @@ -764,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER -/* for init task */ -#define INIT_FTRACE_GRAPH .ret_stack = NULL, - /* * Stack of return addresses for functions * of a thread. @@ -844,7 +843,6 @@ static inline void unpause_graph_tracing(void) #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph -#define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } @@ -923,10 +921,6 @@ extern int tracepoint_printk; extern void disable_trace_on_warning(void); extern int __disable_trace_on_warning; -#ifdef CONFIG_PREEMPT -#define INIT_TRACE_RECURSION .trace_recursion = 0, -#endif - int tracepoint_printk_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -935,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write, static inline void disable_trace_on_warning(void) { } #endif /* CONFIG_TRACING */ -#ifndef INIT_TRACE_RECURSION -#define INIT_TRACE_RECURSION -#endif - #ifdef CONFIG_FTRACE_SYSCALLS unsigned long arch_syscall_addr(int nr); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 411a84c6c400..4fa1a489efe4 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -15,6 +15,7 @@ #include <linux/types.h> struct fwnode_operations; +struct device; struct fwnode_handle { struct fwnode_handle *secondary; @@ -51,6 +52,7 @@ struct fwnode_reference_args { * struct fwnode_operations - Operations for fwnode interface * @get: Get a reference to an fwnode. * @put: Put a reference to an fwnode. + * @device_get_match_data: Return the device driver match data. * @property_present: Return true if a property is present. * @property_read_integer_array: Read an array of integer properties. 
Return * zero on success, a negative error code @@ -71,6 +73,8 @@ struct fwnode_operations { struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); void (*put)(struct fwnode_handle *fwnode); bool (*device_is_available)(const struct fwnode_handle *fwnode); + void *(*device_get_match_data)(const struct fwnode_handle *fwnode, + const struct device *dev); bool (*property_present)(const struct fwnode_handle *fwnode, const char *propname); int (*property_read_int_array)(const struct fwnode_handle *fwnode, diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index ecc2928e8046..bc738504ab4a 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq; * @p: The pointer to read, prior to dereferencing * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the READ_ONCE(), because - * caller holds genl mutex. + * the READ_ONCE(), because caller holds genl mutex. */ #define genl_dereference(p) \ rcu_dereference_protected(p, lockdep_genl_is_held()) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5144ebe046c9..5e3531027b51 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk) { device_add_disk(NULL, disk); } +extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); +static inline void add_disk_no_queue_reg(struct gendisk *disk) +{ + device_add_disk_no_queue_reg(NULL, disk); +} extern void del_gendisk(struct gendisk *gp); extern struct gendisk *get_gendisk(dev_t dev, int *partno); diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 604967609e55..83f81ac53282 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -2,6 +2,7 @@ #ifndef GENL_MAGIC_FUNC_H #define GENL_MAGIC_FUNC_H +#include <linux/build_bug.h> #include <linux/genl_magic_struct.h> /* @@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type, * use one static buffer for parsing of nested attributes */ static struct nlattr *nested_attr_tb[128]; -#ifndef BUILD_BUG_ON -/* Force a compilation error if condition is true */ -#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) -/* Force a compilation error if condition is true, but also produce a - result (of value 0 and type size_t), so the expression can be used - e.g. in a structure initializer (or where-ever else comma expressions - aren't permitted). */ -#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) -#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) -#endif - #undef GENL_struct #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ /* *_from_attrs functions are static, but potentially unused */ \ diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 8ef7fc0ce0f0..91ed23468530 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -1,4 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * <linux/gpio.h> + * + * This is the LEGACY GPIO bulk include file, including legacy APIs. It is + * used for GPIO drivers still referencing the global GPIO numberspace, + * and should not be included in new code. 
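The device_get_match_data operation added to struct fwnode_operations above backs a generic device_get_match_data() helper (declared in <linux/property.h>), letting a driver fetch its per-compatible or per-ACPI-ID data without caring whether it was matched through OF or ACPI. A hedged probe() sketch; struct my_chip_cfg is a made-up driver structure and the helper's exact const-ness is an assumption.

#include <linux/platform_device.h>
#include <linux/property.h>

struct my_chip_cfg {
        unsigned int num_channels;              /* illustrative field */
};

static int my_probe(struct platform_device *pdev)
{
        const struct my_chip_cfg *cfg;

        /* Works for both DT- and ACPI-enumerated devices. */
        cfg = device_get_match_data(&pdev->dev);
        if (!cfg)
                return -ENODEV;

        dev_info(&pdev->dev, "%u channels\n", cfg->num_channels);
        return 0;
}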
+ * + * If you're implementing a GPIO driver, only include <linux/gpio/driver.h> + * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h> + */ #ifndef __LINUX_GPIO_H #define __LINUX_GPIO_H diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 7447d85dbe2f..dbd065963296 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size, int *value_array); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); +int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); @@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); /* Child properties interface */ +struct device_node; struct fwnode_handle; +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) return -ENOSYS; } +static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} + static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) } /* Child properties interface */ +struct device_node; struct fwnode_handle; static inline +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 7258cd676df4..1ba9a331ec51 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, struct lock_class_key *lock_key, struct lock_class_key *request_key); +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, + unsigned int offset); + #ifdef CONFIG_LOCKDEP /* diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 846be7c69a52..b2f2dc638463 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -10,8 +10,8 @@ enum gpio_lookup_flags { GPIO_ACTIVE_LOW = (1 << 0), GPIO_OPEN_DRAIN = (1 << 1), GPIO_OPEN_SOURCE = (1 << 2), - GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3), - GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3), + GPIO_PERSISTENT = (0 << 3), + GPIO_TRANSITORY = (1 << 3), }; /** diff --git a/include/linux/hid.h b/include/linux/hid.h index d491027a7c22..091a81cf330f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -281,6 +281,7 @@ struct hid_item { #define HID_DG_DEVICECONFIG 0x000d000e #define HID_DG_DEVICESETTINGS 0x000d0023 +#define HID_DG_AZIMUTH 0x000d003f #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -342,6 +343,7 @@ struct hid_item { #define HID_QUIRK_SKIP_OUTPUT_REPORTS 
0x00010000 #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 +#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000 #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 #define HID_QUIRK_NO_IGNORE 0x40000000 @@ -671,6 +673,7 @@ struct hid_usage_id { * to be called) * @dyn_list: list of dynamically added device ids * @dyn_lock: lock protecting @dyn_list + * @match: check if the given device is handled by this driver * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) @@ -683,6 +686,8 @@ struct hid_usage_id { * @input_mapped: invoked on input registering after mapping an usage * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering + * @bus_add_driver: invoked when a HID driver is about to be added + * @bus_removed_driver: invoked when a HID driver has been removed * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) @@ -711,6 +716,7 @@ struct hid_driver { struct list_head dyn_list; spinlock_t dyn_lock; + bool (*match)(struct hid_device *dev, bool ignore_special_driver); int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); @@ -736,6 +742,8 @@ struct hid_driver { void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); + void (*bus_add_driver)(struct hid_driver *driver); + void (*bus_removed_driver)(struct hid_driver *driver); #ifdef CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); @@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *); extern int hid_add_device(struct hid_device *); extern void hid_destroy_device(struct hid_device *); +extern struct bus_type hid_bus_type; + extern int __must_check __hid_register_driver(struct hid_driver *, struct module *, const char *mod_name); @@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); -const struct hid_device_id *hid_match_id(struct hid_device *hdev, +bool hid_match_one_id(const struct hid_device *hdev, + const struct hid_device_id *id); +const struct hid_device_id *hid_match_id(const struct hid_device *hdev, const struct hid_device_id *id); +const struct hid_device_id *hid_match_device(struct hid_device *hdev, + struct hid_driver *hdrv); s32 hid_snto32(__u32 value, unsigned n); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); @@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); /* HID quirks API */ -u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); -int usbhid_quirks_init(char **quirks_param); -void usbhid_quirks_exit(void); +unsigned long hid_lookup_quirk(const struct hid_device *hdev); +int hid_quirks_init(char **quirks_param, __u16 bus, int count); +void hid_quirks_exit(__u16 bus); #ifdef CONFIG_HID_PID int hid_pidff_init(struct hid_device *hid); diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h index 394a8405dd74..774f7d3b8f6a 100644 --- 
a/include/linux/hil_mlc.h +++ b/include/linux/hil_mlc.h @@ -144,12 +144,12 @@ struct hil_mlc { hil_packet ipacket[16]; hil_packet imatch; int icount; - struct timeval instart; - suseconds_t intimeout; + unsigned long instart; + unsigned long intimeout; int ddi; /* Last operational device id */ int lcv; /* LCV to throttle loops */ - struct timeval lcv_tv; /* Time loop was started */ + time64_t lcv_time; /* Time loop was started */ int di_map[7]; /* Maps below items to live devs */ struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h index d392975d8887..6f1dee7e67e0 100644 --- a/include/linux/hp_sdc.h +++ b/include/linux/hp_sdc.h @@ -281,7 +281,7 @@ typedef struct { hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ int rcurr, rqty; /* Current read transact in process */ - struct timeval rtv; /* Time when current read started */ + ktime_t rtime; /* Time when current read started */ int wcurr; /* Current write transact in process */ int dev_err; /* carries status from registration */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 012c37fdb688..c7902ca7c9f4 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -28,13 +28,29 @@ struct hrtimer_cpu_base; /* * Mode arguments of xxx_hrtimer functions: + * + * HRTIMER_MODE_ABS - Time value is absolute + * HRTIMER_MODE_REL - Time value is relative to now + * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered + * when starting the timer) + * HRTIMER_MODE_SOFT - Timer callback function will be executed in + * soft irq context */ enum hrtimer_mode { - HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ - HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ - HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ - HRTIMER_MODE_ABS_PINNED = 0x02, - HRTIMER_MODE_REL_PINNED = 0x03, + HRTIMER_MODE_ABS = 0x00, + HRTIMER_MODE_REL = 0x01, + HRTIMER_MODE_PINNED = 0x02, + HRTIMER_MODE_SOFT = 0x04, + + HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, + HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, + + HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT, + + HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + }; /* @@ -87,6 +103,7 @@ enum hrtimer_restart { * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) * @is_rel: Set if the timer was armed relative + * @is_soft: Set if hrtimer will be expired in soft interrupt context. * * The hrtimer structure must be initialized by hrtimer_init() */ @@ -97,6 +114,7 @@ struct hrtimer { struct hrtimer_clock_base *base; u8 state; u8 is_rel; + u8 is_soft; }; /** @@ -112,9 +130,9 @@ struct hrtimer_sleeper { }; #ifdef CONFIG_64BIT -# define HRTIMER_CLOCK_BASE_ALIGN 64 +# define __hrtimer_clock_base_align ____cacheline_aligned #else -# define HRTIMER_CLOCK_BASE_ALIGN 32 +# define __hrtimer_clock_base_align #endif /** @@ -123,48 +141,57 @@ struct hrtimer_sleeper { * @index: clock type index for per_cpu support when moving a * timer to a base on another cpu. 
* @clockid: clock id for per_cpu support + * @seq: seqcount around __run_hrtimer + * @running: pointer to the currently running hrtimer * @active: red black tree root node for the active timers * @get_time: function to retrieve the current time of the clock * @offset: offset of this clock to the monotonic base */ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; - int index; + unsigned int index; clockid_t clockid; + seqcount_t seq; + struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); ktime_t offset; -} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); +} __hrtimer_clock_base_align; enum hrtimer_base_type { HRTIMER_BASE_MONOTONIC, HRTIMER_BASE_REALTIME, HRTIMER_BASE_BOOTTIME, HRTIMER_BASE_TAI, + HRTIMER_BASE_MONOTONIC_SOFT, + HRTIMER_BASE_REALTIME_SOFT, + HRTIMER_BASE_BOOTTIME_SOFT, + HRTIMER_BASE_TAI_SOFT, HRTIMER_MAX_CLOCK_BASES, }; -/* +/** * struct hrtimer_cpu_base - the per cpu clock bases * @lock: lock protecting the base and associated clock bases * and timers - * @seq: seqcount around __run_hrtimer - * @running: pointer to the currently running hrtimer * @cpu: cpu number * @active_bases: Bitfield to mark bases with active timers * @clock_was_set_seq: Sequence counter of clock was set events - * @migration_enabled: The migration of hrtimers to other cpus is enabled - * @nohz_active: The nohz functionality is enabled - * @expires_next: absolute time of the next event which was scheduled - * via clock_set_next_event() - * @next_timer: Pointer to the first expiring timer - * @in_hrtirq: hrtimer_interrupt() is currently executing * @hres_active: State of high resolution mode + * @in_hrtirq: hrtimer_interrupt() is currently executing * @hang_detected: The last hrtimer interrupt detected a hang + * @softirq_activated: displays, if the softirq is raised - update of softirq + * related settings is not required then. * @nr_events: Total number of hrtimer interrupt events * @nr_retries: Total number of hrtimer interrupt retries * @nr_hangs: Total number of hrtimer interrupt hangs * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @expires_next: absolute time of the next event, is required for remote + * hrtimer enqueue; it is the total first expiry time (hard + * and soft hrtimer are taken into account) + * @next_timer: Pointer to the first expiring timer + * @softirq_expires_next: Time to check, if soft queues needs also to be expired + * @softirq_next_timer: Pointer to the first expiring softirq based timer * @clock_base: array of clock bases for this cpu * * Note: next_timer is just an optimization for __remove_hrtimer(). 
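With the HRTIMER_MODE_*_SOFT variants and the is_soft flag introduced above, an hrtimer can be asked to expire from softirq context instead of hard interrupt context. A hedged sketch of arming such a timer; the callback body and the 100 ms period are purely illustrative.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;                 /* normally lives in a driver struct */

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
        /* Runs from softirq context, not from the hrtimer hard interrupt. */
        return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
        my_timer.function = my_timer_fn;
        hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
}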
@@ -173,31 +200,28 @@ enum hrtimer_base_type { */ struct hrtimer_cpu_base { raw_spinlock_t lock; - seqcount_t seq; - struct hrtimer *running; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; - bool migration_enabled; - bool nohz_active; + unsigned int hres_active : 1, + in_hrtirq : 1, + hang_detected : 1, + softirq_activated : 1; #ifdef CONFIG_HIGH_RES_TIMERS - unsigned int in_hrtirq : 1, - hres_active : 1, - hang_detected : 1; - ktime_t expires_next; - struct hrtimer *next_timer; unsigned int nr_events; - unsigned int nr_retries; - unsigned int nr_hangs; + unsigned short nr_retries; + unsigned short nr_hangs; unsigned int max_hang_time; #endif + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; } ____cacheline_aligned; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { - BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); - timer->node.expires = time; timer->_softexpires = time; } @@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->get_time(); } +static inline int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? + timer->base->cpu_base->hres_active : 0; +} + #ifdef CONFIG_HIGH_RES_TIMERS struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return timer->base->cpu_base->hres_active; -} - /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an @@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution; #define hrtimer_resolution (unsigned int)LOW_RES_NSEC -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return 0; -} - static inline void clock_was_set_delayed(void) { } #endif @@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 range_ns, const enum hrtimer_mode mode); /** - * hrtimer_start - (re)start an hrtimer on the current CPU + * hrtimer_start - (re)start an hrtimer * @timer: the timer to be added * @tim: expiry time - * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or - * relative (HRTIMER_MODE_REL) + * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or + * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); + * softirq based mode is considered for debug purpose only! 
*/ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) @@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) */ static inline int hrtimer_callback_running(struct hrtimer *timer) { - return timer->base->cpu_base->running == timer; + return timer->base->running == timer; } /* Forward a hrtimer so it expires after now: */ @@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, extern int schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, - int clock); + clockid_t clock_id); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); /* Soft interrupt function to run the hrtimer queues: */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 82a25880714a..36fa6a2a82e3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); +void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; @@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); -extern int hugepages_treat_as_movable; extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages; @@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); + #else /* !CONFIG_HUGETLB_PAGE */ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) @@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list) return false; } #define putback_active_hugepage(p) do {} while (0) +#define move_hugetlb_state(old, new, reason) do {} while (0) static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) @@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) return sb->s_fs_info; } +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) +{ + return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); +} + extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, @@ -343,10 +356,10 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_node(struct hstate *h, int nid); -struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask); +struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, + unsigned long address); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); @@ -524,7 +537,7 @@ struct hstate {}; 
#define alloc_huge_page(v, a, r) NULL #define alloc_huge_page_node(h, nid) NULL #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL -#define alloc_huge_page_noerr(v, a, r) NULL +#define alloc_huge_page_vma(h, vma, address) NULL #define alloc_bootmem_huge_page(h) NULL #define hstate_file(f) NULL #define hstate_sizelog(s) NULL diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6c9336626592..93bd6fcd6e62 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -127,28 +127,6 @@ struct hv_ring_buffer_info { u32 priv_read_index; }; -/* - * - * hv_get_ringbuffer_availbytes() - * - * Get number of bytes available to read and to write to - * for the specified ring buffer - */ -static inline void -hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, - u32 *read, u32 *write) -{ - u32 read_loc, write_loc, dsize; - - /* Capture the read/write indices before they changed */ - read_loc = rbi->ring_buffer->read_index; - write_loc = rbi->ring_buffer->write_index; - dsize = rbi->ring_datasize; - - *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : - read_loc - write_loc; - *read = dsize - *write; -} static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 0f774406fad0..419a38e7c315 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -55,7 +55,7 @@ typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); struct module; struct property_entry; -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to @@ -63,10 +63,68 @@ struct property_entry; * transmit an arbitrary number of messages without interruption. * @count must be be less than 64k since msg.len is u16. */ -extern int i2c_master_send(const struct i2c_client *client, const char *buf, - int count); -extern int i2c_master_recv(const struct i2c_client *client, char *buf, - int count); +extern int i2c_transfer_buffer_flags(const struct i2c_client *client, + char *buf, int count, u16 flags); + +/** + * i2c_master_recv - issue a single I2C message in master receive mode + * @client: Handle to slave device + * @buf: Where to store data read from slave + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. + */ +static inline int i2c_master_recv(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD); +}; + +/** + * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Where to store data read from slave, must be safe to use with DMA + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. 
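The i2c_master_send_dmasafe()/i2c_master_recv_dmasafe() wrappers being added above let a caller promise that the buffer may be handed to a DMA engine (I2C_M_DMA_SAFE). A hedged sketch: read a 16-byte block into a kmalloc'ed (hence DMA-safe) bounce buffer; the register address and block length are invented.

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>

static int my_read_block(struct i2c_client *client, u8 *out)
{
        const int len = 16;
        u8 *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);         /* kmalloc memory is DMA-safe */
        if (!buf)
                return -ENOMEM;

        buf[0] = 0x10;                          /* illustrative register address */
        ret = i2c_master_send_dmasafe(client, buf, 1);
        if (ret == 1)
                ret = i2c_master_recv_dmasafe(client, buf, len);

        if (ret == len) {
                memcpy(out, buf, len);
                ret = 0;
        } else if (ret >= 0) {
                ret = -EIO;
        }

        kfree(buf);
        return ret;
}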
+ */ +static inline int i2c_master_recv_dmasafe(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, + I2C_M_RD | I2C_M_DMA_SAFE); +}; + +/** + * i2c_master_send - issue a single I2C message in master transmit mode + * @client: Handle to slave device + * @buf: Data that will be written to the slave + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. + */ +static inline int i2c_master_send(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, 0); +}; + +/** + * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Data that will be written to the slave, must be safe to use with DMA + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. + */ +static inline int i2c_master_send_dmasafe(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, + I2C_M_DMA_SAFE); +}; /* Transfer num messages. */ @@ -354,7 +412,7 @@ struct i2c_board_info { .type = dev_type, .addr = (dev_addr) -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) /* Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. @@ -485,40 +543,43 @@ struct i2c_timings { /** * struct i2c_bus_recovery_info - I2C bus recovery information * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or - * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). + * i2c_generic_scl_recovery(). * @get_scl: This gets current value of SCL line. Mandatory for generic SCL - * recovery. Used internally for generic GPIO recovery. - * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used - * internally for generic GPIO recovery. + * recovery. Populated internally for generic GPIO recovery. + * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery. + * Populated internally for generic GPIO recovery. * @get_sda: This gets current value of SDA line. Optional for generic SCL - * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO - * recovery. + * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic + * GPIO recovery. + * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery. + * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO + * recovery. * @prepare_recovery: This will be called before starting recovery. Platform may * configure padmux here for SDA/SCL line or something else they want. * @unprepare_recovery: This will be called after completing recovery. Platform * may configure padmux here for SDA/SCL line or something else they want. - * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. - * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. + * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. + * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. 
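The reworked struct i2c_bus_recovery_info above switches from raw GPIO numbers to GPIO descriptors and gains a set_sda hook, with the core populating the get/set callbacks for generic GPIO recovery. A hedged sketch of how an adapter driver might wire up generic SCL recovery; the "scl"/"sda" lookup names are assumptions about the board firmware and error handling is skipped.

#include <linux/i2c.h>
#include <linux/gpio/consumer.h>

static struct i2c_bus_recovery_info my_recovery = {
        .recover_bus = i2c_generic_scl_recovery,
};

static void my_setup_recovery(struct device *dev, struct i2c_adapter *adap)
{
        /* Descriptors instead of scl_gpio/sda_gpio numbers. */
        my_recovery.scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH);
        my_recovery.sda_gpiod = devm_gpiod_get_optional(dev, "sda", GPIOD_IN);
        adap->bus_recovery_info = &my_recovery;
}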
*/ struct i2c_bus_recovery_info { - int (*recover_bus)(struct i2c_adapter *); + int (*recover_bus)(struct i2c_adapter *adap); - int (*get_scl)(struct i2c_adapter *); - void (*set_scl)(struct i2c_adapter *, int val); - int (*get_sda)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *adap); + void (*set_scl)(struct i2c_adapter *adap, int val); + int (*get_sda)(struct i2c_adapter *adap); + void (*set_sda)(struct i2c_adapter *adap, int val); - void (*prepare_recovery)(struct i2c_adapter *); - void (*unprepare_recovery)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *adap); + void (*unprepare_recovery)(struct i2c_adapter *adap); /* gpio recovery */ - int scl_gpio; - int sda_gpio; + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; }; int i2c_recover_bus(struct i2c_adapter *adap); /* Generic recovery routines */ -int i2c_generic_gpio_recovery(struct i2c_adapter *adap); int i2c_generic_scl_recovery(struct i2c_adapter *adap); /** @@ -706,7 +767,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter) /* administration... */ -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) extern int i2c_add_adapter(struct i2c_adapter *); extern void i2c_del_adapter(struct i2c_adapter *); extern int i2c_add_numbered_adapter(struct i2c_adapter *); @@ -769,6 +830,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); } +u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); +void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); + int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** * module_i2c_driver() - Helper macro for registering a modular I2C driver diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h deleted file mode 100644 index 4dbe651f71f5..000000000000 --- a/include/linux/i7300_idle.h +++ /dev/null @@ -1,84 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef I7300_IDLE_H -#define I7300_IDLE_H - -#include <linux/pci.h> - -/* - * I/O AT controls (PCI bus 0 device 8 function 0) - * DIMM controls (PCI bus 0 device 16 function 1) - */ -#define IOAT_BUS 0 -#define IOAT_DEVFN PCI_DEVFN(8, 0) -#define MEMCTL_BUS 0 -#define MEMCTL_DEVFN PCI_DEVFN(16, 1) - -struct fbd_ioat { - unsigned int vendor; - unsigned int ioat_dev; - unsigned int enabled; -}; - -/* - * The i5000 chip-set has the same hooks as the i7300 - * but it is not enabled by default and must be manually - * manually enabled with "forceload=1" because it is - * only lightly validated. 
- */ - -static const struct fbd_ioat fbd_ioat_list[] = { - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, - {0, 0} -}; - -/* table of devices that work with this driver */ -static const struct pci_device_id pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, - { } /* Terminating entry */ -}; - -/* Check for known platforms with I/O-AT */ -static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, - struct pci_dev **ioat_dev, - int enable_all) -{ - int i; - struct pci_dev *memdev, *dmadev; - - memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); - if (!memdev) - return -ENODEV; - - for (i = 0; pci_tbl[i].vendor != 0; i++) { - if (memdev->vendor == pci_tbl[i].vendor && - memdev->device == pci_tbl[i].device) { - break; - } - } - if (pci_tbl[i].vendor == 0) - return -ENODEV; - - dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); - if (!dmadev) - return -ENODEV; - - for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { - if (dmadev->vendor == fbd_ioat_list[i].vendor && - dmadev->device == fbd_ioat_list[i].ioat_dev) { - if (!(fbd_ioat_list[i].enabled || enable_all)) - continue; - if (fbd_dev) - *fbd_dev = memdev; - if (ioat_dev) - *ioat_dev = dmadev; - - return 0; - } - } - return -ENODEV; -} - -#endif diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4c54611e03e9..622658dfbf0a 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -13,6 +13,8 @@ struct ifla_vf_stats { __u64 tx_bytes; __u64 broadcast; __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; }; struct ifla_vf_info { diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index bedf54b6f943..4cb7aeeafce0 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -30,10 +30,10 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; int nest_level; + unsigned int macaddr_count; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif - unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3ecef57c31e3..8e66866c11be 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -4,7 +4,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); -struct skb_array *tap_get_skb_array(struct file *file); +struct ptr_ring *tap_get_ptr_ring(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tap_get_skb_array(struct file *f) +static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } @@ -70,7 +70,7 @@ struct tap_queue { u16 queue_index; bool enabled; struct list_head next; - struct skb_array skb_array; + struct ptr_ring ring; }; rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bf9bdf42d577..c5b0a75a7812 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -17,9 +17,14 @@ #include <uapi/linux/if_tun.h> +#define TUN_XDP_FLAG 0x1UL + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct skb_array *tun_get_skb_array(struct file *file); +struct ptr_ring *tun_get_tx_ring(struct file *file); +bool tun_is_xdp_buff(void *ptr); +void 
*tun_xdp_to_ptr(void *ptr); +void *tun_ptr_to_xdp(void *ptr); #else #include <linux/err.h> #include <linux/errno.h> @@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tun_get_skb_array(struct file *f) +static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline bool tun_is_xdp_buff(void *ptr) +{ + return false; +} +static inline void *tun_xdp_to_ptr(void *ptr) +{ + return NULL; +} +static inline void *tun_ptr_to_xdp(void *ptr) +{ + return NULL; +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h new file mode 100644 index 000000000000..e7dc7a542a4e --- /dev/null +++ b/include/linux/iio/adc/stm32-dfsdm-adc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file describes the STM32 DFSDM IIO driver API for the audio part + * + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>. + */ + +#ifndef STM32_DFSDM_ADC_H +#define STM32_DFSDM_ADC_H + +int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev, + int (*cb)(const void *data, size_t size, + void *private), + void *private); +int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev); + +#endif diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 5e347a9805fd..9887f4f8e2a8 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev, void *private), void *private); /** + * iio_channel_cb_set_buffer_watermark() - set the buffer watermark. + * @cb_buffer: The callback buffer whose watermark is to be + * configured. + * @watermark: buffer watermark in bytes. + * + * This function allows configuring the buffer watermark. + */ +int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer, + size_t watermark); + +/** * iio_channel_release_all_cb() - release and unregister the callback. * @cb_buffer: The callback buffer that was allocated. */ @@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val); int iio_read_channel_processed(struct iio_channel *chan, int *val); /** + * iio_write_channel_attribute() - Write values to the device attribute. + * @chan: The channel being queried. + * @val: Value being written. + * @val2: Value being written. val2 use depends on attribute type. + * @attribute: info attribute to be written. + * + * Returns an error code or 0. + */ +int iio_write_channel_attribute(struct iio_channel *chan, int val, + int val2, enum iio_chan_info_enum attribute); + +/** + * iio_read_channel_attribute() - Read values from the device attribute. + * @chan: The channel being queried. + * @val: Value being read. + * @val2: Value being read. Val2 use depends on attribute type. + * @attribute: info attribute to be read. + * + * Returns an error code if failed. Else returns a description of what is in val + * and val2, such as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val + * + val2/1e6 + */ +int iio_read_channel_attribute(struct iio_channel *chan, int *val, + int *val2, enum iio_chan_info_enum attribute); + +/** + * iio_write_channel_raw() - write to a given channel + * @chan: The channel being queried. + * @val: Value being written.
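As a minimal consumer-side sketch (not part of the patch above), the newly declared iio_read_channel_attribute() could be used by a client driver to fetch a channel's scale; the channel mapping name "vin", the example_read_scale() helper and its calling context are assumptions made purely for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

/* Hypothetical consumer: read the scale of an ADC channel mapped as "vin". */
static int example_read_scale(struct device *dev)
{
	struct iio_channel *chan;
	int val, val2, ret;

	chan = iio_channel_get(dev, "vin");	/* assumed channel mapping */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* New helper declared above: read an arbitrary info attribute. */
	ret = iio_read_channel_attribute(chan, &val, &val2, IIO_CHAN_INFO_SCALE);
	if (ret >= 0) {
		/*
		 * A non-negative return describes the encoding, e.g.
		 * IIO_VAL_INT_PLUS_MICRO means the scale is val + val2/1e6.
		 */
		dev_info(dev, "scale encoding %d: val=%d val2=%d\n",
			 ret, val, val2);
		ret = 0;
	}

	iio_channel_release(chan);
	return ret;
}

The write direction is symmetric: iio_write_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE) hands the same integer pair back to the device driver.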
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h new file mode 100644 index 000000000000..44d48bb1d39f --- /dev/null +++ b/include/linux/iio/hw-consumer.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Industrial I/O in kernel hardware consumer interface + * + * Copyright 2017 Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + */ + +#ifndef LINUX_IIO_HW_CONSUMER_H +#define LINUX_IIO_HW_CONSUMER_H + +struct iio_hw_consumer; + +struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); +void iio_hw_consumer_free(struct iio_hw_consumer *hwc); +struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); +void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); +int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); +void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 20b61347ea58..11579fd4126e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -20,34 +20,6 @@ * Currently assumes nano seconds. */ -enum iio_chan_info_enum { - IIO_CHAN_INFO_RAW = 0, - IIO_CHAN_INFO_PROCESSED, - IIO_CHAN_INFO_SCALE, - IIO_CHAN_INFO_OFFSET, - IIO_CHAN_INFO_CALIBSCALE, - IIO_CHAN_INFO_CALIBBIAS, - IIO_CHAN_INFO_PEAK, - IIO_CHAN_INFO_PEAK_SCALE, - IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, - IIO_CHAN_INFO_AVERAGE_RAW, - IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, - IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, - IIO_CHAN_INFO_SAMP_FREQ, - IIO_CHAN_INFO_FREQUENCY, - IIO_CHAN_INFO_PHASE, - IIO_CHAN_INFO_HARDWAREGAIN, - IIO_CHAN_INFO_HYSTERESIS, - IIO_CHAN_INFO_INT_TIME, - IIO_CHAN_INFO_ENABLE, - IIO_CHAN_INFO_CALIBHEIGHT, - IIO_CHAN_INFO_CALIBWEIGHT, - IIO_CHAN_INFO_DEBOUNCE_COUNT, - IIO_CHAN_INFO_DEBOUNCE_TIME, - IIO_CHAN_INFO_CALIBEMISSIVITY, - IIO_CHAN_INFO_OVERSAMPLING_RATIO, -}; - enum iio_shared_by { IIO_SEPARATE, IIO_SHARED_BY_TYPE, @@ -606,8 +578,8 @@ const struct iio_chan_spec * iio_device_register() - register a device with the IIO subsystem * @indio_dev: Device structure filled by the device driver **/ -#define iio_device_register(iio_dev) \ - __iio_device_register((iio_dev), THIS_MODULE) +#define iio_device_register(indio_dev) \ + __iio_device_register((indio_dev), THIS_MODULE) int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); void iio_device_unregister(struct iio_dev *indio_dev); /** diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h index 1601a2a63a72..5e1cfa75f652 100644 --- a/include/linux/iio/machine.h +++ b/include/linux/iio/machine.h @@ -28,4 +28,11 @@ struct iio_map { void *consumer_data; }; +#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ +{ \ + .adc_channel_label = _provider_channel, \ + .consumer_dev_name = _consumer_dev_name, \ + .consumer_channel = _consumer_channel, \ +} + #endif diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 7d5e44518379..b19b7204ef84 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -43,12 +43,13 @@ struct iio_trigger_ops { /** * struct iio_trigger - industrial I/O trigger device * @ops: [DRIVER] operations structure + * @owner: [INTERN] owner of this driver module * @id: [INTERN] unique id number * @name: [DRIVER] unique name * @dev: [DRIVER] associated device (if relevant) * @list: [INTERN] used in maintenance of global trigger list * @alloc_list: [DRIVER] used for driver specific trigger list - * @use_count: use count for the 
trigger + * @use_count: [INTERN] use count for the trigger. * @subirq_chip: [INTERN] associate 'virtual' irq chip. * @subirq_base: [INTERN] base number for irqs provided by trigger. * @subirqs: [INTERN] information about the 'child' irqs. diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 2aa7b6384d64..6eb3d683ef62 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -34,4 +34,32 @@ enum iio_available_type { IIO_AVAIL_RANGE, }; +enum iio_chan_info_enum { + IIO_CHAN_INFO_RAW = 0, + IIO_CHAN_INFO_PROCESSED, + IIO_CHAN_INFO_SCALE, + IIO_CHAN_INFO_OFFSET, + IIO_CHAN_INFO_CALIBSCALE, + IIO_CHAN_INFO_CALIBBIAS, + IIO_CHAN_INFO_PEAK, + IIO_CHAN_INFO_PEAK_SCALE, + IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW, + IIO_CHAN_INFO_AVERAGE_RAW, + IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY, + IIO_CHAN_INFO_SAMP_FREQ, + IIO_CHAN_INFO_FREQUENCY, + IIO_CHAN_INFO_PHASE, + IIO_CHAN_INFO_HARDWAREGAIN, + IIO_CHAN_INFO_HYSTERESIS, + IIO_CHAN_INFO_INT_TIME, + IIO_CHAN_INFO_ENABLE, + IIO_CHAN_INFO_CALIBHEIGHT, + IIO_CHAN_INFO_CALIBWEIGHT, + IIO_CHAN_INFO_DEBOUNCE_COUNT, + IIO_CHAN_INFO_DEBOUNCE_TIME, + IIO_CHAN_INFO_CALIBEMISSIVITY, + IIO_CHAN_INFO_OVERSAMPLING_RATIO, +}; + #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 1ac5bf95bfdd..e16fe7d44a71 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); -int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); +int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/init.h b/include/linux/init.h index ea1b31101d9e..506a98151131 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -5,6 +5,13 @@ #include <linux/compiler.h> #include <linux/types.h> +/* Built-in __init functions needn't be compiled with retpoline */ +#if defined(RETPOLINE) && !defined(MODULE) +#define __noretpoline __attribute__((indirect_branch("keep"))) +#else +#define __noretpoline +#endif + /* These macros are used to mark some functions or * initialized data (doesn't apply to uninitialized data) * as `initialization' functions. 
The kernel can take this @@ -40,7 +47,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold __latent_entropy +#define __init __section(.init.text) __cold __latent_entropy __noretpoline #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6a532629c983..a454b8aeb938 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -21,22 +21,11 @@ #include <asm/thread_info.h> -#ifdef CONFIG_SMP -# define INIT_PUSHABLE_TASKS(tsk) \ - .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), -#else -# define INIT_PUSHABLE_TASKS(tsk) -#endif - extern struct files_struct init_files; extern struct fs_struct init_fs; - -#ifdef CONFIG_CPUSETS -#define INIT_CPUSET_SEQ(tsk) \ - .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), -#else -#define INIT_CPUSET_SEQ(tsk) -#endif +extern struct nsproxy init_nsproxy; +extern struct group_info init_groups; +extern struct cred init_cred; #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #define INIT_PREV_CPUTIME(x) .prev_cputime = { \ @@ -47,67 +36,16 @@ extern struct fs_struct init_fs; #endif #ifdef CONFIG_POSIX_TIMERS -#define INIT_POSIX_TIMERS(s) \ - .posix_timers = LIST_HEAD_INIT(s.posix_timers), #define INIT_CPU_TIMERS(s) \ .cpu_timers = { \ LIST_HEAD_INIT(s.cpu_timers[0]), \ LIST_HEAD_INIT(s.cpu_timers[1]), \ - LIST_HEAD_INIT(s.cpu_timers[2]), \ - }, -#define INIT_CPUTIMER(s) \ - .cputimer = { \ - .cputime_atomic = INIT_CPUTIME_ATOMIC, \ - .running = false, \ - .checking_timer = false, \ + LIST_HEAD_INIT(s.cpu_timers[2]), \ }, #else -#define INIT_POSIX_TIMERS(s) #define INIT_CPU_TIMERS(s) -#define INIT_CPUTIMER(s) #endif -#define INIT_SIGNALS(sig) { \ - .nr_threads = 1, \ - .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ - .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ - .shared_pending = { \ - .list = LIST_HEAD_INIT(sig.shared_pending.list), \ - .signal = {{0}}}, \ - INIT_POSIX_TIMERS(sig) \ - INIT_CPU_TIMERS(sig) \ - .rlim = INIT_RLIMITS, \ - INIT_CPUTIMER(sig) \ - INIT_PREV_CPUTIME(sig) \ - .cred_guard_mutex = \ - __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ -} - -extern struct nsproxy init_nsproxy; - -#define INIT_SIGHAND(sighand) { \ - .count = ATOMIC_INIT(1), \ - .action = { { { .sa_handler = SIG_DFL, } }, }, \ - .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ - .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ -} - -extern struct group_info init_groups; - -#define INIT_STRUCT_PID { \ - .count = ATOMIC_INIT(1), \ - .tasks = { \ - { .first = NULL }, \ - { .first = NULL }, \ - { .first = NULL }, \ - }, \ - .level = 0, \ - .numbers = { { \ - .nr = 0, \ - .ns = &init_pid_ns, \ - }, } \ -} - #define INIT_PID_LINK(type) \ { \ .node = { \ @@ -117,192 +55,16 @@ extern struct group_info init_groups; .pid = &init_struct_pid, \ } -#ifdef CONFIG_AUDITSYSCALL -#define INIT_IDS \ - .loginuid = INVALID_UID, \ - .sessionid = (unsigned int)-1, -#else -#define INIT_IDS -#endif - -#ifdef CONFIG_PREEMPT_RCU -#define INIT_TASK_RCU_PREEMPT(tsk) \ - .rcu_read_lock_nesting = 0, \ - .rcu_read_unlock_special.s = 0, \ - .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ - .rcu_blocked_node = NULL, -#else -#define INIT_TASK_RCU_PREEMPT(tsk) -#endif -#ifdef CONFIG_TASKS_RCU -#define INIT_TASK_RCU_TASKS(tsk) \ - .rcu_tasks_holdout = false, \ - .rcu_tasks_holdout_list = \ - 
LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \ - .rcu_tasks_idle_cpu = -1, -#else -#define INIT_TASK_RCU_TASKS(tsk) -#endif - -extern struct cred init_cred; - -#ifdef CONFIG_CGROUP_SCHED -# define INIT_CGROUP_SCHED(tsk) \ - .sched_task_group = &root_task_group, -#else -# define INIT_CGROUP_SCHED(tsk) -#endif - -#ifdef CONFIG_PERF_EVENTS -# define INIT_PERF_EVENTS(tsk) \ - .perf_event_mutex = \ - __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ - .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), -#else -# define INIT_PERF_EVENTS(tsk) -#endif - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -# define INIT_VTIME(tsk) \ - .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \ - .vtime.starttime = 0, \ - .vtime.state = VTIME_SYS, -#else -# define INIT_VTIME(tsk) -#endif - #define INIT_TASK_COMM "swapper" -#ifdef CONFIG_RT_MUTEXES -# define INIT_RT_MUTEXES(tsk) \ - .pi_waiters = RB_ROOT_CACHED, \ - .pi_top_task = NULL, -#else -# define INIT_RT_MUTEXES(tsk) -#endif - -#ifdef CONFIG_NUMA_BALANCING -# define INIT_NUMA_BALANCING(tsk) \ - .numa_preferred_nid = -1, \ - .numa_group = NULL, \ - .numa_faults = NULL, -#else -# define INIT_NUMA_BALANCING(tsk) -#endif - -#ifdef CONFIG_KASAN -# define INIT_KASAN(tsk) \ - .kasan_depth = 1, -#else -# define INIT_KASAN(tsk) -#endif - -#ifdef CONFIG_LIVEPATCH -# define INIT_LIVEPATCH(tsk) \ - .patch_state = KLP_UNDEFINED, -#else -# define INIT_LIVEPATCH(tsk) -#endif - -#ifdef CONFIG_THREAD_INFO_IN_TASK -# define INIT_TASK_TI(tsk) \ - .thread_info = INIT_THREAD_INFO(tsk), \ - .stack_refcount = ATOMIC_INIT(1), -#else -# define INIT_TASK_TI(tsk) -#endif - -#ifdef CONFIG_SECURITY -#define INIT_TASK_SECURITY .security = NULL, -#else -#define INIT_TASK_SECURITY -#endif - -/* - * INIT_TASK is used to set up the first task table, touch at - * your own risk!. 
Base=0, limit=0x1fffff (=2MB) - */ -#define INIT_TASK(tsk) \ -{ \ - INIT_TASK_TI(tsk) \ - .state = 0, \ - .stack = init_stack, \ - .usage = ATOMIC_INIT(2), \ - .flags = PF_KTHREAD, \ - .prio = MAX_PRIO-20, \ - .static_prio = MAX_PRIO-20, \ - .normal_prio = MAX_PRIO-20, \ - .policy = SCHED_NORMAL, \ - .cpus_allowed = CPU_MASK_ALL, \ - .nr_cpus_allowed= NR_CPUS, \ - .mm = NULL, \ - .active_mm = &init_mm, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ - .se = { \ - .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ - }, \ - .rt = { \ - .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ - .time_slice = RR_TIMESLICE, \ - }, \ - .tasks = LIST_HEAD_INIT(tsk.tasks), \ - INIT_PUSHABLE_TASKS(tsk) \ - INIT_CGROUP_SCHED(tsk) \ - .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ - .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ - .real_parent = &tsk, \ - .parent = &tsk, \ - .children = LIST_HEAD_INIT(tsk.children), \ - .sibling = LIST_HEAD_INIT(tsk.sibling), \ - .group_leader = &tsk, \ - RCU_POINTER_INITIALIZER(real_cred, &init_cred), \ - RCU_POINTER_INITIALIZER(cred, &init_cred), \ - .comm = INIT_TASK_COMM, \ - .thread = INIT_THREAD, \ - .fs = &init_fs, \ - .files = &init_files, \ - .signal = &init_signals, \ - .sighand = &init_sighand, \ - .nsproxy = &init_nsproxy, \ - .pending = { \ - .list = LIST_HEAD_INIT(tsk.pending.list), \ - .signal = {{0}}}, \ - .blocked = {{0}}, \ - .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ - .journal_info = NULL, \ - INIT_CPU_TIMERS(tsk) \ - .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ - .timer_slack_ns = 50000, /* 50 usec default slack */ \ - .pids = { \ - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ - [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ - }, \ - .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ - .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \ - INIT_IDS \ - INIT_PERF_EVENTS(tsk) \ - INIT_TRACE_IRQFLAGS \ - INIT_LOCKDEP \ - INIT_FTRACE_GRAPH \ - INIT_TRACE_RECURSION \ - INIT_TASK_RCU_PREEMPT(tsk) \ - INIT_TASK_RCU_TASKS(tsk) \ - INIT_CPUSET_SEQ(tsk) \ - INIT_RT_MUTEXES(tsk) \ - INIT_PREV_CPUTIME(tsk) \ - INIT_VTIME(tsk) \ - INIT_NUMA_BALANCING(tsk) \ - INIT_KASAN(tsk) \ - INIT_LIVEPATCH(tsk) \ - INIT_TASK_SECURITY \ -} - - /* Attach to the init_task data structure for proper alignment */ +#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK #define __init_task_data __attribute__((__section__(".data..init_task"))) +#else +#define __init_task_data /**/ +#endif +/* Attach to the thread_info data structure for proper alignment */ +#define __init_thread_info __attribute__((__section__(".data..init_thread_info"))) #endif diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h deleted file mode 100644 index f9d932476a80..000000000000 --- a/include/linux/input/gpio_tilt.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _INPUT_GPIO_TILT_H -#define _INPUT_GPIO_TILT_H - -/** - * struct gpio_tilt_axis - Axis used by the tilt switch - * @axis: Constant describing the axis, e.g. 
ABS_X - * @min: minimum value for abs_param - * @max: maximum value for abs_param - * @fuzz: fuzz value for abs_param - * @flat: flat value for abs_param - */ -struct gpio_tilt_axis { - int axis; - int min; - int max; - int fuzz; - int flat; -}; - -/** - * struct gpio_tilt_state - state description - * @gpios: bitfield of gpio target-states for the value - * @axes: array containing the axes settings for the gpio state - * The array indizes must correspond to the axes defined - * in platform_data - * - * This structure describes a supported axis settings - * and the necessary gpio-state which represent it. - * - * The n-th bit in the bitfield describes the state of the n-th GPIO - * from the gpios-array defined in gpio_regulator_config below. - */ -struct gpio_tilt_state { - int gpios; - int *axes; -}; - -/** - * struct gpio_tilt_platform_data - * @gpios: Array containing the gpios determining the tilt state - * @nr_gpios: Number of gpios - * @axes: Array of gpio_tilt_axis descriptions - * @nr_axes: Number of axes - * @states: Array of gpio_tilt_state entries describing - * the gpio state for specific tilts - * @nr_states: Number of states available - * @debounce_interval: debounce ticks interval in msecs - * @poll_interval: polling interval in msecs - for polling driver only - * @enable: callback to enable the tilt switch - * @disable: callback to disable the tilt switch - * - * This structure contains gpio-tilt-switch configuration - * information that must be passed by platform code to the - * gpio-tilt input driver. - */ -struct gpio_tilt_platform_data { - struct gpio *gpios; - int nr_gpios; - - struct gpio_tilt_axis *axes; - int nr_axes; - - struct gpio_tilt_state *states; - int nr_states; - - int debounce_interval; - - unsigned int poll_interval; - int (*enable)(struct device *dev); - void (*disable)(struct device *dev); -}; - -#endif diff --git a/include/linux/integrity.h b/include/linux/integrity.h index c2d6082a1a4c..858d3f4a2241 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -14,6 +14,7 @@ enum integrity_status { INTEGRITY_PASS = 0, + INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 93b4183cf53d..da0ebaec25f0 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev, extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); -extern int iomem_is_exclusive(u64 addr); +extern bool iomem_is_exclusive(u64 addr); extern int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 0e81035b678f..b11fcdfd0770 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -13,10 +13,13 @@ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ -#define IRQ_WORK_PENDING 1UL -#define IRQ_WORK_BUSY 2UL -#define IRQ_WORK_FLAGS 3UL -#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ +#define IRQ_WORK_PENDING BIT(0) +#define IRQ_WORK_BUSY BIT(1) + +/* Doesn't want IPI, wait for tick: */ +#define IRQ_WORK_LAZY BIT(2) + +#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) struct irq_work { unsigned long flags; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 1b3996ff3f16..9700f00bbc04 100644 
--- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -40,7 +40,6 @@ do { \ do { \ current->softirq_context--; \ } while (0) -# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) @@ -54,7 +53,6 @@ do { \ # define trace_hardirq_exit() do { } while (0) # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ diff --git a/include/linux/iversion.h b/include/linux/iversion.h new file mode 100644 index 000000000000..be50ef7cedab --- /dev/null +++ b/include/linux/iversion.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_IVERSION_H +#define _LINUX_IVERSION_H + +#include <linux/fs.h> + +/* + * The inode->i_version field: + * --------------------------- + * The change attribute (i_version) is mandated by NFSv4 and is mostly for + * knfsd, but is also used for other purposes (e.g. IMA). The i_version must + * appear different to observers if there was a change to the inode's data or + * metadata since it was last queried. + * + * Observers see the i_version as a 64-bit number that never decreases. If it + * remains the same since it was last checked, then nothing has changed in the + * inode. If it's different then something has changed. Observers cannot infer + * anything about the nature or magnitude of the changes from the value, only + * that the inode has changed in some fashion. + * + * Not all filesystems properly implement the i_version counter. Subsystems that + * want to use i_version field on an inode should first check whether the + * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro). + * + * Those that set SB_I_VERSION will automatically have their i_version counter + * incremented on writes to normal files. If the SB_I_VERSION is not set, then + * the VFS will not touch it on writes, and the filesystem can use it how it + * wishes. Note that the filesystem is always responsible for updating the + * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.). + * We consider these sorts of filesystems to have a kernel-managed i_version. + * + * It may be impractical for filesystems to keep i_version updates atomic with + * respect to the changes that cause them. They should, however, guarantee + * that i_version updates are never visible before the changes that caused + * them. Also, i_version updates should never be delayed longer than it takes + * the original change to reach disk. + * + * This implementation uses the low bit in the i_version field as a flag to + * track when the value has been queried. If it has not been queried since it + * was last incremented, we can skip the increment in most cases. + * + * In the event that we're updating the ctime, we will usually go ahead and + * bump the i_version anyway. Since that has to go to stable storage in some + * fashion, we might as well increment it as well. + * + * With this implementation, the value should always appear to observers to + * increase over time if the file has changed. It's recommended to use + * inode_eq_iversion() helper to compare values. + * + * Note that some filesystems (e.g. NFS and AFS) just use the field to store + * a server-provided value (for the most part). For that reason, those + * filesystems do not set SB_I_VERSION. These filesystems are considered to + * have a self-managed i_version. 
+ * + * Persistently storing the i_version + * ---------------------------------- + * Queries of the i_version field are not gated on them hitting the backing + * store. It's always possible that the host could crash after allowing + * a query of the value but before it has made it to disk. + * + * To mitigate this problem, filesystems should always use + * inode_set_iversion_queried when loading an existing inode from disk. This + * ensures that the next attempted inode increment will result in the value + * changing. + * + * Storing the value to disk therefore does not count as a query, so those + * filesystems should use inode_peek_iversion to grab the value to be stored. + * There is no need to flag the value as having been queried in that case. + */ + +/* + * We borrow the lowest bit in the i_version to use as a flag to tell whether + * it has been queried since we last incremented it. If it has, then we must + * increment it on the next change. After that, we can clear the flag and + * avoid incrementing it again until it has again been queried. + */ +#define I_VERSION_QUERIED_SHIFT (1) +#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1)) +#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT) + +/** + * inode_set_iversion_raw - set i_version to the specified raw value + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val. This function is for use by + * filesystems that self-manage the i_version. + * + * For example, the NFS client stores its NFSv4 change attribute in this way, + * and the AFS client stores the data_version from the server here. + */ +static inline void +inode_set_iversion_raw(struct inode *inode, u64 val) +{ + atomic64_set(&inode->i_version, val); +} + +/** + * inode_peek_iversion_raw - grab a "raw" iversion value + * @inode: inode from which i_version should be read + * + * Grab a "raw" inode->i_version value and return it. The i_version is not + * flagged or converted in any way. This is mostly used to access a self-managed + * i_version. + * + * With those filesystems, we want to treat the i_version as an entirely + * opaque value. + */ +static inline u64 +inode_peek_iversion_raw(const struct inode *inode) +{ + return atomic64_read(&inode->i_version); +} + +/** + * inode_set_iversion - set i_version to a particular value + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val. This function is for filesystems with + * a kernel-managed i_version, for initializing a newly-created inode from + * scratch. + * + * In this case, we do not set the QUERIED flag since we know that this value + * has never been queried. + */ +static inline void +inode_set_iversion(struct inode *inode, u64 val) +{ + inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT); +} + +/** + * inode_set_iversion_queried - set i_version to a particular value as queried + * @inode: inode to set + * @val: new i_version value to set + * + * Set @inode's i_version field to @val, and flag it for increment on the next + * change. + * + * Filesystems that persistently store the i_version on disk should use this + * when loading an existing inode from disk. + * + * When loading in an i_version value from a backing store, we can't be certain + * that it wasn't previously viewed before being stored. Thus, we must assume + * that it was, to ensure that we don't end up handing out the same value for + * different versions of the same inode.
+ */ +static inline void +inode_set_iversion_queried(struct inode *inode, u64 val) +{ + inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) | + I_VERSION_QUERIED); +} + +/** + * inode_maybe_inc_iversion - increments i_version + * @inode: inode with the i_version that should be updated + * @force: increment the counter even if it's not necessary? + * + * Every time the inode is modified, the i_version field must be seen to have + * changed by any observer. + * + * If "force" is set or the QUERIED flag is set, then ensure that we increment + * the value, and clear the queried flag. + * + * In the common case where neither is set, then we can return "false" without + * updating i_version. + * + * If this function returns false, and no other metadata has changed, then we + * can avoid logging the metadata. + */ +static inline bool +inode_maybe_inc_iversion(struct inode *inode, bool force) +{ + u64 cur, old, new; + + /* + * The i_version field is not strictly ordered with any other inode + * information, but the legacy inode_inc_iversion code used a spinlock + * to serialize increments. + * + * Here, we add full memory barriers to ensure that any de-facto + * ordering with other info is preserved. + * + * This barrier pairs with the barrier in inode_query_iversion() + */ + smp_mb(); + cur = inode_peek_iversion_raw(inode); + for (;;) { + /* If flag is clear then we needn't do anything */ + if (!force && !(cur & I_VERSION_QUERIED)) + return false; + + /* Since lowest bit is flag, add 2 to avoid it */ + new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT; + + old = atomic64_cmpxchg(&inode->i_version, cur, new); + if (likely(old == cur)) + break; + cur = old; + } + return true; +} + + +/** + * inode_inc_iversion - forcibly increment i_version + * @inode: inode that needs to be updated + * + * Forcibly increment the i_version field. This always results in a change to + * the observable value. + */ +static inline void +inode_inc_iversion(struct inode *inode) +{ + inode_maybe_inc_iversion(inode, true); +} + +/** + * inode_iversion_need_inc - is the i_version in need of being incremented? + * @inode: inode to check + * + * Returns whether the inode->i_version counter needs incrementing on the next + * change. Just fetch the value and check the QUERIED flag. + */ +static inline bool +inode_iversion_need_inc(struct inode *inode) +{ + return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED; +} + +/** + * inode_inc_iversion_raw - forcibly increment raw i_version + * @inode: inode that needs to be updated + * + * Forcibly increment the raw i_version field. This always results in a change + * to the raw value. + * + * NFS will use the i_version field to store the value from the server. It + * mostly treats it as opaque, but in the case where it holds a write + * delegation, it must increment the value itself. This function does that. + */ +static inline void +inode_inc_iversion_raw(struct inode *inode) +{ + atomic64_inc(&inode->i_version); +} + +/** + * inode_peek_iversion - read i_version without flagging it to be incremented + * @inode: inode from which i_version should be read + * + * Read the inode i_version counter for an inode without registering it as a + * query. + * + * This is typically used by local filesystems that need to store an i_version + * on disk. In that situation, it's not necessary to flag it as having been + * viewed, as the result won't be used to gauge changes from that point.
+ */ +static inline u64 +inode_peek_iversion(const struct inode *inode) +{ + return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT; +} + +/** + * inode_query_iversion - read i_version for later use + * @inode: inode from which i_version should be read + * + * Read the inode i_version counter. This should be used by callers that wish + * to store the returned i_version for later comparison. This will guarantee + * that a later query of the i_version will result in a different value if + * anything has changed. + * + * In this implementation, we fetch the current value, set the QUERIED flag and + * then try to swap it into place with a cmpxchg, if it wasn't already set. If + * that fails, we try again with the newly fetched value from the cmpxchg. + */ +static inline u64 +inode_query_iversion(struct inode *inode) +{ + u64 cur, old, new; + + cur = inode_peek_iversion_raw(inode); + for (;;) { + /* If flag is already set, then no need to swap */ + if (cur & I_VERSION_QUERIED) { + /* + * This barrier (and the implicit barrier in the + * cmpxchg below) pairs with the barrier in + * inode_maybe_inc_iversion(). + */ + smp_mb(); + break; + } + + new = cur | I_VERSION_QUERIED; + old = atomic64_cmpxchg(&inode->i_version, cur, new); + if (likely(old == cur)) + break; + cur = old; + } + return cur >> I_VERSION_QUERIED_SHIFT; +} + +/** + * inode_eq_iversion_raw - check whether the raw i_version counter has changed + * @inode: inode to check + * @old: old value to check against its i_version + * + * Compare the current raw i_version counter with a previous one. Returns true + * if they are the same or false if they are different. + */ +static inline bool +inode_eq_iversion_raw(const struct inode *inode, u64 old) +{ + return inode_peek_iversion_raw(inode) == old; +} + +/** + * inode_eq_iversion - check whether the i_version counter has changed + * @inode: inode to check + * @old: old value to check against its i_version + * + * Compare an i_version counter with a previous one. Returns true if they are + * the same, and false if they are different. + * + * Note that we don't need to set the QUERIED flag in this case, as the value + * in the inode is not being recorded for later use. + */ +static inline bool +inode_eq_iversion(const struct inode *inode, u64 old) +{ + return inode_peek_iversion(inode) == old; +} +#endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 296d1e0ea87b..b708e5169d1d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** - * struct jbd_inode is the structure linking inodes in ordered mode - * present in a transaction so that we can sync them during commit. + * struct jbd_inode - The jbd_inode type is the structure linking inodes in + * ordered mode present in a transaction so that we can sync them during commit. */ struct jbd2_inode { - /* Which transaction does this inode belong to? Either the running - * transaction or the committing one. [j_list_lock] */ + /** + * @i_transaction: + * + * Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] + */ transaction_t *i_transaction; - /* Pointer to the running transaction modifying inode's data in case - * there is already a committing transaction touching it. 
[j_list_lock] */ + /** + * @i_next_transaction: + * + * Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. [j_list_lock] + */ transaction_t *i_next_transaction; - /* List of inodes in the i_transaction [j_list_lock] */ + /** + * @i_list: List of inodes in the i_transaction [j_list_lock] + */ struct list_head i_list; - /* VFS inode this inode belongs to [constant during the lifetime - * of the structure] */ + /** + * @i_vfs_inode: + * + * VFS inode this inode belongs to [constant for lifetime of structure] + */ struct inode *i_vfs_inode; - /* Flags of inode [j_list_lock] */ + /** + * @i_flags: Flags of inode [j_list_lock] + */ unsigned long i_flags; }; @@ -447,12 +462,20 @@ struct jbd2_revoke_table_s; * struct handle_s - The handle_s type is the concrete type associated with * handle_t. * @h_transaction: Which compound transaction is this update a part of? + * @h_journal: Which journal handle belongs to - used iff h_reserved set. + * @h_rsv_handle: Handle reserved for finishing the logical operation. * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. - * @h_ref: Reference count on this handle - * @h_err: Field for caller's use to track errors through large fs operations - * @h_sync: flag for sync-on-close - * @h_jdata: flag to force data journaling - * @h_aborted: flag indicating fatal error on handle + * @h_ref: Reference count on this handle. + * @h_err: Field for caller's use to track errors through large fs operations. + * @h_sync: Flag for sync-on-close. + * @h_jdata: Flag to force data journaling. + * @h_reserved: Flag for handle for reserved credits. + * @h_aborted: Flag indicating fatal error on handle. + * @h_type: For handle statistics. + * @h_line_no: For handle statistics. + * @h_start_jiffies: Handle Start time. + * @h_requested_credits: Holds @h_buffer_credits after handle is started. + * @saved_alloc_context: Saved context while transaction is open. **/ /* Docbook can't yet cope with the bit fields, but will leave the documentation @@ -462,32 +485,23 @@ struct jbd2_revoke_table_s; struct jbd2_journal_handle { union { - /* Which compound transaction is this update a part of? */ transaction_t *h_transaction; /* Which journal handle belongs to - used iff h_reserved set */ journal_t *h_journal; }; - /* Handle reserved for finishing the logical operation */ handle_t *h_rsv_handle; - - /* Number of remaining buffers we are allowed to dirty: */ int h_buffer_credits; - - /* Reference count on this handle */ int h_ref; - - /* Field for caller's use to track errors through large fs */ - /* operations */ int h_err; /* Flags [no locking] */ - unsigned int h_sync: 1; /* sync-on-close */ - unsigned int h_jdata: 1; /* force data journaling */ - unsigned int h_reserved: 1; /* handle with reserved credits */ - unsigned int h_aborted: 1; /* fatal error on handle */ - unsigned int h_type: 8; /* for handle statistics */ - unsigned int h_line_no: 16; /* for handle statistics */ + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; unsigned long h_start_jiffies; unsigned int h_requested_credits; @@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end) /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. 
- * @j_flags: General journaling state flags - * @j_errno: Is there an outstanding uncleared error on the journal (from a - * prior abort)? - * @j_sb_buffer: First part of superblock buffer - * @j_superblock: Second part of superblock buffer - * @j_format_version: Version of the superblock format - * @j_state_lock: Protect the various scalars in the journal - * @j_barrier_count: Number of processes waiting to create a barrier lock - * @j_barrier: The barrier lock itself - * @j_running_transaction: The current running transaction.. - * @j_committing_transaction: the transaction we are pushing to disk - * @j_checkpoint_transactions: a linked circular list of all transactions - * waiting for checkpointing - * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction - * to start committing, or for a barrier lock to be released - * @j_wait_done_commit: Wait queue for waiting for commit to complete - * @j_wait_commit: Wait queue to trigger commit - * @j_wait_updates: Wait queue to wait for updates to complete - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints - * @j_head: Journal head - identifies the first unused block in the journal - * @j_tail: Journal tail - identifies the oldest still-used block in the - * journal. - * @j_free: Journal free - how many free blocks are there in the journal? - * @j_first: The block number of the first usable block - * @j_last: The block number one beyond the last usable block - * @j_dev: Device where we store the journal - * @j_blocksize: blocksize for the location where we store the journal. - * @j_blk_offset: starting block offset for into the device where we store the - * journal - * @j_fs_dev: Device which holds the client fs. For internal journal this will - * be equal to j_dev - * @j_reserved_credits: Number of buffers reserved from the running transaction - * @j_maxlen: Total maximum capacity of the journal region on disk. - * @j_list_lock: Protects the buffer lists and internal buffer state. - * @j_inode: Optional inode where we store the journal. If present, all journal - * block numbers are mapped into this inode via bmap(). - * @j_tail_sequence: Sequence number of the oldest transaction in the log - * @j_transaction_sequence: Sequence number of the next transaction to grant - * @j_commit_sequence: Sequence number of the most recently committed - * transaction - * @j_commit_request: Sequence number of the most recent transaction wanting - * commit - * @j_uuid: Uuid of client object. - * @j_task: Pointer to the current commit thread for this journal - * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a - * single compound commit transaction - * @j_commit_interval: What is the maximum transaction lifetime before we begin - * a commit? - * @j_commit_timer: The timer used to wakeup the commit thread - * @j_revoke_lock: Protect the revoke table - * @j_revoke: The revoke table - maintains the list of revoked blocks in the - * current transaction. 
- * @j_revoke_table: alternate revoke tables for j_revoke - * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction - * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the - * number that will fit in j_blocksize - * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history_lock: Protect the transactions statistics history - * @j_proc_entry: procfs entry for the jbd statistics directory - * @j_stats: Overall statistics - * @j_private: An opaque pointer to fs-private information. - * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ - struct journal_s { - /* General journaling state flags [j_state_lock] */ + /** + * @j_flags: General journaling state flags [j_state_lock] + */ unsigned long j_flags; - /* + /** + * @j_errno: + * * Is there an outstanding uncleared error on the journal (from a prior * abort)? [j_state_lock] */ int j_errno; - /* The superblock buffer */ + /** + * @j_sb_buffer: The first part of the superblock buffer. + */ struct buffer_head *j_sb_buffer; + + /** + * @j_superblock: The second part of the superblock buffer. + */ journal_superblock_t *j_superblock; - /* Version of the superblock format */ + /** + * @j_format_version: Version of the superblock format. + */ int j_format_version; - /* - * Protect the various scalars in the journal + /** + * @j_state_lock: Protect the various scalars in the journal. */ rwlock_t j_state_lock; - /* + /** + * @j_barrier_count: + * * Number of processes waiting to create a barrier lock [j_state_lock] */ int j_barrier_count; - /* The barrier lock itself */ + /** + * @j_barrier: The barrier lock itself. + */ struct mutex j_barrier; - /* + /** + * @j_running_transaction: + * * Transactions: The current running transaction... * [j_state_lock] [caller holding open handle] */ transaction_t *j_running_transaction; - /* + /** + * @j_committing_transaction: + * * the transaction we are pushing to disk * [j_state_lock] [caller holding open handle] */ transaction_t *j_committing_transaction; - /* + /** + * @j_checkpoint_transactions: + * * ... and a linked circular list of all transactions waiting for * checkpointing. [j_list_lock] */ transaction_t *j_checkpoint_transactions; - /* + /** + * @j_wait_transaction_locked: + * * Wait queue for waiting for a locked transaction to start committing, - * or for a barrier lock to be released + * or for a barrier lock to be released. */ wait_queue_head_t j_wait_transaction_locked; - /* Wait queue for waiting for commit to complete */ + /** + * @j_wait_done_commit: Wait queue for waiting for commit to complete. + */ wait_queue_head_t j_wait_done_commit; - /* Wait queue to trigger commit */ + /** + * @j_wait_commit: Wait queue to trigger commit. + */ wait_queue_head_t j_wait_commit; - /* Wait queue to wait for updates to complete */ + /** + * @j_wait_updates: Wait queue to wait for updates to complete. + */ wait_queue_head_t j_wait_updates; - /* Wait queue to wait for reserved buffer credits to drop */ + /** + * @j_wait_reserved: + * + * Wait queue to wait for reserved buffer credits to drop. + */ wait_queue_head_t j_wait_reserved; - /* Semaphore for locking against concurrent checkpoints */ + /** + * @j_checkpoint_mutex: + * + * Semaphore for locking against concurrent checkpoints. + */ struct mutex j_checkpoint_mutex; - /* + /** + * @j_chkpt_bhs: + * * List of buffer heads used by the checkpoint routine. This * was moved from jbd2_log_do_checkpoint() to reduce stack * usage. 
Access to this array is controlled by the - * j_checkpoint_mutex. [j_checkpoint_mutex] + * @j_checkpoint_mutex. [j_checkpoint_mutex] */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; - - /* + + /** + * @j_head: + * * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ unsigned long j_head; - /* + /** + * @j_tail: + * * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ unsigned long j_tail; - /* + /** + * @j_free: + * * Journal free: how many free blocks are there in the journal? * [j_state_lock] */ unsigned long j_free; - /* - * Journal start and end: the block numbers of the first usable block - * and one beyond the last usable block in the journal. [j_state_lock] + /** + * @j_first: + * + * The block number of the first usable block in the journal + * [j_state_lock]. */ unsigned long j_first; + + /** + * @j_last: + * + * The block number one beyond the last usable block in the journal + * [j_state_lock]. + */ unsigned long j_last; - /* - * Device, blocksize and starting block offset for the location where we - * store the journal. + /** + * @j_dev: Device where we store the journal. */ struct block_device *j_dev; + + /** + * @j_blocksize: Block size for the location where we store the journal. + */ int j_blocksize; + + /** + * @j_blk_offset: + * + * Starting block offset into the device where we store the journal. + */ unsigned long long j_blk_offset; + + /** + * @j_devname: Journal device name. + */ char j_devname[BDEVNAME_SIZE+24]; - /* + /** + * @j_fs_dev: + * * Device which holds the client fs. For internal journal this will be * equal to j_dev. */ struct block_device *j_fs_dev; - /* Total maximum capacity of the journal region on disk. */ + /** + * @j_maxlen: Total maximum capacity of the journal region on disk. + */ unsigned int j_maxlen; - /* Number of buffers reserved from the running transaction */ + /** + * @j_reserved_credits: + * + * Number of buffers reserved from the running transaction. + */ atomic_t j_reserved_credits; - /* - * Protects the buffer lists and internal buffer state. + /** + * @j_list_lock: Protects the buffer lists and internal buffer state. */ spinlock_t j_list_lock; - /* Optional inode where we store the journal. If present, all */ - /* journal block numbers are mapped into this inode via */ - /* bmap(). */ + /** + * @j_inode: + * + * Optional inode where we store the journal. If present, all + * journal block numbers are mapped into this inode via bmap(). + */ struct inode *j_inode; - /* + /** + * @j_tail_sequence: + * * Sequence number of the oldest transaction in the log [j_state_lock] */ tid_t j_tail_sequence; - /* + /** + * @j_transaction_sequence: + * * Sequence number of the next transaction to grant [j_state_lock] */ tid_t j_transaction_sequence; - /* + /** + * @j_commit_sequence: + * * Sequence number of the most recently committed transaction * [j_state_lock]. */ tid_t j_commit_sequence; - /* + /** + * @j_commit_request: + * * Sequence number of the most recent transaction wanting commit * [j_state_lock] */ tid_t j_commit_request; - /* + /** + * @j_uuid: + * * Journal uuid: identifies the object (filesystem, LVM volume etc) * backed by this journal. This will eventually be replaced by an array * of uuids, allowing us to index multiple devices within a single @@ -958,85 +997,151 @@ struct journal_s */ __u8 j_uuid[16]; - /* Pointer to the current commit thread for this journal */ + /** + * @j_task: Pointer to the current commit thread for this journal. 
+ */ struct task_struct *j_task; - /* + /** + * @j_max_transaction_buffers: + * * Maximum number of metadata buffers to allow in a single compound - * commit transaction + * commit transaction. */ int j_max_transaction_buffers; - /* + /** + * @j_commit_interval: + * * What is the maximum transaction lifetime before we begin a commit? */ unsigned long j_commit_interval; - /* The timer used to wakeup the commit thread: */ + /** + * @j_commit_timer: The timer used to wakeup the commit thread. + */ struct timer_list j_commit_timer; - /* - * The revoke table: maintains the list of revoked blocks in the - * current transaction. [j_revoke_lock] + /** + * @j_revoke_lock: Protect the revoke table. */ spinlock_t j_revoke_lock; |