-rw-r--r--Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration5
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/crypto-API.tmpl2092
-rw-r--r--Documentation/crypto/api-aead.rst23
-rw-r--r--Documentation/crypto/api-akcipher.rst20
-rw-r--r--Documentation/crypto/api-digest.rst35
-rw-r--r--Documentation/crypto/api-kpp.rst38
-rw-r--r--Documentation/crypto/api-rng.rst14
-rw-r--r--Documentation/crypto/api-samples.rst224
-rw-r--r--Documentation/crypto/api-skcipher.rst62
-rw-r--r--Documentation/crypto/api.rst25
-rw-r--r--Documentation/crypto/architecture.rst441
-rw-r--r--Documentation/crypto/devel-algos.rst247
-rw-r--r--Documentation/crypto/index.rst24
-rw-r--r--Documentation/crypto/intro.rst74
-rw-r--r--Documentation/crypto/userspace-if.rst387
-rw-r--r--Documentation/devicetree/bindings/input/da9062-onkey.txt45
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt3
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/altera-a10sr.txt46
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/rn5t618.txt16
-rw-r--r--Documentation/devicetree/bindings/mtd/oxnas-nand.txt41
-rw-r--r--Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt56
-rw-r--r--Documentation/devicetree/bindings/mtd/tango-nand.txt38
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65218.txt87
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rtc7301.txt16
-rw-r--r--Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt37
-rw-r--r--Documentation/devicetree/bindings/rtc/twl-rtc.txt19
-rw-r--r--Documentation/features/io/dma-api-debug/arch-support.txt2
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt2
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/porting4
-rw-r--r--Documentation/filesystems/vfs.txt11
-rw-r--r--Documentation/index.rst1
-rwxr-xr-xDocumentation/sphinx/rstFlatTable.py5
-rw-r--r--Documentation/virtual/kvm/locking.txt8
-rw-r--r--MAINTAINERS8
-rw-r--r--arch/arm/mach-s3c24xx/common-smdk.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-anubis.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-at2440evb.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-bast.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-gta02.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-jive.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-mini2440.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-osiris.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-qt2410.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-rx1950.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-rx3715.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-vstms.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c1
-rw-r--r--arch/arm64/include/asm/numa.h2
-rw-r--r--arch/arm64/mm/numa.c2
-rw-r--r--arch/ia64/include/asm/numa.h2
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h6
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c6
-rw-r--r--arch/microblaze/kernel/syscall_table.S6
-rw-r--r--arch/microblaze/kernel/timer.c2
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi11
-rw-r--r--arch/mips/boot/dts/ingenic/qi_lb60.dts4
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h1
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c1
-rw-r--r--arch/mips/jz4740/platform.c21
-rw-r--r--arch/mips/jz4740/reset.c63
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c2
-rw-r--r--arch/tile/include/asm/cache.h7
-rw-r--r--arch/tile/include/asm/sections.h3
-rw-r--r--arch/tile/kernel/module.c11
-rw-r--r--arch/tile/kernel/pci.c2
-rw-r--r--arch/tile/kernel/pci_gx.c2
-rw-r--r--arch/tile/kernel/setup.c18
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/tile/kernel/unaligned.c2
-rw-r--r--arch/tile/lib/cacheflush.c8
-rw-r--r--arch/tile/mm/extable.c2
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/tile/mm/homecache.c2
-rw-r--r--arch/tile/mm/init.c10
-rw-r--r--arch/x86/Kconfig12
-rw-r--r--arch/x86/include/asm/asm-prototypes.h16
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/floppy.h20
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/mmu.h4
-rw-r--r--arch/x86/include/asm/mpx.h4
-rw-r--r--arch/x86/include/asm/pgtable_64.h3
-rw-r--r--arch/x86/include/asm/tsc.h9
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/apic/apic.c15
-rw-r--r--arch/x86/kernel/cpu/common.c24
-rw-r--r--arch/x86/kernel/head_64.S5
-rw-r--r--arch/x86/kernel/process.c1
-rw-r--r--arch/x86/kernel/smpboot.c59
-rw-r--r--arch/x86/kernel/tsc.c42
-rw-r--r--arch/x86/kernel/tsc_msr.c19
-rw-r--r--arch/x86/kernel/tsc_sync.c290
-rw-r--r--arch/x86/kvm/cpuid.c9
-rw-r--r--arch/x86/kvm/hyperv.c24
-rw-r--r--arch/x86/kvm/vmx.c13
-rw-r--r--arch/x86/kvm/x86.c18
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init_64.c24
-rw-r--r--arch/x86/mm/mpx.c10
-rw-r--r--arch/x86/mm/numa.c2
-rw-r--r--arch/x86/platform/Makefile1
-rw-r--r--arch/x86/platform/intel-mid/mfld.c9
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c8
-rw-r--r--arch/x86/platform/mellanox/Makefile1
-rw-r--r--arch/x86/power/cpu.c1
-rw-r--r--arch/x86/xen/smp.c6
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/boot/dts/kc705.dts16
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/kernel/Makefile1
-rw-r--r--arch/xtensa/kernel/pci-dma.c21
-rw-r--r--arch/xtensa/kernel/s32c1i_selftest.c128
-rw-r--r--arch/xtensa/kernel/setup.c137
-rw-r--r--arch/xtensa/mm/init.c2
-rw-r--r--crypto/algif_aead.c14
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/dax/dax.c94
-rw-r--r--drivers/dax/pmem.c3
-rw-r--r--drivers/firmware/dmi_scan.c4
-rw-r--r--drivers/gpio/gpio-tps65218.c3
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c975
-rw-r--r--drivers/input/joystick/xpad.c7
-rw-r--r--drivers/input/keyboard/gpio_keys.c190
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c128
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c2
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c21
-rw-r--r--drivers/input/misc/Kconfig7
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/da9063_onkey.c9
-rw-r--r--drivers/input/misc/drv260x.c119
-rw-r--r--drivers/input/misc/drv2665.c6
-rw-r--r--drivers/input/misc/drv2667.c4
-rw-r--r--drivers/input/misc/soc_button_array.c8
-rw-r--r--drivers/input/misc/tps65218-pwrbutton.c8
-rw-r--r--drivers/input/mouse/alps.c56
-rw-r--r--drivers/input/mouse/alps.h24
-rw-r--r--drivers/input/mouse/elan_i2c_core.c17
-rw-r--r--drivers/input/rmi4/Kconfig42
-rw-r--r--drivers/input/rmi4/Makefile4
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c10
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h2
-rw-r--r--drivers/input/rmi4/rmi_bus.c12
-rw-r--r--drivers/input/rmi4/rmi_bus.h12
-rw-r--r--drivers/input/rmi4/rmi_driver.c420
-rw-r--r--drivers/input/rmi4/rmi_driver.h31
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/rmi4/rmi_f03.c250
-rw-r--r--drivers/input/rmi4/rmi_f11.c98
-rw-r--r--drivers/input/rmi4/rmi_f12.c146
-rw-r--r--drivers/input/rmi4/rmi_f30.c22
-rw-r--r--drivers/input/rmi4/rmi_f34.c516
-rw-r--r--drivers/input/rmi4/rmi_f34.h314
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c1372
-rw-r--r--drivers/input/rmi4/rmi_f54.c19
-rw-r--r--drivers/input/rmi4/rmi_f55.c131
-rw-r--r--drivers/input/rmi4/rmi_i2c.c74
-rw-r--r--drivers/input/rmi4/rmi_smbus.c447
-rw-r--r--drivers/input/rmi4/rmi_spi.c72
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h78
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c83
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c51
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/silead.c29
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c32
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c32
-rw-r--r--drivers/isdn/hisax/config.c16
-rw-r--r--drivers/isdn/hisax/hisax.h4
-rw-r--r--drivers/isdn/i4l/isdn_concap.c6
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c16
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab3100-core.c39
-rw-r--r--drivers/mfd/ab8500-core.c37
-rw-r--r--drivers/mfd/ab8500-debugfs.c21
-rw-r--r--drivers/mfd/ab8500-gpadc.c19
-rw-r--r--drivers/mfd/ab8500-sysctrl.c9
-rw-r--r--drivers/mfd/abx500-core.c7
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/arizona-irq.c8
-rw-r--r--drivers/mfd/axp20x-i2c.c7
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/bcm590xx.c2
-rw-r--r--drivers/mfd/cs47l24-tables.c6
-rw-r--r--drivers/mfd/davinci_voicecodec.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c1
-rw-r--r--drivers/mfd/hi655x-pmic.c1
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c40
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/palmas.c3
-rw-r--r--drivers/mfd/qcom-pm8xxx.c231
-rw-r--r--drivers/mfd/rk808.c23
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c181
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/tps65217.c71
-rw-r--r--drivers/mfd/tps65218.c34
-rw-r--r--drivers/mfd/tps65912-core.c17
-rw-r--r--drivers/mfd/wm5102-tables.c1412
-rw-r--r--drivers/mfd/wm8994-core.c6
-rw-r--r--drivers/mtd/bcm47xxpart.c10
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c24
-rw-r--r--drivers/mtd/maps/sc520cdp.c8
-rw-r--r--drivers/mtd/mtdcore.c44
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/Kconfig21
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ams-delta.c5
-rw-r--r--drivers/mtd/nand/atmel_nand.c10
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c10
-rw-r--r--drivers/mtd/nand/cafe_nand.c5
-rw-r--r--drivers/mtd/nand/cmx270_nand.c4
-rw-r--r--drivers/mtd/nand/cs553x_nand.c5
-rw-r--r--drivers/mtd/nand/denali.c101
-rw-r--r--drivers/mtd/nand/denali.h12
-rw-r--r--drivers/mtd/nand/denali_dt.c3
-rw-r--r--drivers/mtd/nand/denali_pci.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpio.c5
-rw-r--r--drivers/mtd/nand/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c10
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c10
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/nand_base.c84
-rw-r--r--drivers/mtd/nand/nand_ids.c3
-rw-r--r--drivers/mtd/nand/nand_timings.c26
-rw-r--r--drivers/mtd/nand/nandsim.c19
-rw-r--r--drivers/mtd/nand/omap2.c10
-rw-r--r--drivers/mtd/nand/orion_nand.c5
-rw-r--r--drivers/mtd/nand/oxnas_nand.c195
-rw-r--r--drivers/mtd/nand/pasemi_nand.c5
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c22
-rw-r--r--drivers/mtd/nand/s3c2410.c286
-rw-r--r--drivers/mtd/nand/socrates_nand.c12
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/tango_nand.c676
-rw-r--r--drivers/mtd/nand/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/vf610_nfc.c10
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c6
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/ethernet/3com/3c515.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c8
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/macb.c31
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c153
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c65
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c14
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c17
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c9
-rw-r--r--drivers/net/ethernet/rdc/r6040.c10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c35
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c60
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h12
-rw-r--r--drivers/net/gtp.c8
-rw-r--r--drivers/net/irda/w83977af_ir.c6
-rw-r--r--drivers/net/virtio_net.c369
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/wan/lmc/lmc_media.c97
-rw-r--r--drivers/nvdimm/claim.c46
-rw-r--r--drivers/nvdimm/core.c29
-rw-r--r--drivers/nvdimm/dimm.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c7
-rw-r--r--drivers/nvdimm/e820.c12
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c8
-rw-r--r--drivers/nvdimm/nd.h12
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c21
-rw-r--r--drivers/nvdimm/region_devs.c2
-rw-r--r--drivers/oprofile/buffer_sync.c2
-rw-r--r--drivers/platform/x86/Kconfig40
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c162
-rw-r--r--drivers/platform/x86/mlx-platform.c (renamed from arch/x86/platform/mellanox/mlx-platform.c)101
-rw-r--r--drivers/platform/x86/surface3-wmi.c297
-rw-r--r--drivers/platform/x86/surface3_button.c250
-rw-r--r--drivers/regulator/tps65218-regulator.c153
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-cmos.c75
-rw-r--r--drivers/rtc/rtc-ds1307.c52
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c154
-rw-r--r--drivers/rtc/rtc-lib.c4
-rw-r--r--drivers/rtc/rtc-mcp795.c122
-rw-r--r--drivers/rtc/rtc-pcf85063.c7
-rw-r--r--drivers/rtc/rtc-r7301.c453
-rw-r--r--drivers/rtc/rtc-starfire.c10
-rw-r--r--drivers/rtc/rtc-sun4v.c10
-rw-r--r--drivers/rtc/rtc-twl.c202
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--fs/9p/vfs_addr.c15
-rw-r--r--fs/9p/vfs_inode.c1
-rw-r--r--fs/9p/vfs_inode_dotl.c1
-rw-r--r--fs/affs/symlink.c1
-rw-r--r--fs/autofs4/autofs_i.h5
-rw-r--r--fs/autofs4/dev-ioctl.c10
-rw-r--r--fs/autofs4/expire.c25
-rw-r--r--fs/autofs4/root.c61
-rw-r--r--fs/autofs4/symlink.c1
-rw-r--r--fs/autofs4/waitq.c13
-rw-r--r--fs/bad_inode.c55
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/file.c1
-rw-r--r--fs/btrfs/inode.c1
-rw-r--r--fs/btrfs/ioctl.c14
-rw-r--r--fs/ceph/addr.c14
-rw-r--r--fs/ceph/inode.c1
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/coda/cnode.c1
-rw-r--r--fs/configfs/symlink.c1
-rw-r--r--fs/dcache.c40
-rw-r--r--fs/dcookies.c4
-rw-r--r--fs/ecryptfs/inode.c30
-rw-r--r--fs/exofs/inode.c68
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/ext2/symlink.c2
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/ext4/symlink.c3
-rw-r--r--fs/f2fs/namei.c2
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/fuse/dir.c1
-rw-r--r--fs/gfs2/aops.c4
-rw-r--r--fs/gfs2/inode.c1
-rw-r--r--fs/hostfs/hostfs_kern.c1
-rw-r--r--fs/internal.h2
-rw-r--r--fs/jffs2/symlink.c1
-rw-r--r--fs/jfs/symlink.c2
-rw-r--r--fs/kernfs/symlink.c1
-rw-r--r--fs/libfs.c15
-rw-r--r--fs/minix/inode.c1
-rw-r--r--fs/mount.h6
-rw-r--r--fs/namei.c52
-rw-r--r--fs/namespace.c39
-rw-r--r--fs/ncpfs/inode.c1
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/symlink.c1
-rw-r--r--fs/nfsd/nfs4xdr.c8
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--fs/nilfs2/namei.c1
-rw-r--r--fs/notify/dnotify/dnotify.c2
-rw-r--r--fs/notify/fanotify/fanotify.c8
-rw-r--r--fs/notify/fanotify/fanotify.h2
-rw-r--r--fs/notify/fsnotify.c8
-rw-r--r--fs/notify/inode_mark.c45
-rw-r--r--fs/notify/inotify/inotify.h2
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c4
-rw-r--r--fs/ocfs2/alloc.c7
-rw-r--r--fs/ocfs2/aops.c31
-rw-r--r--fs/ocfs2/file.c42
-rw-r--r--fs/ocfs2/file.h3
-rw-r--r--fs/ocfs2/inode.h6
-rw-r--r--fs/ocfs2/move_extents.c10
-rw-r--r--fs/ocfs2/quota_global.c10
-rw-r--r--fs/ocfs2/quota_local.c11
-rw-r--r--fs/ocfs2/refcounttree.c464
-rw-r--r--fs/ocfs2/refcounttree.h7
-rw-r--r--fs/ocfs2/super.c1
-rw-r--r--fs/ocfs2/symlink.c1
-rw-r--r--fs/ocfs2/xattr.c4
-rw-r--r--fs/orangefs/symlink.c1
-rw-r--r--fs/overlayfs/inode.c1
-rw-r--r--fs/proc/inode.c1
-rw-r--r--fs/proc/self.c13
-rw-r--r--fs/proc/thread_self.c14
-rw-r--r--fs/quota/dquot.c138
-rw-r--r--fs/quota/quota.c26
-rw-r--r--fs/read_write.c231
-rw-r--r--fs/reiserfs/namei.c1
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/squashfs/symlink.c1
-rw-r--r--fs/stat.c8
-rw-r--r--fs/statfs.c2
-rw-r--r--fs/super.c81
-rw-r--r--fs/sysv/inode.c1
-rw-r--r--fs/ubifs/file.c1
-rw-r--r--fs/utimes.c2
-rw-r--r--fs/xfs/xfs_file.c19
-rw-r--r--fs/xfs/xfs_ioctl.c4
-rw-r--r--fs/xfs/xfs_iops.c2
-rw-r--r--fs/xfs/xfs_reflink.c219
-rw-r--r--include/asm-generic/asm-prototypes.h7
-rw-r--r--include/asm-generic/vmlinux.lds.h68
-rw-r--r--include/crypto/aead.h50
-rw-r--r--include/crypto/dh.h58
-rw-r--r--include/crypto/ecdh.h58
-rw-r--r--include/crypto/hash.h2
-rw-r--r--include/crypto/kpp.h15
-rw-r--r--include/crypto/skcipher.h4
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/bpf-cgroup.h2
-rw-r--r--include/linux/bpf.h13
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cpumask.h5
-rw-r--r--include/linux/crypto.h4
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/dcookies.h4
-rw-r--r--include/linux/file.h2
-rw-r--r--include/linux/filter.h15
-rw-r--r--include/linux/fs.h17
-rw-r--r--include/linux/fsnotify.h12
-rw-r--r--include/linux/fsnotify_backend.h10
-rw-r--r--include/linux/gpio_keys.h5
-rw-r--r--include/linux/mfd/axp20x.h6
-rw-r--r--include/linux/mfd/davinci_voicecodec.h4
-rw-r--r--include/linux/mfd/intel_soc_pmic.h1
-rw-r--r--include/linux/mfd/rk808.h1
-rw-r--r--include/linux/mfd/rn5t618.h9
-rw-r--r--include/linux/mfd/sun4i-gpadc.h94
-rw-r--r--include/linux/mfd/tps65217.h4
-rw-r--r--include/linux/mfd/tps65218.h3
-rw-r--r--include/linux/mfd/tps65912.h16
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mount.h6
-rw-r--r--include/linux/mtd/nand.h30
-rw-r--r--include/linux/platform_data/drv260x-pdata.h28
-rw-r--r--include/linux/platform_data/macb.h6
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h7
-rw-r--r--include/linux/quota.h3
-rw-r--r--include/linux/quotaops.h2
-rw-r--r--include/linux/rmi.h60
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/netlink.h3
-rw-r--r--init/Kconfig3
-rw-r--r--kernel/audit.c4
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_fsnotify.c10
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/audit_watch.c8
-rw-r--r--kernel/bpf/core.c43
-rw-r--r--kernel/bpf/syscall.c38
-rw-r--r--kernel/bpf/verifier.c28
-rw-r--r--kernel/cpu.c6
-rw-r--r--kernel/irq/affinity.c6
-rw-r--r--kernel/time/tick-broadcast.c3
-rw-r--r--mm/shmem.c2
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/atm/mpoa_caches.c43
-rw-r--r--net/core/filter.c6
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/ipv4/inet_connection_sock.c16
-rw-r--r--net/ipv6/inet6_connection_sock.c7
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/irda/irnet/irnet_ppp.h11
-rw-r--r--net/irda/irproc.c1
-rw-r--r--net/mac80211/key.c3
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/sta_info.c14
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sctp/endpointola.c5
-rw-r--r--net/sctp/socket.c7
-rw-r--r--net/vmw_vsock/vmci_transport_notify.c30
-rw-r--r--net/vmw_vsock/vmci_transport_notify_qstate.c30
-rw-r--r--net/x25/sysctl_net_x25.c2
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/connector/Makefile2
-rw-r--r--scripts/Makefile.build17
-rwxr-xr-xscripts/adjust_autoksyms.sh1
-rw-r--r--scripts/coccinelle/misc/boolconv.cocci90
-rw-r--r--scripts/coccinelle/misc/irqf_oneshot.cocci52
-rw-r--r--scripts/genksyms/keywords.gperf1
-rw-r--r--scripts/genksyms/keywords.hash.c_shipped59
-rw-r--r--scripts/genksyms/parse.tab.c_shipped676
-rw-r--r--scripts/genksyms/parse.tab.h_shipped33
-rw-r--r--scripts/genksyms/parse.y2
-rw-r--r--scripts/kallsyms.c1
-rw-r--r--scripts/kconfig/nconf.c2
-rw-r--r--scripts/kconfig/nconf.gui.c15
-rw-r--r--scripts/kconfig/qconf.cc19
-rwxr-xr-xscripts/link-vmlinux.sh37
-rw-r--r--scripts/mod/modpost.c6
-rwxr-xr-xscripts/package/builddeb2
-rwxr-xr-xscripts/package/mkspec3
-rw-r--r--tools/build/Makefile2
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/lib/api/Makefile2
-rw-r--r--tools/lib/bpf/Makefile2
-rw-r--r--tools/lib/lockdep/Makefile2
-rw-r--r--tools/lib/subcmd/Makefile2
-rw-r--r--tools/lib/traceevent/Makefile2
-rw-r--r--tools/objtool/Makefile4
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/tests/make2
-rw-r--r--tools/power/cpupower/Makefile3
-rw-r--r--tools/power/cpupower/debug/kernel/Makefile3
-rw-r--r--tools/testing/nvdimm/test/nfit.c30
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c30
527 files changed, 16094 insertions, 6805 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
index 4cf1e72222d9..ec950c93e5c6 100644
--- a/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
+++ b/Documentation/ABI/testing/sysfs-class-rtc-rtc0-device-rtc_calibration
@@ -1,8 +1,9 @@
-What: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock
+What: /sys/class/rtc/rtc0/device/rtc_calibration
Date: Oct 2011
KernelVersion: 3.0
Contact: Mark Godfrey <mark.godfrey@stericsson.com>
-Description: The rtc_calibration attribute allows the userspace to
+Description: Attribute for calibrating ST-Ericsson AB8500 Real Time Clock
+ The rtc_calibration attribute allows the userspace to
calibrate the AB8500's 32KHz Real Time Clock.
Every 60 seconds the AB8500 will correct the RTC's value
by adding to it the value of this attribute.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index caab9039362f..c75e5d6b8fa8 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -13,7 +13,7 @@ DOCBOOKS := z8530book.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml sh.xml regulator.xml w1.xml \
- writing_musb_glue_layer.xml crypto-API.xml iio.xml
+ writing_musb_glue_layer.xml iio.xml
ifeq ($(DOCBOOKS),)
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
deleted file mode 100644
index 088b79c341ff..000000000000
--- a/Documentation/DocBook/crypto-API.tmpl
+++ /dev/null
@@ -1,2092 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="KernelCryptoAPI">
- <bookinfo>
- <title>Linux Kernel Crypto API</title>
-
- <authorgroup>
- <author>
- <firstname>Stephan</firstname>
- <surname>Mueller</surname>
- <affiliation>
- <address>
- <email>smueller@chronox.de</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>Marek</firstname>
- <surname>Vasut</surname>
- <affiliation>
- <address>
- <email>marek@denx.de</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>2014</year>
- <holder>Stephan Mueller</holder>
- </copyright>
-
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This program is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
- <toc></toc>
-
- <chapter id="Intro">
- <title>Kernel Crypto API Interface Specification</title>
-
- <sect1><title>Introduction</title>
-
- <para>
- The kernel crypto API offers a rich set of cryptographic ciphers as
- well as other data transformation mechanisms and methods to invoke
- these. This document contains a description of the API and provides
- example code.
- </para>
-
- <para>
- To understand and properly use the kernel crypto API a brief
- explanation of its structure is given. Based on the architecture,
- the API can be separated into different components. Following the
- architecture specification, hints to developers of ciphers are
- provided. Pointers to the API function call documentation are
- given at the end.
- </para>
-
- <para>
- The kernel crypto API refers to all algorithms as "transformations".
- Therefore, a cipher handle variable usually has the name "tfm".
- Besides cryptographic operations, the kernel crypto API also knows
- compression transformations and handles them the same way as ciphers.
- </para>
-
- <para>
- The kernel crypto API serves the following entity types:
-
- <itemizedlist>
- <listitem>
- <para>consumers requesting cryptographic services</para>
- </listitem>
- <listitem>
- <para>data transformation implementations (typically ciphers)
- that can be called by consumers using the kernel crypto
- API</para>
- </listitem>
- </itemizedlist>
- </para>
-
- <para>
- This specification is intended for consumers of the kernel crypto
- API as well as for developers implementing ciphers. This API
- specification, however, does not discuss all API calls available
- to data transformation implementations (i.e. implementations of
- ciphers and other transformations (such as CRC or even compression
- algorithms) that can register with the kernel crypto API).
- </para>
-
- <para>
- Note: The terms "transformation" and "cipher algorithm" are used
- interchangeably.
- </para>
- </sect1>
-
- <sect1><title>Terminology</title>
- <para>
- The transformation implementation is the actual code or the hardware
- interface which implements a certain transformation with precisely
- defined behavior.
- </para>
-
- <para>
- The transformation object (TFM) is an instance of a transformation
- implementation. There can be multiple transformation objects
- associated with a single transformation implementation. Each of
- those transformation objects is held by a crypto API consumer or
- another transformation. A transformation object is allocated when a
- crypto API consumer requests a transformation implementation.
- The consumer is then provided with a structure that contains
- a transformation object (TFM).
- </para>
-
- <para>
- The structure that contains transformation objects may also be
- referred to as a "cipher handle". Such a cipher handle is always
- subject to the following phases that are reflected in the API calls
- applicable to such a cipher handle:
- </para>
-
- <orderedlist>
- <listitem>
- <para>Initialization of a cipher handle.</para>
- </listitem>
- <listitem>
- <para>Execution of all intended cipher operations applicable
- for the handle where the cipher handle must be furnished to
- every API call.</para>
- </listitem>
- <listitem>
- <para>Destruction of a cipher handle.</para>
- </listitem>
- </orderedlist>
-
- <para>
- When using the initialization API calls, a cipher handle is
- created and returned to the consumer. Therefore, please refer
- to all initialization API calls that refer to the data
- structure type a consumer is expected to receive and subsequently
- to use. The initialization API calls all follow the same naming
- convention of crypto_alloc_*.
- </para>
-
- <para>
- The transformation context is private data associated with
- the transformation object.
- </para>
- </sect1>
- </chapter>
-
- <chapter id="Architecture"><title>Kernel Crypto API Architecture</title>
- <sect1><title>Cipher algorithm types</title>
- <para>
- The kernel crypto API provides different API calls for the
- following cipher types:
-
- <itemizedlist>
- <listitem><para>Symmetric ciphers</para></listitem>
- <listitem><para>AEAD ciphers</para></listitem>
- <listitem><para>Message digest, including keyed message digest</para></listitem>
- <listitem><para>Random number generation</para></listitem>
- <listitem><para>User space interface</para></listitem>
- </itemizedlist>
- </para>
- </sect1>
-
- <sect1><title>Ciphers And Templates</title>
- <para>
- The kernel crypto API provides implementations of single block
- ciphers and message digests. In addition, the kernel crypto API
- provides numerous "templates" that can be used in conjunction
- with the single block ciphers and message digests. Templates
- include all types of block chaining mode, the HMAC mechanism, etc.
- </para>
-
- <para>
- Single block ciphers and message digests can either be directly
- used by a caller or invoked together with a template to form
- multi-block ciphers or keyed message digests.
- </para>
-
- <para>
- A single block cipher may even be called with multiple templates.
- However, templates cannot be used without a single cipher.
- </para>
-
- <para>
- See /proc/crypto and search for "name". For example:
-
- <itemizedlist>
- <listitem><para>aes</para></listitem>
- <listitem><para>ecb(aes)</para></listitem>
- <listitem><para>cmac(aes)</para></listitem>
- <listitem><para>ccm(aes)</para></listitem>
- <listitem><para>rfc4106(gcm(aes))</para></listitem>
- <listitem><para>sha1</para></listitem>
- <listitem><para>hmac(sha1)</para></listitem>
- <listitem><para>authenc(hmac(sha1),cbc(aes))</para></listitem>
- </itemizedlist>
- </para>
-
- <para>
- In these examples, "aes" and "sha1" are the ciphers and all
- others are the templates.
- </para>
- </sect1>
-
- <sect1><title>Synchronous And Asynchronous Operation</title>
- <para>
- The kernel crypto API provides synchronous and asynchronous
- API operations.
- </para>
-
- <para>
- When using the synchronous API operation, the caller invokes
- a cipher operation which is performed synchronously by the
- kernel crypto API. That means, the caller waits until the
- cipher operation completes. Therefore, the kernel crypto API
- calls work like regular function calls. For synchronous
- operation, the set of API calls is small and conceptually
- similar to any other crypto library.
- </para>
-
- <para>
- The kernel crypto API also provides asynchronous operation,
- which implies that the invocation of a cipher operation will
- return almost instantly. That invocation triggers the
- cipher operation but does not signal its completion. Before
- invoking a cipher operation, the caller must provide a callback
- function the kernel crypto API can invoke to signal the
- completion of the cipher operation. Furthermore, the caller
- must ensure it can handle such asynchronous events by applying
- appropriate locking around its data. The kernel crypto API
- does not perform any special serialization operation to protect
- the caller's data integrity.
- </para>
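-
- <para>
- The following is a rough sketch of how an asynchronous consumer may
- register and wait for such a completion callback (based on the
- symmetric key cipher API; the my_ctx structure and my_complete()
- function are made-up names used only for illustration):
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/completion.h>
-#include <crypto/skcipher.h>
-
-/* hypothetical per-request context owned by the caller */
-struct my_ctx {
-	struct completion done;	/* initialised with init_completion() */
-	int err;
-};
-
-/* invoked by the kernel crypto API once the cipher operation finished */
-static void my_complete(struct crypto_async_request *areq, int err)
-{
-	struct my_ctx *ctx = areq->data;
-
-	if (err == -EINPROGRESS)
-		return;	/* request only moved from the backlog to the queue */
-
-	ctx->err = err;
-	complete(&ctx->done);
-}
-
-/* ... after the request has been allocated and set up ... */
-skcipher_request_set_callback(req,
-			      CRYPTO_TFM_REQ_MAY_BACKLOG |
-			      CRYPTO_TFM_REQ_MAY_SLEEP,
-			      my_complete, &ctx);
-
-ret = crypto_skcipher_encrypt(req);
-if (ret == -EINPROGRESS || ret == -EBUSY)
-	wait_for_completion(&ctx->done);	/* signalled by my_complete() */
-]]>
- </programlisting>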
- </sect1>
-
- <sect1><title>Crypto API Cipher References And Priority</title>
- <para>
- A cipher is referenced by the caller with a string. That string
- has the following semantics:
-
- <programlisting>
- template(single block cipher)
- </programlisting>
-
- where "template" and "single block cipher" are the aforementioned
- template and single block cipher, respectively. If applicable,
- additional templates may enclose other templates, such as
-
- <programlisting>
- template1(template2(single block cipher))
- </programlisting>
- </para>
-
- <para>
- The kernel crypto API may provide multiple implementations of a
- template or a single block cipher. For example, AES on newer
- Intel hardware has the following implementations: AES-NI,
- an assembler implementation, or straight C. Now, when using the
- string "aes" with the kernel crypto API, which cipher
- implementation is used? The answer to that question is the
- priority number assigned to each cipher implementation by the
- kernel crypto API. When a caller uses the string to refer to a
- cipher during initialization of a cipher handle, the kernel
- crypto API looks up all implementations providing an
- implementation with that name and selects the implementation
- with the highest priority.
- </para>
-
- <para>
- Now, a caller may have the need to refer to a specific cipher
- implementation and thus does not want to rely on the
- priority-based selection. To accommodate this scenario, the
- kernel crypto API allows the cipher implementation to register
- a unique name in addition to common names. When using that
- unique name, a caller is therefore always sure to refer to
- the intended cipher implementation.
- </para>
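-
- <para>
- As a minimal sketch (using the symmetric key cipher API; the exact
- driver names available on a given system are listed in /proc/crypto),
- allocation by generic name and by unique driver name looks as follows:
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/err.h>
-#include <crypto/skcipher.h>
-
-static int demo_alloc(void)
-{
-	struct crypto_skcipher *tfm;
-
-	/* generic name: the highest-priority implementation is selected */
-	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-	crypto_free_skcipher(tfm);
-
-	/* unique driver name, e.g. the generic CBC template wrapped
-	 * around the generic C implementation of AES */
-	tfm = crypto_alloc_skcipher("cbc(aes-generic)", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-	crypto_free_skcipher(tfm);
-
-	return 0;
-}
-]]>
- </programlisting>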
-
- <para>
- The list of available ciphers is given in /proc/crypto. However,
- that list does not specify all possible permutations of
- templates and ciphers. Each block listed in /proc/crypto may
- contain the following information -- if one of the components
- listed below is not applicable to a cipher, it is not
- displayed:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>name: the generic name of the cipher that is subject
- to the priority-based selection -- this name can be used by
- the cipher allocation API calls (all names listed above are
- examples for such generic names)</para>
- </listitem>
- <listitem>
- <para>driver: the unique name of the cipher -- this name can
- be used by the cipher allocation API calls</para>
- </listitem>
- <listitem>
- <para>module: the kernel module providing the cipher
- implementation (or "kernel" for statically linked ciphers)</para>
- </listitem>
- <listitem>
- <para>priority: the priority value of the cipher implementation</para>
- </listitem>
- <listitem>
- <para>refcnt: the reference count of the respective cipher
- (i.e. the number of current consumers of this cipher)</para>
- </listitem>
- <listitem>
- <para>selftest: specification of whether the self test for the
- cipher passed</para>
- </listitem>
- <listitem>
- <para>type:
- <itemizedlist>
- <listitem>
- <para>skcipher for symmetric key ciphers</para>
- </listitem>
- <listitem>
- <para>cipher for single block ciphers that may be used with
- an additional template</para>
- </listitem>
- <listitem>
- <para>shash for synchronous message digest</para>
- </listitem>
- <listitem>
- <para>ahash for asynchronous message digest</para>
- </listitem>
- <listitem>
- <para>aead for AEAD cipher type</para>
- </listitem>
- <listitem>
- <para>compression for compression type transformations</para>
- </listitem>
- <listitem>
- <para>rng for random number generator</para>
- </listitem>
- <listitem>
- <para>givcipher for cipher with associated IV generator
- (see the geniv entry below for the specification of the
- IV generator type used by the cipher implementation)</para>
- </listitem>
- </itemizedlist>
- </para>
- </listitem>
- <listitem>
- <para>blocksize: blocksize of cipher in bytes</para>
- </listitem>
- <listitem>
- <para>keysize: key size in bytes</para>
- </listitem>
- <listitem>
- <para>ivsize: IV size in bytes</para>
- </listitem>
- <listitem>
- <para>seedsize: required size of seed data for random number
- generator</para>
- </listitem>
- <listitem>
- <para>digestsize: output size of the message digest</para>
- </listitem>
- <listitem>
- <para>geniv: IV generation type:
- <itemizedlist>
- <listitem>
- <para>eseqiv for encrypted sequence number based IV
- generation</para>
- </listitem>
- <listitem>
- <para>seqiv for sequence number based IV generation</para>
- </listitem>
- <listitem>
- <para>chainiv for chain iv generation</para>
- </listitem>
- <listitem>
- <para>&lt;builtin&gt; is a marker that the cipher implements
- IV generation and handling as it is specific to the given
- cipher</para>
- </listitem>
- </itemizedlist>
- </para>
- </listitem>
- </itemizedlist>
- </sect1>
-
- <sect1><title>Key Sizes</title>
- <para>
- When allocating a cipher handle, the caller only specifies the
- cipher type. Symmetric ciphers, however, typically support
- multiple key sizes (e.g. AES-128 vs. AES-192 vs. AES-256).
- These key sizes are determined with the length of the provided
- key. Thus, the kernel crypto API does not provide a separate
- way to select the particular symmetric cipher key size.
- </para>
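-
- <para>
- For example (a short sketch based on the symmetric key cipher API;
- error handling shortened), the same allocation serves all AES
- variants and only the key length passed to the setkey call differs:
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/err.h>
-#include <crypto/skcipher.h>
-
-static int demo_setkey(const u8 *key, unsigned int keylen)
-{
-	struct crypto_skcipher *tfm;
-	int ret;
-
-	/* one allocation for AES-128, AES-192 and AES-256 alike */
-	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	/* a keylen of 16, 24 or 32 bytes selects the AES variant */
-	ret = crypto_skcipher_setkey(tfm, key, keylen);
-
-	crypto_free_skcipher(tfm);
-	return ret;
-}
-]]>
- </programlisting>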
- </sect1>
-
- <sect1><title>Cipher Allocation Type And Masks</title>
- <para>
- The different cipher handle allocation functions allow the
- specification of a type and mask flag. Both parameters have
- the following meaning (and are therefore not covered in the
- subsequent sections).
- </para>
-
- <para>
- The type flag specifies the type of the cipher algorithm.
- The caller usually provides a 0 when the caller wants the
- default handling. Otherwise, the caller may provide the
- following selections which match the aforementioned cipher
- types:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>CRYPTO_ALG_TYPE_CIPHER Single block cipher</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_COMPRESS Compression</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with
- Associated Data (MAC)</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block
- cipher packed together with an IV generator (see geniv field
- in the /proc/crypto listing for the known IV generators)</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_DIGEST Raw message digest</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_RNG Random Number Generation</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_AKCIPHER Asymmetric cipher</para>
- </listitem>
- <listitem>
- <para>CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
- CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
- decompression instead of performing the operation on one
- segment only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
- CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.</para>
- </listitem>
- </itemizedlist>
-
- <para>
- The mask flag restricts the type of cipher. The only allowed
- flag is CRYPTO_ALG_ASYNC to restrict the cipher lookup function
- to asynchronous ciphers. Usually, a caller provides a 0 for the
- mask flag.
- </para>
-
- <para>
- When the caller provides a mask and type specification, the
- caller limits the search the kernel crypto API can perform for
- a suitable cipher implementation for the given cipher name.
- That means, even when a caller uses a cipher name that exists
- during its initialization call, the kernel crypto API may not
- select it due to the used type and mask field.
- </para>
- </sect1>
-
- <sect1><title>Internal Structure of Kernel Crypto API</title>
-
- <para>
- The kernel crypto API has an internal structure where a cipher
- implementation may use many layers and indirections. This section
- shall help to clarify how the kernel crypto API uses
- various components to implement the complete cipher.
- </para>
-
- <para>
- The following subsections explain the internal structure based
- on existing cipher implementations. The first section addresses
- the most complex scenario where all other scenarios form a logical
- subset.
- </para>
-
- <sect2><title>Generic AEAD Cipher Structure</title>
-
- <para>
- The following ASCII art decomposes the kernel crypto API layers
- when using the AEAD cipher with the automated IV generation. The
- shown example is used by the IPSEC layer.
- </para>
-
- <para>
- For other use cases of AEAD ciphers, the ASCII art applies as
- well, but the caller may not use the AEAD cipher with a separate
- IV generator. In this case, the caller must generate the IV.
- </para>
-
- <para>
- The depicted example decomposes the AEAD cipher of GCM(AES) based
- on the generic C implementations (gcm.c, aes-generic.c, ctr.c,
- ghash-generic.c, seqiv.c). The generic implementation serves as an
- example showing the complete logic of the kernel crypto API.
- </para>
-
- <para>
- Some streamlined cipher implementations (such as AES-NI) merge
- several of these aspects into one implementation which, in the view
- of the kernel crypto API, cannot be decomposed into layers any more.
- In case of the AES-NI implementation, the CTR mode, the GHASH
- implementation and the AES cipher are all merged into one cipher
- implementation registered with the kernel crypto API. In this case,
- the concept described by the following ASCII art applies too. However,
- the decomposition of GCM into the individual sub-components
- by the kernel crypto API is not done any more.
- </para>
-
- <para>
- Each block in the following ASCII art is an independent cipher
- instance obtained from the kernel crypto API. Each block
- is accessed by the caller or by other blocks using the API functions
- defined by the kernel crypto API for the cipher implementation type.
- </para>
-
- <para>
- The blocks below indicate the cipher type as well as the specific
- logic implemented in the cipher.
- </para>
-
- <para>
- The ASCII art picture also indicates the call structure, i.e. who
- calls which component. The arrows point to the invoked block
- where the caller uses the API applicable to the cipher type
- specified for the block.
- </para>
-
- <programlisting>
-<![CDATA[
-kernel crypto API | IPSEC Layer
- |
-+-----------+ |
-| | (1)
-| aead | <----------------------------------- esp_output
-| (seqiv) | ---+
-+-----------+ |
- | (2)
-+-----------+ |
-| | <--+ (2)
-| aead | <----------------------------------- esp_input
-| (gcm) | ------------+
-+-----------+ |
- | (3) | (5)
- v v
-+-----------+ +-----------+
-| | | |
-| skcipher | | ahash |
-| (ctr) | ---+ | (ghash) |
-+-----------+ | +-----------+
- |
-+-----------+ | (4)
-| | <--+
-| cipher |
-| (aes) |
-+-----------+
-]]>
- </programlisting>
-
- <para>
- The following call sequence is applicable when the IPSEC layer
- triggers an encryption operation with the esp_output function. During
- configuration, the administrator set up the use of rfc4106(gcm(aes)) as
- the cipher for ESP. The following call sequence is now depicted in the
- ASCII art above:
- </para>
-
- <orderedlist>
- <listitem>
- <para>
- esp_output() invokes crypto_aead_encrypt() to trigger an encryption
- operation of the AEAD cipher with IV generator.
- </para>
-
- <para>
- In case of GCM, the SEQIV implementation is registered as GIVCIPHER
- in crypto_rfc4106_alloc().
- </para>
-
- <para>
- The SEQIV performs its operation to generate an IV where the core
- function is seqiv_geniv().
- </para>
- </listitem>
-
- <listitem>
- <para>
- Now, SEQIV uses the AEAD API function calls to invoke the associated
- AEAD cipher. In our case, during the instantiation of SEQIV, the
- cipher handle for GCM is provided to SEQIV. This means that SEQIV
- invokes AEAD cipher operations with the GCM cipher handle.
- </para>
-
- <para>
- During instantiation of the GCM handle, the CTR(AES) and GHASH
- ciphers are instantiated. The cipher handles for CTR(AES) and GHASH
- are retained for later use.
- </para>
-
- <para>
- The GCM implementation is responsible for invoking the CTR mode AES and
- the GHASH cipher in the right manner to implement the GCM
- specification.
- </para>
- </listitem>
-
- <listitem>
- <para>
- The GCM AEAD cipher type implementation now invokes the SKCIPHER API
- with the instantiated CTR(AES) cipher handle.
- </para>
-
- <para>
- During instantiation of the CTR(AES) cipher, the CIPHER type
- implementation of AES is instantiated. The cipher handle for AES is
- retained.
- </para>
-
- <para>
- That means that the SKCIPHER implementation of CTR(AES) only
- implements the CTR block chaining mode. After performing the block
- chaining operation, the CIPHER implementation of AES is invoked.
- </para>
- </listitem>
-
- <listitem>
- <para>
- The SKCIPHER of CTR(AES) now invokes the CIPHER API with the AES
- cipher handle to encrypt one block.
- </para>
- </listitem>
-
- <listitem>
- <para>
- The GCM AEAD implementation also invokes the GHASH cipher
- implementation via the AHASH API.
- </para>
- </listitem>
- </orderedlist>
-
- <para>
- When the IPSEC layer triggers the esp_input() function, the same call
- sequence is followed with the only difference that the operation starts
- with step (2).
- </para>
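-
- <para>
- A hedged consumer-side sketch of allocating and keying the cipher
- discussed above (demo_alloc_gcm() is a made-up helper; request setup
- and submission then follow the usual AEAD API pattern):
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/err.h>
-#include <crypto/aead.h>
-
-static struct crypto_aead *demo_alloc_gcm(const u8 *key, unsigned int keylen)
-{
-	struct crypto_aead *tfm;
-	int ret;
-
-	/* the same name the administrator configured for ESP above */
-	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
-	if (IS_ERR(tfm))
-		return tfm;
-
-	/* for rfc4106 the last four key bytes are the nonce (salt) */
-	ret = crypto_aead_setkey(tfm, key, keylen);
-	if (!ret)
-		ret = crypto_aead_setauthsize(tfm, 16);
-	if (ret) {
-		crypto_free_aead(tfm);
-		return ERR_PTR(ret);
-	}
-
-	return tfm;
-}
-]]>
- </programlisting>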
- </sect2>
-
- <sect2><title>Generic Block Cipher Structure</title>
- <para>
- Generic block ciphers follow the same concept as depicted with the ASCII
- art picture above.
- </para>
-
- <para>
- For example, CBC(AES) is implemented with cbc.c, and aes-generic.c. The
- ASCII art picture above applies as well with the difference that only
- step (4) is used and the SKCIPHER block chaining mode is CBC.
- </para>
- </sect2>
-
- <sect2><title>Generic Keyed Message Digest Structure</title>
- <para>
- Keyed message digest implementations again follow the same concept as
- depicted in the ASCII art picture above.
- </para>
-
- <para>
- For example, HMAC(SHA256) is implemented with hmac.c and
- sha256_generic.c. The following ASCII art illustrates the
- implementation:
- </para>
-
- <programlisting>
-<![CDATA[
-kernel crypto API | Caller
- |
-+-----------+ (1) |
-| | <------------------ some_function
-| ahash |
-| (hmac) | ---+
-+-----------+ |
- | (2)
-+-----------+ |
-| | <--+
-| shash |
-| (sha256) |
-+-----------+
-]]>
- </programlisting>
-
- <para>
- The following call sequence is applicable when a caller triggers
- an HMAC operation:
- </para>
-
- <orderedlist>
- <listitem>
- <para>
- The AHASH API functions are invoked by the caller. The HMAC
- implementation performs its operation as needed.
- </para>
-
- <para>
- During initialization of the HMAC cipher, the SHASH cipher type of
- SHA256 is instantiated. The cipher handle for the SHA256 instance is
- retained.
- </para>
-
- <para>
- At some point, the HMAC implementation requires a SHA256 operation
- for which the SHA256 cipher handle is used.
- </para>
- </listitem>
-
- <listitem>
- <para>
- The HMAC instance now invokes the SHASH API with the SHA256
- cipher handle to calculate the message digest.
- </para>
- </listitem>
- </orderedlist>
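-
- <para>
- A rough caller-side sketch of this sequence (demo_hmac() is a made-up
- name; the mask CRYPTO_ALG_ASYNC restricts the lookup to synchronously
- completing implementations so that no completion callback is needed
- in this simple example):
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/err.h>
-#include <linux/scatterlist.h>
-#include <crypto/hash.h>
-
-static int demo_hmac(const u8 *key, unsigned int keylen,
-		     struct scatterlist *sg, unsigned int nbytes,
-		     u8 *digest)
-{
-	struct crypto_ahash *tfm;
-	struct ahash_request *req;
-	int ret;
-
-	tfm = crypto_alloc_ahash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	ret = crypto_ahash_setkey(tfm, key, keylen);
-	if (ret)
-		goto out;
-
-	req = ahash_request_alloc(tfm, GFP_KERNEL);
-	if (!req) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ahash_request_set_callback(req, 0, NULL, NULL);
-	ahash_request_set_crypt(req, sg, digest, nbytes);
-
-	/* one-shot message digest over the whole input */
-	ret = crypto_ahash_digest(req);
-
-	ahash_request_free(req);
-out:
-	crypto_free_ahash(tfm);
-	return ret;
-}
-]]>
- </programlisting>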
- </sect2>
- </sect1>
- </chapter>
-
- <chapter id="Development"><title>Developing Cipher Algorithms</title>
- <sect1><title>Registering And Unregistering Transformation</title>
- <para>
- There are three distinct types of registration functions in
- the Crypto API. One is used to register a generic cryptographic
- transformation, while the other two are specific to HASH
- transformations and COMPRESSion. We will discuss the latter
- two in a separate chapter; here we will only look at the
- generic ones.
- </para>
-
- <para>
- Before discussing the register functions, the data structure
- to be filled with each, struct crypto_alg, must be considered
- -- see below for a description of this data structure.
- </para>
-
- <para>
- The generic registration functions can be found in
- include/linux/crypto.h and their definition can be seen below.
- The former function registers a single transformation, while
- the latter works on an array of transformation descriptions.
- The latter is useful when registering transformations in bulk,
- for example when a driver implements multiple transformations.
- </para>
-
- <programlisting>
- int crypto_register_alg(struct crypto_alg *alg);
- int crypto_register_algs(struct crypto_alg *algs, int count);
- </programlisting>
-
- <para>
- The counterparts to those functions are listed below.
- </para>
-
- <programlisting>
- int crypto_unregister_alg(struct crypto_alg *alg);
- int crypto_unregister_algs(struct crypto_alg *algs, int count);
- </programlisting>
-
- <para>
- Notice that both registration and unregistration functions
- do return a value, so make sure to handle errors. A return
- code of zero implies success. Any return code &lt; 0 implies
- an error.
- </para>
-
- <para>
- The bulk registration/unregistration functions
- register/unregister each transformation in the given array of
- length count. They handle errors as follows:
- </para>
- <itemizedlist>
- <listitem>
- <para>
- crypto_register_algs() succeeds if and only if it
- successfully registers all the given transformations. If an
- error occurs partway through, then it rolls back successful
- registrations before returning the error code. Note that if
- a driver needs to handle registration errors for individual
- transformations, then it will need to use the non-bulk
- function crypto_register_alg() instead.
- </para>
- </listitem>
- <listitem>
- <para>
- crypto_unregister_algs() tries to unregister all the given
- transformations, continuing on error. It logs errors and
- always returns zero.
- </para>
- </listitem>
- </itemizedlist>
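-
- <para>
- A minimal sketch of how a driver module might use the bulk interface
- described above (the my_algs array and the module names are
- placeholders; the actual algorithm definitions are left out):
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/module.h>
-#include <linux/crypto.h>
-
-/* transformation descriptions (.cra_name, callbacks, ...) provided
- * by the driver; the actual definitions are omitted from this sketch */
-static struct crypto_alg my_algs[2];
-
-static int __init my_crypto_init(void)
-{
-	/* registers all entries or none; rollback happens on error */
-	return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
-}
-
-static void __exit my_crypto_exit(void)
-{
-	crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
-}
-
-module_init(my_crypto_init);
-module_exit(my_crypto_exit);
-MODULE_LICENSE("GPL");
-]]>
- </programlisting>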
-
- </sect1>
-
- <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
- <para>
- Example of transformations: aes, arc4, ...
- </para>
-
- <para>
- This section describes the simplest of all transformation
- implementations, that being the CIPHER type used for symmetric
- ciphers. The CIPHER type is used for transformations which
- operate on exactly one block at a time and there are no
- dependencies between blocks at all.
- </para>
-
- <sect2><title>Registration specifics</title>
- <para>
- The registration of a [CIPHER] algorithm is specific in that the
- struct crypto_alg field .cra_type is empty. The .cra_u.cipher
- has to be filled in with proper callbacks to implement this
- transformation.
- </para>
-
- <para>
- See struct cipher_alg below.
- </para>
- </sect2>
-
- <sect2><title>Cipher Definition With struct cipher_alg</title>
- <para>
- Struct cipher_alg defines a single block cipher.
- </para>
-
- <para>
- Here are schematics of how these functions are called when
- operated from other parts of the kernel. Note that the
- .cia_setkey() call might happen before or after any of these
- schematics, but must not happen while any of them
- is in flight.
- </para>
-
- <para>
- <programlisting>
- KEY ---. PLAINTEXT ---.
- v v
- .cia_setkey() -&gt; .cia_encrypt()
- |
- '-----&gt; CIPHERTEXT
- </programlisting>
- </para>
-
- <para>
- Please note that a pattern where .cia_setkey() is called
- multiple times is also valid:
- </para>
-
- <para>
- <programlisting>
-
- KEY1 --. PLAINTEXT1 --. KEY2 --. PLAINTEXT2 --.
- v v v v
- .cia_setkey() -&gt; .cia_encrypt() -&gt; .cia_setkey() -&gt; .cia_encrypt()
- | |
- '---&gt; CIPHERTEXT1 '---&gt; CIPHERTEXT2
- </programlisting>
- </para>
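-
- <para>
- A hedged sketch of the corresponding registration data (the my_*
- callbacks and context structure are placeholders a driver would
- provide; the block and key sizes shown are those of AES):
- </para>
-
- <programlisting>
-<![CDATA[
-#include <linux/module.h>
-#include <linux/crypto.h>
-
-/* placeholder context and callbacks a driver would implement */
-struct my_cipher_ctx {
-	u8 key[32];
-	unsigned int keylen;
-};
-
-static int my_setkey(struct crypto_tfm *tfm, const u8 *key,
-		     unsigned int keylen);
-static void my_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-static void my_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-
-static struct crypto_alg my_cipher_alg = {
-	.cra_name		= "mycipher",
-	.cra_driver_name	= "mycipher-generic",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize		= 16,
-	.cra_ctxsize		= sizeof(struct my_cipher_ctx),
-	.cra_module		= THIS_MODULE,
-	/* .cra_type stays empty for the CIPHER type, see above */
-	.cra_u = {
-		.cipher = {
-			.cia_min_keysize	= 16,
-			.cia_max_keysize	= 32,
-			.cia_setkey		= my_setkey,
-			.cia_encrypt		= my_encrypt,
-			.cia_decrypt		= my_decrypt,
-		},
-	},
-};
-]]>
- </programlisting>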
-
- </sect2>
- </sect1>
-
- <sect1><title>Multi-Block Ciphers</title>
- <para>
- Example of transformations: cbc(aes), ecb(arc4), ...
- </para>
-
- <para>
- This section describes the multi-block cipher transformation
- implementations. The multi-block ciphers are
- used for transformations which operate on scatterlists of
- data supplied to the transformation functions. They output
- the result into a scatterlist of data as well.
- </para>
-
- <sect2><title>Registration Specifics</title>
-
- <para>
- The registration of multi-block cipher algorithms
- is one of the most standard procedures throughout the crypto API.
- </para>
-
- <para>
- Note: if a cipher implementation requires a proper alignment
- of data, the caller should use the crypto_skcipher_alignmask()
- function to identify the memory alignment mask.
- The kernel crypto API is able to process requests that are unaligned.
- This implies, however, additional overhead as the kernel
- crypto API needs to perform the realignment of the data which
- may imply moving of data.
- </para>
- </sect2>
-
- <sect2><title>Cipher Definition With struct blkcipher_alg and ablkcipher_alg</title>
- <para>
- Struct blkcipher_alg defines a synchronous block cipher whereas
- struct ablkcipher_alg defines an asynchronous block cipher.
- </para>
-
- <para>
- Please refer to the single block cipher description for schematics
- of the block cipher usage.
- </para>
- </sect2>
-
- <sect2><title>Specifics Of Asynchronous Multi-Block Cipher</title>
- <para>
- There are a couple of specifics to the asynchronous interface.
- </para>
-
- <para>
- First of all, some of the drivers will want to use the
- Generic ScatterWalk in case the hardware needs to be fed
- separate chunks of the scatterlist which contains the
- plaintext and will contain the ciphertext. Please refer
- to the ScatterWalk interface offered by the Linux kernel
- scatter / gather list implementation.
- </para>
- </sect2>
- </sect1>
-
- <sect1><title>Hashing [HASH]</title>
-
- <para>
- Example of transformations: crc32, md5, sha1, sha256,...
- </para>
-
- <sect2><title>Registering And Unregistering The Transformation</title>
-
- <para>
- There are multiple ways to register a HASH transformation,
- depending on whether the transformation is synchronous [SHASH]
- or asynchronous [AHASH] and the amount of HASH transformations
- we are registering. You can find the prototypes defined in
- include/crypto/internal/hash.h:
- </para>
-
- <programlisting>
- int crypto_register_ahash(struct ahash_alg *alg);
-
- int crypto_register_shash(struct shash_alg *alg);
- int crypto_register_shashes(struct shash_alg *algs, int count);
- </programlisting>
-
- <para>
- The respective counterparts for unregistering the HASH
- transformation are as follows:
- </para>
-
- <programlisting>
- int crypto_unregister_ahash(struct ahash_alg *alg);
-
- int crypto_unregister_shash(struct shash_alg *alg);
- int crypto_unregister_shashes(struct shash_alg *algs, int count);
- </programlisting>
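-
- <para>
- For the synchronous case, a hedged sketch of the structure handed to
- crypto_register_shash() may look as follows (the my_* callbacks and
- the descriptor context are placeholders; the digest and block sizes
- shown are those of SHA-256). The structure is passed to
- crypto_register_shash() from the driver's init path and to
- crypto_unregister_shash() on removal:
- </para>
-
- <programlisting>
-<![CDATA[
-#include <crypto/internal/hash.h>
-
-/* placeholder per-request state and callbacks a driver would implement */
-struct my_hash_desc_ctx {
-	u32 state[8];
-};
-
-static int my_hash_init(struct shash_desc *desc);
-static int my_hash_update(struct shash_desc *desc, const u8 *data,
-			  unsigned int len);
-static int my_hash_final(struct shash_desc *desc, u8 *out);
-
-static struct shash_alg my_shash_alg = {
-	.digestsize	= 32,			/* SHA-256 output size */
-	.init		= my_hash_init,
-	.update		= my_hash_update,
-	.final		= my_hash_final,
-	.descsize	= sizeof(struct my_hash_desc_ctx),
-	.base		= {
-		.cra_name		= "myhash",
-		.cra_driver_name	= "myhash-generic",
-		.cra_priority		= 100,
-		.cra_blocksize		= 64,	/* SHA-256 block size */
-		.cra_module		= THIS_MODULE,
-	},
-};
-]]>
- </programlisting>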
- </sect2>
-
- <sect2><title>Cipher Definition With struct shash_alg and ahash_alg</title>
- <para>
- Here are schematics of how these functions are called when
- operated from other parts of the kernel. Note that the .setkey()
- call might happen before or after any of these schematics,
- but must not happen while any of them is in flight. Please note
- that calling .init() followed immediately by .final() is also a
- perfectly valid transformation.
- </para>
-
- <programlisting>
- I) DATA -----------.
- v
- .init() -&gt; .update() -&gt; .final() ! .update() might not be called
- ^ | | at all in this scenario.
- '----' '---&gt; HASH
-
- II) DATA -----------.-----------.
- v v
- .init() -&gt; .update() -&gt; .finup() ! .update() may not be called
- ^ | | at all in this scenario.
- '----' '---&gt; HASH
-
- III) DATA -----------.
- v
- .digest() ! The entire process is handled
- | by the .digest() call.
- '---------------&gt; HASH
- </programlisting>
-
- <para>
- Here is a schematic of how the .export()/.import() functions are
- called when used from another part of the kernel.
- </para>
-
- <programlisting>
- KEY--. DATA--.
- v v ! .update() may not be called
- .setkey() -&gt; .init() -&gt; .update() -&gt; .export() at all in this scenario.
- ^ | |
- '-----' '--&gt; PARTIAL_HASH
-
- ----------- other transformations happen here -----------
-
- PARTIAL_HASH--. DATA1--.
- v v
- .import -&gt; .update() -&gt; .final() ! .update() may not be called
- ^ | | at all in this scenario.
- '----' '--&gt; HASH1
-
- PARTIAL_HASH--. DATA2-.
- v v
- .import -&gt; .finup()
- |
- '---------------&gt; HASH2
- </programlisting>
- </sect2>
-
- <sect2><title>Specifics Of Asynchronous HASH Transformation</title>
- <para>
- Some of the drivers will want to use the Generic ScatterWalk
- in case the implementation needs to be fed separate chunks of the
- scatterlist which contains the input data. The buffer containing
- the resulting hash will always be properly aligned to
- .cra_alignmask so there is no need to worry about this.
- </para>
- </sect2>
- </sect1>
- </chapter>
-
- <chapter id="User"><title>User Space Interface</title>
- <sect1><title>Introduction</title>
- <para>
- The concepts of the kernel crypto API visible to kernel space are fully
- applicable to the user space interface as well. Therefore, the kernel
- crypto API high level discussion for the in-kernel use cases applies
- here as well.
- </para>
-
- <para>
- The major difference, however, is that user space can only act as a
- consumer and never as a provider of a transformation or cipher algorithm.
- </para>
-
- <para>
- The following covers the user space interface exported by the kernel
- crypto API. A working example of this description is libkcapi that
- can be obtained from [1]. That library can be used by user space
- applications that require cryptographic services from the kernel.
- </para>
-
- <para>
-      Some aspects of the in-kernel crypto API do not apply to user space,
-      however. This includes the difference between synchronous and
-      asynchronous invocations: the user space API calls are fully
-      synchronous.
- </para>
-
- <para>
- [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
- </para>
-
- </sect1>
-
- <sect1><title>User Space API General Remarks</title>
- <para>
- The kernel crypto API is accessible from user space. Currently,
- the following ciphers are accessible:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>Message digest including keyed message digest (HMAC, CMAC)</para>
- </listitem>
-
- <listitem>
- <para>Symmetric ciphers</para>
- </listitem>
-
- <listitem>
- <para>AEAD ciphers</para>
- </listitem>
-
- <listitem>
- <para>Random Number Generators</para>
- </listitem>
- </itemizedlist>
-
- <para>
-      The interface is provided via a socket of the AF_ALG address family.
-      In addition, the setsockopt level to use is SOL_ALG. In case the
-      user space header files do not export these constants yet, use the
-      following macros:
- </para>
-
- <programlisting>
-#ifndef AF_ALG
-#define AF_ALG 38
-#endif
-#ifndef SOL_ALG
-#define SOL_ALG 279
-#endif
- </programlisting>
-
- <para>
- A cipher is accessed with the same name as done for the in-kernel
- API calls. This includes the generic vs. unique naming schema for
- ciphers as well as the enforcement of priorities for generic names.
- </para>
-
- <para>
- To interact with the kernel crypto API, a socket must be
- created by the user space application. User space invokes the cipher
- operation with the send()/write() system call family. The result of the
- cipher operation is obtained with the read()/recv() system call family.
- </para>
-
- <para>
- The following API calls assume that the socket descriptor
-      is already opened by the user space application and discuss only
- the kernel crypto API specific invocations.
- </para>
-
- <para>
- To initialize the socket interface, the following sequence has to
- be performed by the consumer:
- </para>
-
- <orderedlist>
- <listitem>
- <para>
- Create a socket of type AF_ALG with the struct sockaddr_alg
- parameter specified below for the different cipher types.
- </para>
- </listitem>
-
- <listitem>
- <para>
- Invoke bind with the socket descriptor
- </para>
- </listitem>
-
- <listitem>
- <para>
- Invoke accept with the socket descriptor. The accept system call
- returns a new file descriptor that is to be used to interact with
- the particular cipher instance. When invoking send/write or recv/read
- system calls to send data to the kernel or obtain data from the
- kernel, the file descriptor returned by accept must be used.
- </para>
- </listitem>
- </orderedlist>
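-
-    <para>
-     The following minimal user space sketch shows this sequence for a
-     SHA-1 message digest socket; error handling is shortened and the
-     headers sys/socket.h, linux/if_alg.h and unistd.h are assumed to be
-     included:
-    </para>
-
-    <programlisting>
-static int sha1_socket_setup(void)
-{
-    struct sockaddr_alg sa = {
-        .salg_family = AF_ALG,
-        .salg_type = "hash",    /* step 1: the cipher type */
-        .salg_name = "sha1"     /* step 1: the cipher name */
-    };
-    int tfmfd, opfd;
-
-    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);                 /* step 1 */
-    if (tfmfd &lt; 0)
-        return -1;
-
-    if (bind(tfmfd, (struct sockaddr *)&amp;sa, sizeof(sa)) &lt; 0)   /* step 2 */
-        return -1;
-
-    opfd = accept(tfmfd, NULL, 0);                             /* step 3 */
-    return opfd;    /* use opfd with the send/recv system call family */
-}
-    </programlisting>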
- </sect1>
-
- <sect1><title>In-place Cipher operation</title>
- <para>
- Just like the in-kernel operation of the kernel crypto API, the user
- space interface allows the cipher operation in-place. That means that
- the input buffer used for the send/write system call and the output
- buffer used by the read/recv system call may be one and the same.
- This is of particular interest for symmetric cipher operations where a
- copying of the output data to its final destination can be avoided.
- </para>
-
- <para>
- If a consumer on the other hand wants to maintain the plaintext and
- the ciphertext in different memory locations, all a consumer needs
- to do is to provide different memory pointers for the encryption and
- decryption operation.
- </para>
- </sect1>
-
- <sect1><title>Message Digest API</title>
- <para>
- The message digest type to be used for the cipher operation is
- selected when invoking the bind syscall. bind requires the caller
- to provide a filled struct sockaddr data structure. This data
- structure must be filled as follows:
- </para>
-
- <programlisting>
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
- .salg_type = "hash", /* this selects the hash logic in the kernel */
- .salg_name = "sha1" /* this is the cipher name */
-};
- </programlisting>
-
- <para>
-      The salg_type value "hash" applies to message digests and keyed
-      message digests alike; a keyed message digest is simply referenced by
-      the appropriate salg_name. Please see below for the setsockopt
- interface that explains how the key can be set for a keyed message
- digest.
- </para>
-
- <para>
- Using the send() system call, the application provides the data that
- should be processed with the message digest. The send system call
- allows the following flags to be specified:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- MSG_MORE: If this flag is set, the send system call acts like a
- message digest update function where the final hash is not
- yet calculated. If the flag is not set, the send system call
- calculates the final message digest immediately.
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- With the recv() system call, the application can read the message
- digest from the kernel crypto API. If the buffer is too small for the
- message digest, the flag MSG_TRUNC is set by the kernel.
- </para>
-
- <para>
- In order to set a message digest key, the calling application must use
- the setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC
- operation is performed without the initial HMAC state change caused by
- the key.
- </para>
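-
-    <para>
-     A minimal sketch of a one-shot SHA-1 calculation, assuming opfd was
-     obtained with the socket setup shown earlier (error handling omitted):
-    </para>
-
-    <programlisting>
-static int sha1_digest(int opfd, const void *in, size_t inlen,
-                       unsigned char digest[20])
-{
-    /* MSG_MORE is not set, so this send() finalizes the message digest */
-    if (send(opfd, in, inlen, 0) != (ssize_t)inlen)
-        return -1;
-    if (read(opfd, digest, 20) != 20)
-        return -1;
-    return 0;
-}
-    </programlisting>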
- </sect1>
-
- <sect1><title>Symmetric Cipher API</title>
- <para>
- The operation is very similar to the message digest discussion.
- During initialization, the struct sockaddr data structure must be
- filled as follows:
- </para>
-
- <programlisting>
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
- .salg_type = "skcipher", /* this selects the symmetric cipher */
- .salg_name = "cbc(aes)" /* this is the cipher name */
-};
- </programlisting>
-
- <para>
- Before data can be sent to the kernel using the write/send system
- call family, the consumer must set the key. The key setting is
- described with the setsockopt invocation below.
- </para>
-
- <para>
-      Using the sendmsg() system call, the application provides the data
-      that should be processed for encryption or decryption. In addition,
-      the IV is specified with the data structure provided by the sendmsg()
-      system call.
- </para>
-
- <para>
-      The struct msghdr passed to the sendmsg system call carries additional
-      meta information in struct cmsghdr entries. See recv(2) and cmsg(3)
-      for more information on how the cmsghdr data structure is used
-      together with the send/recv system call family. Those cmsghdr entries
-      hold the following information, each specified with a separate header
-      instance:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- specification of the cipher operation type with one of these flags:
- </para>
- <itemizedlist>
- <listitem>
- <para>ALG_OP_ENCRYPT - encryption of data</para>
- </listitem>
- <listitem>
- <para>ALG_OP_DECRYPT - decryption of data</para>
- </listitem>
- </itemizedlist>
- </listitem>
-
- <listitem>
- <para>
- specification of the IV information marked with the flag ALG_SET_IV
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- The send system call family allows the following flag to be specified:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- MSG_MORE: If this flag is set, the send system call acts like a
- cipher update function where more input data is expected
- with a subsequent invocation of the send system call.
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- Note: The kernel reports -EINVAL for any unexpected data. The caller
- must make sure that all data matches the constraints given in
- /proc/crypto for the selected cipher.
- </para>
-
- <para>
- With the recv() system call, the application can read the result of
- the cipher operation from the kernel crypto API. The output buffer
-      must be at least large enough to hold all blocks of the encrypted or
-      decrypted data. If the output buffer is smaller, only as many
-      blocks as fit into that buffer are returned.
- </para>
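-
-     <para>
-      The following sketch encrypts one 16-byte block with cbc(aes),
-      assuming opfd was obtained as described above and the key was already
-      set via setsockopt (see below). The af_alg_iv structure and the ALG_*
-      constants come from linux/if_alg.h; error handling is reduced to a
-      minimum:
-     </para>
-
-     <programlisting>
-static int encrypt_block(int opfd, const unsigned char *iv_data,
-                         unsigned char *pt, unsigned char *ct)
-{
-    char cbuf[CMSG_SPACE(sizeof(__u32)) +
-              CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
-    struct msghdr msg = { 0 };
-    struct cmsghdr *cmsg;
-    struct af_alg_iv *ivm;
-    struct iovec iov;
-
-    msg.msg_control = cbuf;
-    msg.msg_controllen = sizeof(cbuf);
-
-    /* first cmsghdr instance: select the operation type */
-    cmsg = CMSG_FIRSTHDR(&amp;msg);
-    cmsg-&gt;cmsg_level = SOL_ALG;
-    cmsg-&gt;cmsg_type = ALG_SET_OP;
-    cmsg-&gt;cmsg_len = CMSG_LEN(sizeof(__u32));
-    *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
-
-    /* second cmsghdr instance: provide the IV */
-    cmsg = CMSG_NXTHDR(&amp;msg, cmsg);
-    cmsg-&gt;cmsg_level = SOL_ALG;
-    cmsg-&gt;cmsg_type = ALG_SET_IV;
-    cmsg-&gt;cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
-    ivm = (void *)CMSG_DATA(cmsg);
-    ivm-&gt;ivlen = 16;
-    memcpy(ivm-&gt;iv, iv_data, 16);
-
-    /* the plaintext is passed as the regular data of the message */
-    iov.iov_base = pt;
-    iov.iov_len = 16;
-    msg.msg_iov = &amp;iov;
-    msg.msg_iovlen = 1;
-
-    if (sendmsg(opfd, &amp;msg, 0) != 16)
-        return -1;
-    if (read(opfd, ct, 16) != 16)
-        return -1;
-    return 0;
-}
-     </programlisting>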
- </sect1>
-
- <sect1><title>AEAD Cipher API</title>
- <para>
- The operation is very similar to the symmetric cipher discussion.
- During initialization, the struct sockaddr data structure must be
- filled as follows:
- </para>
-
- <programlisting>
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
-    .salg_type = "aead", /* this selects the AEAD cipher */
- .salg_name = "gcm(aes)" /* this is the cipher name */
-};
- </programlisting>
-
- <para>
- Before data can be sent to the kernel using the write/send system
- call family, the consumer must set the key. The key setting is
- described with the setsockopt invocation below.
- </para>
-
- <para>
- In addition, before data can be sent to the kernel using the
- write/send system call family, the consumer must set the authentication
- tag size. To set the authentication tag size, the caller must use the
- setsockopt invocation described below.
- </para>
-
- <para>
-      Using the sendmsg() system call, the application provides the data
-      that should be processed for encryption or decryption. In addition,
-      the IV is specified with the data structure provided by the sendmsg()
-      system call.
- </para>
-
- <para>
-      The struct msghdr passed to the sendmsg system call carries additional
-      meta information in struct cmsghdr entries. See recv(2) and cmsg(3)
-      for more information on how the cmsghdr data structure is used
-      together with the send/recv system call family. Those cmsghdr entries
-      hold the following information, each specified with a separate header
-      instance:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- specification of the cipher operation type with one of these flags:
- </para>
- <itemizedlist>
- <listitem>
- <para>ALG_OP_ENCRYPT - encryption of data</para>
- </listitem>
- <listitem>
- <para>ALG_OP_DECRYPT - decryption of data</para>
- </listitem>
- </itemizedlist>
- </listitem>
-
- <listitem>
- <para>
- specification of the IV information marked with the flag ALG_SET_IV
- </para>
- </listitem>
-
- <listitem>
- <para>
- specification of the associated authentication data (AAD) with the
- flag ALG_SET_AEAD_ASSOCLEN. The AAD is sent to the kernel together
- with the plaintext / ciphertext. See below for the memory structure.
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- The send system call family allows the following flag to be specified:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- MSG_MORE: If this flag is set, the send system call acts like a
- cipher update function where more input data is expected
- with a subsequent invocation of the send system call.
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- Note: The kernel reports -EINVAL for any unexpected data. The caller
- must make sure that all data matches the constraints given in
- /proc/crypto for the selected cipher.
- </para>
-
- <para>
- With the recv() system call, the application can read the result of
- the cipher operation from the kernel crypto API. The output buffer
- must be at least as large as defined with the memory structure below.
- If the output data size is smaller, the cipher operation is not performed.
- </para>
-
- <para>
- The authenticated decryption operation may indicate an integrity error.
- Such breach in integrity is marked with the -EBADMSG error code.
- </para>
-
- <sect2><title>AEAD Memory Structure</title>
- <para>
- The AEAD cipher operates with the following information that
- is communicated between user and kernel space as one data stream:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>plaintext or ciphertext</para>
- </listitem>
-
- <listitem>
- <para>associated authentication data (AAD)</para>
- </listitem>
-
- <listitem>
- <para>authentication tag</para>
- </listitem>
- </itemizedlist>
-
- <para>
- The sizes of the AAD and the authentication tag are provided with
- the sendmsg and setsockopt calls (see there). As the kernel knows
- the size of the entire data stream, the kernel is now able to
- calculate the right offsets of the data components in the data
- stream.
- </para>
-
- <para>
- The user space caller must arrange the aforementioned information
- in the following order:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- AEAD encryption input: AAD || plaintext
- </para>
- </listitem>
-
- <listitem>
- <para>
- AEAD decryption input: AAD || ciphertext || authentication tag
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
-       The output buffer the user space caller provides must be at least
-       large enough to hold the following data:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- AEAD encryption output: ciphertext || authentication tag
- </para>
- </listitem>
-
- <listitem>
- <para>
- AEAD decryption output: plaintext
- </para>
- </listitem>
- </itemizedlist>
- </sect2>
- </sect1>
-
- <sect1><title>Random Number Generator API</title>
- <para>
- Again, the operation is very similar to the other APIs.
- During initialization, the struct sockaddr data structure must be
- filled as follows:
- </para>
-
- <programlisting>
-struct sockaddr_alg sa = {
- .salg_family = AF_ALG,
-    .salg_type = "rng", /* this selects the random number generator */
-    .salg_name = "drbg_nopr_sha256" /* this is the RNG name */
-};
- </programlisting>
-
- <para>
- Depending on the RNG type, the RNG must be seeded. The seed is provided
- using the setsockopt interface to set the key. For example, the
- ansi_cprng requires a seed. The DRBGs do not require a seed, but
- may be seeded.
- </para>
-
- <para>
- Using the read()/recvmsg() system calls, random numbers can be obtained.
- The kernel generates at most 128 bytes in one call. If user space
- requires more data, multiple calls to read()/recvmsg() must be made.
- </para>
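-
-     <para>
-      A minimal sketch that collects an arbitrary amount of random data,
-      assuming opfd stems from a socket bound to the "rng" type as shown
-      above:
-     </para>
-
-     <programlisting>
-static int get_rng_bytes(int opfd, unsigned char *buf, size_t buflen)
-{
-    size_t done = 0;
-
-    /* each read() returns at most 128 bytes */
-    while (done &lt; buflen) {
-        ssize_t n = read(opfd, buf + done, buflen - done);
-
-        if (n &lt;= 0)
-            return -1;
-        done += n;
-    }
-    return 0;
-}
-     </programlisting>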
-
- <para>
- WARNING: The user space caller may invoke the initially mentioned
- accept system call multiple times. In this case, the returned file
- descriptors have the same state.
- </para>
-
- </sect1>
-
- <sect1><title>Zero-Copy Interface</title>
- <para>
- In addition to the send/write/read/recv system call family, the AF_ALG
- interface can be accessed with the zero-copy interface of splice/vmsplice.
- As the name indicates, the kernel tries to avoid a copy operation into
- kernel space.
- </para>
-
- <para>
- The zero-copy operation requires data to be aligned at the page boundary.
- Non-aligned data can be used as well, but may require more operations of
- the kernel which would defeat the speed gains obtained from the zero-copy
- interface.
- </para>
-
- <para>
-      The system-inherent limit for the size of one zero-copy operation is
- 16 pages. If more data is to be sent to AF_ALG, user space must slice
- the input into segments with a maximum size of 16 pages.
- </para>
-
- <para>
- Zero-copy can be used with the following code example (a complete working
- example is provided with libkcapi):
- </para>
-
- <programlisting>
-int pipes[2];
-
-pipe(pipes);
-/* input data in iov */
-vmsplice(pipes[1], iov, iovlen, SPLICE_F_GIFT);
-/* opfd is the file descriptor returned from accept() system call */
-splice(pipes[0], NULL, opfd, NULL, ret, 0);
-read(opfd, out, outlen);
- </programlisting>
-
- </sect1>
-
- <sect1><title>Setsockopt Interface</title>
- <para>
- In addition to the read/recv and send/write system call handling
- to send and retrieve data subject to the cipher operation, a consumer
- also needs to set the additional information for the cipher operation.
-      This additional information is set using the setsockopt system call
-      that must be invoked with the file descriptor holding the cipher
-      reference (i.e. the file descriptor that was bound to the cipher
-      with the bind system call).
- </para>
-
- <para>
- Each setsockopt invocation must use the level SOL_ALG.
- </para>
-
- <para>
- The setsockopt interface allows setting the following data using
- the mentioned optname:
- </para>
-
- <itemizedlist>
- <listitem>
- <para>
- ALG_SET_KEY -- Setting the key. Key setting is applicable to:
- </para>
- <itemizedlist>
- <listitem>
- <para>the skcipher cipher type (symmetric ciphers)</para>
- </listitem>
- <listitem>
- <para>the hash cipher type (keyed message digests)</para>
- </listitem>
- <listitem>
- <para>the AEAD cipher type</para>
- </listitem>
- <listitem>
- <para>the RNG cipher type to provide the seed</para>
- </listitem>
- </itemizedlist>
- </listitem>
-
- <listitem>
- <para>
- ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size
-        for AEAD ciphers. For an encryption operation, the authentication
-        tag of the given size will be generated. For a decryption operation,
-        the provided ciphertext is assumed to contain an authentication tag
-        of the given size (see the section about the AEAD memory layout above).
- </para>
- </listitem>
- </itemizedlist>
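-
-    <para>
-     A minimal sketch of both setsockopt calls; tfmfd is the file descriptor
-     that was bound to the cipher, and the ALG_SET_AEAD_AUTHSIZE call only
-     applies to sockets of the "aead" type. Note that the authentication tag
-     size is passed via the option length:
-    </para>
-
-    <programlisting>
-static int set_key_and_authsize(int tfmfd, const unsigned char *key,
-                                size_t keylen, int authsize)
-{
-    /* ALG_SET_KEY: skcipher/AEAD key, keyed message digest key or RNG seed */
-    if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen))
-        return -1;
-
-    /* ALG_SET_AEAD_AUTHSIZE: the tag size is given as the option length */
-    if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, authsize))
-        return -1;
-
-    return 0;
-}
-    </programlisting>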
-
- </sect1>
-
- <sect1><title>User space API example</title>
- <para>
- Please see [1] for libkcapi which provides an easy-to-use wrapper
-     around the aforementioned AF_ALG kernel interface. [1] also contains
- a test application that invokes all libkcapi API calls.
- </para>
-
- <para>
- [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
- </para>
-
- </sect1>
-
- </chapter>
-
- <chapter id="API"><title>Programming Interface</title>
- <para>
- Please note that the kernel crypto API contains the AEAD givcrypt
- API (crypto_aead_giv* and aead_givcrypt_* function calls in
- include/crypto/aead.h). This API is obsolete and will be removed
- in the future. To obtain the functionality of an AEAD cipher with
- internal IV generation, use the IV generator as a regular cipher.
- For example, rfc4106(gcm(aes)) is the AEAD cipher with external
- IV generation and seqniv(rfc4106(gcm(aes))) implies that the kernel
- crypto API generates the IV. Different IV generators are available.
- </para>
- <sect1><title>Block Cipher Context Data Structures</title>
-!Pinclude/linux/crypto.h Block Cipher Context Data Structures
-!Finclude/crypto/aead.h aead_request
- </sect1>
- <sect1><title>Block Cipher Algorithm Definitions</title>
-!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
-!Finclude/linux/crypto.h crypto_alg
-!Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/crypto/aead.h aead_alg
-!Finclude/linux/crypto.h blkcipher_alg
-!Finclude/linux/crypto.h cipher_alg
-!Finclude/crypto/rng.h rng_alg
- </sect1>
- <sect1><title>Symmetric Key Cipher API</title>
-!Pinclude/crypto/skcipher.h Symmetric Key Cipher API
-!Finclude/crypto/skcipher.h crypto_alloc_skcipher
-!Finclude/crypto/skcipher.h crypto_free_skcipher
-!Finclude/crypto/skcipher.h crypto_has_skcipher
-!Finclude/crypto/skcipher.h crypto_skcipher_ivsize
-!Finclude/crypto/skcipher.h crypto_skcipher_blocksize
-!Finclude/crypto/skcipher.h crypto_skcipher_setkey
-!Finclude/crypto/skcipher.h crypto_skcipher_reqtfm
-!Finclude/crypto/skcipher.h crypto_skcipher_encrypt
-!Finclude/crypto/skcipher.h crypto_skcipher_decrypt
- </sect1>
- <sect1><title>Symmetric Key Cipher Request Handle</title>
-!Pinclude/crypto/skcipher.h Symmetric Key Cipher Request Handle
-!Finclude/crypto/skcipher.h crypto_skcipher_reqsize
-!Finclude/crypto/skcipher.h skcipher_request_set_tfm
-!Finclude/crypto/skcipher.h skcipher_request_alloc
-!Finclude/crypto/skcipher.h skcipher_request_free
-!Finclude/crypto/skcipher.h skcipher_request_set_callback
-!Finclude/crypto/skcipher.h skcipher_request_set_crypt
- </sect1>
- <sect1><title>Asynchronous Block Cipher API - Deprecated</title>
-!Pinclude/linux/crypto.h Asynchronous Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_ablkcipher
-!Finclude/linux/crypto.h crypto_free_ablkcipher
-!Finclude/linux/crypto.h crypto_has_ablkcipher
-!Finclude/linux/crypto.h crypto_ablkcipher_ivsize
-!Finclude/linux/crypto.h crypto_ablkcipher_blocksize
-!Finclude/linux/crypto.h crypto_ablkcipher_setkey
-!Finclude/linux/crypto.h crypto_ablkcipher_reqtfm
-!Finclude/linux/crypto.h crypto_ablkcipher_encrypt
-!Finclude/linux/crypto.h crypto_ablkcipher_decrypt
- </sect1>
- <sect1><title>Asynchronous Cipher Request Handle - Deprecated</title>
-!Pinclude/linux/crypto.h Asynchronous Cipher Request Handle
-!Finclude/linux/crypto.h crypto_ablkcipher_reqsize
-!Finclude/linux/crypto.h ablkcipher_request_set_tfm
-!Finclude/linux/crypto.h ablkcipher_request_alloc
-!Finclude/linux/crypto.h ablkcipher_request_free
-!Finclude/linux/crypto.h ablkcipher_request_set_callback
-!Finclude/linux/crypto.h ablkcipher_request_set_crypt
- </sect1>
- <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
-!Pinclude/crypto/aead.h Authenticated Encryption With Associated Data (AEAD) Cipher API
-!Finclude/crypto/aead.h crypto_alloc_aead
-!Finclude/crypto/aead.h crypto_free_aead
-!Finclude/crypto/aead.h crypto_aead_ivsize
-!Finclude/crypto/aead.h crypto_aead_authsize
-!Finclude/crypto/aead.h crypto_aead_blocksize
-!Finclude/crypto/aead.h crypto_aead_setkey
-!Finclude/crypto/aead.h crypto_aead_setauthsize
-!Finclude/crypto/aead.h crypto_aead_encrypt
-!Finclude/crypto/aead.h crypto_aead_decrypt
- </sect1>
- <sect1><title>Asynchronous AEAD Request Handle</title>
-!Pinclude/crypto/aead.h Asynchronous AEAD Request Handle
-!Finclude/crypto/aead.h crypto_aead_reqsize
-!Finclude/crypto/aead.h aead_request_set_tfm
-!Finclude/crypto/aead.h aead_request_alloc
-!Finclude/crypto/aead.h aead_request_free
-!Finclude/crypto/aead.h aead_request_set_callback
-!Finclude/crypto/aead.h aead_request_set_crypt
-!Finclude/crypto/aead.h aead_request_set_ad
- </sect1>
- <sect1><title>Synchronous Block Cipher API - Deprecated</title>
-!Pinclude/linux/crypto.h Synchronous Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_blkcipher
-!Finclude/linux/crypto.h crypto_free_blkcipher
-!Finclude/linux/crypto.h crypto_has_blkcipher
-!Finclude/linux/crypto.h crypto_blkcipher_name
-!Finclude/linux/crypto.h crypto_blkcipher_ivsize
-!Finclude/linux/crypto.h crypto_blkcipher_blocksize
-!Finclude/linux/crypto.h crypto_blkcipher_setkey
-!Finclude/linux/crypto.h crypto_blkcipher_encrypt
-!Finclude/linux/crypto.h crypto_blkcipher_encrypt_iv
-!Finclude/linux/crypto.h crypto_blkcipher_decrypt
-!Finclude/linux/crypto.h crypto_blkcipher_decrypt_iv
-!Finclude/linux/crypto.h crypto_blkcipher_set_iv
-!Finclude/linux/crypto.h crypto_blkcipher_get_iv
- </sect1>
- <sect1><title>Single Block Cipher API</title>
-!Pinclude/linux/crypto.h Single Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_cipher
-!Finclude/linux/crypto.h crypto_free_cipher
-!Finclude/linux/crypto.h crypto_has_cipher
-!Finclude/linux/crypto.h crypto_cipher_blocksize
-!Finclude/linux/crypto.h crypto_cipher_setkey
-!Finclude/linux/crypto.h crypto_cipher_encrypt_one
-!Finclude/linux/crypto.h crypto_cipher_decrypt_one
- </sect1>
- <sect1><title>Message Digest Algorithm Definitions</title>
-!Pinclude/crypto/hash.h Message Digest Algorithm Definitions
-!Finclude/crypto/hash.h hash_alg_common
-!Finclude/crypto/hash.h ahash_alg
-!Finclude/crypto/hash.h shash_alg
- </sect1>
- <sect1><title>Asynchronous Message Digest API</title>
-!Pinclude/crypto/hash.h Asynchronous Message Digest API
-!Finclude/crypto/hash.h crypto_alloc_ahash
-!Finclude/crypto/hash.h crypto_free_ahash
-!Finclude/crypto/hash.h crypto_ahash_init
-!Finclude/crypto/hash.h crypto_ahash_digestsize
-!Finclude/crypto/hash.h crypto_ahash_reqtfm
-!Finclude/crypto/hash.h crypto_ahash_reqsize
-!Finclude/crypto/hash.h crypto_ahash_setkey
-!Finclude/crypto/hash.h crypto_ahash_finup
-!Finclude/crypto/hash.h crypto_ahash_final
-!Finclude/crypto/hash.h crypto_ahash_digest
-!Finclude/crypto/hash.h crypto_ahash_export
-!Finclude/crypto/hash.h crypto_ahash_import
- </sect1>
- <sect1><title>Asynchronous Hash Request Handle</title>
-!Pinclude/crypto/hash.h Asynchronous Hash Request Handle
-!Finclude/crypto/hash.h ahash_request_set_tfm
-!Finclude/crypto/hash.h ahash_request_alloc
-!Finclude/crypto/hash.h ahash_request_free
-!Finclude/crypto/hash.h ahash_request_set_callback
-!Finclude/crypto/hash.h ahash_request_set_crypt
- </sect1>
- <sect1><title>Synchronous Message Digest API</title>
-!Pinclude/crypto/hash.h Synchronous Message Digest API
-!Finclude/crypto/hash.h crypto_alloc_shash
-!Finclude/crypto/hash.h crypto_free_shash
-!Finclude/crypto/hash.h crypto_shash_blocksize
-!Finclude/crypto/hash.h crypto_shash_digestsize
-!Finclude/crypto/hash.h crypto_shash_descsize
-!Finclude/crypto/hash.h crypto_shash_setkey
-!Finclude/crypto/hash.h crypto_shash_digest
-!Finclude/crypto/hash.h crypto_shash_export
-!Finclude/crypto/hash.h crypto_shash_import
-!Finclude/crypto/hash.h crypto_shash_init
-!Finclude/crypto/hash.h crypto_shash_update
-!Finclude/crypto/hash.h crypto_shash_final
-!Finclude/crypto/hash.h crypto_shash_finup
- </sect1>
- <sect1><title>Crypto API Random Number API</title>
-!Pinclude/crypto/rng.h Random number generator API
-!Finclude/crypto/rng.h crypto_alloc_rng
-!Finclude/crypto/rng.h crypto_rng_alg
-!Finclude/crypto/rng.h crypto_free_rng
-!Finclude/crypto/rng.h crypto_rng_generate
-!Finclude/crypto/rng.h crypto_rng_get_bytes
-!Finclude/crypto/rng.h crypto_rng_reset
-!Finclude/crypto/rng.h crypto_rng_seedsize
-!Cinclude/crypto/rng.h
- </sect1>
- <sect1><title>Asymmetric Cipher API</title>
-!Pinclude/crypto/akcipher.h Generic Public Key API
-!Finclude/crypto/akcipher.h akcipher_alg
-!Finclude/crypto/akcipher.h akcipher_request
-!Finclude/crypto/akcipher.h crypto_alloc_akcipher
-!Finclude/crypto/akcipher.h crypto_free_akcipher
-!Finclude/crypto/akcipher.h crypto_akcipher_set_pub_key
-!Finclude/crypto/akcipher.h crypto_akcipher_set_priv_key
- </sect1>
- <sect1><title>Asymmetric Cipher Request Handle</title>
-!Finclude/crypto/akcipher.h akcipher_request_alloc
-!Finclude/crypto/akcipher.h akcipher_request_free
-!Finclude/crypto/akcipher.h akcipher_request_set_callback
-!Finclude/crypto/akcipher.h akcipher_request_set_crypt
-!Finclude/crypto/akcipher.h crypto_akcipher_maxsize
-!Finclude/crypto/akcipher.h crypto_akcipher_encrypt
-!Finclude/crypto/akcipher.h crypto_akcipher_decrypt
-!Finclude/crypto/akcipher.h crypto_akcipher_sign
-!Finclude/crypto/akcipher.h crypto_akcipher_verify
- </sect1>
- </chapter>
-
- <chapter id="Code"><title>Code Examples</title>
- <sect1><title>Code Example For Symmetric Key Cipher Operation</title>
- <programlisting>
-
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
-/* tie all data structures together */
-struct skcipher_def {
- struct scatterlist sg;
- struct crypto_skcipher *tfm;
- struct skcipher_request *req;
- struct tcrypt_result result;
-};
-
-/* Callback function */
-static void test_skcipher_cb(struct crypto_async_request *req, int error)
-{
- struct tcrypt_result *result = req-&gt;data;
-
- if (error == -EINPROGRESS)
- return;
- result-&gt;err = error;
- complete(&amp;result-&gt;completion);
- pr_info("Encryption finished successfully\n");
-}
-
-/* Perform cipher operation */
-static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
- int enc)
-{
- int rc = 0;
-
- if (enc)
- rc = crypto_skcipher_encrypt(sk-&gt;req);
- else
- rc = crypto_skcipher_decrypt(sk-&gt;req);
-
- switch (rc) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- rc = wait_for_completion_interruptible(
- &amp;sk-&gt;result.completion);
- if (!rc &amp;&amp; !sk-&gt;result.err) {
- reinit_completion(&amp;sk-&gt;result.completion);
- break;
- }
- default:
- pr_info("skcipher encrypt returned with %d result %d\n",
- rc, sk-&gt;result.err);
- break;
- }
- init_completion(&amp;sk-&gt;result.completion);
-
- return rc;
-}
-
-/* Initialize and trigger cipher operation */
-static int test_skcipher(void)
-{
- struct skcipher_def sk;
- struct crypto_skcipher *skcipher = NULL;
- struct skcipher_request *req = NULL;
- char *scratchpad = NULL;
- char *ivdata = NULL;
- unsigned char key[32];
- int ret = -EFAULT;
-
- skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
- if (IS_ERR(skcipher)) {
- pr_info("could not allocate skcipher handle\n");
- return PTR_ERR(skcipher);
- }
-
- req = skcipher_request_alloc(skcipher, GFP_KERNEL);
- if (!req) {
- pr_info("could not allocate skcipher request\n");
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- test_skcipher_cb,
- &amp;sk.result);
-
- /* AES 256 with random key */
- get_random_bytes(&amp;key, 32);
- if (crypto_skcipher_setkey(skcipher, key, 32)) {
- pr_info("key could not be set\n");
- ret = -EAGAIN;
- goto out;
- }
-
- /* IV will be random */
- ivdata = kmalloc(16, GFP_KERNEL);
- if (!ivdata) {
- pr_info("could not allocate ivdata\n");
- goto out;
- }
- get_random_bytes(ivdata, 16);
-
- /* Input data will be random */
- scratchpad = kmalloc(16, GFP_KERNEL);
- if (!scratchpad) {
- pr_info("could not allocate scratchpad\n");
- goto out;
- }
- get_random_bytes(scratchpad, 16);
-
- sk.tfm = skcipher;
- sk.req = req;
-
- /* We encrypt one block */
- sg_init_one(&amp;sk.sg, scratchpad, 16);
- skcipher_request_set_crypt(req, &amp;sk.sg, &amp;sk.sg, 16, ivdata);
- init_completion(&amp;sk.result.completion);
-
- /* encrypt data */
- ret = test_skcipher_encdec(&amp;sk, 1);
- if (ret)
- goto out;
-
- pr_info("Encryption triggered successfully\n");
-
-out:
- if (skcipher)
- crypto_free_skcipher(skcipher);
- if (req)
- skcipher_request_free(req);
- if (ivdata)
- kfree(ivdata);
- if (scratchpad)
- kfree(scratchpad);
- return ret;
-}
- </programlisting>
- </sect1>
-
- <sect1><title>Code Example For Use of Operational State Memory With SHASH</title>
- <programlisting>
-
-struct sdesc {
- struct shash_desc shash;
- char ctx[];
-};
-
-static struct sdesc *init_sdesc(struct crypto_shash *alg)
-{
-    struct sdesc *sdesc;
- int size;
-
- size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
- sdesc = kmalloc(size, GFP_KERNEL);
- if (!sdesc)
- return ERR_PTR(-ENOMEM);
- sdesc-&gt;shash.tfm = alg;
- sdesc-&gt;shash.flags = 0x0;
- return sdesc;
-}
-
-static int calc_hash(struct crypto_shash *alg,
-                     const unsigned char *data, unsigned int datalen,
-                     unsigned char *digest)
-{
-    struct sdesc *sdesc;
- int ret;
-
- sdesc = init_sdesc(alg);
- if (IS_ERR(sdesc)) {
-        pr_info("can't alloc sdesc\n");
- return PTR_ERR(sdesc);
- }
-
- ret = crypto_shash_digest(&amp;sdesc-&gt;shash, data, datalen, digest);
- kfree(sdesc);
- return ret;
-}
- </programlisting>
- </sect1>
-
- <sect1><title>Code Example For Random Number Generator Usage</title>
- <programlisting>
-
-static int get_random_numbers(u8 *buf, unsigned int len)
-{
-    struct crypto_rng *rng = NULL;
-    char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
- int ret;
-
- if (!buf || !len) {
- pr_debug("No output buffer provided\n");
- return -EINVAL;
- }
-
- rng = crypto_alloc_rng(drbg, 0, 0);
- if (IS_ERR(rng)) {
- pr_debug("could not allocate RNG handle for %s\n", drbg);
-        return PTR_ERR(rng);
- }
-
- ret = crypto_rng_get_bytes(rng, buf, len);
- if (ret &lt; 0)
- pr_debug("generation of random numbers failed\n");
- else if (ret == 0)
- pr_debug("RNG returned no data");
- else
- pr_debug("RNG returned %d bytes of data\n", ret);
-
-out:
- crypto_free_rng(rng);
- return ret;
-}
- </programlisting>
- </sect1>
- </chapter>
- </book>
diff --git a/Documentation/crypto/api-aead.rst b/Documentation/crypto/api-aead.rst
new file mode 100644
index 000000000000..d15256f1ae36
--- /dev/null
+++ b/Documentation/crypto/api-aead.rst
@@ -0,0 +1,23 @@
+Authenticated Encryption With Associated Data (AEAD) Algorithm Definitions
+--------------------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+ :doc: Authenticated Encryption With Associated Data (AEAD) Cipher API
+
+.. kernel-doc:: include/crypto/aead.h
+ :functions: aead_request aead_alg
+
+Authenticated Encryption With Associated Data (AEAD) Cipher API
+---------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+ :functions: crypto_alloc_aead crypto_free_aead crypto_aead_ivsize crypto_aead_authsize crypto_aead_blocksize crypto_aead_setkey crypto_aead_setauthsize crypto_aead_encrypt crypto_aead_decrypt
+
+Asynchronous AEAD Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+ :doc: Asynchronous AEAD Request Handle
+
+.. kernel-doc:: include/crypto/aead.h
+ :functions: crypto_aead_reqsize aead_request_set_tfm aead_request_alloc aead_request_free aead_request_set_callback aead_request_set_crypt aead_request_set_ad
diff --git a/Documentation/crypto/api-akcipher.rst b/Documentation/crypto/api-akcipher.rst
new file mode 100644
index 000000000000..40aa8746e2a1
--- /dev/null
+++ b/Documentation/crypto/api-akcipher.rst
@@ -0,0 +1,20 @@
+Asymmetric Cipher Algorithm Definitions
+---------------------------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+ :functions: akcipher_alg akcipher_request
+
+Asymmetric Cipher API
+---------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+ :doc: Generic Public Key API
+
+.. kernel-doc:: include/crypto/akcipher.h
+ :functions: crypto_alloc_akcipher crypto_free_akcipher crypto_akcipher_set_pub_key crypto_akcipher_set_priv_key crypto_akcipher_maxsize crypto_akcipher_encrypt crypto_akcipher_decrypt crypto_akcipher_sign crypto_akcipher_verify
+
+Asymmetric Cipher Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+ :functions: akcipher_request_alloc akcipher_request_free akcipher_request_set_callback akcipher_request_set_crypt
diff --git a/Documentation/crypto/api-digest.rst b/Documentation/crypto/api-digest.rst
new file mode 100644
index 000000000000..07356fa99200
--- /dev/null
+++ b/Documentation/crypto/api-digest.rst
@@ -0,0 +1,35 @@
+Message Digest Algorithm Definitions
+------------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+ :doc: Message Digest Algorithm Definitions
+
+.. kernel-doc:: include/crypto/hash.h
+ :functions: hash_alg_common ahash_alg shash_alg
+
+Asynchronous Message Digest API
+-------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+ :doc: Asynchronous Message Digest API
+
+.. kernel-doc:: include/crypto/hash.h
+ :functions: crypto_alloc_ahash crypto_free_ahash crypto_ahash_init crypto_ahash_digestsize crypto_ahash_reqtfm crypto_ahash_reqsize crypto_ahash_setkey crypto_ahash_finup crypto_ahash_final crypto_ahash_digest crypto_ahash_export crypto_ahash_import
+
+Asynchronous Hash Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+ :doc: Asynchronous Hash Request Handle
+
+.. kernel-doc:: include/crypto/hash.h
+ :functions: ahash_request_set_tfm ahash_request_alloc ahash_request_free ahash_request_set_callback ahash_request_set_crypt
+
+Synchronous Message Digest API
+------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+ :doc: Synchronous Message Digest API
+
+.. kernel-doc:: include/crypto/hash.h
+ :functions: crypto_alloc_shash crypto_free_shash crypto_shash_blocksize crypto_shash_digestsize crypto_shash_descsize crypto_shash_setkey crypto_shash_digest crypto_shash_export crypto_shash_import crypto_shash_init crypto_shash_update crypto_shash_final crypto_shash_finup
diff --git a/Documentation/crypto/api-kpp.rst b/Documentation/crypto/api-kpp.rst
new file mode 100644
index 000000000000..7d86ab906bdf
--- /dev/null
+++ b/Documentation/crypto/api-kpp.rst
@@ -0,0 +1,38 @@
+Key-agreement Protocol Primitives (KPP) Cipher Algorithm Definitions
+--------------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+ :functions: kpp_request crypto_kpp kpp_alg kpp_secret
+
+Key-agreement Protocol Primitives (KPP) Cipher API
+--------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+ :doc: Generic Key-agreement Protocol Primitives API
+
+.. kernel-doc:: include/crypto/kpp.h
+ :functions: crypto_alloc_kpp crypto_free_kpp crypto_kpp_set_secret crypto_kpp_generate_public_key crypto_kpp_compute_shared_secret crypto_kpp_maxsize
+
+Key-agreement Protocol Primitives (KPP) Cipher Request Handle
+-------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+ :functions: kpp_request_alloc kpp_request_free kpp_request_set_callback kpp_request_set_input kpp_request_set_output
+
+ECDH Helper Functions
+---------------------
+
+.. kernel-doc:: include/crypto/ecdh.h
+ :doc: ECDH Helper Functions
+
+.. kernel-doc:: include/crypto/ecdh.h
+ :functions: ecdh crypto_ecdh_key_len crypto_ecdh_encode_key crypto_ecdh_decode_key
+
+DH Helper Functions
+-------------------
+
+.. kernel-doc:: include/crypto/dh.h
+ :doc: DH Helper Functions
+
+.. kernel-doc:: include/crypto/dh.h
+ :functions: dh crypto_dh_key_len crypto_dh_encode_key crypto_dh_decode_key
diff --git a/Documentation/crypto/api-rng.rst b/Documentation/crypto/api-rng.rst
new file mode 100644
index 000000000000..10ba7436cee4
--- /dev/null
+++ b/Documentation/crypto/api-rng.rst
@@ -0,0 +1,14 @@
+Random Number Algorithm Definitions
+-----------------------------------
+
+.. kernel-doc:: include/crypto/rng.h
+ :functions: rng_alg
+
+Crypto API Random Number API
+----------------------------
+
+.. kernel-doc:: include/crypto/rng.h
+ :doc: Random number generator API
+
+.. kernel-doc:: include/crypto/rng.h
+ :functions: crypto_alloc_rng crypto_rng_alg crypto_free_rng crypto_rng_generate crypto_rng_get_bytes crypto_rng_reset crypto_rng_seedsize
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
new file mode 100644
index 000000000000..0a10819f6107
--- /dev/null
+++ b/Documentation/crypto/api-samples.rst
@@ -0,0 +1,224 @@
+Code Examples
+=============
+
+Code Example For Symmetric Key Cipher Operation
+-----------------------------------------------
+
+::
+
+
+ struct tcrypt_result {
+ struct completion completion;
+ int err;
+ };
+
+ /* tie all data structures together */
+ struct skcipher_def {
+ struct scatterlist sg;
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ struct tcrypt_result result;
+ };
+
+ /* Callback function */
+ static void test_skcipher_cb(struct crypto_async_request *req, int error)
+ {
+ struct tcrypt_result *result = req->data;
+
+ if (error == -EINPROGRESS)
+ return;
+ result->err = error;
+ complete(&result->completion);
+ pr_info("Encryption finished successfully\n");
+ }
+
+ /* Perform cipher operation */
+ static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
+ int enc)
+ {
+ int rc = 0;
+
+ if (enc)
+ rc = crypto_skcipher_encrypt(sk->req);
+ else
+ rc = crypto_skcipher_decrypt(sk->req);
+
+ switch (rc) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ rc = wait_for_completion_interruptible(
+ &sk->result.completion);
+ if (!rc && !sk->result.err) {
+ reinit_completion(&sk->result.completion);
+ break;
+ }
+ default:
+ pr_info("skcipher encrypt returned with %d result %d\n",
+ rc, sk->result.err);
+ break;
+ }
+ init_completion(&sk->result.completion);
+
+ return rc;
+ }
+
+ /* Initialize and trigger cipher operation */
+ static int test_skcipher(void)
+ {
+ struct skcipher_def sk;
+ struct crypto_skcipher *skcipher = NULL;
+ struct skcipher_request *req = NULL;
+ char *scratchpad = NULL;
+ char *ivdata = NULL;
+ unsigned char key[32];
+ int ret = -EFAULT;
+
+ skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
+ if (IS_ERR(skcipher)) {
+ pr_info("could not allocate skcipher handle\n");
+ return PTR_ERR(skcipher);
+ }
+
+ req = skcipher_request_alloc(skcipher, GFP_KERNEL);
+ if (!req) {
+ pr_info("could not allocate skcipher request\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ test_skcipher_cb,
+ &sk.result);
+
+ /* AES 256 with random key */
+ get_random_bytes(&key, 32);
+ if (crypto_skcipher_setkey(skcipher, key, 32)) {
+ pr_info("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* IV will be random */
+ ivdata = kmalloc(16, GFP_KERNEL);
+ if (!ivdata) {
+ pr_info("could not allocate ivdata\n");
+ goto out;
+ }
+ get_random_bytes(ivdata, 16);
+
+ /* Input data will be random */
+ scratchpad = kmalloc(16, GFP_KERNEL);
+ if (!scratchpad) {
+ pr_info("could not allocate scratchpad\n");
+ goto out;
+ }
+ get_random_bytes(scratchpad, 16);
+
+ sk.tfm = skcipher;
+ sk.req = req;
+
+ /* We encrypt one block */
+ sg_init_one(&sk.sg, scratchpad, 16);
+ skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
+ init_completion(&sk.result.completion);
+
+ /* encrypt data */
+ ret = test_skcipher_encdec(&sk, 1);
+ if (ret)
+ goto out;
+
+ pr_info("Encryption triggered successfully\n");
+
+ out:
+ if (skcipher)
+ crypto_free_skcipher(skcipher);
+ if (req)
+ skcipher_request_free(req);
+ if (ivdata)
+ kfree(ivdata);
+ if (scratchpad)
+ kfree(scratchpad);
+ return ret;
+ }
+
+
+Code Example For Use of Operational State Memory With SHASH
+-----------------------------------------------------------
+
+::
+
+
+ struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+ };
+
+    static struct sdesc *init_sdesc(struct crypto_shash *alg)
+ {
+        struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ sdesc->shash.flags = 0x0;
+ return sdesc;
+ }
+
+    static int calc_hash(struct crypto_shash *alg,
+                         const unsigned char *data, unsigned int datalen,
+                         unsigned char *digest)
+    {
+        struct sdesc *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(alg);
+ if (IS_ERR(sdesc)) {
+            pr_info("can't alloc sdesc\n");
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+ kfree(sdesc);
+ return ret;
+ }
+
+
+Code Example For Random Number Generator Usage
+----------------------------------------------
+
+::
+
+
+ static int get_random_numbers(u8 *buf, unsigned int len)
+ {
+        struct crypto_rng *rng = NULL;
+        char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
+ int ret;
+
+ if (!buf || !len) {
+ pr_debug("No output buffer provided\n");
+ return -EINVAL;
+ }
+
+ rng = crypto_alloc_rng(drbg, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_debug("could not allocate RNG handle for %s\n", drbg);
+            return PTR_ERR(rng);
+ }
+
+ ret = crypto_rng_get_bytes(rng, buf, len);
+ if (ret < 0)
+ pr_debug("generation of random numbers failed\n");
+ else if (ret == 0)
+ pr_debug("RNG returned no data");
+ else
+ pr_debug("RNG returned %d bytes of data\n", ret);
+
+ out:
+ crypto_free_rng(rng);
+ return ret;
+ }
diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst
new file mode 100644
index 000000000000..b20028a361a9
--- /dev/null
+++ b/Documentation/crypto/api-skcipher.rst
@@ -0,0 +1,62 @@
+Block Cipher Algorithm Definitions
+----------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+ :doc: Block Cipher Algorithm Definitions
+
+.. kernel-doc:: include/linux/crypto.h
+ :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg
+
+Symmetric Key Cipher API
+------------------------
+
+.. kernel-doc:: include/crypto/skcipher.h
+ :doc: Symmetric Key Cipher API
+
+.. kernel-doc:: include/crypto/skcipher.h
+ :functions: crypto_alloc_skcipher crypto_free_skcipher crypto_has_skcipher crypto_skcipher_ivsize crypto_skcipher_blocksize crypto_skcipher_setkey crypto_skcipher_reqtfm crypto_skcipher_encrypt crypto_skcipher_decrypt
+
+Symmetric Key Cipher Request Handle
+-----------------------------------
+
+.. kernel-doc:: include/crypto/skcipher.h
+ :doc: Symmetric Key Cipher Request Handle
+
+.. kernel-doc:: include/crypto/skcipher.h
+ :functions: crypto_skcipher_reqsize skcipher_request_set_tfm skcipher_request_alloc skcipher_request_free skcipher_request_set_callback skcipher_request_set_crypt
+
+Single Block Cipher API
+-----------------------
+
+.. kernel-doc:: include/linux/crypto.h
+ :doc: Single Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+ :functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one
+
+Asynchronous Block Cipher API - Deprecated
+------------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+ :doc: Asynchronous Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_alloc_ablkcipher crypto_free_ablkcipher crypto_has_ablkcipher crypto_ablkcipher_ivsize crypto_ablkcipher_blocksize crypto_ablkcipher_setkey crypto_ablkcipher_reqtfm crypto_ablkcipher_encrypt crypto_ablkcipher_decrypt
+
+Asynchronous Cipher Request Handle - Deprecated
+-----------------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+ :doc: Asynchronous Cipher Request Handle
+
+.. kernel-doc:: include/linux/crypto.h
+ :functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
+
+Synchronous Block Cipher API - Deprecated
+-----------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+ :doc: Synchronous Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_alloc_blkcipher crypto_free_blkcipher crypto_has_blkcipher crypto_blkcipher_name crypto_blkcipher_ivsize crypto_blkcipher_blocksize crypto_blkcipher_setkey crypto_blkcipher_encrypt crypto_blkcipher_encrypt_iv crypto_blkcipher_decrypt crypto_blkcipher_decrypt_iv crypto_blkcipher_set_iv crypto_blkcipher_get_iv
diff --git a/Documentation/crypto/api.rst b/Documentation/crypto/api.rst
new file mode 100644
index 000000000000..2e519193ab4a
--- /dev/null
+++ b/Documentation/crypto/api.rst
@@ -0,0 +1,25 @@
+Programming Interface
+=====================
+
+Please note that the kernel crypto API contains the AEAD givcrypt API
+(crypto_aead_giv\* and aead_givcrypt\* function calls in
+include/crypto/aead.h). This API is obsolete and will be removed in the
+future. To obtain the functionality of an AEAD cipher with internal IV
+generation, use the IV generator as a regular cipher. For example,
+rfc4106(gcm(aes)) is the AEAD cipher with external IV generation and
+seqniv(rfc4106(gcm(aes))) implies that the kernel crypto API generates
+the IV. Different IV generators are available.
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ api-skcipher
+ api-aead
+ api-digest
+ api-rng
+ api-akcipher
+ api-kpp
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
new file mode 100644
index 000000000000..ca2d09b991f5
--- /dev/null
+++ b/Documentation/crypto/architecture.rst
@@ -0,0 +1,441 @@
+Kernel Crypto API Architecture
+==============================
+
+Cipher algorithm types
+----------------------
+
+The kernel crypto API provides different API calls for the following
+cipher types:
+
+- Symmetric ciphers
+
+- AEAD ciphers
+
+- Message digest, including keyed message digest
+
+- Random number generation
+
+- User space interface
+
+Ciphers And Templates
+---------------------
+
+The kernel crypto API provides implementations of single block ciphers
+and message digests. In addition, the kernel crypto API provides
+numerous "templates" that can be used in conjunction with the single
+block ciphers and message digests. Templates include all types of block
+chaining mode, the HMAC mechanism, etc.
+
+Single block ciphers and message digests can either be directly used by
+a caller or invoked together with a template to form multi-block ciphers
+or keyed message digests.
+
+A single block cipher may even be called with multiple templates.
+However, templates cannot be used without a single cipher.
+
+See /proc/crypto and search for "name". For example:
+
+- aes
+
+- ecb(aes)
+
+- cmac(aes)
+
+- ccm(aes)
+
+- rfc4106(gcm(aes))
+
+- sha1
+
+- hmac(sha1)
+
+- authenc(hmac(sha1),cbc(aes))
+
+In these examples, "aes" and "sha1" are the ciphers and all others are
+the templates.
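+
+For instance, a keyed message digest built from the "hmac" template and
+the "sha1" cipher is allocated with the combined name. A minimal sketch,
+with error handling omitted:
+
+::
+
+    struct crypto_shash *hmac_sha1;
+
+    hmac_sha1 = crypto_alloc_shash("hmac(sha1)", 0, 0);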
+
+Synchronous And Asynchronous Operation
+--------------------------------------
+
+The kernel crypto API provides synchronous and asynchronous API
+operations.
+
+When using the synchronous API operation, the caller invokes a cipher
+operation which is performed synchronously by the kernel crypto API.
+That means, the caller waits until the cipher operation completes.
+Therefore, the kernel crypto API calls work like regular function calls.
+For synchronous operation, the set of API calls is small and
+conceptually similar to any other crypto library.
+
+The kernel crypto API also provides asynchronous operation, where the
+invocation of a cipher operation returns almost instantly. That
+invocation triggers the cipher operation but it does not
+signal its completion. Before invoking a cipher operation, the caller
+must provide a callback function the kernel crypto API can invoke to
+signal the completion of the cipher operation. Furthermore, the caller
+must ensure it can handle such asynchronous events by applying
+appropriate locking around its data. The kernel crypto API does not
+perform any special serialization operation to protect the caller's data
+integrity.
+
+Crypto API Cipher References And Priority
+-----------------------------------------
+
+A cipher is referenced by the caller with a string. That string has the
+following semantics:
+
+::
+
+ template(single block cipher)
+
+
+where "template" and "single block cipher" is the aforementioned
+template and single block cipher, respectively. If applicable,
+additional templates may enclose other templates, such as
+
+::
+
+    template1(template2(single block cipher))
+
+
+The kernel crypto API may provide multiple implementations of a template
+or a single block cipher. For example, AES on newer Intel hardware has
+the following implementations: AES-NI, assembler implementation, or
+straight C. Now, when using the string "aes" with the kernel crypto API,
+which cipher implementation is used? The answer to that question is the
+priority number assigned to each cipher implementation by the kernel
+crypto API. When a caller uses the string to refer to a cipher during
+initialization of a cipher handle, the kernel crypto API looks up all
+implementations registered with that name and selects the
+implementation with the highest priority.
+
+Now, a caller may have the need to refer to a specific cipher
+implementation and thus does not want to rely on the priority-based
+selection. To accommodate this scenario, the kernel crypto API allows
+the cipher implementation to register a unique name in addition to
+common names. When using that unique name, a caller is therefore always
+sure to refer to the intended cipher implementation.
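+
+As a minimal sketch, both calls below allocate a SHA-1 transform, but the
+second one bypasses the priority-based selection by using a driver name
+(the generic C implementation is shown here; the names available on a
+given system are listed in /proc/crypto):
+
+::
+
+    /* pick the highest-priority "sha1" implementation */
+    struct crypto_shash *tfm1 = crypto_alloc_shash("sha1", 0, 0);
+
+    /* explicitly request the generic C implementation */
+    struct crypto_shash *tfm2 = crypto_alloc_shash("sha1-generic", 0, 0);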
+
+The list of available ciphers is given in /proc/crypto. However, that
+list does not specify all possible permutations of templates and
+ciphers. Each block listed in /proc/crypto may contain the following
+information -- if one of the components listed below is not
+applicable to a cipher, it is not displayed:
+
+- name: the generic name of the cipher that is subject to the
+ priority-based selection -- this name can be used by the cipher
+ allocation API calls (all names listed above are examples for such
+ generic names)
+
+- driver: the unique name of the cipher -- this name can be used by the
+ cipher allocation API calls
+
+- module: the kernel module providing the cipher implementation (or
+ "kernel" for statically linked ciphers)
+
+- priority: the priority value of the cipher implementation
+
+- refcnt: the reference count of the respective cipher (i.e. the number
+ of current consumers of this cipher)
+
+- selftest: specification whether the self test for the cipher passed
+
+- type:
+
+ - skcipher for symmetric key ciphers
+
+ - cipher for single block ciphers that may be used with an
+ additional template
+
+ - shash for synchronous message digest
+
+ - ahash for asynchronous message digest
+
+ - aead for AEAD cipher type
+
+ - compression for compression type transformations
+
+ - rng for random number generator
+
+ - givcipher for cipher with associated IV generator (see the geniv
+ entry below for the specification of the IV generator type used by
+ the cipher implementation)
+
+ - kpp for a Key-agreement Protocol Primitive (KPP) cipher such as
+ an ECDH or DH implementation
+
+- blocksize: blocksize of cipher in bytes
+
+- keysize: key size in bytes
+
+- ivsize: IV size in bytes
+
+- seedsize: required size of seed data for random number generator
+
+- digestsize: output size of the message digest
+
+- geniv: IV generation type:
+
+ - eseqiv for encrypted sequence number based IV generation
+
+ - seqiv for sequence number based IV generation
+
+ - chainiv for chain iv generation
+
+ - <builtin> is a marker that the cipher implements IV generation and
+ handling as it is specific to the given cipher
+
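+An abridged, illustrative entry for a keyed message digest may look as
+follows; the exact fields and values depend on the implementation and the
+kernel configuration:
+
+::
+
+    name         : hmac(sha1)
+    driver       : hmac(sha1-generic)
+    module       : kernel
+    priority     : 0
+    refcnt       : 1
+    selftest     : passed
+    type         : shash
+    blocksize    : 64
+    digestsize   : 20
+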
+Key Sizes
+---------
+
+When allocating a cipher handle, the caller only specifies the cipher
+type. Symmetric ciphers, however, typically support multiple key sizes
+(e.g. AES-128 vs. AES-192 vs. AES-256). These key sizes are determined
+with the length of the provided key. Thus, the kernel crypto API does
+not provide a separate way to select the particular symmetric cipher key
+size.
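+
+A minimal sketch, assuming an already allocated cbc(aes) handle tfm and a
+32-byte key buffer:
+
+::
+
+    /* the same transform handle; the key length selects the AES variant */
+    crypto_skcipher_setkey(tfm, key, 16);    /* AES-128 */
+    crypto_skcipher_setkey(tfm, key, 32);    /* AES-256 */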
+
+Cipher Allocation Type And Masks
+--------------------------------
+
+The different cipher handle allocation functions allow the specification
+of a type and mask flag. Both parameters have the following meaning (and
+are therefore not covered in the subsequent sections).
+
+The type flag specifies the type of the cipher algorithm. The caller
+usually provides a 0 when the caller wants the default handling.
+Otherwise, the caller may provide the following selections which match
+the aforementioned cipher types:
+
+- CRYPTO_ALG_TYPE_CIPHER Single block cipher
+
+- CRYPTO_ALG_TYPE_COMPRESS Compression
+
+- CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
+ (MAC)
+
+- CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher
+
+- CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
+
+- CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block cipher packed
+ together with an IV generator (see geniv field in the /proc/crypto
+ listing for the known IV generators)
+
+- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
+ an ECDH or DH implementation
+
+- CRYPTO_ALG_TYPE_DIGEST Raw message digest
+
+- CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST
+
+- CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash
+
+- CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash
+
+- CRYPTO_ALG_TYPE_RNG Random Number Generation
+
+- CRYPTO_ALG_TYPE_AKCIPHER Asymmetric cipher
+
+- CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
+ CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
+ decompression instead of performing the operation on one segment
+ only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
+ CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.
+
+The mask flag restricts the type of cipher. The only allowed flag is
+CRYPTO_ALG_ASYNC to restrict the cipher lookup function to
+asynchronous ciphers. Usually, a caller provides a 0 for the mask flag.
+
+When the caller provides a mask and type specification, the caller
+limits the search the kernel crypto API can perform for a suitable
+cipher implementation for the given cipher name. That means, even when a
+caller uses a cipher name that exists during its initialization call,
+the kernel crypto API may not select it due to the used type and mask
+field.
+
+Internal Structure of Kernel Crypto API
+---------------------------------------
+
+The kernel crypto API has an internal structure where a cipher
+implementation may use many layers and indirections. This section shall
+help to clarify how the kernel crypto API uses various components to
+implement the complete cipher.
+
+The following subsections explain the internal structure based on
+existing cipher implementations. The first section addresses the most
+complex scenario where all other scenarios form a logical subset.
+
+Generic AEAD Cipher Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ASCII art decomposes the kernel crypto API layers when
+using the AEAD cipher with the automated IV generation. The shown
+example is used by the IPSEC layer.
+
+For other use cases of AEAD ciphers, the ASCII art applies as well, but
+the caller may not use the AEAD cipher with a separate IV generator. In
+this case, the caller must generate the IV.
+
+The depicted example decomposes the AEAD cipher of GCM(AES) based on the
+generic C implementations (gcm.c, aes-generic.c, ctr.c, ghash-generic.c,
+seqiv.c). The generic implementation serves as an example showing the
+complete logic of the kernel crypto API.
+
+It is possible that some streamlined cipher implementations (like
+AES-NI) provide implementations merging aspects which in the view of the
+kernel crypto API cannot be decomposed into layers any more. In case of
+the AES-NI implementation, the CTR mode, the GHASH implementation and
+the AES cipher are all merged into one cipher implementation registered
+with the kernel crypto API. In this case, the concept described by the
+following ASCII art applies too. However, the decomposition of GCM into
+the individual sub-components by the kernel crypto API is not done any
+more.
+
+Each block in the following ASCII art is an independent cipher instance
+obtained from the kernel crypto API. Each block is accessed by the
+caller or by other blocks using the API functions defined by the kernel
+crypto API for the cipher implementation type.
+
+The blocks below indicate the cipher type as well as the specific logic
+implemented in the cipher.
+
+The ASCII art picture also indicates the call structure, i.e. who calls
+which component. The arrows point to the invoked block where the caller
+uses the API applicable to the cipher type specified for the block.
+
+::
+
+
+ kernel crypto API | IPSEC Layer
+ |
+ +-----------+ |
+ | | (1)
+ | aead | <----------------------------------- esp_output
+ | (seqiv) | ---+
+ +-----------+ |
+ | (2)
+ +-----------+ |
+ | | <--+ (2)
+ | aead | <----------------------------------- esp_input
+ | (gcm) | ------------+
+ +-----------+ |
+ | (3) | (5)
+ v v
+ +-----------+ +-----------+
+ | | | |
+ | skcipher | | ahash |
+ | (ctr) | ---+ | (ghash) |
+ +-----------+ | +-----------+
+ |
+ +-----------+ | (4)
+ | | <--+
+ | cipher |
+ | (aes) |
+ +-----------+
+
+
+
+The following call sequence is applicable when the IPSEC layer triggers
+an encryption operation with the esp_output function. During
+configuration, the administrator set up the use of rfc4106(gcm(aes)) as
+the cipher for ESP. The following call sequence is now depicted in the
+ASCII art above:
+
+1. esp_output() invokes crypto_aead_encrypt() to trigger an
+ encryption operation of the AEAD cipher with IV generator.
+
+ In case of GCM, the SEQIV implementation is registered as GIVCIPHER
+ in crypto_rfc4106_alloc().
+
+ The SEQIV performs its operation to generate an IV where the core
+ function is seqiv_geniv().
+
+2. Now, SEQIV uses the AEAD API function calls to invoke the associated
+ AEAD cipher. In our case, during the instantiation of SEQIV, the
+ cipher handle for GCM is provided to SEQIV. This means that SEQIV
+ invokes AEAD cipher operations with the GCM cipher handle.
+
+ During instantiation of the GCM handle, the CTR(AES) and GHASH
+ ciphers are instantiated. The cipher handles for CTR(AES) and GHASH
+ are retained for later use.
+
+ The GCM implementation is responsible for invoking the CTR mode AES
+ and the GHASH cipher in the right manner to implement the GCM
+ specification.
+
+3. The GCM AEAD cipher type implementation now invokes the SKCIPHER API
+ with the instantiated CTR(AES) cipher handle.
+
+ During instantiation of the CTR(AES) cipher, the CIPHER type
+ implementation of AES is instantiated. The cipher handle for AES is
+ retained.
+
+ That means that the SKCIPHER implementation of CTR(AES) only
+ implements the CTR block chaining mode. After performing the block
+ chaining operation, the CIPHER implementation of AES is invoked.
+
+4. The SKCIPHER of CTR(AES) now invokes the CIPHER API with the AES
+ cipher handle to encrypt one block.
+
+5. The GCM AEAD implementation also invokes the GHASH cipher
+ implementation via the AHASH API.
+
+When the IPSEC layer triggers the esp_input() function, the same call
+sequence is followed, with the only difference being that the operation
+starts with step (2).
+
+Generic Block Cipher Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Generic block ciphers follow the same concept as depicted with the ASCII
+art picture above.
+
+For example, CBC(AES) is implemented with cbc.c, and aes-generic.c. The
+ASCII art picture above applies as well with the difference that only
+step (4) is used and the SKCIPHER block chaining mode is CBC.
+
+Generic Keyed Message Digest Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Keyed message digest implementations again follow the same concept as
+depicted in the ASCII art picture above.
+
+For example, HMAC(SHA256) is implemented with hmac.c and
+sha256_generic.c. The following ASCII art illustrates the
+implementation:
+
+::
+
+
+ kernel crypto API | Caller
+ |
+ +-----------+ (1) |
+ | | <------------------ some_function
+ | ahash |
+ | (hmac) | ---+
+ +-----------+ |
+ | (2)
+ +-----------+ |
+ | | <--+
+ | shash |
+ | (sha256) |
+ +-----------+
+
+
+
+The following call sequence is applicable when a caller triggers an HMAC
+operation:
+
+1. The AHASH API functions are invoked by the caller. The HMAC
+ implementation performs its operation as needed.
+
+ During initialization of the HMAC cipher, the SHASH cipher type of
+ SHA256 is instantiated. The cipher handle for the SHA256 instance is
+ retained.
+
+ At one time, the HMAC implementation requires a SHA256 operation
+ where the SHA256 cipher handle is used.
+
+2. The HMAC instance now invokes the SHASH API with the SHA256 cipher
+ handle to calculate the message digest.
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
new file mode 100644
index 000000000000..66f50d32dcec
--- /dev/null
+++ b/Documentation/crypto/devel-algos.rst
@@ -0,0 +1,247 @@
+Developing Cipher Algorithms
+============================
+
+Registering And Unregistering Transformation
+--------------------------------------------
+
+There are three distinct types of registration functions in the Crypto
+API. One is used to register a generic cryptographic transformation,
+while the other two are specific to HASH transformations and
+COMPRESSion. We will discuss the latter two in a separate chapter; here
+we will only look at the generic ones.
+
+Before discussing the register functions, the data structure to be
+filled with each, struct crypto_alg, must be considered -- see below
+for a description of this data structure.
+
+The generic registration functions can be found in
+include/linux/crypto.h and their definition can be seen below. The
+former function registers a single transformation, while the latter
+works on an array of transformation descriptions. The latter is useful
+when registering transformations in bulk, for example when a driver
+implements multiple transformations.
+
+::
+
+ int crypto_register_alg(struct crypto_alg *alg);
+ int crypto_register_algs(struct crypto_alg *algs, int count);
+
+
+The counterparts to those functions are listed below.
+
+::
+
+ int crypto_unregister_alg(struct crypto_alg *alg);
+ int crypto_unregister_algs(struct crypto_alg *algs, int count);
+
+
+Notice that both registration and unregistration functions do return a
+value, so make sure to handle errors. A return code of zero implies
+success. Any return code < 0 implies an error.
+
+The bulk registration/unregistration functions register/unregister each
+transformation in the given array of length count. They handle errors as
+follows (a short usage sketch follows this list):
+
+- crypto_register_algs() succeeds if and only if it successfully
+ registers all the given transformations. If an error occurs partway
+ through, then it rolls back successful registrations before returning
+ the error code. Note that if a driver needs to handle registration
+ errors for individual transformations, then it will need to use the
+ non-bulk function crypto_register_alg() instead.
+
+- crypto_unregister_algs() tries to unregister all the given
+ transformations, continuing on error. It logs errors and always
+ returns zero.
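+
+A minimal, hypothetical driver sketch of the bulk registration described
+above (the my_algs array and the my_driver_* names are made up; only the
+crypto_register_algs()/crypto_unregister_algs() calls are actual API
+functions):
+
+::
+
+    /* the array entries are assumed to be filled in by the driver */
+    static struct crypto_alg my_algs[2];
+
+    static int __init my_driver_init(void)
+    {
+        /* registers all entries of the array or none of them */
+        return crypto_register_algs(my_algs, ARRAY_SIZE(my_algs));
+    }
+
+    static void __exit my_driver_exit(void)
+    {
+        crypto_unregister_algs(my_algs, ARRAY_SIZE(my_algs));
+    }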
+
+Single-Block Symmetric Ciphers [CIPHER]
+---------------------------------------
+
+Example of transformations: aes, arc4, ...
+
+This section describes the simplest of all transformation
+implementations, that being the CIPHER type used for symmetric ciphers.
+The CIPHER type is used for transformations which operate on exactly one
+block at a time and there are no dependencies between blocks at all.
+
+Registration specifics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The registration of a [CIPHER] algorithm is specific in that the struct
+crypto_alg field .cra_type is left empty. The .cra_u.cipher member has to
+be filled in with the proper callbacks to implement this transformation.
+
+See struct cipher_alg below.
+
+Cipher Definition With struct cipher_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Struct cipher_alg defines a single block cipher.
+
+Here are schematics of how these functions are called when operated from
+other parts of the kernel. Note that the .cia_setkey() call might happen
+before or after any of these schematics happen, but must not happen
+while any of them are in flight.
+
+::
+
+ KEY ---. PLAINTEXT ---.
+ v v
+ .cia_setkey() -> .cia_encrypt()
+ |
+ '-----> CIPHERTEXT
+
+
+Please note that a pattern where .cia_setkey() is called multiple times
+is also valid:
+
+::
+
+
+ KEY1 --. PLAINTEXT1 --. KEY2 --. PLAINTEXT2 --.
+ v v v v
+ .cia_setkey() -> .cia_encrypt() -> .cia_setkey() -> .cia_encrypt()
+ | |
+ '---> CIPHERTEXT1 '---> CIPHERTEXT2
+
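+The following fragment is an illustrative sketch only -- the mycipher_*
+names, sizes and context structure are hypothetical -- showing how the
+callbacks from the schematics above are hooked up via .cra_u.cipher when
+defining a single block cipher:
+
+::
+
+    static struct crypto_alg mycipher_alg = {
+        .cra_name        = "mycipher",
+        .cra_driver_name = "mycipher-generic",
+        .cra_priority    = 100,
+        .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
+        .cra_blocksize   = 16,
+        .cra_ctxsize     = sizeof(struct mycipher_ctx),
+        .cra_module      = THIS_MODULE,
+        .cra_u           = {
+            .cipher = {
+                .cia_min_keysize = 16,
+                .cia_max_keysize = 32,
+                .cia_setkey      = mycipher_setkey,
+                .cia_encrypt     = mycipher_encrypt,
+                .cia_decrypt     = mycipher_decrypt,
+            }
+        }
+    };
+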
+
+Multi-Block Ciphers
+-------------------
+
+Example of transformations: cbc(aes), ecb(arc4), ...
+
+This section describes the multi-block cipher transformation
+implementations. The multi-block ciphers are used for transformations
+which operate on scatterlists of data supplied to the transformation
+functions. They output the result into a scatterlist of data as well.
+
+Registration Specifics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The registration of multi-block cipher algorithms is one of the most
+standard procedures throughout the crypto API.
+
+Note: if a cipher implementation requires proper alignment of data, the
+caller should use the crypto_skcipher_alignmask() function to identify
+the required memory alignment mask. The kernel crypto API is able to
+process requests that are unaligned. This implies, however, additional
+overhead, as the kernel crypto API needs to perform the realignment of
+the data, which may imply moving the data.
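+
+A small illustrative sketch, assuming tfm is a previously allocated
+skcipher handle:
+
+::
+
+    /* query the alignment requirement of the selected implementation */
+    unsigned int alignmask = crypto_skcipher_alignmask(tfm);
+
+    /* a buffer at address addr is sufficiently aligned when
+     * ((unsigned long)addr & alignmask) == 0 */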
+
+Cipher Definition With struct blkcipher_alg and ablkcipher_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Struct blkcipher_alg defines a synchronous block cipher whereas struct
+ablkcipher_alg defines an asynchronous block cipher.
+
+Please refer to the single block cipher description for schematics of
+the block cipher usage.
+
+Specifics Of Asynchronous Multi-Block Cipher
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a couple of specifics to the asynchronous interface.
+
+First of all, some of the drivers will want to use the Generic
+ScatterWalk in case the hardware needs to be fed separate chunks of the
+scatterlist which contains the plaintext and will contain the
+ciphertext. Please refer to the ScatterWalk interface offered by the
+Linux kernel scatter / gather list implementation.
+
+Hashing [HASH]
+--------------
+
+Example of transformations: crc32, md5, sha1, sha256,...
+
+Registering And Unregistering The Transformation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are multiple ways to register a HASH transformation, depending on
+whether the transformation is synchronous [SHASH] or asynchronous
+[AHASH] and the amount of HASH transformations we are registering. You
+can find the prototypes defined in include/crypto/internal/hash.h:
+
+::
+
+ int crypto_register_ahash(struct ahash_alg *alg);
+
+ int crypto_register_shash(struct shash_alg *alg);
+ int crypto_register_shashes(struct shash_alg *algs, int count);
+
+
+The respective counterparts for unregistering the HASH transformation
+are as follows:
+
+::
+
+ int crypto_unregister_ahash(struct ahash_alg *alg);
+
+ int crypto_unregister_shash(struct shash_alg *alg);
+ int crypto_unregister_shashes(struct shash_alg *algs, int count);
+
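+Registration of a synchronous hash would typically look like the
+following purely illustrative sketch -- the mydigest_* names, sizes and
+context structure are hypothetical:
+
+::
+
+    static struct shash_alg mydigest_alg = {
+        .digestsize = 32,
+        .init       = mydigest_init,
+        .update     = mydigest_update,
+        .final      = mydigest_final,
+        .descsize   = sizeof(struct mydigest_desc_ctx),
+        .base       = {
+            .cra_name        = "mydigest",
+            .cra_driver_name = "mydigest-generic",
+            .cra_priority    = 100,
+            .cra_blocksize   = 64,
+            .cra_module      = THIS_MODULE,
+        }
+    };
+
+    static int __init mydigest_mod_init(void)
+    {
+        return crypto_register_shash(&mydigest_alg);
+    }
+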
+
+Cipher Definition With struct shash_alg and ahash_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here are schematics of how these functions are called when operated from
+other parts of the kernel. Note that the .setkey() call might happen
+before or after any of these schematics happen, but must not happen
+while any of them are in flight. Please note that calling .init()
+followed immediately by .final() is also a perfectly valid
+transformation.
+
+::
+
+ I) DATA -----------.
+ v
+ .init() -> .update() -> .final() ! .update() might not be called
+ ^ | | at all in this scenario.
+ '----' '---> HASH
+
+ II) DATA -----------.-----------.
+ v v
+ .init() -> .update() -> .finup() ! .update() may not be called
+ ^ | | at all in this scenario.
+ '----' '---> HASH
+
+ III) DATA -----------.
+ v
+ .digest() ! The entire process is handled
+ | by the .digest() call.
+ '---------------> HASH
+
+
+Here is a schematic of how the .export()/.import() functions are called
+when used from another part of the kernel.
+
+::
+
+ KEY--. DATA--.
+ v v ! .update() may not be called
+ .setkey() -> .init() -> .update() -> .export() at all in this scenario.
+ ^ | |
+ '-----' '--> PARTIAL_HASH
+
+ ----------- other transformations happen here -----------
+
+ PARTIAL_HASH--. DATA1--.
+ v v
+ .import -> .update() -> .final() ! .update() may not be called
+ ^ | | at all in this scenario.
+ '----' '--> HASH1
+
+ PARTIAL_HASH--. DATA2-.
+ v v
+ .import -> .finup()
+ |
+ '---------------> HASH2
+
+
+Specifics Of Asynchronous HASH Transformation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the drivers will want to use the Generic ScatterWalk in case the
+implementation needs to be fed separate chunks of the scatterlist which
+contains the input data. The buffer containing the resulting hash will
+always be properly aligned to .cra_alignmask so there is no need to
+worry about this.
diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst
new file mode 100644
index 000000000000..94c4786f2573
--- /dev/null
+++ b/Documentation/crypto/index.rst
@@ -0,0 +1,24 @@
+=======================
+Linux Kernel Crypto API
+=======================
+
+:Author: Stephan Mueller
+:Author: Marek Vasut
+
+This documentation outlines the Linux kernel crypto API: its concepts,
+details on developing cipher implementations, use of the API for
+cryptographic use cases, and programming examples.
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ intro
+ architecture
+ devel-algos
+ userspace-if
+ api
+ api-samples
diff --git a/Documentation/crypto/intro.rst b/Documentation/crypto/intro.rst
new file mode 100644
index 000000000000..9aa89ebbfba9
--- /dev/null
+++ b/Documentation/crypto/intro.rst
@@ -0,0 +1,74 @@
+Kernel Crypto API Interface Specification
+=========================================
+
+Introduction
+------------
+
+The kernel crypto API offers a rich set of cryptographic ciphers as well
+as other data transformation mechanisms and methods to invoke these.
+This document contains a description of the API and provides example
+code.
+
+To understand and properly use the kernel crypto API, a brief explanation
+of its structure is given. Based on the architecture, the API can be
+separated into different components. Following the architecture
+specification, hints to developers of ciphers are provided. Pointers to
+the API function call documentation are given at the end.
+
+The kernel crypto API refers to all algorithms as "transformations".
+Therefore, a cipher handle variable usually has the name "tfm". Besides
+cryptographic operations, the kernel crypto API also knows compression
+transformations and handles them the same way as ciphers.
+
+The kernel crypto API serves the following entity types:
+
+- consumers requesting cryptographic services
+
+- data transformation implementations (typically ciphers) that can be
+ called by consumers using the kernel crypto API
+
+This specification is intended for consumers of the kernel crypto API as
+well as for developers implementing ciphers. This API specification,
+however, does not discuss all API calls available to data transformation
+implementations (i.e. implementations of ciphers and other
+transformations (such as CRC or even compression algorithms) that can
+register with the kernel crypto API).
+
+Note: The terms "transformation" and "cipher algorithm" are used
+interchangeably.
+
+Terminology
+-----------
+
+The transformation implementation is the actual code or an interface to
+hardware which implements a certain transformation with precisely
+defined behavior.
+
+The transformation object (TFM) is an instance of a transformation
+implementation. There can be multiple transformation objects associated
+with a single transformation implementation. Each of those
+transformation objects is held by a crypto API consumer or another
+transformation. A transformation object is allocated when a crypto API
+consumer requests a transformation implementation. The consumer is then
+provided with a structure, which contains a transformation object (TFM).
+
+The structure that contains transformation objects may also be referred
+to as a "cipher handle". Such a cipher handle is always subject to the
+following phases that are reflected in the API calls applicable to such
+a cipher handle:
+
+1. Initialization of a cipher handle.
+
+2. Execution of all intended cipher operations applicable for the handle
+ where the cipher handle must be furnished to every API call.
+
+3. Destruction of a cipher handle.
+
+When using the initialization API calls, a cipher handle is created and
+returned to the consumer. Therefore, please refer to all initialization
+API calls for the data structure type a consumer is expected to receive
+and subsequently to use. The initialization API calls all follow the
+naming convention of crypto_alloc\*.
+
+The transformation context is private data associated with the
+transformation object.
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
new file mode 100644
index 000000000000..de5a72e32bc9
--- /dev/null
+++ b/Documentation/crypto/userspace-if.rst
@@ -0,0 +1,387 @@
+User Space Interface
+====================
+
+Introduction
+------------
+
+The concepts of the kernel crypto API visible to kernel space are fully
+applicable to the user space interface as well. Therefore, the kernel
+crypto API high level discussion for the in-kernel use cases applies
+here as well.
+
+The major difference, however, is that user space can only act as a
+consumer and never as a provider of a transformation or cipher
+algorithm.
+
+The following covers the user space interface exported by the kernel
+crypto API. A working example of this description is libkcapi that can
+be obtained from [1]. That library can be used by user space
+applications that require cryptographic services from the kernel.
+
+Some details of the in-kernel kernel crypto API aspects do not apply to
+user space, however. This includes the difference between synchronous
+and asynchronous invocations. The user space API call is fully
+synchronous.
+
+[1] http://www.chronox.de/libkcapi.html
+
+User Space API General Remarks
+------------------------------
+
+The kernel crypto API is accessible from user space. Currently, the
+following ciphers are accessible:
+
+- Message digest including keyed message digest (HMAC, CMAC)
+
+- Symmetric ciphers
+
+- AEAD ciphers
+
+- Random Number Generators
+
+The interface is provided via a socket of the type AF_ALG. In
+addition, the setsockopt option type is SOL_ALG. In case the user space
+header files do not export these flags yet, use the following macros:
+
+::
+
+ #ifndef AF_ALG
+ #define AF_ALG 38
+ #endif
+ #ifndef SOL_ALG
+ #define SOL_ALG 279
+ #endif
+
+
+A cipher is accessed with the same name as done for the in-kernel API
+calls. This includes the generic vs. unique naming schema for ciphers as
+well as the enforcement of priorities for generic names.
+
+To interact with the kernel crypto API, a socket must be created by the
+user space application. User space invokes the cipher operation with the
+send()/write() system call family. The result of the cipher operation is
+obtained with the read()/recv() system call family.
+
+The following API calls assume that the socket descriptor is already
+opened by the user space application and discuss only the kernel
+crypto API specific invocations.
+
+To initialize the socket interface, the following sequence has to be
+performed by the consumer (a short sketch follows this list):
+
+1. Create a socket of type AF_ALG with the struct sockaddr_alg
+ parameter specified below for the different cipher types.
+
+2. Invoke bind with the socket descriptor
+
+3. Invoke accept with the socket descriptor. The accept system call
+ returns a new file descriptor that is to be used to interact with the
+ particular cipher instance. When invoking send/write or recv/read
+ system calls to send data to the kernel or obtain data from the
+ kernel, the file descriptor returned by accept must be used.
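+
+A minimal sketch of these three steps, assuming a message digest socket
+for "sha1" (error checking omitted; the AF_ALG / SOL_ALG fallback
+definitions from above may be needed):
+
+::
+
+    #include <sys/socket.h>
+    #include <linux/if_alg.h>
+    #include <unistd.h>
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "hash",
+        .salg_name = "sha1"
+    };
+    int tfmfd, opfd;
+
+    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);        /* step 1 */
+    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));  /* step 2 */
+    opfd = accept(tfmfd, NULL, 0);                    /* step 3 */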
+
+In-place Cipher operation
+-------------------------
+
+Just like the in-kernel operation of the kernel crypto API, the user
+space interface allows the cipher operation in-place. That means that
+the input buffer used for the send/write system call and the output
+buffer used by the read/recv system call may be one and the same. This
+is of particular interest for symmetric cipher operations where copying
+of the output data to its final destination can be avoided.
+
+If a consumer on the other hand wants to maintain the plaintext and the
+ciphertext in different memory locations, all a consumer needs to do is
+to provide different memory pointers for the encryption and decryption
+operation.
+
+Message Digest API
+------------------
+
+The message digest type to be used for the cipher operation is selected
+when invoking the bind syscall. bind requires the caller to provide a
+filled struct sockaddr data structure. This data structure must be
+filled as follows:
+
+::
+
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash", /* this selects the hash logic in the kernel */
+ .salg_name = "sha1" /* this is the cipher name */
+ };
+
+
+The salg_type value "hash" applies to message digests and keyed message
+digests. However, a keyed message digest is referenced by the appropriate
+salg_name. Please see below for the setsockopt interface that explains
+how the key can be set for a keyed message digest.
+
+Using the send() system call, the application provides the data that
+should be processed with the message digest. The send system call allows
+the following flags to be specified:
+
+- MSG_MORE: If this flag is set, the send system call acts like a
+ message digest update function where the final hash is not yet
+ calculated. If the flag is not set, the send system call calculates
+ the final message digest immediately.
+
+With the recv() system call, the application can read the message digest
+from the kernel crypto API. If the buffer is too small for the message
+digest, the flag MSG_TRUNC is set by the kernel.
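+
+A short illustrative sketch, reusing the opfd file descriptor from the
+accept() call in the initialization sequence and hypothetical chunk1 /
+chunk2 buffers:
+
+::
+
+    send(opfd, chunk1, chunk1len, MSG_MORE); /* more data follows */
+    send(opfd, chunk2, chunk2len, 0);        /* final chunk */
+    read(opfd, digest, 20);                  /* 20-byte SHA-1 digest */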
+
+In order to set a message digest key, the calling application must use
+the setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC
+operation is performed without the initial HMAC state change caused by
+the key.
+
+Symmetric Cipher API
+--------------------
+
+The operation is very similar to the message digest discussion. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+ .salg_type = "skcipher", /* this selects the symmetric cipher */
+ .salg_name = "cbc(aes)" /* this is the cipher name */
+ };
+
+
+Before data can be sent to the kernel using the write/send system call
+family, the consumer must set the key. The key setting is described with
+the setsockopt invocation below.
+
+Using the sendmsg() system call, the application provides the data that
+should be processed for encryption or decryption. In addition, the IV is
+specified with the data structure provided by the sendmsg() system call.
+
+The additional information is provided in a struct cmsghdr data
+structure embedded into the struct msghdr parameter of the sendmsg
+system call. See recv(2) and cmsg(3) for more information on how the
+cmsghdr data structure is used together with the send/recv system call
+family. That cmsghdr data structure holds the following information,
+specified with separate header instances:
+
+- specification of the cipher operation type with one of these flags:
+
+ - ALG_OP_ENCRYPT - encryption of data
+
+ - ALG_OP_DECRYPT - decryption of data
+
+- specification of the IV information marked with the flag ALG_SET_IV
+
+The send system call family allows the following flag to be specified:
+
+- MSG_MORE: If this flag is set, the send system call acts like a
+ cipher update function where more input data is expected with a
+ subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller
+must make sure that all data matches the constraints given in
+/proc/crypto for the selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be
+at least large enough to hold all blocks of the encrypted or decrypted
+data. If the output buffer is smaller, only as many blocks as fit into
+it are returned.
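+
+The following is an illustrative sketch of one cbc(aes) encryption of a
+single 16-byte block. It assumes that the key has already been set via
+setsockopt(), that opfd is the file descriptor returned by accept(), and
+that pt, ct and iv are hypothetical 16-byte buffers:
+
+::
+
+    struct msghdr msg = { 0 };
+    struct cmsghdr *cmsg;
+    struct af_alg_iv *alg_iv;
+    struct iovec iov = { .iov_base = pt, .iov_len = 16 };
+    /* room for the operation type (4 bytes) and ivlen + IV (4 + 16) */
+    char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4 + 16)] = { 0 };
+
+    msg.msg_control = cbuf;
+    msg.msg_controllen = sizeof(cbuf);
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+
+    /* cipher operation type */
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_ALG;
+    cmsg->cmsg_type = ALG_SET_OP;
+    cmsg->cmsg_len = CMSG_LEN(4);
+    *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
+
+    /* IV */
+    cmsg = CMSG_NXTHDR(&msg, cmsg);
+    cmsg->cmsg_level = SOL_ALG;
+    cmsg->cmsg_type = ALG_SET_IV;
+    cmsg->cmsg_len = CMSG_LEN(4 + 16);
+    alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
+    alg_iv->ivlen = 16;
+    memcpy(alg_iv->iv, iv, 16);
+
+    sendmsg(opfd, &msg, 0);
+    read(opfd, ct, 16);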
+
+AEAD Cipher API
+---------------
+
+The operation is very similar to the symmetric cipher discussion. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+        .salg_type = "aead", /* this selects the AEAD cipher */
+ .salg_name = "gcm(aes)" /* this is the cipher name */
+ };
+
+
+Before data can be sent to the kernel using the write/send system call
+family, the consumer must set the key. The key setting is described with
+the setsockopt invocation below.
+
+In addition, before data can be sent to the kernel using the write/send
+system call family, the consumer must set the authentication tag size.
+To set the authentication tag size, the caller must use the setsockopt
+invocation described below.
+
+Using the sendmsg() system call, the application provides the data that
+should be processed for encryption or decryption. In addition, the IV is
+specified with the data structure provided by the sendmsg() system call.
+
+The additional information is provided in a struct cmsghdr data
+structure embedded into the struct msghdr parameter of the sendmsg
+system call. See recv(2) and cmsg(3) for more information on how the
+cmsghdr data structure is used together with the send/recv system call
+family. That cmsghdr data structure holds the following information,
+specified with separate header instances:
+
+- specification of the cipher operation type with one of these flags:
+
+ - ALG_OP_ENCRYPT - encryption of data
+
+ - ALG_OP_DECRYPT - decryption of data
+
+- specification of the IV information marked with the flag ALG_SET_IV
+
+- specification of the associated authentication data (AAD) with the
+ flag ALG_SET_AEAD_ASSOCLEN. The AAD is sent to the kernel together
+ with the plaintext / ciphertext. See below for the memory structure.
+
+The send system call family allows the following flag to be specified:
+
+- MSG_MORE: If this flag is set, the send system call acts like a
+ cipher update function where more input data is expected with a
+ subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller
+must make sure that all data matches the constraints given in
+/proc/crypto for the selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be
+at least as large as defined with the memory structure below. If the
+output data size is smaller, the cipher operation is not performed.
+
+The authenticated decryption operation may indicate an integrity error.
+Such a breach of integrity is indicated with the -EBADMSG error code.
+
+AEAD Memory Structure
+~~~~~~~~~~~~~~~~~~~~~
+
+The AEAD cipher operates with the following information that is
+communicated between user and kernel space as one data stream:
+
+- plaintext or ciphertext
+
+- associated authentication data (AAD)
+
+- authentication tag
+
+The sizes of the AAD and the authentication tag are provided with the
+sendmsg and setsockopt calls (see there). As the kernel knows the size
+of the entire data stream, the kernel is now able to calculate the right
+offsets of the data components in the data stream.
+
+The user space caller must arrange the aforementioned information in the
+following order:
+
+- AEAD encryption input: AAD \|\| plaintext
+
+- AEAD decryption input: AAD \|\| ciphertext \|\| authentication tag
+
+The output buffer the user space caller provides must be at least large
+enough to hold the following data (a short worked example follows the
+list):
+
+- AEAD encryption output: ciphertext \|\| authentication tag
+
+- AEAD decryption output: plaintext
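+
+As a purely illustrative calculation, assume gcm(aes) with 16 bytes of
+AAD, a 16-byte authentication tag (set via ALG_SET_AEAD_AUTHSIZE) and 64
+bytes of plaintext:
+
+::
+
+    encryption input:  16 (AAD) + 64 (plaintext)             = 80 bytes sent
+    encryption output: 64 (ciphertext) + 16 (tag)            = 80 bytes received
+
+    decryption input:  16 (AAD) + 64 (ciphertext) + 16 (tag) = 96 bytes sent
+    decryption output: 64 (plaintext)                        = 64 bytes received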
+
+Random Number Generator API
+---------------------------
+
+Again, the operation is very similar to the other APIs. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+ struct sockaddr_alg sa = {
+ .salg_family = AF_ALG,
+        .salg_type = "rng", /* this selects the random number generator */
+ .salg_name = "drbg_nopr_sha256" /* this is the cipher name */
+ };
+
+
+Depending on the RNG type, the RNG must be seeded. The seed is provided
+using the setsockopt interface to set the key. For example, the
+ansi_cprng requires a seed. The DRBGs do not require a seed, but may be
+seeded.
+
+Using the read()/recvmsg() system calls, random numbers can be obtained.
+The kernel generates at most 128 bytes in one call. If user space
+requires more data, multiple calls to read()/recvmsg() must be made.
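+
+A minimal sketch, assuming opfd is the file descriptor returned by
+accept() for the rng socket:
+
+::
+
+    unsigned char rnd[128];
+    ssize_t n;
+
+    /* each call returns at most 128 bytes */
+    n = read(opfd, rnd, sizeof(rnd));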
+
+WARNING: The user space caller may invoke the initially mentioned accept
+system call multiple times. In this case, the returned file descriptors
+have the same state.
+
+Zero-Copy Interface
+-------------------
+
+In addition to the send/write/read/recv system call family, the AF_ALG
+interface can be accessed with the zero-copy interface of
+splice/vmsplice. As the name indicates, the kernel tries to avoid a copy
+operation into kernel space.
+
+The zero-copy operation requires data to be aligned at the page
+boundary. Non-aligned data can be used as well, but may require more
+operations of the kernel which would defeat the speed gains obtained
+from the zero-copy interface.
+
+The system-inherent limit for the size of one zero-copy operation is 16
+pages. If more data is to be sent to AF_ALG, user space must slice the
+input into segments with a maximum size of 16 pages.
+
+Zero-copy can be used with the following code example (a complete
+working example is provided with libkcapi):
+
+::
+
+ int pipes[2];
+
+ pipe(pipes);
+ /* input data in iov */
+ vmsplice(pipes[1], iov, iovlen, SPLICE_F_GIFT);
+ /* opfd is the file descriptor returned from accept() system call */
+ splice(pipes[0], NULL, opfd, NULL, ret, 0);
+ read(opfd, out, outlen);
+
+
+Setsockopt Interface
+--------------------
+
+In addition to the read/recv and send/write system call handling to send
+and retrieve data subject to the cipher operation, a consumer also needs
+to set the additional information for the cipher operation. This
+additional information is set using the setsockopt system call that must
+be invoked with the file descriptor of the open cipher (i.e. the file
+descriptor returned by the accept system call).
+
+Each setsockopt invocation must use the level SOL_ALG.
+
+The setsockopt interface allows setting the following data using the
+mentioned optname (a short sketch follows this list):
+
+- ALG_SET_KEY -- Setting the key. Key setting is applicable to:
+
+ - the skcipher cipher type (symmetric ciphers)
+
+ - the hash cipher type (keyed message digests)
+
+ - the AEAD cipher type
+
+ - the RNG cipher type to provide the seed
+
+- ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size for
+  AEAD ciphers. For an encryption operation, the authentication tag of
+ the given size will be generated. For a decryption operation, the
+ provided ciphertext is assumed to contain an authentication tag of
+ the given size (see section about AEAD memory layout below).
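+
+A minimal sketch, assuming a 16-byte AES key in the hypothetical buffer
+key and the cipher socket descriptor sockfd obtained during
+initialization:
+
+::
+
+    unsigned char key[16] = { 0 }; /* filled with the actual key */
+
+    setsockopt(sockfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
+
+    /* for an AEAD cipher: request a 16-byte authentication tag */
+    setsockopt(sockfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);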
+
+User space API example
+----------------------
+
+Please see [1] for libkcapi which provides an easy-to-use wrapper around
+the aforementioned AF_ALG kernel interface. [1] also contains a test
+application that invokes all libkcapi API calls.
+
+[1] http://www.chronox.de/libkcapi.html
diff --git a/Documentation/devicetree/bindings/input/da9062-onkey.txt b/Documentation/devicetree/bindings/input/da9062-onkey.txt
index ab0e0488fe92..5f9fbc68e58a 100644
--- a/Documentation/devicetree/bindings/input/da9062-onkey.txt
+++ b/Documentation/devicetree/bindings/input/da9062-onkey.txt
@@ -1,32 +1,47 @@
-* Dialog DA9062/63 OnKey Module
+* Dialog DA9061/62/63 OnKey Module
-This module is part of the DA9062/DA9063. For more details about entire
-chips see Documentation/devicetree/bindings/mfd/da9062.txt and
-Documentation/devicetree/bindings/mfd/da9063.txt
+This module is part of the DA9061/DA9062/DA9063. For more details about the
+entire DA9062 and DA9061 chips, see Documentation/devicetree/bindings/mfd/da9062.txt.
+For DA9063, see Documentation/devicetree/bindings/mfd/da9063.txt
-This module provides KEY_POWER, KEY_SLEEP and events.
+This module provides the KEY_POWER event.
Required properties:
-- compatible: should be one of:
- dlg,da9062-onkey
- dlg,da9063-onkey
+- compatible: should be one of the following valid compatible string lines:
+ "dlg,da9061-onkey", "dlg,da9062-onkey"
+ "dlg,da9062-onkey"
+ "dlg,da9063-onkey"
Optional properties:
- - dlg,disable-key-power : Disable power-down using a long key-press. If this
+- dlg,disable-key-power : Disable power-down using a long key-press. If this
entry exists the OnKey driver will remove support for the KEY_POWER key
- press. If this entry does not exist then by default the key-press
- triggered power down is enabled and the OnKey will support both KEY_POWER
- and KEY_SLEEP.
+ press when triggered using a long press of the OnKey.
-Example:
-
- pmic0: da9062@58 {
+Example: DA9063
+ pmic0: da9063@58 {
onkey {
compatible = "dlg,da9063-onkey";
dlg,disable-key-power;
};
+ };
+
+Example: DA9062
+
+ pmic0: da9062@58 {
+ onkey {
+ compatible = "dlg,da9062-onkey";
+ dlg,disable-key-power;
+ };
+ };
+
+Example: DA9061 using a fall-back compatible for the DA9062 onkey driver
+ pmic0: da9061@58 {
+ onkey {
+ compatible = "dlg,da9061-onkey", "dlg,da9062-onkey";
+ dlg,disable-key-power;
+ };
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
index 853dff96dd9f..d4927c202aef 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
@@ -17,6 +17,8 @@ Optional properties:
This value depends on the touch screen.
- pre-charge-time: the touch screen need some time to precharge.
This value depends on the touch screen.
+- touchscreen-average-samples: Number of data samples which are averaged for
+ each read. Valid values are 1, 4, 8, 16 and 32.
Example:
tsc: tsc@02040000 {
@@ -32,5 +34,6 @@ Example:
xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
measure-delay-time = <0xfff>;
pre-charge-time = <0xffff>;
+ touchscreen-average-samples = <32>;
status = "okay";
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 820fee4b77b6..ce85ee508238 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -18,6 +18,8 @@ Optional properties:
- touchscreen-inverted-y : See touchscreen.txt
- touchscreen-swapped-x-y : See touchscreen.txt
- silead,max-fingers : maximum number of fingers the touchscreen can detect
+- vddio-supply : regulator phandle for controller VDDIO
+- avdd-supply : regulator phandle for controller AVDD
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
index bccaa4e73045..537643e86f61 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
@@ -14,6 +14,9 @@ Optional properties for Touchscreens:
- touchscreen-fuzz-pressure : pressure noise value of the absolute input
device (arbitrary range dependent on the
controller)
+ - touchscreen-average-samples : Number of data samples which are averaged
+ for each read (valid values dependent on the
+ controller)
- touchscreen-inverted-x : X axis is inverted (boolean)
- touchscreen-inverted-y : Y axis is inverted (boolean)
- touchscreen-swapped-x-y : X and Y axis are swapped (boolean)
diff --git a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
new file mode 100644
index 000000000000..ea151f295ad7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
@@ -0,0 +1,46 @@
+* Altera Arria10 Development Kit System Resource Chip
+
+Required parent device properties:
+- compatible : "altr,a10sr"
+- spi-max-frequency : Maximum SPI frequency.
+- reg : The SPI Chip Select address for the Arria10
+ System Resource chip
+- interrupt-parent : The parent interrupt controller.
+- interrupts : The interrupt line the device is connected to.
+- interrupt-controller : Marks the device node as an interrupt controller.
+- #interrupt-cells : The number of cells to describe an IRQ, should be 2.
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as trigger
+ masks from ../interrupt-controller/interrupts.txt.
+
+The A10SR consists of these sub-devices:
+
+Device Description
+------ ----------
+a10sr_gpio GPIO Controller
+
+Arria10 GPIO
+Required Properties:
+- compatible : Should be "altr,a10sr-gpio"
+- gpio-controller : Marks the device node as a GPIO Controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify flags.
+ See ../gpio/gpio.txt for more information.
+
+Example:
+
+ resource-manager@0 {
+ compatible = "altr,a10sr";
+ reg = <0>;
+ spi-max-frequency = <100000>;
+ interrupt-parent = <&portb>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ a10sr_gpio: gpio-controller {
+ compatible = "altr,a10sr-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
index 37a088f9a648..9e5eba4a4f0d 100644
--- a/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
@@ -10,6 +10,7 @@ voltages and other various functionality to Qualcomm SoCs.
Value type: <string>
Definition: must be one of:
"qcom,pm8058"
+ "qcom,pm8821"
"qcom,pm8921"
- #address-cells:
diff --git a/Documentation/devicetree/bindings/mfd/rn5t618.txt b/Documentation/devicetree/bindings/mfd/rn5t618.txt
index 9e6770b105c9..65c23263cc54 100644
--- a/Documentation/devicetree/bindings/mfd/rn5t618.txt
+++ b/Documentation/devicetree/bindings/mfd/rn5t618.txt
@@ -1,21 +1,25 @@
* Ricoh RN5T567/RN5T618 PMIC
-Ricoh RN5T567/RN5T618 is a power management IC family which integrates
-3 to 4 step-down DCDC converters, 7 low-dropout regulators, GPIOs and
-a watchdog timer. The RN5T618 provides additionally a Li-ion battery
-charger, fuel gauge and an ADC. It can be controlled through an I2C
-interface.
+Ricoh RN5T567/RN5T618/RC5T619 is a power management IC family which
+integrates 3 to 5 step-down DCDC converters, 7 to 10 low-dropout regulators,
+GPIOs, and a watchdog timer. It can be controlled through an I2C interface.
+The RN5T618/RC5T619 provides additionally a Li-ion battery charger,
+fuel gauge, and an ADC.
+The RC5T619 additionally includes USB charger detection and an RTC.
Required properties:
- compatible: must be one of
"ricoh,rn5t567"
"ricoh,rn5t618"
+ "ricoh,rc5t619"
- reg: the I2C slave address of the device
Sub-nodes:
- regulators: the node is required if the regulator functionality is
needed. The valid regulator names are: DCDC1, DCDC2, DCDC3, DCDC4
- (RN5T567), LDO1, LDO2, LDO3, LDO4, LDO5, LDORTC1 and LDORTC2.
+ (RN5T567/RC5T619), LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7, LDO8,
+ LDO9, LDO10, LDORTC1 and LDORTC2.
+ LDO7-10 are specific to RC5T619.
The common bindings for each individual regulator can be found in:
Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/mtd/oxnas-nand.txt b/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
new file mode 100644
index 000000000000..56d5c19da41d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
@@ -0,0 +1,41 @@
+* Oxford Semiconductor OXNAS NAND Controller
+
+Please refer to nand.txt for generic information regarding MTD NAND bindings.
+
+Required properties:
+ - compatible: "oxsemi,ox820-nand"
+ - reg: Base address and length for NAND mapped memory.
+
+Optional Properties:
+ - clocks: phandle to the NAND gate clock if needed.
+ - resets: phandle to the NAND reset control if needed.
+
+Example:
+
+nandc: nand-controller@41000000 {
+ compatible = "oxsemi,ox820-nand";
+ reg = <0x41000000 0x100000>;
+ clocks = <&stdclk CLK_820_NAND>;
+ resets = <&reset RESET_NAND>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "hamming";
+
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x00e00000>;
+ read-only;
+ };
+
+ partition@e00000 {
+ label = "ubi";
+ reg = <0x00e00000 0x07200000>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt b/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt
new file mode 100644
index 000000000000..0040eb8895e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt
@@ -0,0 +1,56 @@
+* Samsung S3C2410 and compatible NAND flash controller
+
+Required properties:
+- compatible : The possible values are:
+ "samsung,s3c2410-nand"
+ "samsung,s3c2412-nand"
+ "samsung,s3c2440-nand"
+- reg : register's location and length.
+- #address-cells, #size-cells : see nand.txt
+- clocks : phandle to the nand controller clock
+- clock-names : must contain "nand"
+
+Optional child nodes:
+Child nodes representing the available nand chips.
+
+Optional child properties:
+- nand-ecc-mode : see nand.txt
+- nand-on-flash-bbt : see nand.txt
+
+Each child device node may optionally contain a 'partitions' sub-node,
+which further contains sub-nodes describing the flash partition mapping.
+See partition.txt for more detail.
+
+Example:
+
+nand-controller@4e000000 {
+ compatible = "samsung,s3c2440-nand";
+ reg = <0x4e000000 0x40>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clocks = <&clocks HCLK_NAND>;
+ clock-names = "nand";
+
+ nand {
+ nand-ecc-mode = "soft";
+ nand-on-flash-bbt;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0 0x040000>;
+ };
+
+ partition@40000 {
+ label = "kernel";
+ reg = <0x040000 0x500000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
new file mode 100644
index 000000000000..ad5a02f2ac8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -0,0 +1,38 @@
+Sigma Designs Tango4 NAND Flash Controller (NFC)
+
+Required properties:
+
+- compatible: "sigma,smp8758-nand"
+- reg: address/size of nfc_reg, nfc_mem, and pbus_reg
+- dmas: reference to the DMA channel used by the controller
+- dma-names: "nfc_sbox"
+- clocks: reference to the system clock
+- #address-cells: <1>
+- #size-cells: <0>
+
+Children nodes represent the available NAND chips.
+See Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
+
+Example:
+
+ nandc: nand-controller@2c000 {
+ compatible = "sigma,smp8758-nand";
+ reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+ dmas = <&dma0 3>;
+ dma-names = "nfc_sbox";
+ clocks = <&clkgen SYS_CLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>; /* CS0 */
+ nand-ecc-strength = <14>;
+ nand-ecc-step-size = <1024>;
+ };
+
+ nand@1 {
+ reg = <1>; /* CS1 */
+ nand-ecc-strength = <14>;
+ nand-ecc-step-size = <1024>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/tps65218.txt b/Documentation/devicetree/bindings/regulator/tps65218.txt
index fccc1d24af58..02f0e9bbfbf8 100644
--- a/Documentation/devicetree/bindings/regulator/tps65218.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65218.txt
@@ -1,23 +1,78 @@
TPS65218 family of regulators
Required properties:
-For tps65218 regulators/LDOs
-- compatible:
- - "ti,tps65218-dcdc1" for DCDC1
- - "ti,tps65218-dcdc2" for DCDC2
- - "ti,tps65218-dcdc3" for DCDC3
- - "ti,tps65218-dcdc4" for DCDC4
- - "ti,tps65218-dcdc5" for DCDC5
- - "ti,tps65218-dcdc6" for DCDC6
- - "ti,tps65218-ldo1" for LDO1
-
-Optional properties:
-- Any optional property defined in bindings/regulator/regulator.txt
+- compatible: "ti,tps65218"
+- reg: I2C slave address
+
+- List of regulators provided by this controller, must be named
+ after their hardware counterparts: dcdc[1-6] and ldo1
+- This is the list of child nodes that specify the regulator
+ initialization data for defined regulators. Not all regulators for the given
+ device need to be present. The definition for each of these nodes is defined
+ using the standard binding for regulators found at ./regulator.txt.
+
+ The valid names for regulators are:
+ tps65218: regulator-dcdc1, regulator-dcdc2, regulator-dcdc3, regulator-dcdc4,
+ regulator-dcdc5, regulator-dcdc6, regulator-ldo1, regulator-ls3.
+ Each regulator is defined using the standard binding for regulators.
Example:
+tps65218: tps65218@24 {
+ reg = <0x24>;
+ compatible = "ti,tps65218";
+ interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* NMIn */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ dcdc1: regulator-dcdc1 {
+ regulator-name = "vdd_core";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <1144000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc2: regulator-dcdc2 {
+ regulator-name = "vdd_mpu";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <1378000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3: regulator-dcdc3 {
+ regulator-name = "vdcdc3";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc5: regulator-dcdc5 {
+ regulator-name = "v1_0bat";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc6: regulator-dcdc6 {
+ regulator-name = "v1_8bat";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1: regulator-ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
- xyz: regulator@0 {
- compatible = "ti,tps65218-dcdc1";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <3000000>;
+ ls3: regulator-ls3 {
+ regulator-min-microvolt = <100000>;
+ regulator-max-microvolt = <1000000>;
};
+};
diff --git a/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt b/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt
new file mode 100644
index 000000000000..5f9df3f1467c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt
@@ -0,0 +1,16 @@
+EPSON TOYOCOM RTC-7301SF/DG
+
+Required properties:
+
+- compatible: Should be "epson,rtc7301sf" or "epson,rtc7301dg"
+- reg: Specifies base physical address and size of the registers.
+- interrupts: A single interrupt specifier.
+
+Example:
+
+rtc: rtc@44a00000 {
+ compatible = "epson,rtc7301dg";
+ reg = <0x44a00000 0x10000>;
+ interrupt-parent = <&axi_intc_0>;
+ interrupts = <3 2>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt b/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt
new file mode 100644
index 000000000000..41c7ae18fd7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/ingenic,jz4740-rtc.txt
@@ -0,0 +1,37 @@
+JZ4740 and similar SoCs real-time clock driver
+
+Required properties:
+
+- compatible: One of:
+ - "ingenic,jz4740-rtc" - for use with the JZ4740 SoC
+ - "ingenic,jz4780-rtc" - for use with the JZ4780 SoC
+- reg: Address range of rtc register set
+- interrupts: IRQ number for the alarm interrupt
+- clocks: phandle to the "rtc" clock
+- clock-names: must be "rtc"
+
+Optional properties:
+- system-power-controller: To use this component as the
+ system power controller
+- reset-pin-assert-time-ms: Reset pin low-level assertion
+ time after wakeup (default 60ms; range 0-125ms if RTC clock
+ at 32 kHz)
+- min-wakeup-pin-assert-time-ms: Minimum wakeup pin assertion
+ time (default 100ms; range 0-2s if RTC clock at 32 kHz)
+
+Example:
+
+rtc@10003000 {
+ compatible = "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <32>;
+
+ clocks = <&rtc_clock>;
+ clock-names = "rtc";
+
+ system-power-controller;
+ reset-pin-assert-time-ms = <60>;
+ min-wakeup-pin-assert-time-ms = <100>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/twl-rtc.txt b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
index 596e0c97be7a..8f9a94f2f896 100644
--- a/Documentation/devicetree/bindings/rtc/twl-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/twl-rtc.txt
@@ -1,12 +1,11 @@
-* TI twl RTC
-
-The TWL family (twl4030/6030) contains a RTC.
+* Texas Instruments TWL4030/6030 RTC
Required properties:
-- compatible : Should be twl4030-rtc
-
-Examples:
-
-rtc@0 {
- compatible = "ti,twl4030-rtc";
-};
+- compatible : Should be "ti,twl4030-rtc"
+- interrupts : Should be the interrupt number.
+
+Example:
+ rtc {
+ compatible = "ti,twl4030-rtc";
+ interrupts = <11>;
+ };
diff --git a/Documentation/features/io/dma-api-debug/arch-support.txt b/Documentation/features/io/dma-api-debug/arch-support.txt
index 4f4a3443b114..ffa522a9bdfd 100644
--- a/Documentation/features/io/dma-api-debug/arch-support.txt
+++ b/Documentation/features/io/dma-api-debug/arch-support.txt
@@ -36,5 +36,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index a97e8e3f4ebb..83d2cf989ea3 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -36,5 +36,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 69e2387ca278..ace63cd7af8c 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -20,7 +20,7 @@ prototypes:
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
struct vfsmount *(*d_automount)(struct path *path);
- int (*d_manage)(struct dentry *, bool);
+ int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *,
unsigned int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index bdd025ceb763..95280079c0b3 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -596,3 +596,7 @@ in your dentry operations instead.
[mandatory]
->rename() has an added flags argument. Any flags not handled by the
filesystem should result in EINVAL being returned.
+--
+[recommended]
+ ->readlink is optional for symlinks. Don't set, unless filesystem needs
+ to fake something for readlink(2).
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b5039a00caaf..b968084eeac1 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -451,9 +451,6 @@ otherwise noted.
exist; this is checked by the VFS. Unlike plain rename,
source and target may be of different type.
- readlink: called by the readlink(2) system call. Only required if
- you want to support reading symbolic links
-
get_link: called by the VFS to follow a symbolic link to the
inode it points to. Only required if you want to support
symbolic links. This method returns the symlink body
@@ -468,6 +465,12 @@ otherwise noted.
argument. If request can't be handled without leaving RCU mode,
have it return ERR_PTR(-ECHILD).
+ readlink: this is now just an override for use by readlink(2) for the
+ cases when ->get_link uses nd_jump_link() or object is not in
+ fact a symlink. Normally filesystems should only implement
+ ->get_link for symlinks and readlink(2) will automatically use
+ that.
+
permission: called by the VFS to check for access rights on a POSIX-like
filesystem.
@@ -948,7 +951,7 @@ struct dentry_operations {
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
- int (*d_manage)(struct dentry *, bool);
+ int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *,
unsigned int);
};
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 2bd8fdc9207c..cb5d77699c60 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -58,6 +58,7 @@ needed).
gpu/index
security/index
sound/index
+ crypto/index
Korean translations
-------------------
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 55f275793028..25feb0d35e7a 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -157,6 +157,11 @@ class ListTableBuilder(object):
def buildTableNode(self):
colwidths = self.directive.get_column_widths(self.max_cols)
+ if isinstance(colwidths, tuple):
+ # Since docutils 0.13, get_column_widths returns a (widths,
+ # colwidths) tuple, where widths is a string (i.e. 'auto').
+ # See https://sourceforge.net/p/docutils/patches/120/.
+ colwidths = colwidths[1]
stub_columns = self.directive.options.get('stub-columns', 0)
header_rows = self.directive.options.get('header-rows', 0)
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index e5dd9f4d6100..fd013bf4115b 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -13,8 +13,12 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
-else is a leaf: no other lock is taken inside the critical sections.
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
2: Exception
------------
diff --git a/MAINTAINERS b/MAINTAINERS
index bf8690d0a1e1..f6eb97b35e0f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5163,6 +5163,12 @@ S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+FREESCALE QORIQ DPAA ETHERNET DRIVER
+M: Madalin Bucur <madalin.bucur@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/dpaa
+
FREESCALE SOC DRIVERS
M: Scott Wood <oss@buserror.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -8064,7 +8070,7 @@ MELLANOX PLATFORM DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
-F: arch/x86/platform/mellanox/mlx-platform.c
+F: drivers/platform/x86/mlx-platform.c
MELLANOX MLX CPLD HOTPLUG DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c
index e9fbcc91c5c0..9e0bc46e90ec 100644
--- a/arch/arm/mach-s3c24xx/common-smdk.c
+++ b/arch/arm/mach-s3c24xx/common-smdk.c
@@ -171,6 +171,7 @@ static struct s3c2410_platform_nand smdk_nand_info = {
.twrph1 = 20,
.nr_sets = ARRAY_SIZE(smdk_nand_sets),
.sets = smdk_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* devices we initialise */
diff --git a/arch/arm/mach-s3c24xx/mach-anubis.c b/arch/arm/mach-s3c24xx/mach-anubis.c
index d03df0df01fa..029ef1b58925 100644
--- a/arch/arm/mach-s3c24xx/mach-anubis.c
+++ b/arch/arm/mach-s3c24xx/mach-anubis.c
@@ -223,6 +223,7 @@ static struct s3c2410_platform_nand __initdata anubis_nand_info = {
.nr_sets = ARRAY_SIZE(anubis_nand_sets),
.sets = anubis_nand_sets,
.select_chip = anubis_nand_select,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* IDE channels */
diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
index 9ae170fef2a7..7b28eb623fc1 100644
--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
+++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
@@ -114,6 +114,7 @@ static struct s3c2410_platform_nand __initdata at2440evb_nand_info = {
.twrph1 = 40,
.nr_sets = ARRAY_SIZE(at2440evb_nand_sets),
.sets = at2440evb_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* DM9000AEP 10/100 ethernet controller */
diff --git a/arch/arm/mach-s3c24xx/mach-bast.c b/arch/arm/mach-s3c24xx/mach-bast.c
index ed07cf392d4b..5185036765db 100644
--- a/arch/arm/mach-s3c24xx/mach-bast.c
+++ b/arch/arm/mach-s3c24xx/mach-bast.c
@@ -299,6 +299,7 @@ static struct s3c2410_platform_nand __initdata bast_nand_info = {
.nr_sets = ARRAY_SIZE(bast_nand_sets),
.sets = bast_nand_sets,
.select_chip = bast_nand_select,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* DM9000 */
diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c
index 27ae6877550f..b0ed401da3a3 100644
--- a/arch/arm/mach-s3c24xx/mach-gta02.c
+++ b/arch/arm/mach-s3c24xx/mach-gta02.c
@@ -443,6 +443,7 @@ static struct s3c2410_platform_nand __initdata gta02_nand_info = {
.twrph1 = 15,
.nr_sets = ARRAY_SIZE(gta02_nand_sets),
.sets = gta02_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index 7d99fe8f6157..895aca225952 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -232,6 +232,7 @@ static struct s3c2410_platform_nand __initdata jive_nand_info = {
.twrph1 = 40,
.sets = jive_nand_sets,
.nr_sets = ARRAY_SIZE(jive_nand_sets),
+ .ecc_mode = NAND_ECC_SOFT,
};
static int __init jive_mtdset(char *options)
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index ec60bd4a1646..71af8d2fd320 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -287,6 +287,7 @@ static struct s3c2410_platform_nand mini2440_nand_info __initdata = {
.nr_sets = ARRAY_SIZE(mini2440_nand_sets),
.sets = mini2440_nand_sets,
.ignore_unset_ecc = 1,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* DM9000AEP 10/100 ethernet controller */
diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c
index 2f6fdc326835..70b0eb7d3134 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris.c
@@ -238,6 +238,7 @@ static struct s3c2410_platform_nand __initdata osiris_nand_info = {
.nr_sets = ARRAY_SIZE(osiris_nand_sets),
.sets = osiris_nand_sets,
.select_chip = osiris_nand_select,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* PCMCIA control and configuration */
diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c
index 984516e8307a..868c82087403 100644
--- a/arch/arm/mach-s3c24xx/mach-qt2410.c
+++ b/arch/arm/mach-s3c24xx/mach-qt2410.c
@@ -284,6 +284,7 @@ static struct s3c2410_platform_nand __initdata qt2410_nand_info = {
.twrph1 = 20,
.nr_sets = ARRAY_SIZE(qt2410_nand_sets),
.sets = qt2410_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
/* UDC */
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 25a139bb9826..e86ad6a68a0b 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -611,6 +611,7 @@ static struct s3c2410_platform_nand rx1950_nand_info = {
.twrph1 = 15,
.nr_sets = ARRAY_SIZE(rx1950_nand_sets),
.sets = rx1950_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = {
diff --git a/arch/arm/mach-s3c24xx/mach-rx3715.c b/arch/arm/mach-s3c24xx/mach-rx3715.c
index cf55196f89ca..a39fb9780dd3 100644
--- a/arch/arm/mach-s3c24xx/mach-rx3715.c
+++ b/arch/arm/mach-s3c24xx/mach-rx3715.c
@@ -164,6 +164,7 @@ static struct s3c2410_platform_nand __initdata rx3715_nand_info = {
.twrph1 = 15,
.nr_sets = ARRAY_SIZE(rx3715_nand_sets),
.sets = rx3715_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct platform_device *rx3715_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c
index b4460d5f7011..f5e6322145fa 100644
--- a/arch/arm/mach-s3c24xx/mach-vstms.c
+++ b/arch/arm/mach-s3c24xx/mach-vstms.c
@@ -117,6 +117,7 @@ static struct s3c2410_platform_nand __initdata vstms_nand_info = {
.twrph1 = 20,
.nr_sets = ARRAY_SIZE(vstms_nand_sets),
.sets = vstms_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct platform_device *vstms_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index bc7dc1fcbf7d..59b5531f1987 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -204,6 +204,7 @@ static struct s3c2410_platform_nand hmt_nand_info = {
.twrph1 = 40,
.nr_sets = ARRAY_SIZE(hmt_nand_sets),
.sets = hmt_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct gpio_led hmt_leds[] = {
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index ae999fb3fe6d..a3e3e25728b4 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -142,6 +142,7 @@ static struct s3c2410_platform_nand mini6410_nand_info = {
.twrph1 = 40,
.nr_sets = ARRAY_SIZE(mini6410_nand_sets),
.sets = mini6410_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 4e240ffa7ac7..d6b3ffd7704b 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -194,6 +194,7 @@ static struct s3c2410_platform_nand real6410_nand_info = {
.twrph1 = 40,
.nr_sets = ARRAY_SIZE(real6410_nand_sets),
.sets = real6410_nand_sets,
+ .ecc_mode = NAND_ECC_SOFT,
};
static struct platform_device *real6410_devices[] __initdata = {
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 600887e491fd..bf466d1876e3 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -15,6 +15,8 @@ int __node_distance(int from, int to);
extern nodemask_t numa_nodes_parsed __initdata;
+extern bool numa_off;
+
/* Mappings between node number and cpus on that node. */
extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
void numa_clear_node(unsigned int cpu);
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 4b32168cf91a..b388a99fea7b 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -35,7 +35,7 @@ static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
static int numa_distance_cnt;
static u8 *numa_distance;
-static bool numa_off;
+bool numa_off;
static __init int numa_parse_early_param(char *opt)
{
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 2db0a6c6daa5..ebef7f40aabb 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -65,6 +65,8 @@ extern int paddr_to_nid(unsigned long paddr);
#define local_nodeid (cpu_to_node_map[smp_processor_id()])
+#define numa_off 0
+
extern void map_cpu_to_node(int cpu, int nid);
extern void unmap_cpu_from_node(int cpu, int nid);
extern void numa_clear_node(int cpu);
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 805ae5d712e8..032fed71223f 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 392
+#define __NR_syscalls 398
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index a8bd3fa28bc7..d8086159d996 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -407,5 +407,11 @@
#define __NR_userfaultfd 389
#define __NR_membarrier 390
#define __NR_mlock2 391
+#define __NR_copy_file_range 392
+#define __NR_preadv2 393
+#define __NR_pwritev2 394
+#define __NR_pkey_mprotect 395
+#define __NR_pkey_alloc 396
+#define __NR_pkey_free 397
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index b70bb538f001..96b3f26d16be 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -49,6 +49,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"9.3", 0x20},
{"9.4", 0x21},
{"9.5", 0x22},
+ {"9.6", 0x23},
+ {"10.0", 0x24},
{NULL, 0},
};
@@ -75,6 +77,10 @@ const struct family_string_key family_string_lookup[] = {
{"zynq7000", 0x12},
{"UltraScale Virtex", 0x13},
{"UltraScale Kintex", 0x14},
+ {"UltraScale+ Zynq", 0x15},
+ {"UltraScale+ Virtex", 0x16},
+ {"UltraScale+ Kintex", 0x17},
+ {"Spartan7", 0x18},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 6b3dd99126d7..6841c2df14d9 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -392,3 +392,9 @@ ENTRY(sys_call_table)
.long sys_userfaultfd
.long sys_membarrier /* 390 */
.long sys_mlock2
+ .long sys_copy_file_range
+ .long sys_preadv2
+ .long sys_pwritev2
+ .long sys_pkey_mprotect /* 395 */
+ .long sys_pkey_alloc
+ .long sys_pkey_free
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 5bbf38b916ef..9e954959f605 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -259,7 +259,7 @@ static int __init xilinx_timer_init(struct device_node *timer)
int ret;
if (initialized)
- return;
+ return -EINVAL;
initialized = 1;
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index f6ae6ed9c4b1..3e1587f1f77a 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -44,6 +44,17 @@
#clock-cells = <1>;
};
+ rtc_dev: rtc@10003000 {
+ compatible = "ingenic,jz4740-rtc";
+ reg = <0x10003000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+
+ clocks = <&cgu JZ4740_CLK_RTC>;
+ clock-names = "rtc";
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4740-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
index 2414d63ae818..be1a7d3a3e1b 100644
--- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
+++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
@@ -13,3 +13,7 @@
&ext {
clock-frequency = <12000000>;
};
+
+&rtc_dev {
+ system-power-controller;
+};
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 073b8bfbb3b3..3645974b7f65 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -22,7 +22,6 @@
extern struct platform_device jz4740_udc_device;
extern struct platform_device jz4740_udc_xceiv_device;
extern struct platform_device jz4740_mmc_device;
-extern struct platform_device jz4740_rtc_device;
extern struct platform_device jz4740_i2c_device;
extern struct platform_device jz4740_nand_device;
extern struct platform_device jz4740_framebuffer_device;
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 258fd03c9ef5..a5bd94b95263 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -438,7 +438,6 @@ static struct platform_device *jz_platform_devices[] __initdata = {
&jz4740_pcm_device,
&jz4740_i2s_device,
&jz4740_codec_device,
- &jz4740_rtc_device,
&jz4740_adc_device,
&jz4740_pwm_device,
&jz4740_dma_device,
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 2f1dab35c061..5b7cdd67a9d9 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -88,27 +88,6 @@ struct platform_device jz4740_mmc_device = {
.resource = jz4740_mmc_resources,
};
-/* RTC controller */
-static struct resource jz4740_rtc_resources[] = {
- {
- .start = JZ4740_RTC_BASE_ADDR,
- .end = JZ4740_RTC_BASE_ADDR + 0x38 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = JZ4740_IRQ_RTC,
- .end = JZ4740_IRQ_RTC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device jz4740_rtc_device = {
- .name = "jz4740-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(jz4740_rtc_resources),
- .resource = jz4740_rtc_resources,
-};
-
/* I2C controller */
static struct resource jz4740_i2c_resources[] = {
{
diff --git a/arch/mips/jz4740/reset.c b/arch/mips/jz4740/reset.c
index 954e669c9e6b..67780c4b6573 100644
--- a/arch/mips/jz4740/reset.c
+++ b/arch/mips/jz4740/reset.c
@@ -57,71 +57,8 @@ static void jz4740_restart(char *command)
jz4740_halt();
}
-#define JZ_REG_RTC_CTRL 0x00
-#define JZ_REG_RTC_HIBERNATE 0x20
-#define JZ_REG_RTC_WAKEUP_FILTER 0x24
-#define JZ_REG_RTC_RESET_COUNTER 0x28
-
-#define JZ_RTC_CTRL_WRDY BIT(7)
-#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
-#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
-
-static inline void jz4740_rtc_wait_ready(void __iomem *rtc_base)
-{
- uint32_t ctrl;
-
- do {
- ctrl = readl(rtc_base + JZ_REG_RTC_CTRL);
- } while (!(ctrl & JZ_RTC_CTRL_WRDY));
-}
-
-static void jz4740_power_off(void)
-{
- void __iomem *rtc_base = ioremap(JZ4740_RTC_BASE_ADDR, 0x38);
- unsigned long wakeup_filter_ticks;
- unsigned long reset_counter_ticks;
- struct clk *rtc_clk;
- unsigned long rtc_rate;
-
- rtc_clk = clk_get(NULL, "rtc");
- if (IS_ERR(rtc_clk))
- panic("unable to get RTC clock");
- rtc_rate = clk_get_rate(rtc_clk);
- clk_put(rtc_clk);
-
- /*
- * Set minimum wakeup pin assertion time: 100 ms.
- * Range is 0 to 2 sec if RTC is clocked at 32 kHz.
- */
- wakeup_filter_ticks = (100 * rtc_rate) / 1000;
- if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
- wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
- else
- wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
- jz4740_rtc_wait_ready(rtc_base);
- writel(wakeup_filter_ticks, rtc_base + JZ_REG_RTC_WAKEUP_FILTER);
-
- /*
- * Set reset pin low-level assertion time after wakeup: 60 ms.
- * Range is 0 to 125 ms if RTC is clocked at 32 kHz.
- */
- reset_counter_ticks = (60 * rtc_rate) / 1000;
- if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
- reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
- else
- reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
- jz4740_rtc_wait_ready(rtc_base);
- writel(reset_counter_ticks, rtc_base + JZ_REG_RTC_RESET_COUNTER);
-
- jz4740_rtc_wait_ready(rtc_base);
- writel(1, rtc_base + JZ_REG_RTC_HIBERNATE);
-
- jz4740_halt();
-}
-
void jz4740_reset_init(void)
{
_machine_restart = jz4740_restart;
_machine_halt = jz4740_halt;
- pm_power_off = jz4740_power_off;
}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 83d2b4ef7f0d..44d67b167e0b 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -295,7 +295,7 @@ out:
* dcookie user still being registered (namely, the reader
* of the event buffer).
*/
-static inline unsigned long fast_get_dcookie(struct path *path)
+static inline unsigned long fast_get_dcookie(const struct path *path)
{
unsigned long cookie;
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 4810e48dbbbf..7d6aaa128e8b 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -50,18 +50,15 @@
/*
* Originally we used small TLB pages for kernel data and grouped some
- * things together as "write once", enforcing the property at the end
+ * things together as ro-after-init, enforcing the property at the end
* of initialization by making those pages read-only and non-coherent.
* This allowed better cache utilization since cache inclusion did not
* need to be maintained. However, to do this requires an extra TLB
* entry, which on balance is more of a performance hit than the
* non-coherence is a performance gain, so we now just make "read
- * mostly" and "write once" be synonyms. We keep the attribute
+ * mostly" and "ro-after-init" be synonyms. We keep the attribute
* separate in case we change our minds at a future date.
*/
-#define __write_once __read_mostly
-
-/* __ro_after_init is the generic name for the tile arch __write_once. */
#define __ro_after_init __read_mostly
#endif /* _ASM_TILE_CACHE_H */
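
Note: the tile hunks in this series retire the arch-private __write_once
attribute in favour of the generic __ro_after_init. A minimal, hedged
sketch of the intended usage, assuming a hypothetical variable that is
written once during boot and never touched afterwards:

	#include <linux/cache.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	/* Written during early boot, read-only once init memory is sealed. */
	static unsigned long example_boot_param __ro_after_init;

	static int __init example_setup(char *str)
	{
		/* hypothetical early_param hook; the name is illustrative */
		return kstrtoul(str, 0, &example_boot_param);
	}
	early_param("example_param", example_setup);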
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 86a746243dc8..50343bfe7936 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -19,9 +19,6 @@
#include <asm-generic/sections.h>
-/* Write-once data is writable only till the end of initialization. */
-extern char __w1data_begin[], __w1data_end[];
-
extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char vdso32_start[], vdso32_end[];
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 2305084c9b93..09233fbe7801 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -43,29 +43,28 @@ void *module_alloc(unsigned long size)
int npages;
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
if (pages == NULL)
return NULL;
for (; i < npages; ++i) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!pages[i])
- goto error;
+ goto free_pages;
}
area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
if (!area)
- goto error;
+ goto free_pages;
area->nr_pages = npages;
area->pages = pages;
if (map_vm_area(area, prot_rwx, pages)) {
vunmap(area->addr);
- goto error;
+ goto free_pages;
}
return area->addr;
-
-error:
+ free_pages:
while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
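
Note: the module.c hunk above switches to kmalloc_array() and renames the
unwind label to describe what it cleans up. The general shape of that
pattern, as a standalone hedged sketch (not the tile code itself; the
function name is hypothetical):

	#include <linux/mm.h>
	#include <linux/slab.h>

	/* Allocate an array of page pointers, unwinding cleanly on failure. */
	static struct page **alloc_page_array(unsigned int npages)
	{
		struct page **pages;
		unsigned int i;

		/* kmalloc_array() guards against npages * sizeof() overflow. */
		pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;

		for (i = 0; i < npages; i++) {
			pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
			if (!pages[i])
				goto free_pages;
		}
		return pages;

	free_pages:
		while (i--)
			__free_page(pages[i]);
		kfree(pages);
		return NULL;
	}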
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 9475a74cd53a..bc6656b5708b 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -57,7 +57,7 @@ static int pci_probe = 1;
* This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip.
*/
-int __write_once tile_plx_gen1;
+int __ro_after_init tile_plx_gen1;
static struct pci_controller controllers[TILE_NUM_PCIE];
static int num_controllers;
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 0e7a5d09e023..b554a68eea1b 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -131,7 +131,7 @@ static int tile_irq_cpu(int irq)
count = cpumask_weight(&intr_cpus_map);
if (unlikely(count == 0)) {
- pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
+ pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
return irq % (smp_height * smp_width);
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 153020abd2f5..443a70bccc1c 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -49,7 +49,7 @@
static inline int ABS(int x) { return x >= 0 ? x : -x; }
/* Chip information */
-char chip_model[64] __write_once;
+char chip_model[64] __ro_after_init;
#ifdef CONFIG_VT
struct screen_info screen_info;
@@ -97,17 +97,17 @@ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
- __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+ __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);
/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
- __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+ __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif
/* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
+int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
EXPORT_SYMBOL(highbits_to_node);
static unsigned int __initdata maxmem_pfn = -1U;
@@ -844,11 +844,11 @@ static void __init zone_sizes_init(void)
#ifdef CONFIG_NUMA
/* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
+struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
EXPORT_SYMBOL(node_2_cpu_mask);
/* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);
/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
@@ -1269,7 +1269,7 @@ static void __init validate_va(void)
* cpus plus any other cpus that are willing to share their cache.
* It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
*/
-struct cpumask __write_once cpu_lotar_map;
+struct cpumask __ro_after_init cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);
/*
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hash_for_home_map);
* cache, those tiles will only appear in cpu_lotar_map, NOT in
* cpu_cacheable_map, as they are a special case.
*/
-struct cpumask __write_once cpu_cacheable_map;
+struct cpumask __ro_after_init cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);
static __initdata struct cpumask disabled_map;
@@ -1506,7 +1506,7 @@ void __init setup_arch(char **cmdline_p)
* Set up per-cpu memory.
*/
-unsigned long __per_cpu_offset[NR_CPUS] __write_once;
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);
static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 07e3ff5cc740..94a62e1197ce 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -27,7 +27,7 @@
* We write to width and height with a single store in head_NN.S,
* so make the variable aligned to "long".
*/
-HV_Topology smp_topology __write_once __aligned(sizeof(long));
+HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
EXPORT_SYMBOL(smp_topology);
#if CHIP_HAS_IPI()
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index ea960d660917..c9357012b1c8 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -37,7 +37,7 @@
*/
/* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __write_once;
+static cycles_t cycles_per_sec __ro_after_init;
cycles_t get_clock_rate(void)
{
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(get_cycles);
*/
#define SCHED_CLOCK_SHIFT 10
-static unsigned long sched_clock_mult __write_once;
+static unsigned long sched_clock_mult __ro_after_init;
static cycles_t clocksource_get_cycles(struct clocksource *cs)
{
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 9772a3554282..4fe78c5b8394 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -22,7 +22,7 @@
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/compat.h>
#include <linux/prctl.h>
#include <asm/cacheflush.h>
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 9c0ec22009a5..c1ebc1065fc1 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -138,19 +138,13 @@ finv_buffer_remote(void *buffer, size_t size, int hfh)
if ((unsigned long)base < (unsigned long)buffer)
base = buffer;
- /*
- * Fire all the loads we need. The MAF only has eight entries
- * so we can have at most eight outstanding loads, so we
- * unroll by that amount.
- */
-#pragma unroll 8
+ /* Fire all the loads we need. */
for (; p >= base; p -= step_size)
force_load(p);
/*
* Repeat, but with finv's instead of loads, to get rid of the
* data we just loaded into our own cache and the old home L3.
- * No need to unroll since finv's don't target a register.
* The finv's are guaranteed not to actually flush the data in
* the buffer back to their home, since we just read it, so the
* lines are clean in cache; we will only invalidate those lines.
diff --git a/arch/tile/mm/extable.c b/arch/tile/mm/extable.c
index 4fb0acb9d154..aeaf20c7aaa4 100644
--- a/arch/tile/mm/extable.c
+++ b/arch/tile/mm/extable.c
@@ -12,7 +12,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index beba986589e5..709f8e9ba3e9 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -29,7 +29,7 @@
#include <linux/tty.h>
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..b51cc28acd0a 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -47,7 +47,7 @@
* The noallocl2 option suppresses all use of the L2 cache to cache
* locally from a remote home.
*/
-static int __write_once noallocl2;
+static int __ro_after_init noallocl2;
static int __init set_noallocl2(char *str)
{
noallocl2 = 1;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index adce25462b0d..3a97e4d7205c 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -190,9 +190,9 @@ static void __init page_table_range_init(unsigned long start,
static int __initdata ktext_hash = 1; /* .text pages */
static int __initdata kdata_hash = 1; /* .data and .bss pages */
-int __write_once hash_default = 1; /* kernel allocator pages */
+int __ro_after_init hash_default = 1; /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
-int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
+int __ro_after_init kstack_hash = 1; /* if no homecaching, use h4h */
/*
* CPUs to use to for striping the pages of kernel data. If hash-for-home
@@ -203,7 +203,7 @@ int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;
-int __write_once kdata_huge; /* if no homecaching, small pages */
+int __ro_after_init kdata_huge; /* if no homecaching, small pages */
/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
@@ -896,8 +896,8 @@ void __init pgtable_cache_init(void)
panic("pgtable_cache_init(): Cannot create pgd cache");
}
-static long __write_once initfree = 1;
-static bool __write_once set_initfree_done;
+static long __ro_after_init initfree = 1;
+static bool __ro_after_init set_initfree_done;
/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dd47e60aabf5..64024c999531 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -555,18 +555,6 @@ config X86_INTEL_QUARK
Say Y here if you have a Quark based system such as the Arduino
compatible Intel Galileo.
-config MLX_PLATFORM
- tristate "Mellanox Technologies platform support"
- depends on X86_64
- depends on X86_EXTENDED_PLATFORM
- ---help---
- This option enables system support for the Mellanox Technologies
- platform.
-
- Say Y here if you are building a kernel for Mellanox system.
-
- Otherwise, say N.
-
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
depends on X86 && ACPI
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..44b8762fa0c7
--- /dev/null
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -0,0 +1,16 @@
+#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/page.h>
+#include <asm/checksum.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/special_insns.h>
+#include <asm/preempt.h>
+
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+#endif
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 59ac427960d4..6ccbf1aaa7ce 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -105,6 +105,7 @@
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index 1c7eefe32502..7ec59edde154 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -229,18 +229,18 @@ static struct fd_routine_l {
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
- request_dma,
- free_dma,
- get_dma_residue,
- dma_mem_alloc,
- hard_dma_setup
+ ._request_dma = request_dma,
+ ._free_dma = free_dma,
+ ._get_dma_residue = get_dma_residue,
+ ._dma_mem_alloc = dma_mem_alloc,
+ ._dma_setup = hard_dma_setup
},
{
- vdma_request_dma,
- vdma_nop,
- vdma_get_dma_residue,
- vdma_mem_alloc,
- vdma_dma_setup
+ ._request_dma = vdma_request_dma,
+ ._free_dma = vdma_nop,
+ ._get_dma_residue = vdma_get_dma_residue,
+ ._dma_mem_alloc = vdma_mem_alloc,
+ ._dma_setup = vdma_dma_setup
}
};
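
Note: the floppy.h hunk above converts positional struct initializers to
designated ones, so adding or reordering members of struct fd_routine_l can
no longer silently bind the wrong function pointer. A minimal illustration
of the difference, with hypothetical names:

	struct ops {
		int (*open)(void);
		int (*close)(void);
	};

	static int ex_open(void)  { return 0; }
	static int ex_close(void) { return 0; }

	/* Positional: breaks silently if 'struct ops' members are reordered. */
	static const struct ops positional = { ex_open, ex_close };

	/* Designated: each member is named, so field order does not matter. */
	static const struct ops designated = {
		.open	= ex_open,
		.close	= ex_close,
	};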
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7892530cbacf..2e25038dbd93 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_apic_map {
/* Hyper-V emulation context */
struct kvm_hv {
+ struct mutex hv_lock;
u64 hv_guest_os_id;
u64 hv_hypercall;
u64 hv_tsc_page;
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 72198c64e646..f9813b6d8b80 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -31,6 +31,10 @@ typedef struct {
u16 pkey_allocation_map;
s16 execute_only_pkey;
#endif
+#ifdef CONFIG_X86_INTEL_MPX
+ /* address of the bounds directory */
+ void __user *bd_addr;
+#endif
} mm_context_t;
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
index 7a35495275a9..0b416d4cf73b 100644
--- a/arch/x86/include/asm/mpx.h
+++ b/arch/x86/include/asm/mpx.h
@@ -59,7 +59,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs);
int mpx_handle_bd_fault(void);
static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{
- return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR);
+ return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
}
static inline void mpx_mm_init(struct mm_struct *mm)
{
@@ -67,7 +67,7 @@ static inline void mpx_mm_init(struct mm_struct *mm)
* NULL is theoretically a valid place to put the bounds
* directory, so point this at an invalid address.
*/
- mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
+ mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
}
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end);
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 1cc82ece9ac1..62b775926045 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -116,8 +116,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
-extern void sync_global_pgds(unsigned long start, unsigned long end,
- int removed);
+extern void sync_global_pgds(unsigned long start, unsigned long end);
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 33b6365c22fe..abb1fdcc545a 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -45,8 +45,17 @@ extern int tsc_clocksource_reliable;
* Boot-time check whether the TSCs are synchronized across
* all CPUs/cores:
*/
+#ifdef CONFIG_X86_TSC
+extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
+extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
+#else
+static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
+static inline void tsc_verify_tsc_adjust(bool resume) { }
+static inline void check_tsc_sync_source(int cpu) { }
+static inline void check_tsc_sync_target(void) { }
+#endif
extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
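
Note: the tsc.h hunk above follows the usual kernel idiom of pairing real
declarations with empty static inline stubs, so callers compile without
any #ifdefs of their own. A generic sketch of the pattern; the config
option and function names are hypothetical:

	#include <linux/types.h>

	/* In a header shared by all configurations: */
	#ifdef CONFIG_EXAMPLE_FEATURE
	extern void example_feature_init(void);
	extern bool example_feature_check(int cpu);
	#else
	static inline void example_feature_init(void) { }
	static inline bool example_feature_check(int cpu) { return false; }
	#endif

	/* Call sites can then be written unconditionally: */
	static inline void example_caller(int cpu)
	{
		example_feature_init();
		if (example_feature_check(cpu))
			pr_debug("feature active on CPU %d\n", cpu);
	}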
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 05110c1097ae..581386c7e429 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -75,7 +75,7 @@ apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o
-obj-$(CONFIG_SMP) += tsc_sync.o
+obj-$(CONFIG_X86_TSC) += tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4764fa56924d..6f65b0eed384 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -715,7 +715,7 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
int nid;
nid = acpi_get_node(handle);
- if (nid != -1) {
+ if (nid != NUMA_NO_NODE) {
set_apicid_to_node(physid, nid);
numa_set_node(cpu, nid);
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index bb47e5eacd44..5b7e43eff139 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2160,21 +2160,6 @@ int __generic_processor_info(int apicid, int version, bool enabled)
}
/*
- * This can happen on physical hotplug. The sanity check at boot time
- * is done from native_smp_prepare_cpus() after num_possible_cpus() is
- * established.
- */
- if (topology_update_package_map(apicid, cpu) < 0) {
- int thiscpu = max + disabled_cpus;
-
- pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
- thiscpu, apicid);
-
- disabled_cpus++;
- return -ENOSPC;
- }
-
- /*
* Validate version
*/
if (version == 0x0) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 729f92ba8224..1f6b50a449ab 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,29 +979,21 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
}
/*
- * The physical to logical package id mapping is initialized from the
- * acpi/mptables information. Make sure that CPUID actually agrees with
- * that.
+ * Validate that ACPI/mptables have the same information about the
+ * effective APIC id and update the package map.
*/
-static void sanitize_package_id(struct cpuinfo_x86 *c)
+static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
- unsigned int pkg, apicid, cpu = smp_processor_id();
+ unsigned int apicid, cpu = smp_processor_id();
apicid = apic->cpu_present_to_apicid(cpu);
- pkg = apicid >> boot_cpu_data.x86_coreid_bits;
- if (apicid != c->initial_apicid) {
- pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
+ if (apicid != c->apicid) {
+ pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
cpu, apicid, c->initial_apicid);
- c->initial_apicid = apicid;
}
- if (pkg != c->phys_proc_id) {
- pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
- cpu, pkg, c->phys_proc_id);
- c->phys_proc_id = pkg;
- }
- c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
+ BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
#else
c->logical_proc_id = 0;
#endif
@@ -1132,7 +1124,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
- sanitize_package_id(c);
}
/*
@@ -1187,6 +1178,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
enable_sep_cpu();
#endif
mtrr_ap_init();
+ validate_apic_and_package_id(c);
}
static __init int setup_noclflush(char *arg)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 90de28841242..b467b14b03eb 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -298,12 +298,13 @@ ENTRY(start_cpu)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
- call 1f # put return address on stack for unwinder
-1: xorq %rbp, %rbp # clear frame pointer
+ pushq $.Lafter_lret # put return address on stack for unwinder
+ xorq %rbp, %rbp # clear frame pointer
movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
lretq
+.Lafter_lret:
ENDPROC(start_cpu)
#include "verify_cpu.S"
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 43c36d8a6ae2..37363e46b1f0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -235,6 +235,7 @@ static inline void play_dead(void)
void arch_cpu_idle_enter(void)
{
+ tsc_verify_tsc_adjust(false);
local_touch_nmi();
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0c37d4fd01b2..46732dc3b73c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -103,7 +103,6 @@ static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
-static bool logical_packages_frozen __read_mostly;
/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
@@ -273,9 +272,14 @@ static void notrace start_secondary(void *unused)
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
-int topology_update_package_map(unsigned int apicid, unsigned int cpu)
+/**
+ * topology_update_package_map - Update the physical to logical package map
+ * @pkg: The physical package id as retrieved via CPUID
+ * @cpu: The cpu for which this is updated
+ */
+int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
- unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;
+ unsigned int new;
/* Called from early boot ? */
if (!physical_package_map)
@@ -288,16 +292,17 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
if (test_and_set_bit(pkg, physical_package_map))
goto found;
- if (logical_packages_frozen) {
- physical_to_logical_pkg[pkg] = -1;
- pr_warn("APIC(%x) Package %u exceeds logical package max\n",
- apicid, pkg);
+ if (logical_packages >= __max_logical_packages) {
+ pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
+ logical_packages, cpu, __max_logical_packages);
return -ENOSPC;
}
new = logical_packages++;
- pr_info("APIC(%x) Converting physical %u to logical package %u\n",
- apicid, pkg, new);
+ if (new != pkg) {
+ pr_info("CPU %u Converting physical %u to logical package %u\n",
+ cpu, pkg, new);
+ }
physical_to_logical_pkg[pkg] = new;
found:
@@ -318,9 +323,9 @@ int topology_phys_to_logical_pkg(unsigned int phys_pkg)
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);
-static void __init smp_init_package_map(void)
+static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
{
- unsigned int ncpus, cpu;
+ unsigned int ncpus;
size_t size;
/*
@@ -365,27 +370,9 @@ static void __init smp_init_package_map(void)
size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
physical_package_map = kzalloc(size, GFP_KERNEL);
- for_each_present_cpu(cpu) {
- unsigned int apicid = apic->cpu_present_to_apicid(cpu);
-
- if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
- continue;
- if (!topology_update_package_map(apicid, cpu))
- continue;
- pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
- per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
- set_cpu_possible(cpu, false);
- set_cpu_present(cpu, false);
- }
-
- if (logical_packages > __max_logical_packages) {
- pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
- logical_packages, __max_logical_packages);
- logical_packages_frozen = true;
- __max_logical_packages = logical_packages;
- }
-
pr_info("Max logical packages: %u\n", __max_logical_packages);
+
+ topology_update_package_map(c->phys_proc_id, cpu);
}
void __init smp_store_boot_cpu_info(void)
@@ -395,7 +382,7 @@ void __init smp_store_boot_cpu_info(void)
*c = boot_cpu_data;
c->cpu_index = id;
- smp_init_package_map();
+ smp_init_package_map(c, id);
}
/*
@@ -1476,15 +1463,15 @@ __init void prefill_possible_map(void)
possible = i;
}
+ nr_cpu_ids = possible;
+
pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
possible, max_t(int, possible - num_processors, 0));
+ reset_cpu_possible_mask();
+
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
- for (; i < NR_CPUS; i++)
- set_cpu_possible(i, false);
-
- nr_cpu_ids = possible;
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46b2f41f8b05..0aed75a1e31b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -702,6 +702,20 @@ unsigned long native_calibrate_tsc(void)
}
}
+ /*
+ * TSC frequency determined by CPUID is a "hardware reported"
+	 * frequency and is the most accurate one we have so far. This
+ * is considered a known frequency.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ /*
+ * For Atom SoCs TSC is the only reliable clocksource.
+ * Mark TSC reliable so no watchdog on it.
+ */
+ if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+
return crystal_khz * ebx_numerator / eax_denominator;
}
@@ -1043,18 +1057,20 @@ static void detect_art(void)
if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
return;
- cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
- &art_to_tsc_numerator, unused, unused+1);
-
- /* Don't enable ART in a VM, non-stop TSC required */
+ /* Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
- art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+ !boot_cpu_has(X86_FEATURE_TSC_ADJUST))
return;
- if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
+ cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
+ &art_to_tsc_numerator, unused, unused+1);
+
+ if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
return;
+ rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
+
/* Make this sticky over multiple CPU init calls */
setup_force_cpu_cap(X86_FEATURE_ART);
}
@@ -1064,6 +1080,11 @@ static void detect_art(void)
static struct clocksource clocksource_tsc;
+static void tsc_resume(struct clocksource *cs)
+{
+ tsc_verify_tsc_adjust(true);
+}
+
/*
* We used to compare the TSC to the cycle_last value in the clocksource
* structure to avoid a nasty time-warp. This can be observed in a
@@ -1096,6 +1117,7 @@ static struct clocksource clocksource_tsc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY,
.archdata = { .vclock_mode = VCLOCK_TSC },
+ .resume = tsc_resume,
};
void mark_tsc_unstable(char *reason)
@@ -1283,10 +1305,10 @@ static int __init init_tsc_clocksource(void)
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
/*
- * Trust the results of the earlier calibration on systems
- * exporting a reliable TSC.
+ * When TSC frequency is known (retrieved via MSR or CPUID), we skip
+ * the refined calibration and directly register it as a clocksource.
*/
- if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
@@ -1363,6 +1385,8 @@ void __init tsc_init(void)
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
+ else
+ tsc_store_and_check_tsc_adjust(true);
check_system_tsc_reliable();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 0fe720d64fef..19afdbd7d0a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -100,5 +100,24 @@ unsigned long cpu_khz_from_msr(void)
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
#endif
+
+ /*
+ * TSC frequency determined by MSR is always considered "known"
+ * because it is reported by HW.
+ * Another fact is that on MSR capable platforms, PIT/HPET is
+ * generally not available so calibration won't work at all.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ /*
+ * Unfortunately there is no way for hardware to tell whether the
+	 * TSC is reliable. We were told by the silicon design team that the
+	 * TSC on Atom SoCs is always "reliable". TSC is also the only
+ * reliable clocksource on these SoCs (HPET is either not present
+ * or not functional) so mark TSC reliable which removes the
+ * requirement for a watchdog clocksource.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+
return res;
}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 78083bf23ed1..d0db011051a5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -14,18 +14,166 @@
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
+#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
+struct tsc_adjust {
+ s64 bootval;
+ s64 adjusted;
+ unsigned long nextcheck;
+ bool warned;
+};
+
+static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+
+void tsc_verify_tsc_adjust(bool resume)
+{
+ struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
+ s64 curval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return;
+
+ /* Rate limit the MSR check */
+ if (!resume && time_before(jiffies, adj->nextcheck))
+ return;
+
+ adj->nextcheck = jiffies + HZ;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+ if (adj->adjusted == curval)
+ return;
+
+ /* Restore the original value */
+ wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
+
+ if (!adj->warned || resume) {
+ pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
+ smp_processor_id(), adj->adjusted, curval);
+ adj->warned = true;
+ }
+}
+
+static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
+ unsigned int cpu, bool bootcpu)
+{
+ /*
+ * First online CPU in a package stores the boot value in the
+ * adjustment value. This value might change later via the sync
+ * mechanism. If that fails we still can yell about boot values not
+ * being consistent.
+ *
+ * On the boot cpu we just force set the ADJUST value to 0 if it's
+ * non zero. We don't do that on non boot cpus because physical
+ * hotplug should have set the ADJUST register to a value > 0 so
+ * the TSC is in sync with the already running cpus.
+ *
+ * But we always force positive ADJUST values. Otherwise the TSC
+ * deadline timer creates an interrupt storm. We also have to
+	 * prevent values > 0x7FFFFFFF as those wreck the timer as well.
+ */
+ if ((bootcpu && bootval != 0) || (!bootcpu && bootval < 0) ||
+ (bootval > 0x7FFFFFFF)) {
+ pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
+ bootval);
+ wrmsrl(MSR_IA32_TSC_ADJUST, 0);
+ bootval = 0;
+ }
+ cur->adjusted = bootval;
+}
+
+#ifndef CONFIG_SMP
+bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
+{
+ struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return false;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+ cur->nextcheck = jiffies + HZ;
+ tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
+ return false;
+}
+
+#else /* !CONFIG_SMP */
+
+/*
+ * Store and check the TSC ADJUST MSR if available
+ */
+bool tsc_store_and_check_tsc_adjust(bool bootcpu)
+{
+ struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
+ unsigned int refcpu, cpu = smp_processor_id();
+ struct cpumask *mask;
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return false;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+ cur->nextcheck = jiffies + HZ;
+ cur->warned = false;
+
+ /*
+ * Check whether this CPU is the first in a package to come up. In
+ * this case do not check the boot value against another package
+ * because the new package might have been physically hotplugged,
+ * where TSC_ADJUST is expected to be different. When called on the
+ * boot CPU topology_core_cpumask() might not be available yet.
+ */
+ mask = topology_core_cpumask(cpu);
+ refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
+
+ if (refcpu >= nr_cpu_ids) {
+ tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
+ bootcpu);
+ return false;
+ }
+
+ ref = per_cpu_ptr(&tsc_adjust, refcpu);
+ /*
+ * Compare the boot value and complain if it differs in the
+ * package.
+ */
+ if (bootval != ref->bootval) {
+ pr_warn(FW_BUG "TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->bootval, cpu, bootval);
+ }
+ /*
+ * The TSC_ADJUST values in a package must be the same. If the boot
+ * value on this newly upcoming CPU differs from the adjustment
+ * value of the already online CPU in this package, set it to that
+ * adjusted value.
+ */
+ if (bootval != ref->adjusted) {
+ pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->adjusted, cpu, bootval);
+ cur->adjusted = ref->adjusted;
+ wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
+ }
+ /*
+ * We have the TSCs forced to be in sync on this package. Skip sync
+ * test:
+ */
+ return true;
+}
+
/*
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
*/
static atomic_t start_count;
static atomic_t stop_count;
+static atomic_t skip_test;
+static atomic_t test_runs;
/*
* We use a raw spinlock in this exceptional case, because
@@ -37,15 +185,16 @@ static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
+static int random_warps;
/*
* TSC-warp measurement loop running on both CPUs. This is not called
* if there is no TSC.
*/
-static void check_tsc_warp(unsigned int timeout)
+static cycles_t check_tsc_warp(unsigned int timeout)
{
- cycles_t start, now, prev, end;
- int i;
+ cycles_t start, now, prev, end, cur_max_warp = 0;
+ int i, cur_warps = 0;
start = rdtsc_ordered();
/*
@@ -85,13 +234,22 @@ static void check_tsc_warp(unsigned int timeout)
if (unlikely(prev > now)) {
arch_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
+ cur_max_warp = max_warp;
+ /*
+ * Check whether this bounces back and forth. Only
+ * one CPU should observe time going backwards.
+ */
+ if (cur_warps != nr_warps)
+ random_warps++;
nr_warps++;
+ cur_warps = nr_warps;
arch_spin_unlock(&sync_lock);
}
}
WARN(!(now-start),
"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
now-start, end-start);
+ return cur_max_warp;
}
/*
@@ -136,15 +294,26 @@ void check_tsc_sync_source(int cpu)
}
/*
- * Reset it - in case this is a second bootup:
+ * Set the maximum number of test runs to
+ * 1 if the CPU does not provide the TSC_ADJUST MSR
+ * 3 if the MSR is available, so the target can try to adjust
*/
- atomic_set(&stop_count, 0);
-
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ atomic_set(&test_runs, 1);
+ else
+ atomic_set(&test_runs, 3);
+retry:
/*
- * Wait for the target to arrive:
+ * Wait for the target to start or to skip the test:
*/
- while (atomic_read(&start_count) != cpus-1)
+ while (atomic_read(&start_count) != cpus - 1) {
+ if (atomic_read(&skip_test) > 0) {
+ atomic_set(&skip_test, 0);
+ return;
+ }
cpu_relax();
+ }
+
/*
* Trigger the target to continue into the measurement too:
*/
@@ -155,21 +324,35 @@ void check_tsc_sync_source(int cpu)
while (atomic_read(&stop_count) != cpus-1)
cpu_relax();
- if (nr_warps) {
+ /*
+ * If the test was successful set the number of runs to zero and
+	 * stop. If not, decrement the number of runs and check if we can
+ * retry. In case of random warps no retry is attempted.
+ */
+ if (!nr_warps) {
+ atomic_set(&test_runs, 0);
+
+ pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+ smp_processor_id(), cpu);
+
+ } else if (atomic_dec_and_test(&test_runs) || random_warps) {
+ /* Force it to 0 if random warps brought us here */
+ atomic_set(&test_runs, 0);
+
pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
smp_processor_id(), cpu);
pr_warning("Measured %Ld cycles TSC warp between CPUs, "
"turning off TSC clock.\n", max_warp);
+ if (random_warps)
+ pr_warning("TSC warped randomly between CPUs\n");
mark_tsc_unstable("check_tsc_sync_source failed");
- } else {
- pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
- smp_processor_id(), cpu);
}
/*
* Reset it - just in case we boot another CPU later:
*/
atomic_set(&start_count, 0);
+ random_warps = 0;
nr_warps = 0;
max_warp = 0;
last_tsc = 0;
@@ -178,6 +361,12 @@ void check_tsc_sync_source(int cpu)
* Let the target continue with the bootup:
*/
atomic_inc(&stop_count);
+
+ /*
+ * Retry, if there is a chance to do so.
+ */
+ if (atomic_read(&test_runs) > 0)
+ goto retry;
}
/*
@@ -185,6 +374,9 @@ void check_tsc_sync_source(int cpu)
*/
void check_tsc_sync_target(void)
{
+ struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
+ unsigned int cpu = smp_processor_id();
+ cycles_t cur_max_warp, gbl_max_warp;
int cpus = 2;
/* Also aborts if there is no TSC. */
@@ -192,6 +384,16 @@ void check_tsc_sync_target(void)
return;
/*
+ * Store, verify and sanitize the TSC adjust register. If
+ * successful skip the test.
+ */
+ if (tsc_store_and_check_tsc_adjust(false)) {
+ atomic_inc(&skip_test);
+ return;
+ }
+
+retry:
+ /*
* Register this CPU's participation and wait for the
* source CPU to start the measurement:
*/
@@ -199,7 +401,12 @@ void check_tsc_sync_target(void)
while (atomic_read(&start_count) != cpus)
cpu_relax();
- check_tsc_warp(loop_timeout(smp_processor_id()));
+ cur_max_warp = check_tsc_warp(loop_timeout(cpu));
+
+ /*
+ * Store the maximum observed warp value for a potential retry:
+ */
+ gbl_max_warp = max_warp;
/*
* Ok, we are done:
@@ -211,4 +418,61 @@ void check_tsc_sync_target(void)
*/
while (atomic_read(&stop_count) != cpus)
cpu_relax();
+
+ /*
+ * Reset it for the next sync test:
+ */
+ atomic_set(&stop_count, 0);
+
+ /*
+ * Check the number of remaining test runs. If not zero, the test
+ * failed and a retry with adjusted TSC is possible. If zero the
+ * test was either successful or failed terminally.
+ */
+ if (!atomic_read(&test_runs))
+ return;
+
+ /*
+ * If the warp value of this CPU is 0, then the other CPU
+ * observed time going backwards so this TSC was ahead and
+ * needs to move backwards.
+ */
+ if (!cur_max_warp)
+ cur_max_warp = -gbl_max_warp;
+
+ /*
+ * Add the result to the previous adjustment value.
+ *
+	 * The adjustment value is slightly off by the overhead of the
+ * sync mechanism (observed values are ~200 TSC cycles), but this
+ * really depends on CPU, node distance and frequency. So
+ * compensating for this is hard to get right. Experiments show
+	 * that the warp is no longer detectable when the observed warp
+ * value is used. In the worst case the adjustment needs to go
+ * through a 3rd run for fine tuning.
+ */
+ cur->adjusted += cur_max_warp;
+
+ /*
+ * TSC deadline timer stops working or creates an interrupt storm
+	 * with adjust values < 0 and > 0x7FFFFFFF.
+ *
+ * To allow adjust values > 0x7FFFFFFF we need to disable the
+ * deadline timer and use the local APIC timer, but that requires
+ * more intrusive changes and we do not have any useful information
+ * from Intel about the underlying HW wreckage yet.
+ */
+ if (cur->adjusted < 0)
+ cur->adjusted = 0;
+ if (cur->adjusted > 0x7FFFFFFF)
+ cur->adjusted = 0x7FFFFFFF;
+
+ pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
+ cpu, cur_max_warp, cur->adjusted);
+
+ wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
+ goto retry;
+
}
+
+#endif /* CONFIG_SMP */
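
Note: the tsc_sync.c changes above compensate a warping CPU by folding the
observed warp into its TSC_ADJUST value and then bounding the result to the
range the TSC deadline timer tolerates. A hedged, standalone restatement of
just that clamping step (the helper name is hypothetical; the kernel code
uses two explicit if statements rather than clamp_t()):

	#include <linux/kernel.h>

	/*
	 * Clamp a proposed TSC_ADJUST value to [0, 0x7FFFFFFF], mirroring the
	 * bounds applied at the end of check_tsc_sync_target().
	 */
	static s64 example_clamp_tsc_adjust(s64 adjusted, s64 observed_warp)
	{
		adjusted += observed_warp;

		return clamp_t(s64, adjusted, 0, 0x7FFFFFFF);
	}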
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b2d3cf1ef54a..e85f6bd7b9d5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -373,16 +373,17 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
const u32 kvm_cpuid_7_0_ebx_x86_features =
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
- F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
- F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(AVX512BW) | F(AVX512VL);
+ F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
+ F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
+ F(SHA_NI) | F(AVX512BW) | F(AVX512VL);
/* cpuid 0xD.1.eax */
const u32 kvm_cpuid_D_1_eax_x86_features =
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
/* cpuid 7.0.ecx*/
- const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+ const u32 kvm_cpuid_7_0_ecx_x86_features =
+ F(AVX512VBMI) | F(PKU) | 0 /*OSPKE*/;
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 99cde5220e07..1572c35b4f1a 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -852,6 +852,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
return;
+ mutex_lock(&kvm->arch.hyperv.hv_lock);
+ if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ goto out_unlock;
+
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
/*
* Because the TSC parameters only vary when there is a
@@ -859,7 +863,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
*/
if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
&tsc_seq, sizeof(tsc_seq))))
- return;
+ goto out_unlock;
/*
* While we're computing and writing the parameters, force the
@@ -868,15 +872,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
hv->tsc_ref.tsc_sequence = 0;
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
- return;
+ goto out_unlock;
if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
- return;
+ goto out_unlock;
/* Ensure sequence is zero before writing the rest of the struct. */
smp_wmb();
if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
- return;
+ goto out_unlock;
/*
* Now switch to the TSC page mechanism by writing the sequence.
@@ -891,6 +895,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
hv->tsc_ref.tsc_sequence = tsc_seq;
kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+ mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
@@ -1142,9 +1148,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
if (kvm_hv_msr_partition_wide(msr)) {
int r;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
return r;
} else
return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1161,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
if (kvm_hv_msr_partition_wide(msr)) {
int r;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
return r;
} else
return kvm_hv_get_msr(vcpu, msr, pdata);
@@ -1165,7 +1171,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
- return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+ return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
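
[Editor's note] The hyperv.c hunks above replace the bare early returns with a dedicated hv_lock and a re-check of the enable bit once the lock is held, so concurrent MSR writes cannot race the TSC-page setup. Below is a generic user-space sketch of that check / lock / re-check / goto-unlock shape, using a pthread mutex and a made-up flag; it models the control flow only, not the Hyper-V specifics. Build with -lpthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative state: a feature flag plus the lock that serializes updates. */
static pthread_mutex_t hv_lock = PTHREAD_MUTEX_INITIALIZER;
static bool tsc_page_enabled = true;

static void setup_tsc_page(void)
{
	/* Cheap unlocked check first: nothing to do if the page is disabled. */
	if (!tsc_page_enabled)
		return;

	pthread_mutex_lock(&hv_lock);
	/* Re-check under the lock; another thread may have disabled the
	 * page between the first test and the lock acquisition. */
	if (!tsc_page_enabled)
		goto out_unlock;

	/* ... the actual (omitted) setup work happens here ... */
	printf("setting up TSC page\n");

out_unlock:
	pthread_mutex_unlock(&hv_lock);
}

int main(void)
{
	setup_tsc_page();
	return 0;
}
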
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aae43c6f2472..24db5fb6f575 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1389,10 +1389,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
@@ -5728,7 +5728,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+ if (is_nmi(intr_info))
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
@@ -7122,7 +7122,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
- VMXERR_VMCLEAR_VMXON_POINTER);
+ VMXERR_VMPTRLD_VMXON_POINTER);
return kvm_skip_emulated_instruction(vcpu);
}
break;
@@ -8170,7 +8170,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
- if (!is_exception(intr_info))
+ if (is_nmi(intr_info))
return false;
else if (is_page_fault(intr_info))
return enable_ept;
@@ -8765,8 +8765,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK)) {
+ if (is_nmi(exit_intr_info)) {
kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
kvm_after_handle_nmi(&vmx->vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1f0d2383f5ee..445c51b6cf6d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2844,7 +2844,24 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ int idx;
+ /*
+ * Disable page faults because we're in atomic context here.
+ * kvm_write_guest_offset_cached() would call might_fault()
+ * that relies on pagefault_disable() to tell if there's a
+ * bug. NOTE: the write to guest memory may not go through if
+ * during postcopy live migration or if there's heavy guest
+ * paging.
+ */
+ pagefault_disable();
+ /*
+ * kvm_memslots() will be called by
+ * kvm_write_guest_offset_cached() so take the srcu lock.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_steal_time_set_preempted(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ pagefault_enable();
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
@@ -7881,6 +7898,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
mutex_init(&kvm->arch.apic_map_lock);
+ mutex_init(&kvm->arch.hyperv.hv_lock);
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
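
[Editor's note] The kvm_arch_vcpu_put() hunk wraps the steal-time write in pagefault_disable()/pagefault_enable() and an SRCU read-side section because it now runs from a preempt notifier, i.e. atomic context. The nesting order is the interesting part; below is a compile-only user-space sketch with stub functions — every name here is a stand-in, not a kernel API.

#include <stdio.h>

/* Stand-in stubs: in the kernel these would be pagefault_disable(),
 * srcu_read_lock(), the guest-memory write, and their counterparts. */
static void pagefault_disable_stub(void)   { puts("pagefaults off"); }
static void pagefault_enable_stub(void)    { puts("pagefaults on"); }
static int  srcu_read_lock_stub(void)      { puts("srcu locked"); return 0; }
static void srcu_read_unlock_stub(int idx) { (void)idx; puts("srcu unlocked"); }
static void write_steal_time_stub(void)    { puts("steal time recorded"); }

/* Faults are disabled first (atomic context, and the write may touch guest
 * memory), the SRCU read section protects the memslot lookup, and both are
 * undone in reverse order before handing the vCPU back to arch code. */
static void vcpu_put_sketch(void)
{
	int idx;

	pagefault_disable_stub();
	idx = srcu_read_lock_stub();
	write_steal_time_stub();
	srcu_read_unlock_stub(idx);
	pagefault_enable_stub();
}

int main(void)
{
	vcpu_put_sketch();
	return 0;
}
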
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 17c55a536fdd..e3254ca0eec4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -413,7 +413,7 @@ out:
void vmalloc_sync_all(void)
{
- sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
+ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}
/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 14b9dd71d9e8..963895f9af7f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -89,10 +89,10 @@ static int __init nonx32_setup(char *str)
__setup("noexec32=", nonx32_setup);
/*
- * When memory was added/removed make sure all the processes MM have
+ * When memory was added make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
-void sync_global_pgds(unsigned long start, unsigned long end, int removed)
+void sync_global_pgds(unsigned long start, unsigned long end)
{
unsigned long address;
@@ -100,12 +100,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
const pgd_t *pgd_ref = pgd_offset_k(address);
struct page *page;
- /*
- * When it is called after memory hot remove, pgd_none()
- * returns true. In this case (removed == 1), we must clear
- * the PGD entries in the local PGD level page.
- */
- if (pgd_none(*pgd_ref) && !removed)
+ if (pgd_none(*pgd_ref))
continue;
spin_lock(&pgd_lock);
@@ -122,13 +117,8 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
BUG_ON(pgd_page_vaddr(*pgd)
!= pgd_page_vaddr(*pgd_ref));
- if (removed) {
- if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
- pgd_clear(pgd);
- } else {
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
- }
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
spin_unlock(pgt_lock);
}
@@ -596,7 +586,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
}
if (pgd_changed)
- sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
+ sync_global_pgds(vaddr_start, vaddr_end - 1);
__flush_tlb_all();
@@ -1239,7 +1229,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
} else
err = vmemmap_populate_basepages(start, end, node);
if (!err)
- sync_global_pgds(start, end - 1, 0);
+ sync_global_pgds(start, end - 1);
return err;
}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index e4f800999b32..324e5713d386 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -350,12 +350,12 @@ int mpx_enable_management(void)
* The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
* expected to be relatively expensive. Storing the bounds
* directory here means that we do not have to do xsave in the
- * unmap path; we can just use mm->bd_addr instead.
+ * unmap path; we can just use mm->context.bd_addr instead.
*/
bd_base = mpx_get_bounds_dir();
down_write(&mm->mmap_sem);
- mm->bd_addr = bd_base;
- if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
+ mm->context.bd_addr = bd_base;
+ if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
ret = -ENXIO;
up_write(&mm->mmap_sem);
@@ -370,7 +370,7 @@ int mpx_disable_management(void)
return -ENXIO;
down_write(&mm->mmap_sem);
- mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
+ mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
up_write(&mm->mmap_sem);
return 0;
}
@@ -947,7 +947,7 @@ static int try_unmap_single_bt(struct mm_struct *mm,
end = bta_end_vaddr;
}
- bde_vaddr = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
+ bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
/*
* No bounds table there, so nothing to unmap.
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 3f35b48d1d9d..12dcad7297a5 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -19,7 +19,7 @@
#include "numa_internal.h"
-int __initdata numa_off;
+int numa_off;
nodemask_t numa_nodes_parsed __initdata;
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 3c3c19ea94df..184842ef332e 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -8,7 +8,6 @@ obj-y += iris/
obj-y += intel/
obj-y += intel-mid/
obj-y += intel-quark/
-obj-y += mellanox/
obj-y += olpc/
obj-y += scx200/
obj-y += sfi/
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index 1eb47b6298c2..e793fe509971 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -49,8 +49,13 @@ static unsigned long __init mfld_calibrate_tsc(void)
fast_calibrate = ratio * fsb;
pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ /*
+ * TSC on Intel Atom SoCs is reliable and of known frequency.
+ * See tsc_msr.c for details.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
return fast_calibrate;
}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
index 59253db41bbc..e0607c77a1bd 100644
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ b/arch/x86/platform/intel-mid/mrfld.c
@@ -78,8 +78,12 @@ static unsigned long __init tangier_calibrate_tsc(void)
pr_debug("Setting lapic_timer_frequency = %d\n",
lapic_timer_frequency);
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+ /*
+ * TSC on Intel Atom SoCs is reliable and of known frequency.
+ * See tsc_msr.c for details.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
return fast_calibrate;
}
diff --git a/arch/x86/platform/mellanox/Makefile b/arch/x86/platform/mellanox/Makefile
deleted file mode 100644
index f43c93188a1d..000000000000
--- a/arch/x86/platform/mellanox/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 53cace2ec0e2..66ade16c7693 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -252,6 +252,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
fix_processor_context();
do_fpu_end();
+ tsc_verify_tsc_adjust(true);
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
perf_restore_debug_store();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 9fa27ceeecfd..311acad7dad2 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -87,12 +87,6 @@ static void cpu_bringup(void)
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
- /*
- * identify_cpu() may have set logical_pkg_id to -1 due
- * to incorrect phys_proc_id. Let's re-comupte it.
- */
- topology_update_package_map(apic->cpu_present_to_apicid(cpu), cpu);
-
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f61058617ada..f4126cf997a4 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,7 @@ config XTENSA
select GENERIC_SCHED_CLOCK
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
index b1f4ee8c9a22..6106bdc097ad 100644
--- a/arch/xtensa/boot/dts/kc705.dts
+++ b/arch/xtensa/boot/dts/kc705.dts
@@ -11,4 +11,20 @@
device_type = "memory";
reg = <0x00000000 0x38000000>;
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x04000000>;
+ alignment = <0x2000>;
+ alloc-ranges = <0x00000000 0x20000000>;
+ linux,cma-default;
+ };
+ };
};
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 28cf4c5d65ef..b7fbaa56b51a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += bug.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += div64.h
+generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index c31f5d5afc7d..264fb89c444e 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_SMP) += smp.o mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
AFLAGS_head.o += -mtext-section-literals
AFLAGS_mxhead.o += -mtext-section-literals
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 6a16decf278f..70e362e6038e 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -15,6 +15,7 @@
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
+#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@@ -146,6 +147,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
{
unsigned long ret;
unsigned long uncached = 0;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = NULL;
/* ignore region speicifiers */
@@ -153,11 +156,18 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
flag |= GFP_DMA;
- ret = (unsigned long)__get_free_pages(flag, get_order(size));
- if (ret == 0)
+ if (gfpflags_allow_blocking(flag))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+
+ if (!page)
+ page = alloc_pages(flag, get_order(size));
+
+ if (!page)
return NULL;
+ ret = (unsigned long)page_address(page);
+
/* We currently don't support coherent memory outside KSEG */
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
@@ -170,16 +180,19 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
return (void *)uncached;
}
-static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long)vaddr +
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+ struct page *page = virt_to_page(addr);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
- free_pages(addr, get_order(size));
+ if (!dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(size));
}
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
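
[Editor's note] xtensa_dma_alloc() above now tries the CMA pool first when the allocation may block and falls back to the page allocator otherwise; the free path mirrors this by attempting a CMA release before freeing pages. The tiny user-space sketch below models only that try-preferred-then-fall-back shape, with invented allocator names standing in for dma_alloc_from_contiguous() and alloc_pages().

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Invented stand-ins for the contiguous pool and the page allocator. */
static void *alloc_from_cma(size_t size, bool may_block)
{
	if (!may_block)
		return NULL;	/* CMA allocation can sleep, so skip it */
	return malloc(size);	/* pretend this came from the CMA pool */
}

static void *alloc_from_buddy(size_t size)
{
	return malloc(size);
}

static void *dma_alloc_sketch(size_t size, bool may_block)
{
	void *p = alloc_from_cma(size, may_block);

	if (!p)			/* fall back when CMA is unusable or empty */
		p = alloc_from_buddy(size);
	return p;
}

int main(void)
{
	void *a = dma_alloc_sketch(4096, true);   /* likely "CMA" path */
	void *b = dma_alloc_sketch(4096, false);  /* forced fallback path */

	printf("%p %p\n", a, b);
	free(a);
	free(b);
	return 0;
}
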
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644
index 000000000000..07e56e3a9a8b
--- /dev/null
+++ b/arch/xtensa/kernel/s32c1i_selftest.c
@@ -0,0 +1,128 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set. Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ " movi %1, 1f\n"
+ " s32i %1, %4, 0\n"
+ " wsr %2, scompare1\n"
+ "1: s32c1i %0, %3, 0\n"
+ : "=a" (set), "=&a" (tmp)
+ : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+ : "memory"
+ );
+ return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+ unsigned long exccause)
+{
+ if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+ regs->pc += 3; /* skip the s32c1i instruction */
+ rcw_exc = exccause;
+ } else {
+ do_unhandled(regs, exccause);
+ }
+}
+
+/* Simple test of S32C1I (soc bringup assist) */
+
+static int __init check_s32c1i(void)
+{
+ int n, cause1, cause2;
+ void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+ rcw_probe_pc = 0;
+ handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+ do_probed_exception);
+ handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+ do_probed_exception);
+ handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+ do_probed_exception);
+
+ /* First try an S32C1I that does not store: */
+ rcw_exc = 0;
+ rcw_word = 1;
+ n = probed_compare_swap(&rcw_word, 0, 2);
+ cause1 = rcw_exc;
+
+ /* took exception? */
+ if (cause1 != 0) {
+ /* unclean exception? */
+ if (n != 2 || rcw_word != 1)
+ panic("S32C1I exception error");
+ } else if (rcw_word != 1 || n != 1) {
+ panic("S32C1I compare error");
+ }
+
+ /* Then an S32C1I that stores: */
+ rcw_exc = 0;
+ rcw_word = 0x1234567;
+ n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+ cause2 = rcw_exc;
+
+ if (cause2 != 0) {
+ /* unclean exception? */
+ if (n != 0xabcde || rcw_word != 0x1234567)
+ panic("S32C1I exception error (b)");
+ } else if (rcw_word != 0xabcde || n != 0x1234567) {
+ panic("S32C1I store error");
+ }
+
+ /* Verify consistency of exceptions: */
+ if (cause1 || cause2) {
+ pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+ /* If emulation of S32C1I upon bus error gets implemented,
+ * we can get rid of this panic for single core (not SMP)
+ */
+ panic("S32C1I exceptions not currently supported");
+ }
+ if (cause1 != cause2)
+ panic("inconsistent S32C1I exceptions");
+
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+ trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+ return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display reminder for early engr test or demo chips / FPGA bitstreams
+ */
+static int __init check_s32c1i(void)
+{
+ pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+ return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
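
[Editor's note] The new selftest exercises S32C1I twice — once with a compare value that must not store and once with one that must — and panics on any mismatch. A portable sketch of the same two-step check using the GCC/Clang __atomic_compare_exchange_n builtin is shown below; it is illustrative only and does not probe for exceptions the way the Xtensa code does.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int word, expected;

	/* First a CAS that must NOT store: word == 1, compared against 0. */
	word = 1;
	expected = 0;
	__atomic_compare_exchange_n(&word, &expected, 2, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	assert(word == 1 && expected == 1);	/* old value reported back */

	/* Then a CAS that must store: word == 0x1234567, swap in 0xabcde. */
	word = 0x1234567;
	expected = 0x1234567;
	__atomic_compare_exchange_n(&word, &expected, 0xabcde, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	assert(word == 0xabcde && expected == 0x1234567);

	puts("compare-and-swap selftest passed");
	return 0;
}
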
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 88a044af7504..848e8568fb3c 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -31,10 +31,6 @@
# include <linux/console.h>
#endif
-#ifdef CONFIG_RTC
-# include <linux/timex.h>
-#endif
-
#ifdef CONFIG_PROC_FS
# include <linux/seq_file.h>
#endif
@@ -48,24 +44,22 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
-#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <platform/hardware.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
-struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
-#endif
-
-#ifdef CONFIG_BLK_DEV_FD
-extern struct fd_ops no_fd_ops;
-struct fd_ops *fd_ops;
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 24,
+ .orig_video_cols = 80,
+ .orig_video_lines = 24,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16,
+};
#endif
-extern struct rtc_ops no_rtc_ops;
-struct rtc_ops *rtc_ops;
-
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start;
extern unsigned long initrd_end;
@@ -77,7 +71,6 @@ extern int initrd_below_start_ok;
void *dtb_start = __dtb_start;
#endif
-unsigned char aux_device_present;
extern unsigned long loops_per_jiffy;
/* Command line specified as configuration option. */
@@ -317,120 +310,6 @@ extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
-
-#ifdef CONFIG_S32C1I_SELFTEST
-#if XCHAL_HAVE_S32C1I
-
-static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
-
-/*
- * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
- *
- * If *v == cmp, set *v = set. Return previous *v.
- */
-static inline int probed_compare_swap(int *v, int cmp, int set)
-{
- int tmp;
-
- __asm__ __volatile__(
- " movi %1, 1f\n"
- " s32i %1, %4, 0\n"
- " wsr %2, scompare1\n"
- "1: s32c1i %0, %3, 0\n"
- : "=a" (set), "=&a" (tmp)
- : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
- : "memory"
- );
- return set;
-}
-
-/* Handle probed exception */
-
-static void __init do_probed_exception(struct pt_regs *regs,
- unsigned long exccause)
-{
- if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
- regs->pc += 3; /* skip the s32c1i instruction */
- rcw_exc = exccause;
- } else {
- do_unhandled(regs, exccause);
- }
-}
-
-/* Simple test of S32C1I (soc bringup assist) */
-
-static int __init check_s32c1i(void)
-{
- int n, cause1, cause2;
- void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
-
- rcw_probe_pc = 0;
- handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
- do_probed_exception);
- handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
- do_probed_exception);
- handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
- do_probed_exception);
-
- /* First try an S32C1I that does not store: */
- rcw_exc = 0;
- rcw_word = 1;
- n = probed_compare_swap(&rcw_word, 0, 2);
- cause1 = rcw_exc;
-
- /* took exception? */
- if (cause1 != 0) {
- /* unclean exception? */
- if (n != 2 || rcw_word != 1)
- panic("S32C1I exception error");
- } else if (rcw_word != 1 || n != 1) {
- panic("S32C1I compare error");
- }
-
- /* Then an S32C1I that stores: */
- rcw_exc = 0;
- rcw_word = 0x1234567;
- n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
- cause2 = rcw_exc;
-
- if (cause2 != 0) {
- /* unclean exception? */
- if (n != 0xabcde || rcw_word != 0x1234567)
- panic("S32C1I exception error (b)");
- } else if (rcw_word != 0xabcde || n != 0x1234567) {
- panic("S32C1I store error");
- }
-
- /* Verify consistency of exceptions: */
- if (cause1 || cause2) {
- pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
- /* If emulation of S32C1I upon bus error gets implemented,
- we can get rid of this panic for single core (not SMP) */
- panic("S32C1I exceptions not currently supported");
- }
- if (cause1 != cause2)
- panic("inconsistent S32C1I exceptions");
-
- trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
- trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
- trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
- return 0;
-}
-
-#else /* XCHAL_HAVE_S32C1I */
-
-/* This condition should not occur with a commercially deployed processor.
- Display reminder for early engr test or demo chips / FPGA bitstreams */
-static int __init check_s32c1i(void)
-{
- pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
- return 0;
-}
-
-#endif /* XCHAL_HAVE_S32C1I */
-early_initcall(check_s32c1i);
-#endif /* CONFIG_S32C1I_SELFTEST */
-
static inline int mem_reserve(unsigned long start, unsigned long end)
{
return memblock_reserve(start, end - start);
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 80e4cfb2471a..720fe4e8b497 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
#include <asm/bootparam.h>
#include <asm/page.h>
@@ -60,6 +61,7 @@ void __init bootmem_init(void)
max_low_pfn = min(max_pfn, MAX_LOW_PFN);
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
memblock_dump_all();
}
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 668ef402c6eb..f849311e9fd4 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -556,18 +556,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
lock_sock(sk);
/*
- * AEAD memory structure: For encryption, the tag is appended to the
- * ciphertext which implies that the memory allocated for the ciphertext
- * must be increased by the tag length. For decryption, the tag
- * is expected to be concatenated to the ciphertext. The plaintext
- * therefore has a memory size of the ciphertext minus the tag length.
- *
- * The memory structure for cipher operation has the following
- * structure:
- * AEAD encryption input: assoc data || plaintext
- * AEAD encryption output: cipherntext || auth tag
- * AEAD decryption input: assoc data || ciphertext || auth tag
- * AEAD decryption output: plaintext
+ * Please see documentation of aead_request_set_crypt for the
+ * description of the AEAD memory structure expected from the caller.
*/
if (ctx->more) {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index ce3a7a16f03f..edb0c79f7c64 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -70,7 +70,7 @@ int acpi_map_pxm_to_node(int pxm)
{
int node;
- if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
node = pxm_to_node_map[pxm];
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 26ec39ddf21f..ed758b74ddf0 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -75,6 +75,73 @@ struct dax_dev {
struct resource res[0];
};
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%d\n", dax_region->id);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&dax_region->res));
+ device_unlock(dev);
+
+ return rc;
+}
+static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ dax_region = dev_get_drvdata(dev);
+ if (dax_region)
+ rc = sprintf(buf, "%u\n", dax_region->align);
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(align);
+
+static struct attribute *dax_region_attributes[] = {
+ &dev_attr_region_size.attr,
+ &dev_attr_align.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_region_attribute_group = {
+ .name = "dax_region",
+ .attrs = dax_region_attributes,
+};
+
+static const struct attribute_group *dax_region_attribute_groups[] = {
+ &dax_region_attribute_group,
+ NULL,
+};
+
static struct inode *dax_alloc_inode(struct super_block *sb)
{
return kmem_cache_alloc(dax_cache, GFP_KERNEL);
@@ -200,12 +267,31 @@ void dax_region_put(struct dax_region *dax_region)
}
EXPORT_SYMBOL_GPL(dax_region_put);
+static void dax_region_unregister(void *region)
+{
+ struct dax_region *dax_region = region;
+
+ sysfs_remove_groups(&dax_region->dev->kobj,
+ dax_region_attribute_groups);
+ dax_region_put(dax_region);
+}
+
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, unsigned int align, void *addr,
unsigned long pfn_flags)
{
struct dax_region *dax_region;
+ /*
+ * The DAX core assumes that it can store its private data in
+ * parent->driver_data. This WARN is a reminder / safeguard for
+ * developers of device-dax drivers.
+ */
+ if (dev_get_drvdata(parent)) {
+ dev_WARN(parent, "dax core failed to setup private data\n");
+ return NULL;
+ }
+
if (!IS_ALIGNED(res->start, align)
|| !IS_ALIGNED(resource_size(res), align))
return NULL;
@@ -214,6 +300,7 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
if (!dax_region)
return NULL;
+ dev_set_drvdata(parent, dax_region);
memcpy(&dax_region->res, res, sizeof(*res));
dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
@@ -222,7 +309,14 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
dax_region->align = align;
dax_region->dev = parent;
dax_region->base = addr;
+ if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
+ kfree(dax_region);
+ return NULL;
+ }
+ kref_get(&dax_region->kref);
+ if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
+ return NULL;
return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
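
[Editor's note] The new attribute group exposes the region id, size and alignment under a dax_region/ directory of the parent device. A small user-space reader is sketched below; the sysfs path is an assumption for illustration and will differ per platform and parent device.

#include <stdio.h>

/* Hypothetical path; real systems expose the group under the region's
 * parent device, e.g. /sys/devices/platform/.../dax_region/<attr>. */
#define DAX_REGION_DIR "/sys/bus/nd/devices/region0/dax_region"

static void show(const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", DAX_REGION_DIR, attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", attr, buf);
	fclose(f);
}

int main(void)
{
	show("id");
	show("size");
	show("align");
	return 0;
}
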
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 73c6ce93a0d9..033f49b31fdc 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -89,7 +89,8 @@ static int dax_pmem_probe(struct device *dev)
pfn_sb = nd_pfn->pfn_sb;
if (!devm_request_mem_region(dev, nsio->res.start,
- resource_size(&nsio->res), dev_name(dev))) {
+ resource_size(&nsio->res),
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
return -EBUSY;
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 88bebe1968b7..54be60ead08f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -560,7 +560,7 @@ static int __init dmi_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
@@ -588,7 +588,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- pr_debug("DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d779307a9685..46e6dcc089cb 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/mfd/tps65218.h>
struct tps65218_gpio {
@@ -30,7 +31,7 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
unsigned int val;
int ret;
- ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+ ret = regmap_read(tps65218->regmap, TPS65218_REG_ENABLE2, &val);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 5ddde7349fbd..183f5dc1c3f2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -116,6 +116,7 @@ config DRM_I915_GVT_KVMGT
tristate "Enable KVM/VFIO support for Intel GVT-g"
depends on DRM_I915_GVT
depends on KVM
+ depends on VFIO_MDEV && VFIO_MDEV_DEVICE
default n
help
Choose this option if you want to enable KVMGT support for
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 8a46a7f31d53..b123c20e2097 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,6 +5,4 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-
-CFLAGS_kvmgt.o := -Wno-unused-function
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b1a7c8dd4b5f..ad0e9364ee70 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -164,15 +164,17 @@ struct intel_vgpu {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
- struct device *mdev;
+ struct mdev_device *mdev;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
struct eventfd_ctx *msi_trigger;
struct rb_root cache;
struct mutex cache_lock;
- void *vfio_group;
struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
} vdev;
#endif
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index dc0365033157..4dd6722a7339 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -39,24 +40,13 @@
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "gvt.h"
-static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn,
- long npage, int prot, unsigned long *phys_pfn)
-{
- return 0;
-}
-static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn,
- long npage)
-{
- return 0;
-}
-
static const struct intel_gvt_ops *intel_gvt_ops;
-
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
@@ -90,6 +80,15 @@ struct gvt_dma {
kvm_pfn_t pfn;
};
+static inline bool handle_valid(unsigned long handle)
+{
+ return !!(handle & ~0xff);
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev);
+static void intel_vgpu_release_work(struct work_struct *work);
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -167,9 +166,10 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct device *dev = vgpu->vdev.mdev;
+ struct device *dev = &vgpu->vdev.mdev->dev;
struct gvt_dma *this;
- unsigned long pfn;
+ unsigned long g1;
+ int rc;
mutex_lock(&vgpu->vdev.cache_lock);
this = __gvt_cache_find(vgpu, gfn);
@@ -178,8 +178,9 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
return;
}
- pfn = this->pfn;
- WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1));
+ g1 = gfn;
+ rc = vfio_unpin_pages(dev, &g1, 1);
+ WARN_ON(rc != 1);
__gvt_cache_remove_entry(vgpu, this);
mutex_unlock(&vgpu->vdev.cache_lock);
}
@@ -194,15 +195,15 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct device *dev = vgpu->vdev.mdev;
- unsigned long pfn;
+ struct device *dev = &vgpu->vdev.mdev->dev;
+ unsigned long gfn;
mutex_lock(&vgpu->vdev.cache_lock);
while ((node = rb_first(&vgpu->vdev.cache))) {
dma = rb_entry(node, struct gvt_dma, node);
- pfn = dma->pfn;
+ gfn = dma->gfn;
- kvmgt_unpin_pages(dev, &pfn, 1);
+ vfio_unpin_pages(dev, &gfn, 1);
__gvt_cache_remove_entry(vgpu, dma);
}
mutex_unlock(&vgpu->vdev.cache_lock);
@@ -226,7 +227,53 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
+static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ void *gvt = kdev_to_i915(dev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
static struct attribute *type_attrs[] = {
+ &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
NULL,
};
@@ -322,7 +369,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
if (kvmgt_gfn_is_write_protected(info, gfn))
return;
- p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
+ p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
if (WARN(!p, "gfn: 0x%llx\n", gfn))
return;
@@ -342,6 +389,720 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
+static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_type *type;
+ struct device *pdev;
+ void *gvt;
+
+ pdev = mdev->parent->dev;
+ gvt = kdev_to_i915(pdev)->gvt;
+
+ type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+ if (!type) {
+ gvt_err("failed to find type %s to create\n",
+ kobject_name(kobj));
+ return -EINVAL;
+ }
+
+ vgpu = intel_gvt_ops->vgpu_create(gvt, type);
+ if (IS_ERR_OR_NULL(vgpu)) {
+ gvt_err("create intel vgpu failed\n");
+ return -EINVAL;
+ }
+
+ INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+
+ vgpu->vdev.mdev = mdev;
+ mdev_set_drvdata(mdev, vgpu);
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(&mdev->dev));
+ return 0;
+}
+
+static int intel_vgpu_remove(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ if (handle_valid(vgpu->handle))
+ return -EBUSY;
+
+ intel_gvt_ops->vgpu_destroy(vgpu);
+ return 0;
+}
+
+static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.iommu_notifier);
+
+ if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
+ struct vfio_iommu_type1_dma_unmap *unmap = data;
+ unsigned long gfn, end_gfn;
+
+ gfn = unmap->iova >> PAGE_SHIFT;
+ end_gfn = gfn + unmap->size / PAGE_SIZE;
+
+ while (gfn < end_gfn)
+ gvt_cache_remove(vgpu, gfn++);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct intel_vgpu *vgpu = container_of(nb,
+ struct intel_vgpu,
+ vdev.group_notifier);
+
+ /* the only action we care about */
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
+ vgpu->vdev.kvm = data;
+
+ if (!data)
+ schedule_work(&vgpu->vdev.release_work);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int intel_vgpu_open(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+
+ events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->vdev.iommu_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ goto out;
+ }
+
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+ ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+ &vgpu->vdev.group_notifier);
+ if (ret != 0) {
+ gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ goto undo_iommu;
+ }
+
+ return kvmgt_guest_init(mdev);
+
+undo_iommu:
+ vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+out:
+ return ret;
+}
+
+static void __intel_vgpu_release(struct intel_vgpu *vgpu)
+{
+ struct kvmgt_guest_info *info;
+
+ if (!handle_valid(vgpu->handle))
+ return;
+
+ vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->vdev.iommu_notifier);
+ vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->vdev.group_notifier);
+
+ info = (struct kvmgt_guest_info *)vgpu->handle;
+ kvmgt_guest_exit(info);
+ vgpu->handle = 0;
+}
+
+static void intel_vgpu_release(struct mdev_device *mdev)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ __intel_vgpu_release(vgpu);
+}
+
+static void intel_vgpu_release_work(struct work_struct *work)
+{
+ struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
+ vdev.release_work);
+ __intel_vgpu_release(vgpu);
+}
+
+static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+{
+ u32 start_lo, start_hi;
+ u32 mem_type;
+ int pos = PCI_BASE_ADDRESS_0;
+
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
+ + pos + 4));
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ start_hi = 0;
+ break;
+ }
+
+ return ((u64)start_hi << 32) | start_lo;
+}
+
+static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+ size_t count, loff_t *ppos, bool is_write)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int ret = -EINVAL;
+
+
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ gvt_err("invalid index: %u\n", index);
+ return -EINVAL;
+ }
+
+ switch (index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ if (is_write)
+ ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ buf, count);
+ else
+ ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ buf, count);
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ if (is_write) {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar0_start + pos, buf, count);
+ } else {
+ uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
+
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar0_start + pos, buf, count);
+ }
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ case VFIO_PCI_BAR3_REGION_INDEX:
+ case VFIO_PCI_BAR4_REGION_INDEX:
+ case VFIO_PCI_BAR5_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ case VFIO_PCI_ROM_REGION_INDEX:
+ default:
+ gvt_err("unsupported region: %u\n", index);
+ }
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+
+read_err:
+ return -EFAULT;
+}
+
+static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int done = 0;
+ int ret;
+
+ while (count) {
+ size_t filled;
+
+ if (count >= 4 && !(*ppos % 4)) {
+ u32 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 4;
+ } else if (count >= 2 && !(*ppos % 2)) {
+ u16 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val,
+ sizeof(val), ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ *ppos += filled;
+ buf += filled;
+ }
+
+ return done;
+write_err:
+ return -EFAULT;
+}
+
+static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+{
+ unsigned int index;
+ u64 virtaddr;
+ unsigned long req_size, pgoff = 0;
+ pgprot_t pg_prot;
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index != VFIO_PCI_BAR2_REGION_INDEX)
+ return -EINVAL;
+
+ pg_prot = vma->vm_page_prot;
+ virtaddr = vma->vm_start;
+ req_size = vma->vm_end - vma->vm_start;
+ pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+
+ return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
+}
+
+static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
+{
+ if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
+ return 1;
+
+ return 0;
+}
+
+static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags,
+ void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start,
+ unsigned int count, uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ return 0;
+}
+
+static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
+ unsigned int index, unsigned int start, unsigned int count,
+ uint32_t flags, void *data)
+{
+ struct eventfd_ctx *trigger;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int fd = *(int *)data;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ gvt_err("eventfd_ctx_fdget failed\n");
+ return PTR_ERR(trigger);
+ }
+ vgpu->vdev.msi_trigger = trigger;
+ }
+
+ return 0;
+}
+
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+ unsigned int index, unsigned int start, unsigned int count,
+ void *data)
+{
+ int (*func)(struct intel_vgpu *vgpu, unsigned int index,
+ unsigned int start, unsigned int count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = intel_vgpu_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = intel_vgpu_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = intel_vgpu_set_msi_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vgpu, index, start, count, flags, data);
+}
+
+static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned long minsz;
+
+ gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ size_t size;
+ int nr_areas = 1;
+ int cap_type_id;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->cfg_space.bar[info.index].size;
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info.size = gvt_aperture_sz(vgpu->gvt);
+
+ size = sizeof(*sparse) +
+ (nr_areas * sizeof(*sparse->areas));
+ sparse = kzalloc(size, GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+ if (!caps.buf) {
+ kfree(caps.buf);
+ caps.buf = NULL;
+ caps.size = 0;
+ }
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+
+ info.flags = 0;
+ gvt_dbg_core("get region info bar:%d\n", info.index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ gvt_dbg_core("get region info index:%d\n", info.index);
+ break;
+ default:
+ {
+ struct vfio_region_info_cap_type cap_type;
+
+ if (info.index >= VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset =
+ VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vgpu->vdev.region[i].size;
+ info.flags = vgpu->vdev.region[i].flags;
+
+ cap_type.type = vgpu->vdev.region[i].type;
+ cap_type.subtype = vgpu->vdev.region[i].subtype;
+
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_TYPE,
+ &cap_type);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ switch (cap_type_id) {
+ case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
+ ret = vfio_info_add_capability(&caps,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP,
+ sparse);
+ kfree(sparse);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (caps.size) {
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = intel_vgpu_get_irq_count(vgpu, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
+ VFIO_PCI_NUM_IRQS, &data_size);
+ if (ret) {
+ gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+ return -EINVAL;
+ }
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+ }
+
+ ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+ kfree(data);
+
+ return ret;
+ } else if (cmd == VFIO_DEVICE_RESET) {
+ intel_gvt_ops->vgpu_reset(vgpu);
+ return 0;
+ }
+
+ return 0;
+}
+
+static const struct parent_ops intel_vgpu_ops = {
+ .supported_type_groups = intel_vgpu_type_groups,
+ .create = intel_vgpu_create,
+ .remove = intel_vgpu_remove,
+
+ .open = intel_vgpu_open,
+ .release = intel_vgpu_release,
+
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
+};
+
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
if (!intel_gvt_init_vgpu_type_groups(gvt))
@@ -349,22 +1110,28 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
intel_gvt_ops = ops;
- /* MDEV is not yet available */
- return -ENODEV;
+ return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_device(dev);
}
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return -ESRCH;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -384,11 +1151,17 @@ out:
static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct kvm *kvm = info->kvm;
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
struct kvm_memory_slot *slot;
int idx;
+ if (!handle_valid(handle))
+ return 0;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
@@ -476,6 +1249,85 @@ static int kvmgt_detect_host(void)
return kvmgt_check_guest() ? -ENODEV : 0;
}
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
+{
+ struct intel_vgpu *itr;
+ struct kvmgt_guest_info *info;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!handle_valid(itr->handle))
+ continue;
+
+ info = (struct kvmgt_guest_info *)itr->handle;
+ if (kvm && kvm == info->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
+
+static int kvmgt_guest_init(struct mdev_device *mdev)
+{
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvm *kvm;
+
+ vgpu = mdev_get_drvdata(mdev);
+ if (handle_valid(vgpu->handle))
+ return -EEXIST;
+
+ kvm = vgpu->vdev.kvm;
+ if (!kvm || kvm->mm != current->mm) {
+ gvt_err("KVM is required to use Intel vGPU\n");
+ return -ESRCH;
+ }
+
+ if (__kvmgt_vgpu_exist(vgpu, kvm))
+ return -EEXIST;
+
+ info = vzalloc(sizeof(struct kvmgt_guest_info));
+ if (!info)
+ return -ENOMEM;
+
+ vgpu->handle = (unsigned long)info;
+ info->vgpu = vgpu;
+ info->kvm = kvm;
+
+ kvmgt_protect_table_init(info);
+ gvt_cache_init(vgpu);
+
+ info->track_node.track_write = kvmgt_page_track_write;
+ info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(kvm, &info->track_node);
+
+ return 0;
+}
+
+static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
+{
+ struct intel_vgpu *vgpu;
+
+ if (!info) {
+ gvt_err("kvmgt_guest_info invalid\n");
+ return false;
+ }
+
+ vgpu = info->vgpu;
+
+ kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
+ kvmgt_protect_table_destroy(info);
+ gvt_cache_destroy(vgpu);
+ vfree(info);
+
+ return true;
+}
+
static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
/* nothing to do here */
@@ -489,63 +1341,72 @@ static void kvmgt_detach_vgpu(unsigned long handle)
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
- struct intel_vgpu *vgpu = info->vgpu;
+ struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
- if (vgpu->vdev.msi_trigger)
- return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return false;
+ info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
+
+ if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ return 0;
+
+ return -EFAULT;
}
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
unsigned long pfn;
- struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle;
+ struct kvmgt_guest_info *info;
+ struct device *dev;
int rc;
+ if (!handle_valid(handle))
+ return INTEL_GVT_INVALID_ADDR;
+
+ info = (struct kvmgt_guest_info *)handle;
pfn = gvt_cache_find(info->vgpu, gfn);
if (pfn != 0)
return pfn;
- rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ pfn = INTEL_GVT_INVALID_ADDR;
+ dev = &info->vgpu->vdev.mdev->dev;
+ rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
- gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn);
- return 0;
+ gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ return INTEL_GVT_INVALID_ADDR;
}
gvt_cache_add(info->vgpu, gfn, pfn);
return pfn;
}
-static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa)
+static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
+ void *buf, unsigned long len, bool write)
{
- unsigned long pfn;
- gfn_t gfn = gpa_to_gfn(gpa);
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+ int ret;
+ bool kthread = current->mm == NULL;
- pfn = kvmgt_gfn_to_pfn(handle, gfn);
- if (!pfn)
- return NULL;
+ if (!handle_valid(handle))
+ return -ESRCH;
- return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa);
-}
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len, bool write)
-{
- void *hva = NULL;
+ if (kthread)
+ use_mm(kvm->mm);
- hva = kvmgt_gpa_to_hva(handle, gpa);
- if (!hva)
- return -EFAULT;
+ ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
+ kvm_read_guest(kvm, gpa, buf, len);
- if (write)
- memcpy(hva, buf, len);
- else
- memcpy(buf, hva, len);
+ if (kthread)
+ unuse_mm(kvm->mm);
- return 0;
+ return ret;
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
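The rework above drops the pfn/kmap based copy in favour of kvm_read_guest()/kvm_write_guest(), temporarily adopting the VM's mm when the caller is a kernel thread. A minimal sketch of just that pattern, kept separate from the patch for illustration:

#include <linux/kvm_host.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

static int example_read_guest(struct kvm *kvm, gpa_t gpa,
			      void *buf, unsigned long len)
{
	bool kthread = current->mm == NULL;	/* no user mm: kernel thread */
	int ret;

	if (kthread)
		use_mm(kvm->mm);		/* borrow the guest owner's mm */

	ret = kvm_read_guest(kvm, gpa, buf, len);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}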
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83af17ad0f1f..6d9499658671 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -134,6 +134,7 @@ static const struct xpad_device {
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -1044,9 +1045,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0xFF;
- packet->data[11] = 0x00;
- packet->data[12] = 0x00;
+ packet->data[10] = 0xFF; /* on period */
+ packet->data[11] = 0x00; /* off period */
+ packet->data[12] = 0xFF; /* repeat count */
packet->len = 13;
packet->pending = true;
break;
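The actuator fields of the Xbox One rumble packet are 7 bits wide, so the 16-bit force-feedback magnitudes are divided by 512 before being written into the packet. A standalone sketch of that scaling:

#include <stdint.h>
#include <stdio.h>

/* Map a 0..65535 ff_rumble magnitude onto the packet's 0..127 range
 * (65535 / 512 == 127 with integer division). */
static uint8_t xpadone_scale(uint16_t magnitude)
{
	return magnitude / 512;
}

int main(void)
{
	printf("full=%u half=%u off=%u\n",
	       xpadone_scale(0xffff), xpadone_scale(0x8000), xpadone_scale(0));
	return 0;
}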
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 29093657f2ef..582462d0af75 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,15 +26,15 @@
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
+ struct gpio_desc *gpiod;
struct timer_list release_timer;
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
@@ -140,7 +140,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
*/
disable_irq(bdata->irq);
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -358,19 +358,20 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
const struct gpio_keys_button *button = bdata->button;
struct input_dev *input = bdata->input;
unsigned int type = button->type ?: EV_KEY;
- int state = gpio_get_value_cansleep(button->gpio);
+ int state;
+ state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(input->dev.parent, "failed to get gpio state\n");
+ dev_err(input->dev.parent,
+ "failed to get gpio state: %d\n", state);
return;
}
- state = (state ? 1 : 0) ^ button->active_low;
if (type == EV_ABS) {
if (state)
input_event(input, type, button->code, button->value);
} else {
- input_event(input, type, button->code, !!state);
+ input_event(input, type, button->code, state);
}
input_sync(input);
}
@@ -456,7 +457,7 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
cancel_delayed_work_sync(&bdata->work);
else
del_timer_sync(&bdata->release_timer);
@@ -465,7 +466,8 @@ static void gpio_keys_quiesce_key(void *data)
static int gpio_keys_setup_key(struct platform_device *pdev,
struct input_dev *input,
struct gpio_button_data *bdata,
- const struct gpio_keys_button *button)
+ const struct gpio_keys_button *button,
+ struct fwnode_handle *child)
{
const char *desc = button->desc ? button->desc : "gpio_keys";
struct device *dev = &pdev->dev;
@@ -478,18 +480,56 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
bdata->button = button;
spin_lock_init(&bdata->lock);
- if (gpio_is_valid(button->gpio)) {
+ if (child) {
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL, child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error == -ENOENT) {
+ /*
+ * GPIO is optional, we may be dealing with
+ * purely interrupt-driven setup.
+ */
+ bdata->gpiod = NULL;
+ } else {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "failed to get gpio: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ return error;
+ }
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number, so request the GPIO here and
+ * convert it to descriptor.
+ */
+ unsigned flags = GPIOF_IN;
+
+ if (button->active_low)
+ flags |= GPIOF_ACTIVE_LOW;
- error = devm_gpio_request_one(&pdev->dev, button->gpio,
- GPIOF_IN, desc);
+ error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
+ desc);
if (error < 0) {
dev_err(dev, "Failed to request GPIO %d, error %d\n",
button->gpio, error);
return error;
}
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod)
+ return -EINVAL;
+ }
+
+ if (bdata->gpiod) {
if (button->debounce_interval) {
- error = gpio_set_debounce(button->gpio,
+ error = gpiod_set_debounce(bdata->gpiod,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
@@ -500,7 +540,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
if (button->irq) {
bdata->irq = button->irq;
} else {
- irq = gpio_to_irq(button->gpio);
+ irq = gpiod_to_irq(bdata->gpiod);
if (irq < 0) {
error = irq;
dev_err(dev,
@@ -518,9 +558,10 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
} else {
if (!button->irq) {
- dev_err(dev, "No IRQ specified\n");
+ dev_err(dev, "Found button without gpio or irq\n");
return -EINVAL;
}
+
bdata->irq = button->irq;
if (button->type && button->type != EV_KEY) {
@@ -575,7 +616,7 @@ static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
for (i = 0; i < ddata->pdata->nbuttons; i++) {
struct gpio_button_data *bdata = &ddata->data[i];
- if (gpio_is_valid(bdata->button->gpio))
+ if (bdata->gpiod)
gpio_keys_gpio_report_event(bdata);
}
input_sync(input);
@@ -612,25 +653,18 @@ static void gpio_keys_close(struct input_dev *input)
* Handlers for alternative sources of platform_data
*/
-#ifdef CONFIG_OF
/*
- * Translate OpenFirmware node properties into platform_data
+ * Translate properties into platform_data
*/
static struct gpio_keys_platform_data *
gpio_keys_get_devtree_pdata(struct device *dev)
{
- struct device_node *node, *pp;
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
- int error;
+ struct fwnode_handle *child;
int nbuttons;
- int i;
- node = dev->of_node;
- if (!node)
- return ERR_PTR(-ENODEV);
-
- nbuttons = of_get_available_child_count(node);
+ nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
return ERR_PTR(-ENODEV);
@@ -640,64 +674,47 @@ gpio_keys_get_devtree_pdata(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
- pdata->nbuttons = nbuttons;
-
- pdata->rep = !!of_get_property(node, "autorepeat", NULL);
-
- of_property_read_string(node, "label", &pdata->name);
-
- i = 0;
- for_each_available_child_of_node(node, pp) {
- enum of_gpio_flags flags;
+ button = (struct gpio_keys_button *)(pdata + 1);
- button = &pdata->buttons[i++];
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
- button->gpio = of_get_gpio_flags(pp, 0, &flags);
- if (button->gpio < 0) {
- error = button->gpio;
- if (error != -ENOENT) {
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- return ERR_PTR(error);
- }
- } else {
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
- }
+ pdata->rep = device_property_read_bool(dev, "autorepeat");
- button->irq = irq_of_parse_and_map(pp, 0);
+ device_property_read_string(dev, "label", &pdata->name);
- if (!gpio_is_valid(button->gpio) && !button->irq) {
- dev_err(dev, "Found button without gpios or irqs\n");
- return ERR_PTR(-EINVAL);
- }
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child))
+ button->irq =
+ irq_of_parse_and_map(to_of_node(child), 0);
- if (of_property_read_u32(pp, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: 0x%x\n",
- button->gpio);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "Button without keycode\n");
+ fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
- button->desc = of_get_property(pp, "label", NULL);
+ fwnode_property_read_string(child, "label", &button->desc);
- if (of_property_read_u32(pp, "linux,input-type", &button->type))
+ if (fwnode_property_read_u32(child, "linux,input-type",
+ &button->type))
button->type = EV_KEY;
- button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
- /* legacy name */
- of_property_read_bool(pp, "gpio-key,wakeup");
+ button->wakeup =
+ fwnode_property_read_bool(child, "wakeup-source") ||
+ /* legacy name */
+ fwnode_property_read_bool(child, "gpio-key,wakeup");
- button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+ button->can_disable =
+ fwnode_property_read_bool(child, "linux,can-disable");
- if (of_property_read_u32(pp, "debounce-interval",
+ if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -708,20 +725,11 @@ static const struct of_device_id gpio_keys_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
-#else
-
-static inline struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
-#endif
-
static int gpio_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+ struct fwnode_handle *child = NULL;
struct gpio_keys_drvdata *ddata;
struct input_dev *input;
size_t size;
@@ -774,14 +782,28 @@ static int gpio_keys_probe(struct platform_device *pdev)
const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_button_data *bdata = &ddata->data[i];
- error = gpio_keys_setup_key(pdev, input, bdata, button);
- if (error)
+ if (!dev_get_platdata(dev)) {
+ child = device_get_next_child_node(&pdev->dev, child);
+ if (!child) {
+ dev_err(&pdev->dev,
+ "missing child device node for entry %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ error = gpio_keys_setup_key(pdev, input, bdata, button, child);
+ if (error) {
+ fwnode_handle_put(child);
return error;
+ }
if (button->wakeup)
wakeup = 1;
}
+ fwnode_handle_put(child);
+
error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
if (error) {
dev_err(dev, "Unable to export keys/switches, error: %d\n",
@@ -814,8 +836,7 @@ static int gpio_keys_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gpio_keys_suspend(struct device *dev)
+static int __maybe_unused gpio_keys_suspend(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -837,7 +858,7 @@ static int gpio_keys_suspend(struct device *dev)
return 0;
}
-static int gpio_keys_resume(struct device *dev)
+static int __maybe_unused gpio_keys_resume(struct device *dev)
{
struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
struct input_dev *input = ddata->input;
@@ -863,7 +884,6 @@ static int gpio_keys_resume(struct device *dev)
gpio_keys_report_state(ddata);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
@@ -873,7 +893,7 @@ static struct platform_driver gpio_keys_device_driver = {
.driver = {
.name = "gpio-keys",
.pm = &gpio_keys_pm_ops,
- .of_match_table = of_match_ptr(gpio_keys_of_match),
+ .of_match_table = gpio_keys_of_match,
}
};
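With the conversion to GPIO descriptors, active-low polarity and error reporting are handled by gpiolib rather than open-coded in the driver. A minimal sketch of the read pattern used above (the helper name is hypothetical):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_read_button(struct device *dev, struct gpio_desc *gpiod)
{
	/* Returns the logical (active-high) state, or a negative errno. */
	int state = gpiod_get_value_cansleep(gpiod);

	if (state < 0) {
		dev_err(dev, "failed to get gpio state: %d\n", state);
		return state;
	}

	return state;	/* 0 or 1; polarity already applied by gpiolib */
}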
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 62bdb1d48c49..bed4f2086158 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -30,10 +30,10 @@
#define DRV_NAME "gpio-keys-polled"
struct gpio_keys_button_data {
+ struct gpio_desc *gpiod;
int last_state;
int count;
int threshold;
- int can_sleep;
};
struct gpio_keys_polled_dev {
@@ -46,7 +46,7 @@ struct gpio_keys_polled_dev {
};
static void gpio_keys_button_event(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
int state)
{
struct gpio_keys_polled_dev *bdev = dev->private;
@@ -70,21 +70,22 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
}
static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
- struct gpio_keys_button *button,
+ const struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
int state;
- if (bdata->can_sleep)
- state = !!gpiod_get_value_cansleep(button->gpiod);
- else
- state = !!gpiod_get_value(button->gpiod);
-
- gpio_keys_button_event(dev, button, state);
+ state = gpiod_get_value_cansleep(bdata->gpiod);
+ if (state < 0) {
+ dev_err(dev->input->dev.parent,
+ "failed to get gpio state: %d\n", state);
+ } else {
+ gpio_keys_button_event(dev, button, state);
- if (state != bdata->last_state) {
- bdata->count = 0;
- bdata->last_state = state;
+ if (state != bdata->last_state) {
+ bdata->count = 0;
+ bdata->last_state = state;
+ }
}
}
@@ -142,48 +143,35 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
pdata->disable(bdev->dev);
}
-static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
{
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
struct fwnode_handle *child;
- int error;
int nbuttons;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * sizeof(*button),
GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+ button = (struct gpio_keys_button *)(pdata + 1);
+
+ pdata->buttons = button;
+ pdata->nbuttons = nbuttons;
pdata->rep = device_property_present(dev, "autorepeat");
device_property_read_u32(dev, "poll-interval", &pdata->poll_interval);
device_for_each_child_node(dev, child) {
- struct gpio_desc *desc;
-
- desc = devm_get_gpiod_from_child(dev, NULL, child);
- if (IS_ERR(desc)) {
- error = PTR_ERR(desc);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get gpio flags, error: %d\n",
- error);
- fwnode_handle_put(child);
- return ERR_PTR(error);
- }
-
- button = &pdata->buttons[pdata->nbuttons++];
- button->gpiod = desc;
-
- if (fwnode_property_read_u32(child, "linux,code", &button->code)) {
- dev_err(dev, "Button without keycode: %d\n",
- pdata->nbuttons - 1);
+ if (fwnode_property_read_u32(child, "linux,code",
+ &button->code)) {
+ dev_err(dev, "button without keycode\n");
fwnode_handle_put(child);
return ERR_PTR(-EINVAL);
}
@@ -206,10 +194,9 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
if (fwnode_property_read_u32(child, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
- }
- if (pdata->nbuttons == 0)
- return ERR_PTR(-EINVAL);
+ button++;
+ }
return pdata;
}
@@ -220,7 +207,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
int i, min = 0, max = 0;
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
if (button->type != EV_ABS || button->code != code)
continue;
@@ -230,6 +217,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
if (button->value > max)
max = button->value;
}
+
input_set_abs_params(input, code, min, max, 0, 0);
}
@@ -242,6 +230,7 @@ MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
static int gpio_keys_polled_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct fwnode_handle *child = NULL;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_polled_dev *bdev;
struct input_polled_dev *poll_dev;
@@ -254,10 +243,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
pdata = gpio_keys_polled_get_devtree_pdata(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
}
if (!pdata->poll_interval) {
@@ -300,20 +285,48 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
__set_bit(EV_REP, input->evbit);
for (i = 0; i < pdata->nbuttons; i++) {
- struct gpio_keys_button *button = &pdata->buttons[i];
+ const struct gpio_keys_button *button = &pdata->buttons[i];
struct gpio_keys_button_data *bdata = &bdev->data[i];
unsigned int type = button->type ?: EV_KEY;
if (button->wakeup) {
dev_err(dev, DRV_NAME " does not support wakeup\n");
+ fwnode_handle_put(child);
return -EINVAL;
}
- /*
- * Legacy GPIO number so request the GPIO here and
- * convert it to descriptor.
- */
- if (!button->gpiod && gpio_is_valid(button->gpio)) {
+ if (!dev_get_platdata(dev)) {
+ /* No legacy static platform data */
+ child = device_get_next_child_node(dev, child);
+ if (!child) {
+ dev_err(dev, "missing child device node\n");
+ return -EINVAL;
+ }
+
+ bdata->gpiod = devm_get_gpiod_from_child(dev, NULL,
+ child);
+ if (IS_ERR(bdata->gpiod)) {
+ error = PTR_ERR(bdata->gpiod);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get gpio: %d\n",
+ error);
+ fwnode_handle_put(child);
+ return error;
+ }
+
+ error = gpiod_direction_input(bdata->gpiod);
+ if (error) {
+ dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+ desc_to_gpio(bdata->gpiod), error);
+ fwnode_handle_put(child);
+ return error;
+ }
+ } else if (gpio_is_valid(button->gpio)) {
+ /*
+ * Legacy GPIO number so request the GPIO here and
+ * convert it to descriptor.
+ */
unsigned flags = GPIOF_IN;
if (button->active_low)
@@ -322,18 +335,21 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(&pdev->dev, button->gpio,
flags, button->desc ? : DRV_NAME);
if (error) {
- dev_err(dev, "unable to claim gpio %u, err=%d\n",
+ dev_err(dev,
+ "unable to claim gpio %u, err=%d\n",
button->gpio, error);
return error;
}
- button->gpiod = gpio_to_desc(button->gpio);
+ bdata->gpiod = gpio_to_desc(button->gpio);
+ if (!bdata->gpiod) {
+ dev_err(dev,
+ "unable to convert gpio %u to descriptor\n",
+ button->gpio);
+ return -EINVAL;
+ }
}
- if (IS_ERR(button->gpiod))
- return PTR_ERR(button->gpiod);
-
- bdata->can_sleep = gpiod_cansleep(button->gpiod);
bdata->last_state = -1;
bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
pdata->poll_interval);
@@ -344,6 +360,8 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
button->code);
}
+ fwnode_handle_put(child);
+
bdev->poll_dev = poll_dev;
bdev->dev = dev;
bdev->pdata = pdata;
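Both gpio-keys drivers now describe their buttons through the unified device property API, so a single parsing path serves DT and ACPI firmware alike. A small sketch of the child-node walk, assuming the same "linux,code" property as above (illustrative only, not driver code):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

static int example_count_keys(struct device *dev)
{
	struct fwnode_handle *child;
	int n = 0;
	u32 code;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "linux,code", &code)) {
			fwnode_handle_put(child);	/* drop ref on early exit */
			return -EINVAL;
		}
		n++;
	}

	return n;
}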
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 265d641c40e2..632523d4f5dc 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -182,7 +182,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq >= NR_IRQS) {
+ if (irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
return -EINVAL;
}
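The NR_IRQS comparison is dropped because virtual IRQ numbers on SPARSE_IRQ kernels are not bounded by NR_IRQS; only a negative return from platform_get_irq() signals an error. A sketch of the resulting idiom (example_isr is a placeholder handler, not from this driver):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *dev_id);

static int example_request_irq(struct platform_device *pdev, void *priv)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* only a negative value is an error */

	return devm_request_irq(&pdev->dev, irq, example_isr, 0,
				dev_name(&pdev->dev), priv);
}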
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index fcef5d1365e2..e24443376e75 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -316,7 +316,7 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
error = of_property_read_u32(np, "marvell,debounce-interval",
&pdata->debounce_interval);
if (error) {
- dev_err(dev, "failed to parse debpunce-interval\n");
+ dev_err(dev, "failed to parse debounce-interval\n");
return error;
}
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298698fc..3048ef3e3e16 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
int error, col, row;
u8 reg, state, code;
- /* Initial read of the key event FIFO */
- error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ do {
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+ break;
+ }
+
+ /* Assume that key code 0 signifies empty FIFO */
+ if (reg <= 0)
+ break;
- /* Assume that key code 0 signifies empty FIFO */
- while (error >= 0 && reg > 0) {
state = reg & KEY_EVENT_VALUE;
code = reg & KEY_EVENT_CODE;
@@ -184,11 +191,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
/* Read for next loop */
error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
- }
-
- if (error < 0)
- dev_err(&keypad_data->client->dev,
- "unable to read REG_KEY_EVENT_A\n");
+ } while (1);
input_sync(input);
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7ffb614ce566..1ae4d9617ff8 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -625,11 +625,12 @@ config INPUT_DA9055_ONKEY
will be called da9055_onkey.
config INPUT_DA9063_ONKEY
- tristate "Dialog DA9062/63 OnKey"
+ tristate "Dialog DA9063/62/61 OnKey"
depends on MFD_DA9063 || MFD_DA9062
help
- Support the ONKEY of Dialog DA9063 and DA9062 Power Management ICs
- as an input device capable of reporting the power button status.
+ Support the ONKEY of Dialog DA9063, DA9062 and DA9061 Power
+ Management ICs as an input device capable of reporting the
+ power button status.
To compile this driver as a module, choose M here: the module
will be called da9063_onkey.
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b0d445390ee4..2124390ec38c 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -538,8 +538,13 @@ static int bma150_probe(struct i2c_client *client,
return -EIO;
}
+ /*
+ * Note if the IIO CONFIG_BMA180 driver is enabled we want to fail
+ * the probe for the bma180 as the iio driver is preferred.
+ */
chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
- if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
+ if (chip_id != BMA150_CHIP_ID &&
+ (IS_ENABLED(CONFIG_BMA180) || chip_id != BMA180_CHIP_ID)) {
dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
return -EINVAL;
}
@@ -643,7 +648,9 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
static const struct i2c_device_id bma150_id[] = {
{ "bma150", 0 },
+#if !IS_ENABLED(CONFIG_BMA180)
{ "bma180", 0 },
+#endif
{ "smb380", 0 },
{ "bma023", 0 },
{ }
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index bb863e062b03..b4ff1e86d3d3 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -1,5 +1,5 @@
/*
- * OnKey device driver for DA9063 and DA9062 PMICs
+ * OnKey device driver for DA9063, DA9062 and DA9061 PMICs
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,7 @@ static const struct of_device_id da9063_compatible_reg_id_table[] = {
{ .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
{ },
};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
static void da9063_poll_on(struct work_struct *work)
{
@@ -149,13 +150,13 @@ static void da9063_poll_on(struct work_struct *work)
* and then send shutdown command
*/
dev_dbg(&onkey->input->dev,
- "Sending SHUTDOWN to DA9063 ...\n");
+ "Sending SHUTDOWN to PMIC ...\n");
error = regmap_write(onkey->regmap,
config->onkey_shutdown,
config->onkey_shutdown_mask);
if (error)
dev_err(&onkey->input->dev,
- "Cannot SHUTDOWN DA9063: %d\n",
+ "Cannot SHUTDOWN PMIC: %d\n",
error);
}
}
@@ -300,6 +301,6 @@ static struct platform_driver da9063_onkey_driver = {
module_platform_driver(da9063_onkey_driver);
MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063 and DA9062");
+MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063, DA9062 and DA9061");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DA9063_DRVNAME_ONKEY);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 2adfd86c869a..0a2b865b1000 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -18,8 +18,6 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -27,7 +25,6 @@
#include <linux/regulator/consumer.h>
#include <dt-bindings/input/ti-drv260x.h>
-#include <linux/platform_data/drv260x-pdata.h>
#define DRV260X_STATUS 0x0
#define DRV260X_MODE 0x1
@@ -468,90 +465,39 @@ static const struct regmap_config drv260x_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-#ifdef CONFIG_OF
-static int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- struct device_node *np = dev->of_node;
- unsigned int voltage;
- int error;
-
- error = of_property_read_u32(np, "mode", &haptics->mode);
- if (error) {
- dev_err(dev, "%s: No entry for mode\n", __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "library-sel", &haptics->library);
- if (error) {
- dev_err(dev, "%s: No entry for library selection\n",
- __func__);
- return error;
- }
-
- error = of_property_read_u32(np, "vib-rated-mv", &voltage);
- if (!error)
- haptics->rated_voltage = drv260x_calculate_voltage(voltage);
-
-
- error = of_property_read_u32(np, "vib-overdrive-mv", &voltage);
- if (!error)
- haptics->overdrive_voltage = drv260x_calculate_voltage(voltage);
-
- return 0;
-}
-#else
-static inline int drv260x_parse_dt(struct device *dev,
- struct drv260x_data *haptics)
-{
- dev_err(dev, "no platform data defined\n");
-
- return -EINVAL;
-}
-#endif
-
static int drv260x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct device *dev = &client->dev;
struct drv260x_data *haptics;
+ u32 voltage;
int error;
- haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+ haptics = devm_kzalloc(dev, sizeof(*haptics), GFP_KERNEL);
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
- haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
-
- if (pdata) {
- haptics->mode = pdata->mode;
- haptics->library = pdata->library_selection;
- if (pdata->vib_overdrive_voltage)
- haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage);
- if (pdata->vib_rated_voltage)
- haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage);
- } else if (client->dev.of_node) {
- error = drv260x_parse_dt(&client->dev, haptics);
- if (error)
- return error;
- } else {
- dev_err(&client->dev, "Platform data not set\n");
- return -ENODEV;
+ error = device_property_read_u32(dev, "mode", &haptics->mode);
+ if (error) {
+ dev_err(dev, "Can't fetch 'mode' property: %d\n", error);
+ return error;
}
-
if (haptics->mode < DRV260X_LRA_MODE ||
haptics->mode > DRV260X_ERM_MODE) {
- dev_err(&client->dev,
- "Vibrator mode is invalid: %i\n",
- haptics->mode);
+ dev_err(dev, "Vibrator mode is invalid: %i\n", haptics->mode);
return -EINVAL;
}
+ error = device_property_read_u32(dev, "library-sel", &haptics->library);
+ if (error) {
+ dev_err(dev, "Can't fetch 'library-sel' property: %d\n", error);
+ return error;
+ }
+
if (haptics->library < DRV260X_LIB_EMPTY ||
haptics->library > DRV260X_ERM_LIB_F) {
- dev_err(&client->dev,
+ dev_err(dev,
"Library value is invalid: %i\n", haptics->library);
return -EINVAL;
}
@@ -559,40 +505,44 @@ static int drv260x_probe(struct i2c_client *client,
if (haptics->mode == DRV260X_LRA_MODE &&
haptics->library != DRV260X_LIB_EMPTY &&
haptics->library != DRV260X_LIB_LRA) {
- dev_err(&client->dev,
- "LRA Mode with ERM Library mismatch\n");
+ dev_err(dev, "LRA Mode with ERM Library mismatch\n");
return -EINVAL;
}
if (haptics->mode == DRV260X_ERM_MODE &&
(haptics->library == DRV260X_LIB_EMPTY ||
haptics->library == DRV260X_LIB_LRA)) {
- dev_err(&client->dev,
- "ERM Mode with LRA Library mismatch\n");
+ dev_err(dev, "ERM Mode with LRA Library mismatch\n");
return -EINVAL;
}
- haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+ error = device_property_read_u32(dev, "vib-rated-mv", &voltage);
+ haptics->rated_voltage = error ? DRV260X_DEF_RATED_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ error = device_property_read_u32(dev, "vib-overdrive-mv", &voltage);
+ haptics->overdrive_voltage = error ? DRV260X_DEF_OD_CLAMP_VOLT :
+ drv260x_calculate_voltage(voltage);
+
+ haptics->regulator = devm_regulator_get(dev, "vbat");
if (IS_ERR(haptics->regulator)) {
error = PTR_ERR(haptics->regulator);
- dev_err(&client->dev,
- "unable to get regulator, error: %d\n", error);
+ dev_err(dev, "unable to get regulator, error: %d\n", error);
return error;
}
- haptics->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ haptics->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
if (IS_ERR(haptics->enable_gpio))
return PTR_ERR(haptics->enable_gpio);
- haptics->input_dev = devm_input_allocate_device(&client->dev);
+ haptics->input_dev = devm_input_allocate_device(dev);
if (!haptics->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
return -ENOMEM;
}
haptics->input_dev->name = "drv260x:haptics";
- haptics->input_dev->dev.parent = client->dev.parent;
haptics->input_dev->close = drv260x_close;
input_set_drvdata(haptics->input_dev, haptics);
input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
@@ -600,8 +550,7 @@ static int drv260x_probe(struct i2c_client *client,
error = input_ff_create_memless(haptics->input_dev, NULL,
drv260x_haptics_play);
if (error) {
- dev_err(&client->dev, "input_ff_create() failed: %d\n",
- error);
+ dev_err(dev, "input_ff_create() failed: %d\n", error);
return error;
}
@@ -613,21 +562,19 @@ static int drv260x_probe(struct i2c_client *client,
haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
if (IS_ERR(haptics->regmap)) {
error = PTR_ERR(haptics->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- error);
+ dev_err(dev, "Failed to allocate register map: %d\n", error);
return error;
}
error = drv260x_init(haptics);
if (error) {
- dev_err(&client->dev, "Device init failed: %d\n", error);
+ dev_err(dev, "Device init failed: %d\n", error);
return error;
}
error = input_register_device(haptics->input_dev);
if (error) {
- dev_err(&client->dev, "couldn't register input device: %d\n",
- error);
+ dev_err(dev, "couldn't register input device: %d\n", error);
return error;
}
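The drv260x conversion treats the voltage properties as optional: a failed device_property_read_u32() simply leaves the default in place, which replaces the old platform-data/OF split. A sketch of that idiom with a hypothetical default constant:

#include <linux/device.h>
#include <linux/property.h>

#define EXAMPLE_DEF_RATED_MV	3200	/* assumed default, for illustration */

static u32 example_rated_mv(struct device *dev)
{
	u32 mv;

	if (device_property_read_u32(dev, "vib-rated-mv", &mv))
		mv = EXAMPLE_DEF_RATED_MV;	/* property absent: keep default */

	return mv;
}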
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index ef9bc12b3be3..dcb6d8e94b11 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -125,8 +125,8 @@ static void drv2665_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
- error = regmap_update_bits(haptics->regmap,
- DRV2665_CTRL_2, DRV2665_STANDBY, 1);
+ error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -240,7 +240,7 @@ static int __maybe_unused drv2665_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
- DRV2665_STANDBY, 1);
+ DRV2665_STANDBY, DRV2665_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index d5ba7481328c..2849bb6906a8 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -256,7 +256,7 @@ static void drv2667_close(struct input_dev *input)
cancel_work_sync(&haptics->work);
error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (error)
dev_err(&haptics->client->dev,
"Failed to enter standby mode: %d\n", error);
@@ -415,7 +415,7 @@ static int __maybe_unused drv2667_suspend(struct device *dev)
if (haptics->input_dev->users) {
ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
- DRV2667_STANDBY, 1);
+ DRV2667_STANDBY, DRV2667_STANDBY);
if (ret) {
dev_err(dev, "Failed to set standby mode\n");
regulator_disable(haptics->regulator);
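The drv2665/drv2667 fixes pass the standby bit itself as the value argument: regmap_update_bits() computes (orig & ~mask) | (val & mask), so a literal 1 only sets the bit when the mask happens to be bit 0. A sketch with hypothetical register and bit names:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define EXAMPLE_CTRL_REG	0x02		/* hypothetical register */
#define EXAMPLE_STANDBY		BIT(6)		/* hypothetical standby bit */

static int example_enter_standby(struct regmap *map)
{
	/* Set bit 6: the value must contain the bit, not a bare 1. */
	return regmap_update_bits(map, EXAMPLE_CTRL_REG,
				  EXAMPLE_STANDBY, EXAMPLE_STANDBY);
}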
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index c14b82709b0f..908b51089dee 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
+#include <linux/gpio.h>
#include <linux/platform_device.h>
/*
@@ -92,7 +93,7 @@ soc_button_device_create(struct platform_device *pdev,
continue;
gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (gpio < 0)
+ if (!gpio_is_valid(gpio))
continue;
gpio_keys[n_buttons].type = info->event_type;
@@ -166,6 +167,11 @@ static int soc_button_probe(struct platform_device *pdev)
button_info = (struct soc_button_info *)id->driver_data;
+ if (gpiod_count(&pdev->dev, KBUILD_MODNAME) <= 0) {
+ dev_dbg(&pdev->dev, "no GPIO attached, ignoring...\n");
+ return -ENODEV;
+ }
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
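gpiod_count() returns the number of GPIO descriptors the firmware describes for the device (or a negative errno), so the probe can bail out before allocating anything when no GPIOs are attached. A sketch of that early check (helper name is hypothetical):

#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_check_gpios(struct platform_device *pdev)
{
	int count = gpiod_count(&pdev->dev, NULL);

	return count <= 0 ? -ENODEV : 0;	/* nothing to probe without GPIOs */
}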
diff --git a/drivers/input/misc/tps65218-pwrbutton.c b/drivers/input/misc/tps65218-pwrbutton.c
index 3273217ce80c..cc74a41bdb0d 100644
--- a/drivers/input/misc/tps65218-pwrbutton.c
+++ b/drivers/input/misc/tps65218-pwrbutton.c
@@ -150,12 +150,20 @@ static int tps6521x_pb_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id tps6521x_pwrbtn_id_table[] = {
+ { "tps65218-pwrbutton", },
+ { "tps65217-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_pwrbtn_id_table);
+
static struct platform_driver tps6521x_pb_driver = {
.probe = tps6521x_pb_probe,
.driver = {
.name = "tps6521x_pwrbutton",
.of_match_table = of_tps6521x_pb_match,
},
+ .id_table = tps6521x_pwrbtn_id_table,
};
module_platform_driver(tps6521x_pb_driver);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 6d7de9bfed9a..328edc8c8786 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1153,15 +1153,13 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
alps_process_touchpad_packet_v7(psmouse);
}
-static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
+static enum SS4_PACKET_ID alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
- unsigned char pkt_id = SS4_PACKET_ID_IDLE;
+ enum SS4_PACKET_ID pkt_id = SS4_PACKET_ID_IDLE;
switch (byte[3] & 0x30) {
case 0x00:
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
- byte[5] == 0x00) {
+ if (SS4_IS_IDLE_V2(byte)) {
pkt_id = SS4_PACKET_ID_IDLE;
} else {
pkt_id = SS4_PACKET_ID_ONE;
@@ -1188,7 +1186,7 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
unsigned char *p, struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char pkt_id;
+ enum SS4_PACKET_ID pkt_id;
unsigned int no_data_x, no_data_y;
pkt_id = alps_get_pkt_id_ss4_v2(p);
@@ -1267,18 +1265,12 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
break;
case SS4_PACKET_ID_STICK:
- if (!(priv->flags & ALPS_DUALPOINT)) {
- psmouse_warn(psmouse,
- "Rejected trackstick packet from non DualPoint device");
- } else {
- int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
- int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
- int pressure = (s8)(p[4] & 0x7f);
-
- input_report_rel(priv->dev2, REL_X, x);
- input_report_rel(priv->dev2, REL_Y, -y);
- input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
- }
+ /*
+ * x, y, and pressure are decoded in
+ * alps_process_packet_ss4_v2()
+ */
+ f->first_mp = 0;
+ f->is_mp = 0;
break;
case SS4_PACKET_ID_IDLE:
@@ -1346,6 +1338,27 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
priv->multi_packet = 0;
+ /* Report trackstick */
+ if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ return;
+ }
+
+ input_report_rel(dev2, REL_X, SS4_TS_X_V2(packet));
+ input_report_rel(dev2, REL_Y, SS4_TS_Y_V2(packet));
+ input_report_abs(dev2, ABS_PRESSURE, SS4_TS_Z_V2(packet));
+
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+
+ input_sync(dev2);
+ return;
+ }
+
+ /* Report touchpad */
alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
input_mt_report_finger_count(dev, f->fingers);
@@ -1356,13 +1369,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
-
- if (priv->flags & ALPS_DUALPOINT) {
- input_report_key(dev2, BTN_LEFT, f->ts_left);
- input_report_key(dev2, BTN_RIGHT, f->ts_right);
- input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
- input_sync(dev2);
- }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2d7ad3..cde6f4bd8ea2 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,7 +54,15 @@ enum SS4_PACKET_ID {
#define SS4_MASK_NORMAL_BUTTONS 0x07
-#define SS4_1F_X_V2(_b) ((_b[0] & 0x0007) | \
+#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
+ ((_b[1]) == 0x10) && \
+ ((_b[2]) == 0x00) && \
+ ((_b[3] & 0x88) == 0x08) && \
+ ((_b[4]) == 0x10) && \
+ ((_b[5]) == 0x00) \
+ )
+
+#define SS4_1F_X_V2(_b) (((_b[0]) & 0x0007) | \
((_b[1] << 3) & 0x0078) | \
((_b[1] << 2) & 0x0380) | \
((_b[2] << 5) & 0x1C00) \
@@ -101,6 +109,18 @@ enum SS4_PACKET_ID {
#define SS4_IS_MF_CONTINUE(_b) ((_b[2] & 0x10) == 0x10)
#define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
+#define SS4_TS_X_V2(_b) (s8)( \
+ ((_b[0] & 0x01) << 7) | \
+ (_b[1] & 0x7F) \
+ )
+
+#define SS4_TS_Y_V2(_b) (s8)( \
+ ((_b[3] & 0x01) << 7) | \
+ (_b[2] & 0x7F) \
+ )
+
+#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
+
#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
@@ -146,7 +166,7 @@ struct alps_protocol_info {
* (aka command mode response) identifies the firmware minor version. This
* can be used to distinguish different hardware models which are not
* uniquely identifiable through their E7 responses.
- * @protocol_info: information about protcol used by the device.
+ * @protocol_info: information about protocol used by the device.
*
* Many (but not all) ALPS touchpads can be identified by looking at the
* values returned in the "E7 report" and/or the "EC report." This table
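The new SS4_TS_*_V2() macros assemble a signed 8-bit trackstick coordinate from two packet bytes (sign bit from one byte, low 7 bits from another). A standalone sketch of the X decode, outside the driver:

#include <stdint.h>
#include <stdio.h>

static int8_t ss4_ts_x(const uint8_t *p)
{
	/* bit 0 of byte 0 is the sign bit, byte 1 holds the low 7 bits */
	return (int8_t)(((p[0] & 0x01) << 7) | (p[1] & 0x7f));
}

int main(void)
{
	uint8_t pkt[6] = { 0x01, 0x05, 0x00, 0x00, 0x00, 0x00 };

	printf("x=%d\n", ss4_ts_x(pkt));	/* prints x=-123 */
	return 0;
}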
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d15b33813021..fa598f7f4372 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1093,19 +1093,18 @@ static int elan_probe(struct i2c_client *client,
if (error)
return error;
+ dev_info(&client->dev,
+ "Elan Touchpad: Module ID: 0x%04x, Firmware: 0x%04x, Sample: 0x%04x, IAP: 0x%04x\n",
+ data->product_id,
+ data->fw_version,
+ data->sm_version,
+ data->iap_version);
+
dev_dbg(&client->dev,
- "Elan Touchpad Information:\n"
- " Module product ID: 0x%04x\n"
- " Firmware Version: 0x%04x\n"
- " Sample Version: 0x%04x\n"
- " IAP Version: 0x%04x\n"
+ "Elan Touchpad Extra Information:\n"
" Max ABS X,Y: %d,%d\n"
" Width X,Y: %d,%d\n"
" Resolution X,Y: %d,%d (dots/mm)\n",
- data->product_id,
- data->fw_version,
- data->sm_version,
- data->iap_version,
data->max_x, data->max_y,
data->width_x, data->width_y,
data->x_res, data->y_res);
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 4c8a55857e00..30cc627a4f45 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -27,6 +27,27 @@ config RMI4_SPI
If unsure, say N.
+config RMI4_SMB
+ tristate "RMI4 SMB Support"
+ depends on RMI4_CORE && I2C
+ help
+ Say Y here if you want to support RMI4 devices connected to an SMB
+ bus.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called rmi_smbus.
+
+config RMI4_F03
+ bool "RMI4 Function 03 (PS2 Guest)"
+ depends on RMI4_CORE && SERIO
+ help
+ Say Y here if you want to add support for RMI4 function 03.
+
+ Function 03 provides PS2 guest support for RMI4 devices. This
+ includes support for TrackPoints on TouchPads.
+
config RMI4_2D_SENSOR
bool
depends on RMI4_CORE
@@ -62,13 +83,34 @@ config RMI4_F30
Function 30 provides GPIO and LED support for RMI4 devices. This
includes support for buttons on TouchPads and ClickPads.
+config RMI4_F34
+ bool "RMI4 Function 34 (Device reflash)"
+ depends on RMI4_CORE
+ select FW_LOADER
+ help
+ Say Y here if you want to add support for RMI4 function 34.
+
+ Function 34 provides support for upgrading the firmware on the RMI4
+ device via the firmware loader interface. This is triggered using a
+ sysfs attribute.
+
config RMI4_F54
bool "RMI4 Function 54 (Analog diagnostics)"
depends on RMI4_CORE
depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
select VIDEOBUF2_VMALLOC
+ select RMI4_F55
help
Say Y here if you want to add support for RMI4 function 54
Function 54 provides access to various diagnostic features in certain
RMI4 touch sensors.
+
+config RMI4_F55
+ bool "RMI4 Function 55 (Sensor tuning)"
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 55
+
+ Function 55 provides access to the RMI4 touch sensor tuning
+ mechanism.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 0bafc8502c4b..9aaac3dd8613 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -4,11 +4,15 @@ rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
# Function drivers
+rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
+rmi_core-$(CONFIG_RMI4_F55) += rmi_f55.o
# Transports
obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
+obj-$(CONFIG_RMI4_SMB) += rmi_smbus.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index e97bd7fabccc..07007ff8e29f 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -177,10 +177,12 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
sensor->dmax = DMAX * res_x;
}
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
if (sensor->sensor_type == rmi_sensor_touchpad)
input_flags = INPUT_MT_POINTER;
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 77fcdfef003c..c871bef4dac0 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -67,6 +67,8 @@ struct rmi_2d_sensor {
u8 report_rel;
u8 x_mm;
u8 y_mm;
+ enum rmi_reg_state dribble;
+ enum rmi_reg_state palm_detect;
};
int rmi_2d_sensor_of_probe(struct device *dev,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index ef8c747c35e7..1c40d94ca506 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -230,6 +230,9 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
+ rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
+ fn->fd.function_number);
+
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
put_device(&fn->dev);
@@ -302,6 +305,9 @@ struct bus_type rmi_bus_type = {
static struct rmi_function_handler *fn_handlers[] = {
&rmi_f01_handler,
+#ifdef CONFIG_RMI4_F03
+ &rmi_f03_handler,
+#endif
#ifdef CONFIG_RMI4_F11
&rmi_f11_handler,
#endif
@@ -311,9 +317,15 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
+#ifdef CONFIG_RMI4_F34
+ &rmi_f34_handler,
+#endif
#ifdef CONFIG_RMI4_F54
&rmi_f54_handler,
#endif
+#ifdef CONFIG_RMI4_F55
+ &rmi_f55_handler,
+#endif
};
static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index 899579830536..b7625a9ac66a 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -105,6 +105,18 @@ rmi_get_platform_data(struct rmi_device *d)
bool rmi_is_physical_device(struct device *dev);
/**
+ * rmi_reset - reset a RMI4 device
+ * @d: Pointer to an RMI device
+ *
+ * Calls for a reset of each function implemented by a specific device.
+ * Returns 0 on success or a negative error code.
+ */
+static inline int rmi_reset(struct rmi_device *d)
+{
+ return d->driver->reset_handler(d);
+}
+
+/**
* rmi_read - read a single byte
* @d: Pointer to an RMI device
* @addr: The address to read from
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312fbd25..11447ab1055c 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,6 +17,7 @@
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -33,12 +34,22 @@
#define RMI_DEVICE_RESET_CMD 0x01
#define DEFAULT_RESET_DELAY_MS 100
-static void rmi_free_function_list(struct rmi_device *rmi_dev)
+void rmi_free_function_list(struct rmi_device *rmi_dev)
{
struct rmi_function *fn, *tmp;
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
+
+ devm_kfree(&rmi_dev->dev, data->irq_memory);
+ data->irq_memory = NULL;
+ data->irq_status = NULL;
+ data->fn_irq_bits = NULL;
+ data->current_irq_mask = NULL;
+ data->new_irq_mask = NULL;
+
data->f01_container = NULL;
+ data->f34_container = NULL;
/* Doing it in the reverse order so F01 will be removed last */
list_for_each_entry_safe_reverse(fn, tmp,
@@ -133,7 +144,7 @@ static void process_one_interrupt(struct rmi_driver_data *data,
}
}
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
struct device *dev = &rmi_dev->dev;
@@ -143,7 +154,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
if (!data)
return 0;
- if (!rmi_dev->xport->attn_data) {
+ if (!data->attn_data.data) {
error = rmi_read_block(rmi_dev,
data->f01_container->fd.data_base_addr + 1,
data->irq_status, data->num_of_irq_regs);
@@ -178,7 +189,81 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+ void *data, size_t size)
+{
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data;
+ void *fifo_data;
+
+ if (!drvdata->enabled)
+ return;
+
+ fifo_data = kmemdup(data, size, GFP_ATOMIC);
+ if (!fifo_data)
+ return;
+
+ attn_data.irq_status = irq_status;
+ attn_data.size = size;
+ attn_data.data = fifo_data;
+
+ kfifo_put(&drvdata->attn_fifo, attn_data);
+}
+EXPORT_SYMBOL_GPL(rmi_set_attn_data);
+
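rmi_set_attn_data() is exported so a transport that receives attention data out of band (a HID or SMBus transport, for instance) can queue it for the threaded handler below instead of forcing an extra register read. A hedged sketch; the helper name and report layout are assumptions, not part of the patch:

#include <linux/rmi.h>

static void example_xport_handle_report(struct rmi_device *rmi_dev,
					u8 *report, size_t len)
{
	/* Assume byte 0 carries the interrupt status bits and the rest is
	 * per-function data; hand it to the core's attention FIFO. */
	rmi_set_attn_data(rmi_dev, report[0], &report[1], len - 1);
}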
+static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+{
+ struct rmi_device *rmi_dev = dev_id;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int ret, count;
+
+ count = kfifo_get(&drvdata->attn_fifo, &attn_data);
+ if (count) {
+ *(drvdata->irq_status) = attn_data.irq_status;
+ drvdata->attn_data = attn_data;
+ }
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ if (count)
+ kfree(attn_data.data);
+
+ if (!kfifo_is_empty(&drvdata->attn_fifo))
+ return rmi_irq_fn(irq, dev_id);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_irq_init(struct rmi_device *rmi_dev)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq_flags = irq_get_trigger_type(pdata->irq);
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
+ rmi_irq_fn, irq_flags | IRQF_ONESHOT,
+ dev_name(rmi_dev->xport->dev),
+ rmi_dev);
+ if (ret < 0) {
+ dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
+ pdata->irq);
+
+ return ret;
+ }
+
+ data->enabled = true;
+
+ return 0;
+}
static int suspend_one_function(struct rmi_function *fn)
{
@@ -248,7 +333,7 @@ static int rmi_resume_functions(struct rmi_device *rmi_dev)
return 0;
}
-static int enable_sensor(struct rmi_device *rmi_dev)
+int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
int retval = 0;
@@ -379,8 +464,8 @@ static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
return 0;
}
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address)
+static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
+ struct pdt_entry *entry, u16 pdt_address)
{
u8 buf[RMI_PDT_ENTRY_SIZE];
int error;
@@ -403,7 +488,6 @@ int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
return 0;
}
-EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
struct rmi_function_descriptor *fd)
@@ -422,6 +506,7 @@ static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
int page,
+ int *empty_pages,
void *ctx,
int (*callback)(struct rmi_device *rmi_dev,
void *ctx,
@@ -449,20 +534,30 @@ static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
return retval;
}
- return (data->f01_bootloader_mode || addr == pdt_start) ?
+ /*
+ * Count number of empty PDT pages. If a gap of two pages
+ * or more is found, stop scanning.
+ */
+ if (addr == pdt_start)
+ ++*empty_pages;
+ else
+ *empty_pages = 0;
+
+ return (data->bootloader_mode || *empty_pages >= 2) ?
RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
-static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
- int (*callback)(struct rmi_device *rmi_dev,
- void *ctx,
- const struct pdt_entry *entry))
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *entry))
{
int page;
+ int empty_pages = 0;
int retval = RMI_SCAN_DONE;
for (page = 0; page <= RMI4_MAX_PAGE; page++) {
- retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+ retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
+ ctx, callback);
if (retval != RMI_SCAN_CONTINUE)
break;
}
@@ -600,7 +695,6 @@ free_struct_buff:
kfree(struct_buf);
return ret;
}
-EXPORT_SYMBOL_GPL(rmi_read_register_desc);
const struct rmi_register_desc_item *rmi_get_register_desc_item(
struct rmi_register_descriptor *rdesc, u16 reg)
@@ -616,7 +710,6 @@ const struct rmi_register_desc_item *rmi_get_register_desc_item(
return NULL;
}
-EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
@@ -630,7 +723,6 @@ size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
}
return size;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
@@ -648,7 +740,6 @@ int rmi_register_desc_calc_reg_offset(
}
return -1;
}
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
u8 subpacket)
@@ -657,51 +748,55 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
subpacket) == subpacket;
}
-/* Indicates that flash programming is enabled (bootloader mode). */
-#define RMI_F01_STATUS_BOOTLOADER(status) (!!((status) & 0x40))
-
-/*
- * Given the PDT entry for F01, read the device status register to determine
- * if we're stuck in bootloader mode or not.
- *
- */
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
const struct pdt_entry *pdt)
{
- int error;
- u8 device_status;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int ret;
+ u8 status;
- error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
- &device_status);
- if (error) {
- dev_err(&rmi_dev->dev,
- "Failed to read device status: %d.\n", error);
- return error;
+ if (pdt->function_number == 0x34 && pdt->function_version > 1) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F34 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(7))
+ data->bootloader_mode = true;
+ } else if (pdt->function_number == 0x01) {
+ ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read F01 status: %d.\n", ret);
+ return ret;
+ }
+
+ if (status & BIT(6))
+ data->bootloader_mode = true;
}
- return RMI_F01_STATUS_BOOTLOADER(device_status);
+ return 0;
}
static int rmi_count_irqs(struct rmi_device *rmi_dev,
void *ctx, const struct pdt_entry *pdt)
{
- struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
int *irq_count = ctx;
+ int ret;
*irq_count += pdt->interrupt_source_count;
- if (pdt->function_number == 0x01) {
- data->f01_bootloader_mode =
- rmi_check_bootloader_mode(rmi_dev, pdt);
- if (data->f01_bootloader_mode)
- dev_warn(&rmi_dev->dev,
- "WARNING: RMI4 device is in bootloader mode!\n");
- }
+
+ ret = rmi_check_bootloader_mode(rmi_dev, pdt);
+ if (ret < 0)
+ return ret;
return RMI_SCAN_CONTINUE;
}
-static int rmi_initial_reset(struct rmi_device *rmi_dev,
- void *ctx, const struct pdt_entry *pdt)
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt)
{
int error;
@@ -720,6 +815,7 @@ static int rmi_initial_reset(struct rmi_device *rmi_dev,
return RMI_SCAN_DONE;
}
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
if (error) {
dev_err(&rmi_dev->dev,
@@ -776,6 +872,8 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
if (pdt->function_number == 0x01)
data->f01_container = fn;
+ else if (pdt->function_number == 0x34)
+ data->f34_container = fn;
list_add_tail(&fn->node, &data->function_list);
@@ -786,23 +884,95 @@ err_put_fn:
return error;
}
-int rmi_driver_suspend(struct rmi_device *rmi_dev)
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
- int retval = 0;
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int irq = pdata->irq;
+ int irq_flags;
+ int retval;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (data->enabled)
+ goto out;
+
+ enable_irq(irq);
+ data->enabled = true;
+ if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = disable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to disable irq for wake: %d\n",
+ retval);
+ }
+
+ /*
+ * Call rmi_process_interrupt_requests() after enabling irq,
+ * otherwise we may lose interrupt on edge-triggered systems.
+ */
+ irq_flags = irq_get_trigger_type(pdata->irq);
+ if (irq_flags & IRQ_TYPE_EDGE_BOTH)
+ rmi_process_interrupt_requests(rmi_dev);
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi4_attn_data attn_data = {0};
+ int irq = pdata->irq;
+ int retval, count;
+
+ mutex_lock(&data->enabled_mutex);
+
+ if (!data->enabled)
+ goto out;
+
+ data->enabled = false;
+ disable_irq(irq);
+ if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+ retval = enable_irq_wake(irq);
+ if (retval)
+ dev_warn(&rmi_dev->dev,
+ "Failed to enable irq for wake: %d\n",
+ retval);
+ }
+
+ /* make sure the fifo is clean */
+ while (!kfifo_is_empty(&data->attn_fifo)) {
+ count = kfifo_get(&data->attn_fifo, &attn_data);
+ if (count)
+ kfree(attn_data.data);
+ }
+
+out:
+ mutex_unlock(&data->enabled_mutex);
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
+{
+ int retval;
retval = rmi_suspend_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
retval);
+ rmi_disable_irq(rmi_dev, enable_wake);
return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);
-int rmi_driver_resume(struct rmi_device *rmi_dev)
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
int retval;
+ rmi_enable_irq(rmi_dev, clear_wake);
+
retval = rmi_resume_functions(rmi_dev);
if (retval)
dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
@@ -816,6 +986,9 @@ static int rmi_driver_remove(struct device *dev)
{
struct rmi_device *rmi_dev = to_rmi_device(dev);
+ rmi_disable_irq(rmi_dev, false);
+
+ rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
return 0;
@@ -842,15 +1015,95 @@ static inline int rmi_driver_of_probe(struct device *dev,
}
#endif
+int rmi_probe_interrupts(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ size_t size;
+ int retval;
+
+ /*
+ * We need to count the IRQs and allocate their storage before scanning
+ * the PDT and creating the function entries, because adding a new
+ * function can trigger events that result in the IRQ related storage
+ * being accessed.
+ */
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
+ irq_count = 0;
+ data->bootloader_mode = false;
+
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+ if (retval < 0) {
+ dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+ return retval;
+ }
+
+ if (data->bootloader_mode)
+ dev_warn(&rmi_dev->dev, "Device in bootloader mode.\n");
+
+ data->irq_count = irq_count;
+ data->num_of_irq_regs = (data->irq_count + 7) / 8;
+
+ size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
+ data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+ if (!data->irq_memory) {
+ dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ return -ENOMEM;
+ }
+
+ data->irq_status = data->irq_memory + size * 0;
+ data->fn_irq_bits = data->irq_memory + size * 1;
+ data->current_irq_mask = data->irq_memory + size * 2;
+ data->new_irq_mask = data->irq_memory + size * 3;
+
+ return retval;
+}
+
+int rmi_init_functions(struct rmi_driver_data *data)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ int irq_count;
+ int retval;
+
+ irq_count = 0;
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+ if (retval < 0) {
+ dev_err(dev, "Function creation failed with code %d.\n",
+ retval);
+ goto err_destroy_functions;
+ }
+
+ if (!data->f01_container) {
+ dev_err(dev, "Missing F01 container!\n");
+ retval = -EINVAL;
+ goto err_destroy_functions;
+ }
+
+ retval = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (retval < 0) {
+ dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+
+ return 0;
+
+err_destroy_functions:
+ rmi_free_function_list(rmi_dev);
+ return retval;
+}
+
static int rmi_driver_probe(struct device *dev)
{
struct rmi_driver *rmi_driver;
struct rmi_driver_data *data;
struct rmi_device_platform_data *pdata;
struct rmi_device *rmi_dev;
- size_t size;
- void *irq_memory;
- int irq_count;
int retval;
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
@@ -916,35 +1169,12 @@ static int rmi_driver_probe(struct device *dev)
PDT_PROPERTIES_LOCATION, retval);
}
- /*
- * We need to count the IRQs and allocate their storage before scanning
- * the PDT and creating the function entries, because adding a new
- * function can trigger events that result in the IRQ related storage
- * being accessed.
- */
- rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
- irq_count = 0;
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
- if (retval < 0) {
- dev_err(dev, "IRQ counting failed with code %d.\n", retval);
- goto err;
- }
- data->irq_count = irq_count;
- data->num_of_irq_regs = (data->irq_count + 7) / 8;
-
mutex_init(&data->irq_mutex);
+ mutex_init(&data->enabled_mutex);
- size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
- irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
- if (!irq_memory) {
- dev_err(dev, "Failed to allocate memory for irq masks.\n");
+ retval = rmi_probe_interrupts(data);
+ if (retval)
goto err;
- }
-
- data->irq_status = irq_memory + size * 0;
- data->fn_irq_bits = irq_memory + size * 1;
- data->current_irq_mask = irq_memory + size * 2;
- data->new_irq_mask = irq_memory + size * 3;
if (rmi_dev->xport->input) {
/*
@@ -961,36 +1191,20 @@ static int rmi_driver_probe(struct device *dev)
dev_err(dev, "%s: Failed to allocate input device.\n",
__func__);
retval = -ENOMEM;
- goto err_destroy_functions;
+ goto err;
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
"%s/input0", dev_name(dev));
}
- irq_count = 0;
- rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
- retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
- if (retval < 0) {
- dev_err(dev, "Function creation failed with code %d.\n",
- retval);
- goto err_destroy_functions;
- }
-
- if (!data->f01_container) {
- dev_err(dev, "Missing F01 container!\n");
- retval = -EINVAL;
- goto err_destroy_functions;
- }
+ retval = rmi_init_functions(data);
+ if (retval)
+ goto err;
- retval = rmi_read_block(rmi_dev,
- data->f01_container->fd.control_base_addr + 1,
- data->current_irq_mask, data->num_of_irq_regs);
- if (retval < 0) {
- dev_err(dev, "%s: Failed to read current IRQ mask.\n",
- __func__);
- goto err_destroy_functions;
- }
+ retval = rmi_f34_create_sysfs(rmi_dev);
+ if (retval)
+ goto err;
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
@@ -1003,9 +1217,13 @@ static int rmi_driver_probe(struct device *dev)
}
}
+ retval = rmi_irq_init(rmi_dev);
+ if (retval < 0)
+ goto err_destroy_functions;
+
if (data->f01_container->dev.driver)
/* Driver already bound, so enable ATTN now. */
- return enable_sensor(rmi_dev);
+ return rmi_enable_sensor(rmi_dev);
return 0;
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 8dfbebe9bf86..24f8f764d171 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -51,9 +51,6 @@ struct pdt_entry {
u8 function_number;
};
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
- u16 pdt_address);
-
#define RMI_REG_DESC_PRESENSE_BITS (32 * BITS_PER_BYTE)
#define RMI_REG_DESC_SUBPACKET_BITS (37 * BITS_PER_BYTE)
@@ -95,12 +92,40 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
bool rmi_is_physical_driver(struct device_driver *);
int rmi_register_physical_driver(void);
void rmi_unregister_physical_driver(void);
+void rmi_free_function_list(struct rmi_device *rmi_dev);
+int rmi_enable_sensor(struct rmi_device *rmi_dev);
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *entry));
+int rmi_probe_interrupts(struct rmi_driver_data *data);
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake);
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_init_functions(struct rmi_driver_data *data);
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+ const struct pdt_entry *pdt);
char *rmi_f01_get_product_ID(struct rmi_function *fn);
+#ifdef CONFIG_RMI4_F34
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev);
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev);
+#else
+static inline int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return 0;
+}
+
+static inline void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+}
+#endif /* CONFIG_RMI4_F34 */
+
extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f03_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f34_handler;
extern struct rmi_function_handler rmi_f54_handler;
+extern struct rmi_function_handler rmi_f55_handler;
#endif
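
For context, the reworked rmi_driver_suspend()/rmi_driver_resume() now take a wake flag so a transport driver can arm or disarm the ATTN IRQ as a wakeup source via rmi_disable_irq()/rmi_enable_irq(). A minimal sketch of how a transport PM hook might call them; struct example_xport and its fields are illustrative only, not part of this patch:

/* Sketch only: hypothetical transport PM hooks wired to the new helpers. */
static int __maybe_unused example_xport_suspend(struct device *dev)
{
	struct example_xport *xp = dev_get_drvdata(dev);

	/* enable_wake = true keeps the ATTN IRQ armed as a wake source */
	return rmi_driver_suspend(xp->rmi_dev, device_may_wakeup(dev));
}

static int __maybe_unused example_xport_resume(struct device *dev)
{
	struct example_xport *xp = dev_get_drvdata(dev);

	/* clear_wake = true disarms the wake source set during suspend */
	return rmi_driver_resume(xp->rmi_dev, device_may_wakeup(dev));
}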
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index b5d2dfc23bad..18baf4ceb940 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -62,6 +62,8 @@ struct f01_basic_properties {
#define RMI_F01_STATUS_CODE(status) ((status) & 0x0f)
/* The device has lost its configuration for some reason. */
#define RMI_F01_STATUS_UNCONFIGURED(status) (!!((status) & 0x80))
+/* The device is in bootloader mode */
+#define RMI_F01_STATUS_BOOTLOADER(status) ((status) & 0x40)
/* Control register bits */
@@ -326,12 +328,12 @@ static int rmi_f01_probe(struct rmi_function *fn)
}
switch (pdata->power_management.nosleep) {
- case RMI_F01_NOSLEEP_DEFAULT:
+ case RMI_REG_STATE_DEFAULT:
break;
- case RMI_F01_NOSLEEP_OFF:
+ case RMI_REG_STATE_OFF:
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
break;
- case RMI_F01_NOSLEEP_ON:
+ case RMI_REG_STATE_ON:
f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
break;
}
@@ -593,6 +595,10 @@ static int rmi_f01_attention(struct rmi_function *fn,
return error;
}
+ if (RMI_F01_STATUS_BOOTLOADER(device_status))
+ dev_warn(&fn->dev,
+ "Device in bootloader mode, please update firmware\n");
+
if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
dev_warn(&fn->dev, "Device reset detected.\n");
error = rmi_dev->driver->reset_handler(rmi_dev);
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
new file mode 100644
index 000000000000..8a7ca3e2f95e
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2015-2016 Red Hat
+ * Copyright (C) 2015 Lyude Paul <thatslyude@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/notifier.h>
+#include "rmi_driver.h"
+
+#define RMI_F03_RX_DATA_OFB 0x01
+#define RMI_F03_OB_SIZE 2
+
+#define RMI_F03_OB_OFFSET 2
+#define RMI_F03_OB_DATA_OFFSET 1
+#define RMI_F03_OB_FLAG_TIMEOUT BIT(6)
+#define RMI_F03_OB_FLAG_PARITY BIT(7)
+
+#define RMI_F03_DEVICE_COUNT 0x07
+#define RMI_F03_BYTES_PER_DEVICE 0x07
+#define RMI_F03_BYTES_PER_DEVICE_SHIFT 4
+#define RMI_F03_QUEUE_LENGTH 0x0F
+
+struct f03_data {
+ struct rmi_function *fn;
+
+ struct serio *serio;
+
+ u8 device_count;
+ u8 rx_queue_length;
+};
+
+static int rmi_f03_pt_write(struct serio *id, unsigned char val)
+{
+ struct f03_data *f03 = id->port_data;
+ int error;
+
+ rmi_dbg(RMI_DEBUG_FN, &f03->fn->dev,
+ "%s: Wrote %.2hhx to PS/2 passthrough address",
+ __func__, val);
+
+ error = rmi_write(f03->fn->rmi_dev, f03->fn->fd.data_base_addr, val);
+ if (error) {
+ dev_err(&f03->fn->dev,
+ "%s: Failed to write to F03 TX register (%d).\n",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_initialize(struct f03_data *f03)
+{
+ struct rmi_function *fn = f03->fn;
+ struct device *dev = &fn->dev;
+ int error;
+ u8 bytes_per_device;
+ u8 query1;
+ u8 query2[RMI_F03_DEVICE_COUNT * RMI_F03_BYTES_PER_DEVICE];
+ size_t query2_len;
+
+ error = rmi_read(fn->rmi_dev, fn->fd.query_base_addr, &query1);
+ if (error) {
+ dev_err(dev, "Failed to read query register (%d).\n", error);
+ return error;
+ }
+
+ f03->device_count = query1 & RMI_F03_DEVICE_COUNT;
+ bytes_per_device = (query1 >> RMI_F03_BYTES_PER_DEVICE_SHIFT) &
+ RMI_F03_BYTES_PER_DEVICE;
+
+ query2_len = f03->device_count * bytes_per_device;
+
+ /*
+ * The first generation of image sensors don't have a second part to
+ * their F03 query, so we have to set some of these values manually.
+ */
+ if (query2_len < 1) {
+ f03->device_count = 1;
+ f03->rx_queue_length = 7;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr + 1,
+ query2, query2_len);
+ if (error) {
+ dev_err(dev,
+ "Failed to read second set of query registers (%d).\n",
+ error);
+ return error;
+ }
+
+ f03->rx_queue_length = query2[0] & RMI_F03_QUEUE_LENGTH;
+ }
+
+ return 0;
+}
+
+static int rmi_f03_register_pt(struct f03_data *f03)
+{
+ struct serio *serio;
+
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!serio)
+ return -ENOMEM;
+
+ serio->id.type = SERIO_8042;
+ serio->write = rmi_f03_pt_write;
+ serio->port_data = f03;
+
+ strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
+ sizeof(serio->name));
+ strlcpy(serio->phys, "synaptics-rmi4-pt/serio1",
+ sizeof(serio->phys));
+ serio->dev.parent = &f03->fn->dev;
+
+ f03->serio = serio;
+
+ serio_register_port(serio);
+
+ return 0;
+}
+
+static int rmi_f03_probe(struct rmi_function *fn)
+{
+ struct device *dev = &fn->dev;
+ struct f03_data *f03;
+ int error;
+
+ f03 = devm_kzalloc(dev, sizeof(struct f03_data), GFP_KERNEL);
+ if (!f03)
+ return -ENOMEM;
+
+ f03->fn = fn;
+
+ error = rmi_f03_initialize(f03);
+ if (error < 0)
+ return error;
+
+ if (f03->device_count != 1)
+ dev_warn(dev, "found %d devices on PS/2 passthrough",
+ f03->device_count);
+
+ dev_set_drvdata(dev, f03);
+
+ error = rmi_f03_register_pt(f03);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int rmi_f03_config(struct rmi_function *fn)
+{
+ fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+ u16 data_addr = fn->fd.data_base_addr;
+ const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
+ u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
+ u8 ob_status;
+ u8 ob_data;
+ unsigned int serio_flags;
+ int i;
+ int error;
+
+ if (!rmi_dev)
+ return -ENODEV;
+
+ if (drvdata->attn_data.data) {
+ /* First grab the data passed by the transport device */
+ if (drvdata->attn_data.size < ob_len) {
+ dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
+ return 0;
+ }
+
+ memcpy(obs, drvdata->attn_data.data, ob_len);
+
+ drvdata->attn_data.data += ob_len;
+ drvdata->attn_data.size -= ob_len;
+ } else {
+ /* Grab all of the data registers, and check them for data */
+ error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
+ &obs, ob_len);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: Failed to read F03 output buffers: %d\n",
+ __func__, error);
+ serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
+ return error;
+ }
+ }
+
+ for (i = 0; i < ob_len; i += RMI_F03_OB_SIZE) {
+ ob_status = obs[i];
+ ob_data = obs[i + RMI_F03_OB_DATA_OFFSET];
+ serio_flags = 0;
+
+ if (!(ob_status & RMI_F03_RX_DATA_OFB))
+ continue;
+
+ if (ob_status & RMI_F03_OB_FLAG_TIMEOUT)
+ serio_flags |= SERIO_TIMEOUT;
+ if (ob_status & RMI_F03_OB_FLAG_PARITY)
+ serio_flags |= SERIO_PARITY;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: Received %.2hhx from PS2 guest T: %c P: %c\n",
+ __func__, ob_data,
+ serio_flags & SERIO_TIMEOUT ? 'Y' : 'N',
+ serio_flags & SERIO_PARITY ? 'Y' : 'N');
+
+ serio_interrupt(f03->serio, ob_data, serio_flags);
+ }
+
+ return 0;
+}
+
+static void rmi_f03_remove(struct rmi_function *fn)
+{
+ struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+
+ serio_unregister_port(f03->serio);
+}
+
+struct rmi_function_handler rmi_f03_handler = {
+ .driver = {
+ .name = "rmi4_f03",
+ },
+ .func = 0x03,
+ .probe = rmi_f03_probe,
+ .config = rmi_f03_config,
+ .attention = rmi_f03_attention,
+ .remove = rmi_f03_remove,
+};
+
+MODULE_AUTHOR("Lyude Paul <thatslyude@gmail.com>");
+MODULE_DESCRIPTION("RMI F03 module");
+MODULE_LICENSE("GPL");
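
To make the output-buffer handling in rmi_f03_attention() easier to follow, here is a minimal sketch of how a single status/data byte pair from the F03 queue maps onto a serio event; decode_ob() is an illustrative helper, not part of the driver:

/* Sketch: one F03 output-buffer entry -> one serio interrupt. */
static void decode_ob(struct serio *serio, const u8 ob[RMI_F03_OB_SIZE])
{
	unsigned int flags = 0;

	if (!(ob[0] & RMI_F03_RX_DATA_OFB))	/* nothing queued in this slot */
		return;

	if (ob[0] & RMI_F03_OB_FLAG_TIMEOUT)
		flags |= SERIO_TIMEOUT;
	if (ob[0] & RMI_F03_OB_FLAG_PARITY)
		flags |= SERIO_PARITY;

	serio_interrupt(serio, ob[RMI_F03_OB_DATA_OFFSET], flags);
}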
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f427a46f..bc5e37f30ac1 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -571,31 +571,48 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
static void rmi_f11_finger_handler(struct f11_data *f11,
struct rmi_2d_sensor *sensor,
- unsigned long *irq_bits, int num_irq_regs)
+ unsigned long *irq_bits, int num_irq_regs,
+ int size)
{
const u8 *f_state = f11->data.f_state;
u8 finger_state;
u8 i;
+ int abs_fingers;
+ int rel_fingers;
+ int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
num_irq_regs * 8);
int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
num_irq_regs * 8);
- for (i = 0; i < sensor->nbr_fingers; i++) {
- /* Possible of having 4 fingers per f_statet register */
- finger_state = rmi_f11_parse_finger_state(f_state, i);
- if (finger_state == F11_RESERVED) {
- pr_err("Invalid finger state[%d]: 0x%02x", i,
- finger_state);
- continue;
- }
+ if (abs_bits) {
+ if (abs_size > size)
+ abs_fingers = size / RMI_F11_ABS_BYTES;
+ else
+ abs_fingers = sensor->nbr_fingers;
+
+ for (i = 0; i < abs_fingers; i++) {
+ /* Each f_state register can hold up to 4 finger states */
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED) {
+ pr_err("Invalid finger state[%d]: 0x%02x", i,
+ finger_state);
+ continue;
+ }
- if (abs_bits)
rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
finger_state, i);
+ }
+ }
+
+ if (rel_bits) {
+ if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+ rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+ else
+ rel_fingers = sensor->nbr_fingers;
- if (rel_bits)
+ for (i = 0; i < rel_fingers; i++)
rmi_f11_rel_pos_report(f11, i);
}
@@ -611,7 +628,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++) {
+ for (i = 0; i < abs_fingers; i++) {
finger_state = rmi_f11_parse_finger_state(f_state, i);
if (finger_state == F11_RESERVED)
/* no need to send twice the error */
@@ -1062,8 +1079,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
if (rc)
return rc;
- } else if (pdata->sensor_pdata) {
- f11->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f11->sensor_pdata = pdata->sensor_pdata;
}
f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
@@ -1124,6 +1141,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
sensor->dmax = f11->sensor_pdata.dmax;
+ sensor->dribble = f11->sensor_pdata.dribble;
+ sensor->palm_detect = f11->sensor_pdata.palm_detect;
if (f11->sens_query.has_physical_props) {
sensor->x_mm = f11->sens_query.x_sensor_size_mm;
@@ -1191,11 +1210,33 @@ static int rmi_f11_initialize(struct rmi_function *fn)
ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
sensor->axis_align.delta_y_threshold;
- if (f11->sens_query.has_dribble)
- ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+ if (f11->sens_query.has_dribble) {
+ switch (sensor->dribble) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[0] &= ~BIT(6);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[0] |= BIT(6);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
- if (f11->sens_query.has_palm_det)
- ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+ if (f11->sens_query.has_palm_det) {
+ switch (sensor->palm_detect) {
+ case RMI_REG_STATE_OFF:
+ ctrl->ctrl0_11[11] &= ~BIT(0);
+ break;
+ case RMI_REG_STATE_ON:
+ ctrl->ctrl0_11[11] |= BIT(0);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+ }
rc = f11_write_control_regs(fn, &f11->sens_query,
&f11->dev_controls, fn->fd.query_base_addr);
@@ -1241,12 +1282,21 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
u16 data_base_addr = fn->fd.data_base_addr;
int error;
+ int valid_bytes = f11->sensor.pkt_size;
- if (rmi_dev->xport->attn_data) {
- memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
- f11->sensor.attn_size);
- rmi_dev->xport->attn_data += f11->sensor.attn_size;
- rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+ if (drvdata->attn_data.data) {
+ /*
+ * The valid data in the attention report may be less than
+ * expected. Only process the complete fingers.
+ */
+ if (f11->sensor.attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = f11->sensor.attn_size;
+ memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += f11->sensor.attn_size;
+ drvdata->attn_data.size -= f11->sensor.attn_size;
} else {
error = rmi_read_block(rmi_dev,
data_base_addr, f11->sensor.data_pkt,
@@ -1256,7 +1306,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
}
rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
- drvdata->num_of_irq_regs);
+ drvdata->num_of_irq_regs, valid_bytes);
return 0;
}
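
A short worked example of the clamping introduced above, mirroring the variables in rmi_f11_finger_handler() and assuming RMI_F11_ABS_BYTES is 5 bytes per finger (the driver's definition elsewhere):

/*
 * Worked example (sketch): a 10-finger sensor expects 50 bytes of absolute
 * data.  If the transport only delivered 23 valid bytes, 23 / 5 = 4 complete
 * fingers are processed and the truncated remainder is ignored.
 */
int abs_size    = sensor->nbr_fingers * RMI_F11_ABS_BYTES;	/* 50 */
int abs_fingers = (abs_size > size) ? size / RMI_F11_ABS_BYTES	/*  4 */
				    : sensor->nbr_fingers;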
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 332c02f0b107..07aff4356fe0 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -26,9 +26,12 @@ enum rmi_f12_object_type {
RMI_F12_OBJECT_SMALL_OBJECT = 0x0D,
};
+#define F12_DATA1_BYTES_PER_OBJ 8
+
struct f12_data {
struct rmi_2d_sensor sensor;
struct rmi_2d_sensor_platform_data sensor_pdata;
+ bool has_dribble;
u16 data_addr;
@@ -68,10 +71,6 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
- int clip_x_low = 0;
- int clip_x_high = 0;
- int clip_y_low = 0;
- int clip_y_high = 0;
int rx_receivers = 0;
int tx_receivers = 0;
int sensor_flags = 0;
@@ -124,7 +123,9 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
}
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
- __func__, clip_x_low, clip_x_high, clip_y_low, clip_y_high);
+ __func__,
+ sensor->axis_align.clip_x_low, sensor->axis_align.clip_x_high,
+ sensor->axis_align.clip_y_low, sensor->axis_align.clip_y_high);
if (rmi_register_desc_has_subpacket(item, 3)) {
rx_receivers = buf[offset];
@@ -146,12 +147,16 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
return 0;
}
-static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
{
int i;
struct rmi_2d_sensor *sensor = &f12->sensor;
+ int objects = f12->data1->num_subpackets;
+
+ if ((f12->data1->num_subpackets * F12_DATA1_BYTES_PER_OBJ) > size)
+ objects = size / F12_DATA1_BYTES_PER_OBJ;
- for (i = 0; i < f12->data1->num_subpackets; i++) {
+ for (i = 0; i < objects; i++) {
struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
obj->type = RMI_2D_OBJECT_NONE;
@@ -182,7 +187,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
rmi_2d_sensor_abs_process(sensor, obj, i);
- data1 += 8;
+ data1 += F12_DATA1_BYTES_PER_OBJ;
}
if (sensor->kernel_tracking)
@@ -192,7 +197,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
sensor->nbr_fingers,
sensor->dmax);
- for (i = 0; i < sensor->nbr_fingers; i++)
+ for (i = 0; i < objects; i++)
rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
}
@@ -201,14 +206,20 @@ static int rmi_f12_attention(struct rmi_function *fn,
{
int retval;
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f12_data *f12 = dev_get_drvdata(&fn->dev);
struct rmi_2d_sensor *sensor = &f12->sensor;
-
- if (rmi_dev->xport->attn_data) {
- memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
- sensor->attn_size);
- rmi_dev->xport->attn_data += sensor->attn_size;
- rmi_dev->xport->attn_size -= sensor->attn_size;
+ int valid_bytes = sensor->pkt_size;
+
+ if (drvdata->attn_data.data) {
+ if (sensor->attn_size > drvdata->attn_data.size)
+ valid_bytes = drvdata->attn_data.size;
+ else
+ valid_bytes = sensor->attn_size;
+ memcpy(sensor->data_pkt, drvdata->attn_data.data,
+ valid_bytes);
+ drvdata->attn_data.data += sensor->attn_size;
+ drvdata->attn_data.size -= sensor->attn_size;
} else {
retval = rmi_read_block(rmi_dev, f12->data_addr,
sensor->data_pkt, sensor->pkt_size);
@@ -221,19 +232,83 @@ static int rmi_f12_attention(struct rmi_function *fn,
if (f12->data1)
rmi_f12_process_objects(f12,
- &sensor->data_pkt[f12->data1_offset]);
+ &sensor->data_pkt[f12->data1_offset], valid_bytes);
input_mt_sync_frame(sensor->input);
return 0;
}
+static int rmi_f12_write_control_regs(struct rmi_function *fn)
+{
+ int ret;
+ const struct rmi_register_desc_item *item;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ int control_size;
+ char buf[3];
+ u16 control_offset = 0;
+ u8 subpacket_offset = 0;
+
+ if (f12->has_dribble
+ && (f12->sensor.dribble != RMI_REG_STATE_DEFAULT)) {
+ item = rmi_get_register_desc_item(&f12->control_reg_desc, 20);
+ if (item) {
+ control_offset = rmi_register_desc_calc_reg_offset(
+ &f12->control_reg_desc, 20);
+
+ /*
+ * The byte containing the EnableDribble bit will be in either
+ * byte 0 or byte 2 of control 20, depending on the existence of
+ * subpacket 0. If control 20 is larger than 3 bytes, just read
+ * the first 3.
+ */
+ control_size = min(item->reg_size, 3UL);
+
+ ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr
+ + control_offset, buf, control_size);
+ if (ret)
+ return ret;
+
+ if (rmi_register_desc_has_subpacket(item, 0))
+ subpacket_offset += 1;
+
+ switch (f12->sensor.dribble) {
+ case RMI_REG_STATE_OFF:
+ buf[subpacket_offset] &= ~BIT(2);
+ break;
+ case RMI_REG_STATE_ON:
+ buf[subpacket_offset] |= BIT(2);
+ break;
+ case RMI_REG_STATE_DEFAULT:
+ default:
+ break;
+ }
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.control_base_addr + control_offset,
+ buf, control_size);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+
+}
+
static int rmi_f12_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
+ int ret;
drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ ret = rmi_f12_write_control_regs(fn);
+ if (ret)
+ dev_warn(&fn->dev,
+ "Failed to write F12 control registers: %d\n", ret);
+
return 0;
}
@@ -247,7 +322,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
const struct rmi_register_desc_item *item;
struct rmi_2d_sensor *sensor;
struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
- struct rmi_transport_dev *xport = rmi_dev->xport;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
u16 data_offset = 0;
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
@@ -260,7 +335,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
++query_addr;
- if (!(buf & 0x1)) {
+ if (!(buf & BIT(0))) {
dev_err(&fn->dev,
"Behavior of F12 without register descriptors is undefined.\n");
return -ENODEV;
@@ -270,12 +345,14 @@ static int rmi_f12_probe(struct rmi_function *fn)
if (!f12)
return -ENOMEM;
+ f12->has_dribble = !!(buf & BIT(3));
+
if (fn->dev.of_node) {
ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
if (ret)
return ret;
- } else if (pdata->sensor_pdata) {
- f12->sensor_pdata = *pdata->sensor_pdata;
+ } else {
+ f12->sensor_pdata = pdata->sensor_pdata;
}
ret = rmi_read_register_desc(rmi_dev, query_addr,
@@ -318,6 +395,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
sensor->x_mm = f12->sensor_pdata.x_mm;
sensor->y_mm = f12->sensor_pdata.y_mm;
+ sensor->dribble = f12->sensor_pdata.dribble;
if (sensor->sensor_type == rmi_sensor_default)
sensor->sensor_type =
@@ -343,7 +421,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
* HID attention reports.
*/
item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
@@ -357,15 +435,15 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
@@ -377,22 +455,22 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data6 = item;
f12->data6_offset = data_offset;
data_offset += item->reg_size;
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data9 = item;
f12->data9_offset = data_offset;
data_offset += item->reg_size;
@@ -401,27 +479,27 @@ static int rmi_f12_probe(struct rmi_function *fn)
}
item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
- if (item && !xport->attn_data)
+ if (item && !drvdata->attn_data.data)
data_offset += item->reg_size;
item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
- if (item && !xport->attn_data) {
+ if (item && !drvdata->attn_data.data) {
f12->data15 = item;
f12->data15_offset = data_offset;
data_offset += item->reg_size;
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 760aff1bc420..f4b491e3e0fd 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -99,6 +99,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
{
struct f30_data *f30 = dev_get_drvdata(&fn->dev);
struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
int retval;
int gpiled = 0;
int value = 0;
@@ -109,11 +110,15 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
return 0;
/* Read the gpi led data. */
- if (rmi_dev->xport->attn_data) {
- memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f30->register_count) {
+ dev_warn(&fn->dev, "F30 interrupted, but data is missing\n");
+ return 0;
+ }
+ memcpy(f30->data_regs, drvdata->attn_data.data,
f30->register_count);
- rmi_dev->xport->attn_data += f30->register_count;
- rmi_dev->xport->attn_size -= f30->register_count;
+ drvdata->attn_data.data += f30->register_count;
+ drvdata->attn_data.size -= f30->register_count;
} else {
retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
f30->data_regs, f30->register_count);
@@ -192,7 +197,7 @@ static int rmi_f30_config(struct rmi_function *fn)
rmi_get_platform_data(fn->rmi_dev);
int error;
- if (pdata->f30_data && pdata->f30_data->disable) {
+ if (pdata->f30_data.disable) {
drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
} else {
/* Write Control Register values back to device */
@@ -351,7 +356,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
f30->gpioled_key_map = (u16 *)map_memory;
pdata = rmi_get_platform_data(rmi_dev);
- if (pdata && f30->has_gpio) {
+ if (f30->has_gpio) {
button = BTN_LEFT;
for (i = 0; i < f30->gpioled_count; i++) {
if (rmi_f30_is_valid_button(i, f30->ctrl)) {
@@ -362,8 +367,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
* f30->has_mech_mouse_btns, but I am
* not sure, so use only the pdata info
*/
- if (pdata->f30_data &&
- pdata->f30_data->buttonpad)
+ if (pdata->f30_data.buttonpad)
break;
}
}
@@ -378,7 +382,7 @@ static int rmi_f30_probe(struct rmi_function *fn)
const struct rmi_device_platform_data *pdata =
rmi_get_platform_data(fn->rmi_dev);
- if (pdata->f30_data && pdata->f30_data->disable)
+ if (pdata->f30_data.disable)
return 0;
rc = rmi_f30_initialize(fn);
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
new file mode 100644
index 000000000000..9774dfbab9bb
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34_write_bootloader_id(struct f34_data *f34)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u8 bootloader_id[F34_BOOTLOADER_ID_LEN];
+ int ret;
+
+ ret = rmi_read_block(rmi_dev, fn->fd.query_base_addr,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Reading bootloader ID failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: writing bootloader id '%c%c'\n",
+ __func__, bootloader_id[0], bootloader_id[1]);
+
+ ret = rmi_write_block(rmi_dev,
+ fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET,
+ bootloader_id, sizeof(bootloader_id));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write bootloader ID: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_command(struct f34_data *f34, u8 command,
+ unsigned int timeout, bool write_bl_id)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int ret;
+
+ if (write_bl_id) {
+ ret = rmi_f34_write_bootloader_id(f34);
+ if (ret)
+ return ret;
+ }
+
+ init_completion(&f34->v5.cmd_done);
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read cmd register: %d (command %#02x)\n",
+ __func__, ret, command);
+ return ret;
+ }
+
+ f34->v5.status |= command & 0x0f;
+
+ ret = rmi_write(rmi_dev, f34->v5.ctrl_address, f34->v5.status);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "Failed to write F34 command %#02x: %d\n",
+ command, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&f34->v5.cmd_done,
+ msecs_to_jiffies(timeout))) {
+
+ ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ if (ret) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out: %d\n",
+ __func__, command, ret);
+ return ret;
+ }
+
+ if (f34->v5.status & 0x7f) {
+ dev_err(&f34->fn->dev,
+ "%s: cmd %#02x timed out, status: %#02x\n",
+ __func__, command, f34->v5.status);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct f34_data *f34 = dev_get_drvdata(&fn->dev);
+ int ret;
+
+ if (f34->bl_version != 5)
+ return 0;
+
+ ret = rmi_read(f34->fn->rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+ __func__, f34->v5.status, ret);
+
+ if (!ret && !(f34->v5.status & 0x7f))
+ complete(&f34->v5.cmd_done);
+
+ return 0;
+}
+
+static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
+ int block_count, u8 command)
+{
+ struct rmi_function *fn = f34->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ u16 address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET;
+ u8 start_address[] = { 0, 0 };
+ int i;
+ int ret;
+
+ ret = rmi_write_block(rmi_dev, fn->fd.data_base_addr,
+ start_address, sizeof(start_address));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to write initial zeros: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < block_count; i++) {
+ ret = rmi_write_block(rmi_dev, address,
+ data, f34->v5.block_size);
+ if (ret) {
+ dev_err(&fn->dev,
+ "failed to write block #%d: %d\n", i, ret);
+ return ret;
+ }
+
+ ret = rmi_f34_command(f34, command, F34_IDLE_WAIT_MS, false);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to write command for block #%d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "wrote block %d of %d\n",
+ i + 1, block_count);
+
+ data += f34->v5.block_size;
+ }
+
+ return 0;
+}
+
+static int rmi_f34_write_firmware(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.fw_blocks,
+ F34_WRITE_FW_BLOCK);
+}
+
+static int rmi_f34_write_config(struct f34_data *f34, const void *data)
+{
+ return rmi_f34_write_blocks(f34, data, f34->v5.config_blocks,
+ F34_WRITE_CONFIG_BLOCK);
+}
+
+int rmi_f34_enable_flash(struct f34_data *f34)
+{
+ return rmi_f34_command(f34, F34_ENABLE_FLASH_PROG,
+ F34_ENABLE_WAIT_MS, true);
+}
+
+static int rmi_f34_flash_firmware(struct f34_data *f34,
+ const struct rmi_f34_firmware *syn_fw)
+{
+ struct rmi_function *fn = f34->fn;
+ int ret;
+
+ if (syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing firmware...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_ALL,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+
+ dev_info(&fn->dev, "Writing firmware (%d bytes)...\n",
+ syn_fw->image_size);
+ ret = rmi_f34_write_firmware(f34, syn_fw->data);
+ if (ret)
+ return ret;
+ }
+
+ if (syn_fw->config_size) {
+ /*
+ * We only need to erase config if we haven't updated
+ * firmware.
+ */
+ if (!syn_fw->image_size) {
+ dev_info(&fn->dev, "Erasing config...\n");
+ ret = rmi_f34_command(f34, F34_ERASE_CONFIG,
+ F34_ERASE_WAIT_MS, true);
+ if (ret)
+ return ret;
+ }
+
+ dev_info(&fn->dev, "Writing config (%d bytes)...\n",
+ syn_fw->config_size);
+ ret = rmi_f34_write_config(f34,
+ &syn_fw->data[syn_fw->image_size]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int rmi_f34_update_firmware(struct f34_data *f34, const struct firmware *fw)
+{
+ const struct rmi_f34_firmware *syn_fw;
+ int ret;
+
+ syn_fw = (const struct rmi_f34_firmware *)fw->data;
+ BUILD_BUG_ON(offsetof(struct rmi_f34_firmware, data) !=
+ F34_FW_IMAGE_OFFSET);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW size:%d, checksum:%08x, image_size:%d, config_size:%d\n",
+ (int)fw->size,
+ le32_to_cpu(syn_fw->checksum),
+ le32_to_cpu(syn_fw->image_size),
+ le32_to_cpu(syn_fw->config_size));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "FW bootloader_id:%02x, product_id:%.*s, info: %02x%02x\n",
+ syn_fw->bootloader_version,
+ (int)sizeof(syn_fw->product_id), syn_fw->product_id,
+ syn_fw->product_info[0], syn_fw->product_info[1]);
+
+ if (syn_fw->image_size &&
+ syn_fw->image_size != f34->v5.fw_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: fw size %d, expected %d\n",
+ syn_fw->image_size,
+ f34->v5.fw_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->config_size &&
+ syn_fw->config_size != f34->v5.config_blocks * f34->v5.block_size) {
+ dev_err(&f34->fn->dev,
+ "Bad firmware image: config size %d, expected %d\n",
+ syn_fw->config_size,
+ f34->v5.config_blocks * f34->v5.block_size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ if (syn_fw->image_size && !syn_fw->config_size) {
+ dev_err(&f34->fn->dev, "Bad firmware image: no config data\n");
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+ mutex_lock(&f34->v5.flash_mutex);
+
+ ret = rmi_f34_flash_firmware(f34, syn_fw);
+
+ mutex_unlock(&f34->v5.flash_mutex);
+
+out:
+ return ret;
+}
+
+static int rmi_firmware_update(struct rmi_driver_data *data,
+ const struct firmware *fw)
+{
+ struct rmi_device *rmi_dev = data->rmi_dev;
+ struct device *dev = &rmi_dev->dev;
+ struct f34_data *f34;
+ int ret;
+
+ if (!data->f34_container) {
+ dev_warn(dev, "%s: No F34 present!\n", __func__);
+ return -EINVAL;
+ }
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ if (f34->bl_version == 7) {
+ if (data->pdt_props & HAS_BSR) {
+ dev_err(dev, "%s: LTS not supported\n", __func__);
+ return -ENODEV;
+ }
+ } else if (f34->bl_version != 5) {
+ dev_warn(dev, "F34 V%d not supported!\n",
+ data->f34_container->fd.function_version);
+ return -ENODEV;
+ }
+
+ /* Enter flash mode */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_start_reflash(f34, fw);
+ else
+ ret = rmi_f34_enable_flash(f34);
+ if (ret)
+ return ret;
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Tear down functions and re-probe */
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ if (!data->bootloader_mode || !data->f34_container) {
+ dev_warn(dev, "%s: No F34 present or not in bootloader!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_enable_irq(rmi_dev, false);
+
+ f34 = dev_get_drvdata(&data->f34_container->dev);
+
+ /* Perform firmware update */
+ if (f34->bl_version == 7)
+ ret = rmi_f34v7_do_reflash(f34, fw);
+ else
+ ret = rmi_f34_update_firmware(f34, fw);
+
+ dev_info(&f34->fn->dev, "Firmware update complete, status:%d\n", ret);
+
+ rmi_disable_irq(rmi_dev, false);
+
+ /* Re-probe */
+ rmi_dbg(RMI_DEBUG_FN, dev, "Re-probing device\n");
+ rmi_free_function_list(rmi_dev);
+
+ ret = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+ if (ret < 0)
+ dev_warn(dev, "RMI reset failed!\n");
+
+ ret = rmi_probe_interrupts(data);
+ if (ret)
+ return ret;
+
+ ret = rmi_init_functions(data);
+ if (ret)
+ return ret;
+
+ rmi_enable_irq(rmi_dev, false);
+
+ if (data->f01_container->dev.driver)
+ /* Driver already bound, so enable ATTN now. */
+ return rmi_enable_sensor(rmi_dev);
+
+ rmi_dbg(RMI_DEBUG_FN, dev, "%s complete\n", __func__);
+
+ return ret;
+}
+
+static ssize_t rmi_driver_update_fw_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(dev);
+ char fw_name[NAME_MAX];
+ const struct firmware *fw;
+ size_t copy_count = count;
+ int ret;
+
+ if (count == 0 || count >= NAME_MAX)
+ return -EINVAL;
+
+ if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
+ copy_count -= 1;
+
+ strncpy(fw_name, buf, copy_count);
+ fw_name[copy_count] = '\0';
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Flashing %s\n", fw_name);
+
+ ret = rmi_firmware_update(data, fw);
+
+ release_firmware(fw);
+
+ return ret ?: count;
+}
+
+static DEVICE_ATTR(update_fw, 0200, NULL, rmi_driver_update_fw_store);
+
+static struct attribute *rmi_firmware_attrs[] = {
+ &dev_attr_update_fw.attr,
+ NULL
+};
+
+static struct attribute_group rmi_firmware_attr_group = {
+ .attrs = rmi_firmware_attrs,
+};
+
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+ struct f34_data *f34;
+ unsigned char f34_queries[9];
+ bool has_config_id;
+ u8 version = fn->fd.function_version;
+ int ret;
+
+ f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+ if (!f34)
+ return -ENOMEM;
+
+ f34->fn = fn;
+ dev_set_drvdata(&fn->dev, f34);
+
+ /* v5 code only supported version 0, try V7 probe */
+ if (version > 0)
+ return rmi_f34v7_probe(f34);
+ else if (version != 0)
+ return -ENODEV;
+
+ f34->bl_version = 5;
+
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "%s: Failed to query properties\n",
+ __func__);
+ return ret;
+ }
+
+ snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
+ "%c%c", f34_queries[0], f34_queries[1]);
+
+ mutex_init(&f34->v5.flash_mutex);
+ init_completion(&f34->v5.cmd_done);
+
+ f34->v5.block_size = get_unaligned_le16(&f34_queries[3]);
+ f34->v5.fw_blocks = get_unaligned_le16(&f34_queries[5]);
+ f34->v5.config_blocks = get_unaligned_le16(&f34_queries[7]);
+ f34->v5.ctrl_address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET +
+ f34->v5.block_size;
+ has_config_id = f34_queries[2] & (1 << 2);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Bootloader ID: %s\n",
+ f34->bootloader_id);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Block size: %d\n",
+ f34->v5.block_size);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "FW blocks: %d\n",
+ f34->v5.fw_blocks);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "CFG blocks: %d\n",
+ f34->v5.config_blocks);
+
+ if (has_config_id) {
+ ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f34_queries, sizeof(f34_queries));
+ if (ret) {
+ dev_err(&fn->dev, "Failed to read F34 config ID\n");
+ return ret;
+ }
+
+ snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+ "%02x%02x%02x%02x",
+ f34_queries[0], f34_queries[1],
+ f34_queries[2], f34_queries[3]);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ return 0;
+}
+
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+ return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+ sysfs_remove_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+struct rmi_function_handler rmi_f34_handler = {
+ .driver = {
+ .name = "rmi4_f34",
+ },
+ .func = 0x34,
+ .probe = rmi_f34_probe,
+ .attention = rmi_f34_attention,
+};
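
For reference, a reflash can be triggered from userspace by writing a firmware file name (resolved by request_firmware(), i.e. relative to /lib/firmware) into the new update_fw attribute. A minimal sketch; the sysfs device path and the firmware file name are assumptions, not fixed by this patch:

/* Sketch: trigger a reflash via the update_fw sysfs attribute. */
#include <stdio.h>

int main(void)
{
	/* device path depends on the transport/bus instance */
	FILE *f = fopen("/sys/bus/rmi4/devices/rmi4-00/update_fw", "w");

	if (!f)
		return 1;

	fputs("rmi4.img\n", f);	/* hypothetical file under /lib/firmware */

	return fclose(f) ? 1 : 0;
}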
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
new file mode 100644
index 000000000000..2c21056dc375
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_F34_H
+#define _RMI_F34_H
+
+/* F34 image file offsets. */
+#define F34_FW_IMAGE_OFFSET 0x100
+
+/* F34 register offsets. */
+#define F34_BLOCK_DATA_OFFSET 2
+
+/* F34 commands */
+#define F34_WRITE_FW_BLOCK 0x2
+#define F34_ERASE_ALL 0x3
+#define F34_READ_CONFIG_BLOCK 0x5
+#define F34_WRITE_CONFIG_BLOCK 0x6
+#define F34_ERASE_CONFIG 0x7
+#define F34_ENABLE_FLASH_PROG 0xf
+
+#define F34_STATUS_IN_PROGRESS 0xff
+#define F34_STATUS_IDLE 0x80
+
+#define F34_IDLE_WAIT_MS 500
+#define F34_ENABLE_WAIT_MS 300
+#define F34_ERASE_WAIT_MS 5000
+
+#define F34_BOOTLOADER_ID_LEN 2
+
+/* F34 V7 defines */
+#define V7_FLASH_STATUS_OFFSET 0
+#define V7_PARTITION_ID_OFFSET 1
+#define V7_BLOCK_NUMBER_OFFSET 2
+#define V7_TRANSFER_LENGTH_OFFSET 3
+#define V7_COMMAND_OFFSET 4
+#define V7_PAYLOAD_OFFSET 5
+#define V7_BOOTLOADER_ID_OFFSET 1
+
+#define IMAGE_HEADER_VERSION_10 0x10
+
+#define CONFIG_ID_SIZE 32
+#define PRODUCT_ID_SIZE 10
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+
+#define MIN_SLEEP_TIME_US 50
+#define MAX_SLEEP_TIME_US 100
+
+#define HAS_BSR BIT(5)
+#define HAS_CONFIG_ID BIT(3)
+#define HAS_GUEST_CODE BIT(6)
+#define HAS_DISP_CFG BIT(5)
+
+/* F34 V7 commands */
+#define CMD_V7_IDLE 0
+#define CMD_V7_ENTER_BL 1
+#define CMD_V7_READ 2
+#define CMD_V7_WRITE 3
+#define CMD_V7_ERASE 4
+#define CMD_V7_ERASE_AP 5
+#define CMD_V7_SENSOR_ID 6
+
+#define v7_CMD_IDLE 0
+#define v7_CMD_WRITE_FW 1
+#define v7_CMD_WRITE_CONFIG 2
+#define v7_CMD_WRITE_LOCKDOWN 3
+#define v7_CMD_WRITE_GUEST_CODE 4
+#define v7_CMD_READ_CONFIG 5
+#define v7_CMD_ERASE_ALL 6
+#define v7_CMD_ERASE_UI_FIRMWARE 7
+#define v7_CMD_ERASE_UI_CONFIG 8
+#define v7_CMD_ERASE_BL_CONFIG 9
+#define v7_CMD_ERASE_DISP_CONFIG 10
+#define v7_CMD_ERASE_FLASH_CONFIG 11
+#define v7_CMD_ERASE_GUEST_CODE 12
+#define v7_CMD_ENABLE_FLASH_PROG 13
+
+#define v7_UI_CONFIG_AREA 0
+#define v7_PM_CONFIG_AREA 1
+#define v7_BL_CONFIG_AREA 2
+#define v7_DP_CONFIG_AREA 3
+#define v7_FLASH_CONFIG_AREA 4
+
+/* F34 V7 partition IDs */
+#define BOOTLOADER_PARTITION 1
+#define DEVICE_CONFIG_PARTITION 2
+#define FLASH_CONFIG_PARTITION 3
+#define MANUFACTURING_BLOCK_PARTITION 4
+#define GUEST_SERIALIZATION_PARTITION 5
+#define GLOBAL_PARAMETERS_PARTITION 6
+#define CORE_CODE_PARTITION 7
+#define CORE_CONFIG_PARTITION 8
+#define GUEST_CODE_PARTITION 9
+#define DISPLAY_CONFIG_PARTITION 10
+
+/* F34 V7 container IDs */
+#define TOP_LEVEL_CONTAINER 0
+#define UI_CONTAINER 1
+#define UI_CONFIG_CONTAINER 2
+#define BL_CONTAINER 3
+#define BL_IMAGE_CONTAINER 4
+#define BL_CONFIG_CONTAINER 5
+#define BL_LOCKDOWN_INFO_CONTAINER 6
+#define PERMANENT_CONFIG_CONTAINER 7
+#define GUEST_CODE_CONTAINER 8
+#define BL_PROTOCOL_DESCRIPTOR_CONTAINER 9
+#define UI_PROTOCOL_DESCRIPTOR_CONTAINER 10
+#define RMI_SELF_DISCOVERY_CONTAINER 11
+#define RMI_PAGE_CONTENT_CONTAINER 12
+#define GENERAL_INFORMATION_CONTAINER 13
+#define DEVICE_CONFIG_CONTAINER 14
+#define FLASH_CONFIG_CONTAINER 15
+#define GUEST_SERIALIZATION_CONTAINER 16
+#define GLOBAL_PARAMETERS_CONTAINER 17
+#define CORE_CODE_CONTAINER 18
+#define CORE_CONFIG_CONTAINER 19
+#define DISPLAY_CONFIG_CONTAINER 20
+
+struct f34v7_query_1_7 {
+ u8 bl_minor_revision; /* query 1 */
+ u8 bl_major_revision;
+ __le32 bl_fw_id; /* query 2 */
+ u8 minimum_write_size; /* query 3 */
+ __le16 block_size;
+ __le16 flash_page_size;
+ __le16 adjustable_partition_area_size; /* query 4 */
+ __le16 flash_config_length; /* query 5 */
+ __le16 payload_length; /* query 6 */
+ u8 partition_support[4]; /* query 7 */
+} __packed;
+
+struct f34v7_data_1_5 {
+ u8 partition_id;
+ __le16 block_offset;
+ __le16 transfer_length;
+ u8 command;
+ u8 payload[2];
+} __packed;
+
+struct block_data {
+ const void *data;
+ int size;
+};
+
+struct partition_table {
+ u8 partition_id;
+ u8 byte_1_reserved;
+ __le16 partition_length;
+ __le16 start_physical_address;
+ __le16 partition_properties;
+} __packed;
+
+struct physical_address {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 guest_code;
+};
+
+struct container_descriptor {
+ __le32 content_checksum;
+ __le16 container_id;
+ u8 minor_version;
+ u8 major_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ u8 container_option_flags[4];
+ __le32 content_options_length;
+ __le32 content_options_address;
+ __le32 content_length;
+ __le32 content_address;
+} __packed;
+
+struct block_count {
+ u16 ui_firmware;
+ u16 ui_config;
+ u16 dp_config;
+ u16 fl_config;
+ u16 pm_config;
+ u16 bl_config;
+ u16 lockdown;
+ u16 guest_code;
+};
+
+struct image_header_10 {
+ __le32 checksum;
+ u8 reserved_04;
+ u8 reserved_05;
+ u8 minor_header_version;
+ u8 major_header_version;
+ u8 reserved_08;
+ u8 reserved_09;
+ u8 reserved_0a;
+ u8 reserved_0b;
+ __le32 top_level_container_start_addr;
+};
+
+struct image_metadata {
+ bool contains_firmware_id;
+ bool contains_bootloader;
+ bool contains_display_cfg;
+ bool contains_guest_code;
+ bool contains_flash_config;
+ unsigned int firmware_id;
+ unsigned int checksum;
+ unsigned int bootloader_size;
+ unsigned int display_cfg_offset;
+ unsigned char bl_version;
+ unsigned char product_id[PRODUCT_ID_SIZE + 1];
+ unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+ struct block_data bootloader;
+ struct block_data ui_firmware;
+ struct block_data ui_config;
+ struct block_data dp_config;
+ struct block_data fl_config;
+ struct block_data bl_config;
+ struct block_data guest_code;
+ struct block_data lockdown;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+};
+
+struct register_offset {
+ u8 properties;
+ u8 properties_2;
+ u8 block_size;
+ u8 block_count;
+ u8 gc_block_count;
+ u8 flash_status;
+ u8 partition_id;
+ u8 block_number;
+ u8 transfer_length;
+ u8 flash_cmd;
+ u8 payload;
+};
+
+struct rmi_f34_firmware {
+ __le32 checksum;
+ u8 pad1[3];
+ u8 bootloader_version;
+ __le32 image_size;
+ __le32 config_size;
+ u8 product_id[10];
+ u8 product_info[2];
+ u8 pad2[228];
+ u8 data[];
+};
+
+struct f34v5_data {
+ u16 block_size;
+ u16 fw_blocks;
+ u16 config_blocks;
+ u16 ctrl_address;
+ u8 status;
+
+ struct completion cmd_done;
+ struct mutex flash_mutex;
+};
+
+struct f34v7_data {
+ bool has_display_cfg;
+ bool has_guest_code;
+ bool force_update;
+ bool in_bl_mode;
+ u8 *read_config_buf;
+ size_t read_config_buf_size;
+ u8 command;
+ u8 flash_status;
+ u16 block_size;
+ u16 config_block_count;
+ u16 config_size;
+ u16 config_area;
+ u16 flash_config_length;
+ u16 payload_length;
+ u8 partitions;
+ u16 partition_table_bytes;
+ bool new_partition_table;
+
+ struct register_offset off;
+ struct block_count blkcount;
+ struct physical_address phyaddr;
+ struct image_metadata img;
+
+ const void *config_data;
+ const void *image;
+};
+
+struct f34_data {
+ struct rmi_function *fn;
+
+ u8 bl_version;
+ unsigned char bootloader_id[5];
+ unsigned char configuration_id[CONFIG_ID_SIZE*2 + 1];
+
+ union {
+ struct f34v5_data v5;
+ struct f34v7_data v7;
+ };
+};
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_probe(struct f34_data *f34);
+
+#endif /* _RMI_F34_H */
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
new file mode 100644
index 000000000000..ca31f9539d9b
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) 2016, Zodiac Inflight Innovations
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34v7_read_flash_status(struct f34_data *f34)
+{
+ u8 status;
+ u8 command;
+ int ret;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+ &status,
+ sizeof(status));
+ if (ret < 0) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Failed to read flash status\n", __func__);
+ return ret;
+ }
+
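+ /* Bit 7 flags bootloader mode; bits 0-4 carry the flash status code */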
+ f34->v7.in_bl_mode = status >> 7;
+ f34->v7.flash_status = status & 0x1f;
+
+ if (f34->v7.flash_status != 0x00) {
+ dev_err(&f34->fn->dev, "%s: status=%d, command=0x%02x\n",
+ __func__, f34->v7.flash_status, f34->v7.command);
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+ &command,
+ sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read flash command\n",
+ __func__);
+ return ret;
+ }
+
+ f34->v7.command = command;
+
+ return 0;
+}
+
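+/*
+ * Poll the flash status until the controller reports the idle command
+ * with no pending error, or until roughly timeout_ms has elapsed.
+ */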
+static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
+{
+ int count = 0;
+ int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+
+ do {
+ usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+ count++;
+
+ rmi_f34v7_read_flash_status(f34);
+
+ if ((f34->v7.command == v7_CMD_IDLE)
+ && (f34->v7.flash_status == 0x00)) {
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "Idle status detected\n");
+ return 0;
+ }
+ } while (count < timeout_count);
+
+ dev_err(&f34->fn->dev,
+ "%s: Timed out waiting for idle status\n", __func__);
+
+ return -ETIMEDOUT;
+}
+
+static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
+ u8 cmd)
+{
+ int ret;
+ u8 base;
+ struct f34v7_data_1_5 data_1_5;
+
+ base = f34->fn->fd.data_base_addr;
+
+ memset(&data_1_5, 0, sizeof(data_1_5));
+
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ data_1_5.partition_id = CORE_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ data_1_5.partition_id = CORE_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ data_1_5.partition_id = GUEST_CODE_PARTITION;
+ data_1_5.command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ data_1_5.partition_id = BOOTLOADER_PARTITION;
+ data_1_5.command = CMD_V7_ENTER_BL;
+ break;
+ }
+
+ data_1_5.payload[0] = f34->bootloader_id[0];
+ data_1_5.payload[1] = f34->bootloader_id[1];
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &data_1_5, sizeof(data_1_5));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to write single transaction command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 command;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_WRITE_GUEST_CODE:
+ command = CMD_V7_WRITE;
+ break;
+ case v7_CMD_READ_CONFIG:
+ command = CMD_V7_READ;
+ break;
+ case v7_CMD_ERASE_ALL:
+ command = CMD_V7_ERASE_AP;
+ break;
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ command = CMD_V7_ERASE;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ command = CMD_V7_ENTER_BL;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ f34->v7.command = command;
+
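+ /*
+ * Erase and enter-bootloader commands are issued together with the
+ * partition ID and bootloader ID in a single write transaction.
+ */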
+ switch (cmd) {
+ case v7_CMD_ERASE_ALL:
+ case v7_CMD_ERASE_UI_FIRMWARE:
+ case v7_CMD_ERASE_BL_CONFIG:
+ case v7_CMD_ERASE_UI_CONFIG:
+ case v7_CMD_ERASE_DISP_CONFIG:
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ case v7_CMD_ERASE_GUEST_CODE:
+ case v7_CMD_ENABLE_FLASH_PROG:
+ return rmi_f34v7_write_command_single_transaction(f34, cmd);
+ default:
+ break;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: writing cmd %02X\n",
+ __func__, command);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.flash_cmd,
+ &command, sizeof(command));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
+{
+ int ret;
+ u8 base;
+ u8 partition;
+
+ base = f34->fn->fd.data_base_addr;
+
+ switch (cmd) {
+ case v7_CMD_WRITE_FW:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_WRITE_CONFIG:
+ case v7_CMD_READ_CONFIG:
+ if (f34->v7.config_area == v7_UI_CONFIG_AREA)
+ partition = CORE_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_DP_CONFIG_AREA)
+ partition = DISPLAY_CONFIG_PARTITION;
+ else if (f34->v7.config_area == v7_PM_CONFIG_AREA)
+ partition = GUEST_SERIALIZATION_PARTITION;
+ else if (f34->v7.config_area == v7_BL_CONFIG_AREA)
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ else if (f34->v7.config_area == v7_FLASH_CONFIG_AREA)
+ partition = FLASH_CONFIG_PARTITION;
+ else
+ return -EINVAL;
+ break;
+ case v7_CMD_WRITE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_ALL:
+ partition = CORE_CODE_PARTITION;
+ break;
+ case v7_CMD_ERASE_BL_CONFIG:
+ partition = GLOBAL_PARAMETERS_PARTITION;
+ break;
+ case v7_CMD_ERASE_UI_CONFIG:
+ partition = CORE_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_DISP_CONFIG:
+ partition = DISPLAY_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_FLASH_CONFIG:
+ partition = FLASH_CONFIG_PARTITION;
+ break;
+ case v7_CMD_ERASE_GUEST_CODE:
+ partition = GUEST_CODE_PARTITION;
+ break;
+ case v7_CMD_ENABLE_FLASH_PROG:
+ partition = BOOTLOADER_PARTITION;
+ break;
+ default:
+ dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+ __func__, cmd);
+ return -EINVAL;
+ }
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.partition_id,
+ &partition, sizeof(partition));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_partition_table(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+
+ ret = rmi_f34v7_write_partition_id(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ put_unaligned_le16(f34->v7.flash_config_length, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_READ_CONFIG);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write command\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, WRITE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to wait for idle status\n",
+ __func__);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ f34->v7.read_config_buf,
+ f34->v7.partition_table_bytes);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read block data\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rmi_f34v7_parse_partition_table(struct f34_data *f34,
+ const void *partition_table,
+ struct block_count *blkcount,
+ struct physical_address *phyaddr)
+{
+ int i;
+ int index;
+ u16 partition_length;
+ u16 physical_address;
+ const struct partition_table *ptable;
+
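+ /* Each 8-byte partition entry follows the 2-byte table header */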
+ for (i = 0; i < f34->v7.partitions; i++) {
+ index = i * 8 + 2;
+ ptable = partition_table + index;
+ partition_length = le16_to_cpu(ptable->partition_length);
+ physical_address = le16_to_cpu(ptable->start_physical_address);
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Partition entry %d: %*ph\n",
+ __func__, i, (int)sizeof(struct partition_table), ptable);
+ switch (ptable->partition_id & 0x1f) {
+ case CORE_CODE_PARTITION:
+ blkcount->ui_firmware = partition_length;
+ phyaddr->ui_firmware = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core code block count: %d\n",
+ __func__, blkcount->ui_firmware);
+ break;
+ case CORE_CONFIG_PARTITION:
+ blkcount->ui_config = partition_length;
+ phyaddr->ui_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Core config block count: %d\n",
+ __func__, blkcount->ui_config);
+ break;
+ case DISPLAY_CONFIG_PARTITION:
+ blkcount->dp_config = partition_length;
+ phyaddr->dp_config = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Display config block count: %d\n",
+ __func__, blkcount->dp_config);
+ break;
+ case FLASH_CONFIG_PARTITION:
+ blkcount->fl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Flash config block count: %d\n",
+ __func__, blkcount->fl_config);
+ break;
+ case GUEST_CODE_PARTITION:
+ blkcount->guest_code = partition_length;
+ phyaddr->guest_code = physical_address;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest code block count: %d\n",
+ __func__, blkcount->guest_code);
+ break;
+ case GUEST_SERIALIZATION_PARTITION:
+ blkcount->pm_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Guest serialization block count: %d\n",
+ __func__, blkcount->pm_config);
+ break;
+ case GLOBAL_PARAMETERS_PARTITION:
+ blkcount->bl_config = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Global parameters block count: %d\n",
+ __func__, blkcount->bl_config);
+ break;
+ case DEVICE_CONFIG_PARTITION:
+ blkcount->lockdown = partition_length;
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Device config block count: %d\n",
+ __func__, blkcount->lockdown);
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34)
+{
+ int ret;
+ u8 base;
+ int offset;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x7) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Bootloader V%d.%d\n",
+ f34->bootloader_id[1], f34->bootloader_id[0]);
+
+ return 0;
+}
+
+static int rmi_f34v7_read_queries(struct f34_data *f34)
+{
+ int ret;
+ int i, j;
+ u8 base;
+ int offset;
+ u8 *ptable;
+ u8 query_0;
+ struct f34v7_query_1_7 query_1_7;
+
+ base = f34->fn->fd.query_base_addr;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base,
+ &query_0,
+ sizeof(query_0));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed to read query 0\n", __func__);
+ return ret;
+ }
+
+ offset = (query_0 & 0x07) + 1;
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + offset,
+ &query_1_7,
+ sizeof(query_1_7));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+ __func__);
+ return ret;
+ }
+
+ f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+ f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+ f34->v7.block_size = le16_to_cpu(query_1_7.block_size);
+ f34->v7.flash_config_length =
+ le16_to_cpu(query_1_7.flash_config_length);
+ f34->v7.payload_length = le16_to_cpu(query_1_7.payload_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
+ __func__, f34->v7.block_size);
+
+ f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
+ f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
+ f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
+ f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+ f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
+ f34->v7.off.payload = V7_PAYLOAD_OFFSET;
+
+ f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
+ f34->v7.has_guest_code =
+ query_1_7.partition_support[1] & HAS_GUEST_CODE;
+
+ if (query_0 & HAS_CONFIG_ID) {
+ char f34_ctrl[CONFIG_ID_SIZE];
+ int i = 0;
+ u8 *p = f34->configuration_id;
+ *p = '\0';
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.control_base_addr,
+ f34_ctrl,
+ sizeof(f34_ctrl));
+ if (ret)
+ return ret;
+
+ /* Eat leading zeros */
+ while (i < sizeof(f34_ctrl) && !f34_ctrl[i])
+ i++;
+
+ for (; i < sizeof(f34_ctrl); i++)
+ p += snprintf(p, f34->configuration_id
+ + sizeof(f34->configuration_id) - p,
+ "%02X", f34_ctrl[i]);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n",
+ f34->configuration_id);
+ }
+
+ f34->v7.partitions = 0;
+ for (i = 0; i < sizeof(query_1_7.partition_support); i++)
+ for (j = 0; j < 8; j++)
+ if (query_1_7.partition_support[i] & (1 << j))
+ f34->v7.partitions++;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n",
+ __func__, (int)sizeof(query_1_7.partition_support),
+ query_1_7.partition_support);
+
+
+ f34->v7.partition_table_bytes = f34->v7.partitions * 8 + 2;
+
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.partition_table_bytes,
+ GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.partition_table_bytes;
+ ptable = f34->v7.read_config_buf;
+
+ ret = rmi_f34v7_read_f34v7_partition_table(f34);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read partition table\n",
+ __func__);
+ return ret;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, ptable,
+ &f34->v7.blkcount, &f34->v7.phyaddr);
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_firmware) {
+ dev_err(&f34->fn->dev,
+ "UI firmware size mismatch: %d != %d\n",
+ block_count, f34->v7.blkcount.ui_firmware);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.ui_config) {
+ dev_err(&f34->fn->dev, "UI config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.dp_config) {
+ dev_err(&f34->fn->dev, "Display config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
+ if (block_count != f34->v7.blkcount.guest_code) {
+ dev_err(&f34->fn->dev, "Guest code size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
+{
+ u16 block_count;
+
+ block_count = f34->v7.img.bl_config.size / f34->v7.block_size;
+
+ if (block_count != f34->v7.blkcount.bl_config) {
+ dev_err(&f34->fn->dev, "Bootloader config size mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_config(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing config...\n");
+
+ switch (f34->v7.config_area) {
+ case v7_UI_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_DP_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ case v7_BL_CONFIG_AREA:
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing guest code...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_erase_all(struct f34_data *f34)
+{
+ int ret;
+
+ dev_info(&f34->fn->dev, "Erasing firmware...\n");
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.has_display_cfg) {
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
+ ret = rmi_f34v7_erase_guest_code(f34);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rmi_f34v7_read_f34v7_blocks(struct f34_data *f34, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+ u16 index = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ max_transfer = min(f34->v7.payload_length,
+ (u16)(PAGE_SIZE / f34->v7.block_size));
+
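+ /* Read in chunks of at most payload_length blocks, never more than one page */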
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Wait for idle failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ &f34->v7.read_config_buf[index],
+ transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Read block failed (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ index += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
+static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
+ const void *block_ptr, u16 block_cnt,
+ u8 command)
+{
+ int ret;
+ u8 base;
+ __le16 length;
+ u16 transfer;
+ u16 max_transfer;
+ u16 remaining = block_cnt;
+ u16 block_number = 0;
+
+ base = f34->fn->fd.data_base_addr;
+
+ ret = rmi_f34v7_write_partition_id(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.block_number,
+ &block_number, sizeof(block_number));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+ __func__);
+ return ret;
+ }
+
+ max_transfer = min(f34->v7.payload_length,
+ (u16)(PAGE_SIZE / f34->v7.block_size));
+
+ do {
+ transfer = min(remaining, max_transfer);
+ put_unaligned_le16(transfer, &length);
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.transfer_length,
+ &length, sizeof(length));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Write transfer length fail (%d remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_write_command(f34, command);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_write_block(f34->fn->rmi_dev,
+ base + f34->v7.off.payload,
+ block_ptr, transfer * f34->v7.block_size);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed writing data (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0) {
+ dev_err(&f34->fn->dev,
+ "%s: Failed wait for idle (%d blks remaining)\n",
+ __func__, remaining);
+ return ret;
+ }
+
+ block_ptr += (transfer * f34->v7.block_size);
+ remaining -= transfer;
+ } while (remaining);
+
+ return 0;
+}
+
+static int rmi_f34v7_write_config(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.config_data,
+ f34->v7.config_block_count,
+ v7_CMD_WRITE_CONFIG);
+}
+
+static int rmi_f34v7_write_ui_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.ui_config.data;
+ f34->v7.config_size = f34->v7.img.ui_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_dp_config(struct f34_data *f34)
+{
+ f34->v7.config_area = v7_DP_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.dp_config.data;
+ f34->v7.config_size = f34->v7.img.dp_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_guest_code(struct f34_data *f34)
+{
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.guest_code.data,
+ f34->v7.img.guest_code.size /
+ f34->v7.block_size,
+ v7_CMD_WRITE_GUEST_CODE);
+}
+
+static int rmi_f34v7_write_flash_config(struct f34_data *f34)
+{
+ int ret;
+
+ f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.img.fl_config.data;
+ f34->v7.config_size = f34->v7.img.fl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ if (f34->v7.config_block_count != f34->v7.blkcount.fl_config) {
+ dev_err(&f34->fn->dev, "%s: Flash config size mismatch\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: Erase flash config command written\n", __func__);
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_partition_table(struct f34_data *f34)
+{
+ u16 block_count;
+ int ret;
+
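+ /*
+ * Preserve the device's bootloader config: read it out, update the
+ * flash config (which defines the partition layout), then write the
+ * saved bootloader config back.
+ */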
+ block_count = f34->v7.blkcount.bl_config;
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_size = f34->v7.block_size * block_count;
+ devm_kfree(&f34->fn->dev, f34->v7.read_config_buf);
+ f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+ f34->v7.config_size, GFP_KERNEL);
+ if (!f34->v7.read_config_buf) {
+ f34->v7.read_config_buf_size = 0;
+ return -ENOMEM;
+ }
+
+ f34->v7.read_config_buf_size = f34->v7.config_size;
+
+ ret = rmi_f34v7_read_f34v7_blocks(f34, block_count, v7_CMD_READ_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_erase_config(f34);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_write_flash_config(f34);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.config_area = v7_BL_CONFIG_AREA;
+ f34->v7.config_data = f34->v7.read_config_buf;
+ f34->v7.config_size = f34->v7.img.bl_config.size;
+ f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+ ret = rmi_f34v7_write_config(f34);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rmi_f34v7_write_firmware(struct f34_data *f34)
+{
+ u16 blk_count;
+
+ blk_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+ return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.ui_firmware.data,
+ blk_count, v7_CMD_WRITE_FW);
+}
+
+static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
+{
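+ /*
+ * Reflash the partition table when any physical address in the image
+ * differs from what the device currently reports.
+ */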
+ if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ if (f34->v7.has_guest_code &&
+ f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
+ f34->v7.new_partition_table = true;
+ return;
+ }
+
+ f34->v7.new_partition_table = false;
+}
+
+static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
+ const void *image)
+{
+ int i;
+ int num_of_containers;
+ unsigned int addr;
+ unsigned int container_id;
+ unsigned int length;
+ const void *content;
+ const struct container_descriptor *descriptor;
+
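+ /*
+ * The first dword of the bootloader container is skipped (its first
+ * byte is the bootloader version); the remaining dwords are addresses
+ * of nested containers.
+ */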
+ num_of_containers = f34->v7.img.bootloader.size / 4 - 1;
+
+ for (i = 1; i <= num_of_containers; i++) {
+ addr = get_unaligned_le32(f34->v7.img.bootloader.data + i * 4);
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+ switch (container_id) {
+ case BL_CONFIG_CONTAINER:
+ case GLOBAL_PARAMETERS_CONTAINER:
+ f34->v7.img.bl_config.data = content;
+ f34->v7.img.bl_config.size = length;
+ break;
+ case BL_LOCKDOWN_INFO_CONTAINER:
+ case DEVICE_CONFIG_CONTAINER:
+ f34->v7.img.lockdown.data = content;
+ f34->v7.img.lockdown.size = length;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rmi_f34v7_parse_image_header_10(struct f34_data *f34)
+{
+ unsigned int i;
+ unsigned int num_of_containers;
+ unsigned int addr;
+ unsigned int offset;
+ unsigned int container_id;
+ unsigned int length;
+ const void *image = f34->v7.image;
+ const u8 *content;
+ const struct container_descriptor *descriptor;
+ const struct image_header_10 *header = image;
+
+ f34->v7.img.checksum = le32_to_cpu(header->checksum);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.img.checksum=%X\n",
+ __func__, f34->v7.img.checksum);
+
+ /* address of top level container */
+ offset = le32_to_cpu(header->top_level_container_start_addr);
+ descriptor = image + offset;
+
+ /* address of top level container content */
+ offset = le32_to_cpu(descriptor->content_address);
+ num_of_containers = le32_to_cpu(descriptor->content_length) / 4;
+
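+ /* The top level container content is a list of 4-byte container addresses */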
+ for (i = 0; i < num_of_containers; i++) {
+ addr = get_unaligned_le32(image + offset);
+ offset += 4;
+ descriptor = image + addr;
+ container_id = le16_to_cpu(descriptor->container_id);
+ content = image + le32_to_cpu(descriptor->content_address);
+ length = le32_to_cpu(descriptor->content_length);
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: container_id=%d, length=%d\n", __func__,
+ container_id, length);
+
+ switch (container_id) {
+ case UI_CONTAINER:
+ case CORE_CODE_CONTAINER:
+ f34->v7.img.ui_firmware.data = content;
+ f34->v7.img.ui_firmware.size = length;
+ break;
+ case UI_CONFIG_CONTAINER:
+ case CORE_CONFIG_CONTAINER:
+ f34->v7.img.ui_config.data = content;
+ f34->v7.img.ui_config.size = length;
+ break;
+ case BL_CONTAINER:
+ f34->v7.img.bl_version = *content;
+ f34->v7.img.bootloader.data = content;
+ f34->v7.img.bootloader.size = length;
+ rmi_f34v7_parse_img_header_10_bl_container(f34, image);
+ break;
+ case GUEST_CODE_CONTAINER:
+ f34->v7.img.contains_guest_code = true;
+ f34->v7.img.guest_code.data = content;
+ f34->v7.img.guest_code.size = length;
+ break;
+ case DISPLAY_CONFIG_CONTAINER:
+ f34->v7.img.contains_display_cfg = true;
+ f34->v7.img.dp_config.data = content;
+ f34->v7.img.dp_config.size = length;
+ break;
+ case FLASH_CONFIG_CONTAINER:
+ f34->v7.img.contains_flash_config = true;
+ f34->v7.img.fl_config.data = content;
+ f34->v7.img.fl_config.size = length;
+ break;
+ case GENERAL_INFORMATION_CONTAINER:
+ f34->v7.img.contains_firmware_id = true;
+ f34->v7.img.firmware_id =
+ get_unaligned_le32(content + 4);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int rmi_f34v7_parse_image_info(struct f34_data *f34)
+{
+ const struct image_header_10 *header = f34->v7.image;
+
+ memset(&f34->v7.img, 0x00, sizeof(f34->v7.img));
+
+ rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+ "%s: header->major_header_version = %d\n",
+ __func__, header->major_header_version);
+
+ switch (header->major_header_version) {
+ case IMAGE_HEADER_VERSION_10:
+ rmi_f34v7_parse_image_header_10(f34);
+ break;
+ default:
+ dev_err(&f34->fn->dev, "Unsupported image file format %02X\n",
+ header->major_header_version);
+ return -EINVAL;
+ }
+
+ if (!f34->v7.img.contains_flash_config) {
+ dev_err(&f34->fn->dev, "%s: No flash config in fw image\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
+ &f34->v7.img.blkcount, &f34->v7.img.phyaddr);
+
+ rmi_f34v7_compare_partition_tables(f34);
+
+ return 0;
+}
+
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret;
+
+ rmi_f34v7_read_queries_bl_version(f34);
+
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (!f34->v7.new_partition_table) {
+ ret = rmi_f34v7_check_ui_firmware_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ ret = rmi_f34v7_check_ui_config_size(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg &&
+ f34->v7.img.contains_display_cfg) {
+ ret = rmi_f34v7_check_dp_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ ret = rmi_f34v7_check_guest_code_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ } else {
+ ret = rmi_f34v7_check_bl_config_size(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = rmi_f34v7_erase_all(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.new_partition_table) {
+ ret = rmi_f34v7_write_partition_table(f34);
+ if (ret < 0)
+ goto fail;
+ dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
+ __func__);
+ }
+
+ dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
+ f34->v7.img.ui_firmware.size);
+
+ ret = rmi_f34v7_write_firmware(f34);
+ if (ret < 0)
+ goto fail;
+
+ dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
+ f34->v7.img.ui_config.size);
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ ret = rmi_f34v7_write_ui_config(f34);
+ if (ret < 0)
+ goto fail;
+
+ if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
+ dev_info(&f34->fn->dev, "Writing display config...\n");
+
+ ret = rmi_f34v7_write_dp_config(f34);
+ if (ret < 0)
+ goto fail;
+ }
+
+ if (f34->v7.new_partition_table) {
+ if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+ dev_info(&f34->fn->dev, "Writing guest code...\n");
+
+ ret = rmi_f34v7_write_guest_code(f34);
+ if (ret < 0)
+ goto fail;
+ }
+ }
+
+fail:
+ return ret;
+}
+
+static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
+{
+ int ret;
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ return ret;
+
+ if (f34->v7.in_bl_mode)
+ return 0;
+
+ ret = rmi_f34v7_write_command(f34, v7_CMD_ENABLE_FLASH_PROG);
+ if (ret < 0)
+ return ret;
+
+ ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+ if (ret < 0)
+ return ret;
+
+ if (!f34->v7.in_bl_mode) {
+ dev_err(&f34->fn->dev, "%s: BL mode not entered\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+ int ret = 0;
+
+ f34->v7.config_area = v7_UI_CONFIG_AREA;
+ f34->v7.image = fw->data;
+
+ ret = rmi_f34v7_parse_image_info(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (!f34->v7.force_update && f34->v7.new_partition_table) {
+ dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ dev_info(&f34->fn->dev, "Firmware image OK\n");
+
+ ret = rmi_f34v7_read_flash_status(f34);
+ if (ret < 0)
+ goto exit;
+
+ if (f34->v7.in_bl_mode) {
+ dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+ __func__);
+ }
+
+ return rmi_f34v7_enter_flash_prog(f34);
+
+exit:
+ return ret;
+}
+
+int rmi_f34v7_probe(struct f34_data *f34)
+{
+ int ret;
+
+ /* Read bootloader version */
+ ret = rmi_read_block(f34->fn->rmi_dev,
+ f34->fn->fd.query_base_addr + V7_BOOTLOADER_ID_OFFSET,
+ f34->bootloader_id,
+ sizeof(f34->bootloader_id));
+ if (ret < 0) {
+ dev_err(&f34->fn->dev, "%s: Failed to read bootloader ID\n",
+ __func__);
+ return ret;
+ }
+
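+ /*
+ * v5/v6 bootloaders report the ID as ASCII characters while v7
+ * reports a binary revision number, hence the mixed comparison.
+ */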
+ if (f34->bootloader_id[1] == '5') {
+ f34->bl_version = 5;
+ } else if (f34->bootloader_id[1] == '6') {
+ f34->bl_version = 6;
+ } else if (f34->bootloader_id[1] == 7) {
+ f34->bl_version = 7;
+ } else {
+ dev_err(&f34->fn->dev, "%s: Unrecognized bootloader version\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(&f34->v7.blkcount, 0x00, sizeof(f34->v7.blkcount));
+ memset(&f34->v7.phyaddr, 0x00, sizeof(f34->v7.phyaddr));
+ ret = rmi_f34v7_read_queries(f34);
+ if (ret < 0)
+ return ret;
+
+ f34->v7.force_update = false;
+ return 0;
+}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index cf805b960866..dea63e2db3e6 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -200,7 +200,7 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
if (error < 0)
- return error;
+ goto unlock;
init_completion(&f54->cmd_done);
@@ -209,15 +209,18 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
queue_delayed_work(f54->workqueue, &f54->work, 0);
+unlock:
mutex_unlock(&f54->data_mutex);
- return 0;
+ return error;
}
static size_t rmi_f54_get_report_size(struct f54_data *f54)
{
- u8 rx = f54->num_rx_electrodes ? : f54->num_rx_electrodes;
- u8 tx = f54->num_tx_electrodes ? : f54->num_tx_electrodes;
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
size_t size;
switch (rmi_f54_get_reptype(f54, f54->input)) {
@@ -401,6 +404,10 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
{
+ struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+ u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
struct v4l2_pix_format *f = &f54->format;
enum rmi_f54_report_type reptype;
int ret;
@@ -415,8 +422,8 @@ static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
f54->input = i;
- f->width = f54->num_rx_electrodes;
- f->height = f54->num_tx_electrodes;
+ f->width = rx;
+ f->height = tx;
f->field = V4L2_FIELD_NONE;
f->colorspace = V4L2_COLORSPACE_RAW;
f->bytesperline = f->width * sizeof(u16);
diff --git a/drivers/input/rmi4/rmi_f55.c b/drivers/input/rmi4/rmi_f55.c
new file mode 100644
ind