-rw-r--r--Documentation/block/bfq-iosched.txt43
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt20
-rw-r--r--Documentation/devicetree/bindings/thermal/imx-thermal.txt7
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt1
-rw-r--r--Documentation/filesystems/cramfs.txt42
-rw-r--r--Documentation/filesystems/porting4
-rw-r--r--Documentation/gpu/todo.rst10
-rw-r--r--Documentation/power/runtime_pm.txt3
-rw-r--r--MAINTAINERS18
-rw-r--r--arch/arm64/include/asm/compat.h1
-rw-r--r--arch/mips/include/asm/compat-signal.h37
-rw-r--r--arch/mips/include/asm/compat.h1
-rw-r--r--arch/parisc/Kconfig19
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/compat.h1
-rw-r--r--arch/parisc/include/asm/pdc.h255
-rw-r--r--arch/parisc/include/asm/topology.h36
-rw-r--r--arch/parisc/include/uapi/asm/pdc.h256
-rw-r--r--arch/parisc/kernel/Makefile4
-rw-r--r--arch/parisc/kernel/processor.c13
-rw-r--r--arch/parisc/kernel/setup.c2
-rw-r--r--arch/parisc/kernel/signal.c9
-rw-r--r--arch/parisc/kernel/signal32.c13
-rw-r--r--arch/parisc/kernel/signal32.h2
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/kernel/topology.c153
-rw-r--r--arch/powerpc/include/asm/compat.h1
-rw-r--r--arch/powerpc/kernel/signal_32.c31
-rw-r--r--arch/s390/Kconfig19
-rw-r--r--arch/s390/configs/default_defconfig5
-rw-r--r--arch/s390/configs/gcov_defconfig4
-rw-r--r--arch/s390/configs/performance_defconfig4
-rw-r--r--arch/s390/include/asm/alternative.h20
-rw-r--r--arch/s390/include/asm/compat.h1
-rw-r--r--arch/s390/include/asm/cpu_mf.h8
-rw-r--r--arch/s390/include/asm/futex.h9
-rw-r--r--arch/s390/include/asm/lowcore.h37
-rw-r--r--arch/s390/include/asm/mmu_context.h36
-rw-r--r--arch/s390/include/asm/perf_event.h17
-rw-r--r--arch/s390/include/asm/processor.h6
-rw-r--r--arch/s390/include/asm/ptrace.h2
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/include/asm/uaccess.h29
-rw-r--r--arch/s390/include/asm/vdso.h1
-rw-r--r--arch/s390/include/uapi/asm/perf_regs.h43
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/compat_signal.c33
-rw-r--r--arch/s390/kernel/dis.c4
-rw-r--r--arch/s390/kernel/entry.S33
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/module.c15
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c768
-rw-r--r--arch/s390/kernel/perf_regs.c70
-rw-r--r--arch/s390/kernel/vdso.c44
-rw-r--r--arch/s390/kernel/vdso32/getcpu.S16
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S19
-rw-r--r--arch/s390/kernel/vdso64/getcpu.S15
-rw-r--r--arch/s390/lib/spinlock.c7
-rw-r--r--arch/s390/lib/uaccess.c90
-rw-r--r--arch/s390/mm/fault.c110
-rw-r--r--arch/s390/mm/gmap.c9
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pgalloc.c4
-rw-r--r--arch/s390/tools/Makefile2
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/include/asm/compat.h1
-rw-r--r--arch/sparc/kernel/signal32.c9
-rw-r--r--arch/sparc/kernel/sys_sparc32.c8
-rw-r--r--arch/tile/include/asm/compat.h1
-rw-r--r--arch/x86/include/asm/compat.h1
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c74
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/proc.c6
-rw-r--r--block/bfq-cgroup.c148
-rw-r--r--block/bfq-iosched.c117
-rw-r--r--block/bfq-iosched.h4
-rw-r--r--block/bfq-wf2q.c1
-rw-r--r--block/bio.c193
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-map.c7
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/base/power/runtime.c31
-rw-r--r--drivers/block/amiflop.c57
-rw-r--r--drivers/block/aoe/aoecmd.c6
-rw-r--r--drivers/block/aoe/aoedev.c9
-rw-r--r--drivers/block/floppy.c10
-rw-r--r--drivers/block/swim3.c31
-rw-r--r--drivers/char/ipmi/bt-bmc.c6
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c21
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig45
-rw-r--r--drivers/gpu/drm/amd/display/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/TODO107
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4925
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h259
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c498
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c755
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h102
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c446
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c379
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c161
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3871
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c1934
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2424
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c290
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c364
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/custom_float.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c3257
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c1899
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c1626
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1677
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2367
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c775
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c2587
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c331
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2795
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c398
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h467
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c171
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h652
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c945
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h631
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h238
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1379
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c700
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h347
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h310
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c1119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1617
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h733
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1463
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c933
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c522
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2987
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1052
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c738
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c555
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1327
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1966
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h273
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c688
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c716
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c854
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1283
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1004
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c481
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h1386
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c816
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c702
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c960
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h683
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2958
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h167
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c351
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1466
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c1203
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h374
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h387
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h282
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h559
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h557
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1763
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c392
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c1905
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile58
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c591
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c571
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c570
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h210
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c875
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c601
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h122
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h283
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/custom_float.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h481
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h635
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h175
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h289
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h183
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h311
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h197
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h392
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile48
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c289
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c356
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c170
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h193
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h310
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h143
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h49
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h154
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h149
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h466
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed32_32.h129
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h445
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h294
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h170
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h188
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h166
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h107
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h95
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1483
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h167
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c99
-rw-r--r--drivers/gpu/drm/r128/r128_state.c6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/i2c/i2c-dev.c268
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/md/bcache/stats.c8
-rw-r--r--drivers/md/dm-delay.c6
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-raid1.c8
-rw-r--r--drivers/md/md.c9
-rw-r--r--drivers/mtd/mtdchar.c24
-rw-r--r--drivers/nvme/target/fc.c6
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c6
-rw-r--r--drivers/s390/char/con3215.c6
-rw-r--r--drivers/s390/char/con3270.c10
-rw-r--r--drivers/s390/char/sclp.c45
-rw-r--r--drivers/s390/char/sclp_con.c5
-rw-r--r--drivers/s390/char/sclp_tty.c5
-rw-r--r--drivers/s390/char/sclp_vt220.c6
-rw-r--r--drivers/s390/char/tape_core.c14
-rw-r--r--drivers/s390/char/tty3270.c8
-rw-r--r--drivers/s390/cio/device.c8
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c10
-rw-r--r--drivers/s390/cio/eadm_sch.c9
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c3
-rw-r--r--drivers/s390/crypto/ap_bus.c10
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c15
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c14
-rw-r--r--drivers/scsi/cxlflash/main.c6
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h9
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c157
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c99
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c6
-rw-r--r--drivers/staging/pi433/pi433_if.c90
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c20
-rw-r--r--drivers/thermal/Kconfig3
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c387
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/hisi_thermal.c612
-rw-r--r--drivers/thermal/imx_thermal.c104
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c6
-rw-r--r--drivers/thermal/intel_bxt_pmic_thermal.c3
-rw-r--r--drivers/thermal/intel_pch_thermal.c11
-rw-r--r--drivers/thermal/intel_powerclamp.c4
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c43
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c34
-rw-r--r--drivers/thermal/rockchip_thermal.c67
-rw-r--r--drivers/thermal/step_wise.c11
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/thermal/thermal-generic-adc.c24
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c3
-rw-r--r--drivers/tty/vt/selection.c50
-rw-r--r--drivers/tty/vt/vt_ioctl.c68
-rw-r--r--drivers/vhost/scsi.c73
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/xen/pvcalls-back.c16
-rw-r--r--fs/aio.c55
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/coda/upcall.c3
-rw-r--r--fs/compat_ioctl.c123
-rw-r--r--fs/coredump.c7
-rw-r--r--fs/cramfs/Kconfig39
-rw-r--r--fs/cramfs/README31
-rw-r--r--fs/cramfs/inode.c511
-rw-r--r--fs/ecryptfs/crypto.c44
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h9
-rw-r--r--fs/ecryptfs/inode.c4
-rw-r--r--fs/ecryptfs/keystore.c48
-rw-r--r--fs/ecryptfs/main.c4
-rw-r--r--fs/ecryptfs/messaging.c13
-rw-r--r--fs/ecryptfs/miscdev.c8
-rw-r--r--fs/ecryptfs/mmap.c2
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/ext4/ioctl.c86
-rw-r--r--fs/fcntl.c16
-rw-r--r--fs/fhandle.c4
-rw-r--r--fs/file.c12
-rw-r--r--fs/internal.h1
-rw-r--r--fs/iomap.c24
-rw-r--r--fs/namei.c14
-rw-r--r--fs/nfs/cache_lib.c6
-rw-r--r--fs/nfs/cache_lib.h2
-rw-r--r--fs/nfs/callback.c14
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/client.c10
-rw-r--r--fs/nfs/delegation.c27
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c50
-rw-r--r--fs/nfs/file.c18
-rw-r--r--fs/nfs/filelayout/filelayout.c12
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c20
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h3
-rw-r--r--fs/nfs/inode.c16
-rw-r--r--fs/nfs/nfs3proc.c17
-rw-r--r--fs/nfs/nfs4_fs.h12
-rw-r--r--fs/nfs/nfs4client.c12
-rw-r--r--fs/nfs/nfs4proc.c511
-rw-r--r--fs/nfs/nfs4state.c53
-rw-r--r--fs/nfs/nfs4trace.h26
-rw-r--r--fs/nfs/nfs4xdr.c12
-rw-r--r--fs/nfs/pnfs.c44
-rw-r--r--fs/nfs/pnfs.h15
-rw-r--r--fs/nfs/pnfs_nfs.c10
-rw-r--r--fs/nfs/super.c14
-rw-r--r--fs/nfs/write.c17
-rw-r--r--fs/orangefs/orangefs-kernel.h6
-rw-r--r--fs/overlayfs/copy_up.c8
-rw-r--r--fs/overlayfs/dir.c25
-rw-r--r--fs/overlayfs/inode.c63
-rw-r--r--fs/overlayfs/namei.c59
-rw-r--r--fs/overlayfs/overlayfs.h13
-rw-r--r--fs/overlayfs/ovl_entry.h14
-rw-r--r--fs/overlayfs/readdir.c55
-rw-r--r--fs/overlayfs/super.c688
-rw-r--r--fs/overlayfs/util.c21
-rw-r--r--fs/proc/cpuinfo.c6
-rw-r--r--fs/pstore/platform.c2
-rw-r--r--fs/read_write.c21
-rw-r--r--fs/select.c68
-rw-r--r--fs/signalfd.c4
-rw-r--r--fs/statfs.c2
-rw-r--r--fs/super.c46
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c2
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h4
-rw-r--r--fs/xfs/xfs_inode.c1
-rw-r--r--include/drm/drm_dp_helper.h20
-rw-r--r--include/linux/acct.h3
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/compat.h10
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/fs.h17
-rw-r--r--include/linux/ftrace.h113
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/nfs_fs.h13
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/path.h6
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/sunrpc/rpc_rdma.h60
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/trace_events.h9
-rw-r--r--include/linux/uio.h6
-rw-r--r--include/trace/events/dma_fence.h40
-rw-r--r--include/trace/events/preemptirq.h70
-rw-r--r--include/trace/events/sunrpc.h79
-rw-r--r--include/trace/events/thermal.h4
-rw-r--r--include/trace/events/vmscan.h4
-rw-r--r--include/trace/events/xen.h35
-rw-r--r--include/uapi/linux/cramfs_fs.h26
-rw-r--r--ipc/msg.c4
-rw-r--r--ipc/sem.c4
-rw-r--r--ipc/shm.c4
-rw-r--r--ipc/syscall.c2
-rw-r--r--kernel/compat.c77
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/kallsyms.c43
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/printk/printk_safe.c15
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/signal.c77
-rw-r--r--kernel/trace/Kconfig11
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/ftrace.c354
-rw-r--r--kernel/trace/ring_buffer.c64
-rw-r--r--kernel/trace/trace.c91
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_event_perf.c82
-rw-r--r--kernel/trace/trace_events.c31
-rw-r--r--kernel/trace/trace_events_hist.c128
-rw-r--r--kernel/trace/trace_irqsoff.c133
-rw-r--r--kernel/trace/trace_kprobe.c22
-rw-r--r--kernel/trace/trace_probe.c86
-rw-r--r--kernel/trace/trace_probe.h7
-rw-r--r--kernel/trace/trace_selftest.c34
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/trace/trace_uprobe.c4
-rw-r--r--kernel/trace/tracing_map.c3
-rw-r--r--kernel/trace/tracing_map.h2
-rw-r--r--lib/iov_iter.c22
-rw-r--r--net/ceph/pagevec.c4
-rw-r--r--net/sunrpc/clnt.c14
-rw-r--r--net/sunrpc/rpc_pipe.c8
-rw-r--r--net/sunrpc/rpcb_clnt.c6
-rw-r--r--net/sunrpc/sched.c3
-rw-r--r--net/sunrpc/sunrpc_syms.c3
-rw-r--r--net/sunrpc/xprt.c1
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c6
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c19
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c27
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c363
-rw-r--r--net/sunrpc/xprtrdma/transport.c19
-rw-r--r--net/sunrpc/xprtrdma/verbs.c236
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h119
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--tools/perf/Makefile.config6
-rw-r--r--tools/perf/arch/s390/include/dwarf-regs-table.h71
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h95
-rw-r--r--tools/perf/arch/s390/util/Build3
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c118
-rw-r--r--tools/perf/arch/s390/util/dwarf-regs.c11
-rw-r--r--tools/perf/arch/s390/util/unwind-libdw.c63
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/android/Makefile46
-rw-r--r--tools/testing/selftests/android/ion/.gitignore2
-rw-r--r--tools/testing/selftests/android/ion/Makefile16
-rw-r--r--tools/testing/selftests/android/ion/README101
-rw-r--r--tools/testing/selftests/android/ion/config4
-rw-r--r--tools/testing/selftests/android/ion/ion.h143
-rwxr-xr-xtools/testing/selftests/android/ion/ion_test.sh55
-rw-r--r--tools/testing/selftests/android/ion/ionapp_export.c135
-rw-r--r--tools/testing/selftests/android/ion/ionapp_import.c88
-rw-r--r--tools/testing/selftests/android/ion/ionutils.c259
-rw-r--r--tools/testing/selftests/android/ion/ionutils.h55
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.c227
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.h35
-rwxr-xr-xtools/testing/selftests/android/run.sh3
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c1
-rw-r--r--tools/testing/selftests/cpu-hotplug/config1
-rw-r--r--tools/testing/selftests/exec/execveat.c27
-rwxr-xr-xtools/testing/selftests/firmware/fw_fallback.sh38
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh34
-rw-r--r--tools/testing/selftests/ftrace/config4
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest7
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/basic4.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-pid.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/template1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c4
-rw-r--r--tools/testing/selftests/memory-hotplug/Makefile4
-rw-r--r--tools/testing/selftests/seccomp/.gitignore1
-rw-r--r--tools/testing/selftests/timers/.gitignore2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c19
-rw-r--r--tools/testing/selftests/vm/.gitignore2
-rw-r--r--tools/thermal/tmon/Makefile18
-rw-r--r--virt/kvm/kvm_main.c7
729 files changed, 141118 insertions, 4711 deletions
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 3d6951d63489..8d8d8f06cab2 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -20,12 +20,27 @@ for that device, by setting low_latency to 0. See Section 3 for
details on how to configure BFQ for the desired tradeoff between
latency and throughput, or on how to maximize throughput.
-On average CPUs, the current version of BFQ can handle devices
-performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a
-reference, 30-50 KIOPS correspond to very high bandwidths with
-sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and
-to 120-200 MB/s with 4KB random I/O. BFQ is currently being tested on
-multi-queue devices too.
+BFQ has a nonzero overhead, which limits the maximum IOPS that a CPU
+can process for a device scheduled with BFQ. To give an idea of the
+limits on slow or average CPUs, here are the maximum IOPS BFQ can
+sustain on three different CPUs, belonging, respectively, to an
+average laptop, an old desktop, and a cheap embedded system, when
+full hierarchical support is enabled (i.e., CONFIG_BFQ_GROUP_IOSCHED
+is set) but CONFIG_DEBUG_BLK_CGROUP is not (Section 4-2):
+- Intel i7-4850HQ: 400 KIOPS
+- AMD A8-3850: 250 KIOPS
+- ARM Cortex-A53 Octa-core: 80 KIOPS
+
+If CONFIG_DEBUG_BLK_CGROUP is set (and of course full hierarchical
+support is enabled), then the sustainable throughput with BFQ
+decreases, because all blkio.bfq* statistics are created and updated
+(Section 4-2). For BFQ, this leads to the following maximum
+sustainable throughputs, on the same systems as above:
+- Intel i7-4850HQ: 310 KIOPS
+- AMD A8-3850: 200 KIOPS
+- ARM Cortex-A53 Octa-core: 56 KIOPS
+
+BFQ works for multi-queue devices too.
The table of contents follow. Impatients can just jump to Section 3.
@@ -500,6 +515,22 @@ BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group
parameter to set the weight of a group with BFQ is blkio.bfq.weight
or io.bfq.weight.
+For cgroups-v1 (blkio controller), the exact set of stat files
+created and kept up to date by bfq depends on whether
+CONFIG_DEBUG_BLK_CGROUP is set. If it is set, then bfq creates all
+the stat files documented in
+Documentation/cgroup-v1/blkio-controller.txt. If, instead,
+CONFIG_DEBUG_BLK_CGROUP is not set, then bfq creates only the
+following files:
+blkio.bfq.io_service_bytes
+blkio.bfq.io_service_bytes_recursive
+blkio.bfq.io_serviced
+blkio.bfq.io_serviced_recursive
+
+The value of CONFIG_DEBUG_BLK_CGROUP greatly influences the maximum
+throughput sustainable with bfq, because updating the blkio.bfq.*
+stats is rather costly, especially for some of the stats enabled by
+CONFIG_DEBUG_BLK_CGROUP.
+
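
As a concrete illustration of the blkio.bfq.weight parameter described
above, here is a minimal C sketch (not part of this patch; the cgroup
path and the weight value are made up for the example) that sets the
weight of a cgroup-v1 group:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical group created under the blkio controller. */
            const char *path =
                    "/sys/fs/cgroup/blkio/mygroup/blkio.bfq.weight";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* BFQ weights range from 1 to 1000; the default is 100. */
            if (write(fd, "300", 3) != 3)
                    perror("write");
            close(fd);
            return 0;
    }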
Parameters to set
-----------------
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
new file mode 100644
index 000000000000..9d43553a8d39
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
@@ -0,0 +1,20 @@
+* Broadcom STB thermal management
+
+Thermal management core, provided by the AVS TMON hardware block.
+
+Required properties:
+- compatible: must be "brcm,avs-tmon" and/or "brcm,avs-tmon-bcm7445"
+- reg: address range for the AVS TMON registers
+- interrupts: temperature monitor interrupt, for high/low threshold triggers
+- interrupt-names: should be "tmon"
+- interrupt-parent: the parent interrupt controller
+
+Example:
+
+ thermal@f04d1500 {
+ compatible = "brcm,avs-tmon-bcm7445", "brcm,avs-tmon";
+ reg = <0xf04d1500 0x28>;
+ interrupts = <0x6>;
+ interrupt-names = "tmon";
+ interrupt-parent = <&avs_host_l2_intc>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
index 3c67bd50aa10..28be51afdb6a 100644
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
@@ -7,10 +7,17 @@ Required properties:
is higher than panic threshold, system will auto reboot by SRC module.
- fsl,tempmon : phandle pointer to system controller that contains TEMPMON
control registers, e.g. ANATOP on imx6q.
+- nvmem-cells: A phandle to the calibration cells provided by ocotp.
+- nvmem-cell-names: Should be "calib", "temp_grade".
+
+Deprecated properties:
- fsl,tempmon-data : phandle pointer to fuse controller that contains TEMPMON
calibration data, e.g. OCOTP on imx6q. The details about calibration data
can be found in SoC Reference Manual.
+Direct access to OCOTP via fsl,tempmon-data is incorrect on some newer chips
+because it does not handle OCOTP clock requirements.
+
Optional properties:
- clocks : thermal sensor's clock source.
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index e3a6234fb1ac..43d744e5305e 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : should be "rockchip,<name>-tsadc"
+ "rockchip,rv1108-tsadc": found on RV1108 SoCs
"rockchip,rk3228-tsadc": found on RK3228 SoCs
"rockchip,rk3288-tsadc": found on RK3288 SoCs
"rockchip,rk3328-tsadc": found on RK3328 SoCs
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 4006298f6707..8e19a53d648b 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -45,6 +45,48 @@ you can just change the #define in mkcramfs.c, so long as you don't
mind the filesystem becoming unreadable to future kernels.
+Memory Mapped cramfs image
+--------------------------
+
+The CRAMFS_MTD Kconfig option adds support for loading data directly from
+a physical linear memory range (usually non-volatile memory like Flash)
+instead of going through the block device layer. This saves some memory
+since no intermediate buffering is necessary to hold the data before
+decompressing.
+
+When data blocks are kept uncompressed and properly aligned, they will
+automatically be mapped directly into user space whenever possible,
+providing eXecute-In-Place (XIP) from ROM for read-only segments. Data
+segments mapped read-write (which therefore have to be copied to RAM)
+may still be compressed in the cramfs image, in the same file along
+with uncompressed read-only segments. Both MMU and no-MMU systems are
+supported. This is particularly handy for tiny embedded systems with
+very tight memory constraints.
+
+The location of the cramfs image in memory is system dependent. You must
+know the proper physical address where the cramfs image is located and
+configure an MTD device for it. Also, that MTD device must be supported
+by a map driver that implements the "point" method. Examples of such
+MTD drivers are cfi_cmdset_0001 (Intel/Sharp CFI flash) or physmap
+(Flash device in physical memory map). MTD partitions based on such devices
+are fine too. Then that device should be specified with the "mtd:" prefix
+as the mount device argument. For example, to mount the MTD device named
+"fs_partition" on the /mnt directory:
+
+$ mount -t cramfs mtd:fs_partition /mnt
+
+To boot a kernel with this as the root filesystem, it suffices to
+specify something like "root=mtd:fs_partition" on the kernel command
+line.
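
For programs that need to mount such an image without /bin/mount, a
minimal sketch using the mount(2) system call directly (with the same
example partition name as above):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* The "mtd:" prefix selects the MTD device by name. */
            if (mount("mtd:fs_partition", "/mnt", "cramfs",
                      MS_RDONLY, NULL) != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }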
+
+
+Tools
+-----
+
+A version of mkcramfs that can take advantage of the latest capabilities
+described above can be found here:
+
+https://github.com/npitre/cramfs-tools
+
+
For /usr/share/magic
--------------------
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 93e0a2404532..17bb4dc28fae 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -502,10 +502,6 @@ in your dentry operations instead.
store it as cookie.
--
[mandatory]
- __fd_install() & fd_install() can now sleep. Callers should not
- hold a spinlock or other resources that do not allow a schedule.
---
-[mandatory]
any symlink that might use page_follow_link_light/page_put_link() must
have inode_nohighmem(inode) called before anything might start playing with
its pagecache. No highmem pages should end up in the pagecache of such
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 96f8ec7dbe4e..36625aa66c27 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -409,5 +409,15 @@ those drivers as simple as possible, so lots of room for refactoring:
Contact: Noralf Trønnes, Daniel Vetter
+AMD DC Display Driver
+---------------------
+
+AMD DC is the display driver for AMD devices starting with Vega. There has been
+a bunch of progress cleaning it up but there's still plenty of work to be done.
+
+See drivers/gpu/drm/amd/display/TODO for tasks.
+
+Contact: Harry Wentland, Alex Deucher
+
Outside DRM
===========
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 57af2f7963ee..937e33c46211 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -435,8 +435,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
PM status to 'suspended' and update its parent's counter of 'active'
children as appropriate (it is only valid to use this function if
'power.runtime_error' is set or 'power.disable_depth' is greater than
- zero); it will fail and return an error code if the device has a child
- which is active and the 'power.ignore_children' flag is unset
+ zero)
bool pm_runtime_active(struct device *dev);
- return true if the device's runtime PM status is 'active' or its
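
As an illustration of the two helpers documented above, here is a
hedged sketch of one way a driver might use them together (the
function is hypothetical and assumes runtime PM callbacks are already
set up for the device):

    #include <linux/pm_runtime.h>

    /*
     * Sketch: force the PM core's view of the device to 'suspended',
     * e.g. after the hardware was powered off behind the core's back.
     * pm_runtime_set_suspended() is only valid while runtime PM is
     * disabled (or power.runtime_error is set), hence the
     * disable/enable pair around it.
     */
    static void example_mark_suspended(struct device *dev)
    {
            if (!pm_runtime_active(dev))
                    return; /* already suspended or transitioning */

            pm_runtime_disable(dev);
            pm_runtime_set_suspended(dev);
            pm_runtime_enable(dev);
    }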
diff --git a/MAINTAINERS b/MAINTAINERS
index e04d108055f0..bcab816b25f4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2986,6 +2986,14 @@ S: Maintained
F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
F: drivers/cpufreq/brcmstb*
+BROADCOM STB AVS TMON DRIVER
+M: Markus Mayer <mmayer@broadcom.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+F: drivers/thermal/broadcom/brcmstb*
+
BROADCOM STB NAND FLASH DRIVER
M: Brian Norris <computersforpeace@gmail.com>
M: Kamal Dasu <kdasu.kdev@gmail.com>
@@ -3720,8 +3728,8 @@ F: drivers/cpuidle/*
F: include/linux/cpuidle.h
CRAMFS FILESYSTEM
-W: http://sourceforge.net/projects/cramfs/
-S: Orphan / Obsolete
+M: Nicolas Pitre <nico@linaro.org>
+S: Maintained
F: Documentation/filesystems/cramfs.txt
F: fs/cramfs/
@@ -5216,7 +5224,7 @@ F: drivers/video/fbdev/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h
ERRSEQ ERROR TRACKING INFRASTRUCTURE
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
S: Maintained
F: lib/errseq.c
F: include/linux/errseq.h
@@ -5404,7 +5412,7 @@ F: include/scsi/libfcoe.h
F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
M: "J. Bruce Fields" <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
@@ -7550,7 +7558,7 @@ S: Odd Fixes
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: "J. Bruce Fields" <bfields@fieldses.org>
-M: Jeff Layton <jlayton@poochiereds.net>
+M: Jeff Layton <jlayton@kernel.org>
L: linux-nfs@vger.kernel.org
W: http://nfs.sourceforge.net/
T: git git://linux-nfs.org/~bfields/linux.git
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index e39d487bf724..a3c7f271ad4c 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -215,7 +215,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
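
The same COMPAT_LOFF_T_MAX removal repeats for mips, parisc, powerpc,
s390, sparc, tile, and x86 later in this diff; presumably the
per-architecture copies are replaced by one shared definition along
these lines in include/linux/compat.h (an assumption, since that hunk
is not shown here):

    #define COMPAT_LOFF_T_MAX      0x7fffffffffffffffL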
diff --git a/arch/mips/include/asm/compat-signal.h b/arch/mips/include/asm/compat-signal.h
index e87cd243b0f4..c3b7a2550d1d 100644
--- a/arch/mips/include/asm/compat-signal.h
+++ b/arch/mips/include/asm/compat-signal.h
@@ -14,45 +14,16 @@
static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
const sigset_t *s)
{
- int err;
+ BUILD_BUG_ON(sizeof(*d) != sizeof(*s));
+ BUILD_BUG_ON(_NSIG_WORDS != 2);
- BUG_ON(sizeof(*d) != sizeof(*s));
- BUG_ON(_NSIG_WORDS != 2);
-
- err = __put_user(s->sig[0], &d->sig[0]);
- err |= __put_user(s->sig[0] >> 32, &d->sig[1]);
- err |= __put_user(s->sig[1], &d->sig[2]);
- err |= __put_user(s->sig[1] >> 32, &d->sig[3]);
-
- return err;
+ return put_compat_sigset(d, s, sizeof(*d));
}
static inline int __copy_conv_sigset_from_user(sigset_t *d,
const compat_sigset_t __user *s)
{
- int err;
- union sigset_u {
- sigset_t s;
- compat_sigset_t c;
- } *u = (union sigset_u *) d;
-
- BUG_ON(sizeof(*d) != sizeof(*s));
- BUG_ON(_NSIG_WORDS != 2);
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- err = __get_user(u->c.sig[1], &s->sig[0]);
- err |= __get_user(u->c.sig[0], &s->sig[1]);
- err |= __get_user(u->c.sig[3], &s->sig[2]);
- err |= __get_user(u->c.sig[2], &s->sig[3]);
-#endif
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- err = __get_user(u->c.sig[0], &s->sig[0]);
- err |= __get_user(u->c.sig[1], &s->sig[1]);
- err |= __get_user(u->c.sig[2], &s->sig[2]);
- err |= __get_user(u->c.sig[3], &s->sig[3]);
-#endif
-
- return err;
+ return get_compat_sigset(d, s);
}
#endif /* __ASM_COMPAT_SIGNAL_H */
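
For reference, the generic helpers used above centralize the
endianness handling that each architecture previously open-coded. A
condensed sketch of put_compat_sigset(), paraphrasing the generic
kernel/compat.c implementation (the loop is a simplification of the
real switch statement):

    int put_compat_sigset(compat_sigset_t __user *compat,
                          const sigset_t *set, unsigned int size)
    {
    #ifdef __BIG_ENDIAN
            compat_sigset_t v;
            unsigned int i;

            /*
             * Split each 64-bit sigset word into two 32-bit compat
             * words, low half first, matching the layout that 32-bit
             * user space expects.
             */
            for (i = 0; i < _NSIG_WORDS; i++) {
                    v.sig[2 * i] = set->sig[i];
                    v.sig[2 * i + 1] = set->sig[i] >> 32;
            }
            return copy_to_user(compat, &v, size) ? -EFAULT : 0;
    #else
            /* Little-endian layouts already match: plain copy. */
            return copy_to_user(compat, set, size) ? -EFAULT : 0;
    #endif
    }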
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 8e2b5b556488..49691331ada4 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -200,7 +200,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 1fd3eb5b66c6..9792d8cf4f56 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -32,6 +32,7 @@ config PARISC
select GENERIC_PCI_IOMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
@@ -60,9 +61,6 @@ config PARISC
config CPU_BIG_ENDIAN
def_bool y
-config CPU_BIG_ENDIAN
- def_bool y
-
config MMU
def_bool y
@@ -288,6 +286,21 @@ config SMP
If you don't know what to do here, say N.
+config PARISC_CPU_TOPOLOGY
+ bool "Support cpu topology definition"
+ depends on SMP
+ default y
+ help
+ Support PARISC cpu topology definition.
+
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on PARISC_CPU_TOPOLOGY && PA8X00
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
default y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 01946ebaff72..e2364ff59180 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -22,7 +22,7 @@ KBUILD_IMAGE := vmlinuz
KBUILD_DEFCONFIG := default_defconfig
NM = sh $(srctree)/arch/parisc/nm
-CHECKFLAGS += -D__hppa__=1
+CHECKFLAGS += -D__hppa__=1 -mbig-endian
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
export LIBGCC
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 07f48827afda..acf8aa07cbe0 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -195,7 +195,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index efee44a5e063..339e83ddb39e 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -18,261 +18,6 @@ extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
-struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
- unsigned long actcnt; /* actual number of bytes returned */
- unsigned long maxcnt; /* maximum number of bytes that could be returned */
-};
-
-struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */
- unsigned long ccr_functional;
- unsigned long ccr_present;
- unsigned long revision;
- unsigned long model;
-};
-
-struct pdc_model { /* for PDC_MODEL */
- unsigned long hversion;
- unsigned long sversion;
- unsigned long hw_id;
- unsigned long boot_id;
- unsigned long sw_id;
- unsigned long sw_cap;
- unsigned long arch_rev;
- unsigned long pot_key;
- unsigned long curr_key;
-};
-
-struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
- unsigned long
-#ifdef CONFIG_64BIT
- cc_padW:32,
-#endif
- cc_alias: 4, /* alias boundaries for virtual addresses */
- cc_block: 4, /* to determine most efficient stride */
- cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
- cc_shift: 2, /* how much to shift cc_block left */
- cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
- cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
- cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
- cc_pad1 : 10, /* reserved */
- cc_hv : 3; /* hversion dependent */
-};
-
-struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */
- unsigned long tc_pad0:12, /* reserved */
-#ifdef CONFIG_64BIT
- tc_padW:32,
-#endif
- tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */
- tc_hv : 1, /* HV */
- tc_page : 1, /* 0 = 2K page-size-machine, 1 = 4k page size */
- tc_cst : 3, /* 0 = incoherent operations, else coherent operations */
- tc_aid : 5, /* ITLB: width of access ids of processor (encoded!) */
- tc_sr : 8; /* ITLB: width of space-registers (encoded) */
-};
-
-struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */
- /* I-cache */
- unsigned long ic_size; /* size in bytes */
- struct pdc_cache_cf ic_conf; /* configuration */
- unsigned long ic_base; /* base-addr */
- unsigned long ic_stride;
- unsigned long ic_count;
- unsigned long ic_loop;
- /* D-cache */
- unsigned long dc_size; /* size in bytes */
- struct pdc_cache_cf dc_conf; /* configuration */
- unsigned long dc_base; /* base-addr */
- unsigned long dc_stride;
- unsigned long dc_count;
- unsigned long dc_loop;
- /* Instruction-TLB */
- unsigned long it_size; /* number of entries in I-TLB */
- struct pdc_tlb_cf it_conf; /* I-TLB-configuration */
- unsigned long it_sp_base;
- unsigned long it_sp_stride;
- unsigned long it_sp_count;
- unsigned long it_off_base;
- unsigned long it_off_stride;
- unsigned long it_off_count;
- unsigned long it_loop;
- /* data-TLB */
- unsigned long dt_size; /* number of entries in D-TLB */
- struct pdc_tlb_cf dt_conf; /* D-TLB-configuration */
- unsigned long dt_sp_base;
- unsigned long dt_sp_stride;
- unsigned long dt_sp_count;
- unsigned long dt_off_base;
- unsigned long dt_off_stride;
- unsigned long dt_off_count;
- unsigned long dt_loop;
-};
-
-#if 0
-/* If you start using the next struct, you'll have to adjust it to
- * work with 64-bit firmware I think -PB
- */
-struct pdc_iodc { /* PDC_IODC */
- unsigned char hversion_model;
- unsigned char hversion;
- unsigned char spa;
- unsigned char type;
- unsigned int sversion_rev:4;
- unsigned int sversion_model:19;
- unsigned int sversion_opt:8;
- unsigned char rev;
- unsigned char dep;
- unsigned char features;
- unsigned char pad1;
- unsigned int checksum:16;
- unsigned int length:16;
- unsigned int pad[15];
-} __attribute__((aligned(8))) ;
-#endif
-
-#ifndef CONFIG_PA20
-/* no BLTBs in pa2.0 processors */
-struct pdc_btlb_info_range {
- __u8 res00;
- __u8 num_i;
- __u8 num_d;
- __u8 num_comb;
-};
-
-struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
- unsigned int min_size; /* minimum size of BTLB in pages */
- unsigned int max_size; /* maximum size of BTLB in pages */
- struct pdc_btlb_info_range fixed_range_info;
- struct pdc_btlb_info_range variable_range_info;
-};
-
-#endif /* !CONFIG_PA20 */
-
-struct pdc_mem_retinfo { /* PDC_MEM/PDC_MEM_MEMINFO (return info) */
- unsigned long pdt_size;
- unsigned long pdt_entries;
- unsigned long pdt_status;
- unsigned long first_dbe_loc;
- unsigned long good_mem;
-};
-
-struct pdc_mem_read_pdt { /* PDC_MEM/PDC_MEM_READ_PDT (return info) */
- unsigned long pdt_entries;
-};
-
-#ifdef CONFIG_64BIT
-struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
- unsigned long entries_returned;
- unsigned long entries_total;
-};
-
-struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
- unsigned long paddr;
- unsigned int pages;
- unsigned int reserved;
-};
-#endif /* CONFIG_64BIT */
-
-struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
- unsigned long mod_addr;
- unsigned long mod_pgs;
- unsigned long add_addrs;
-};
-
-struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
- unsigned long mod_addr;
- unsigned long mod_pgs;
-};
-
-struct pdc_initiator { /* PDC_INITIATOR */
- int host_id;
- int factor;
- int width;
- int mode;
-};
-
-struct hardware_path {
- char flags; /* see bit definitions below */
- char bc[6]; /* Bus Converter routing info to a specific */
- /* I/O adaptor (< 0 means none, > 63 resvd) */
- char mod; /* fixed field of specified module */
-};
-
-/*
- * Device path specifications used by PDC.
- */
-struct pdc_module_path {
- struct hardware_path path;
- unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
-};
-
-#ifndef CONFIG_PA20
-/* Only used on some pre-PA2.0 boxes */
-struct pdc_memory_map { /* PDC_MEMORY_MAP */
- unsigned long hpa; /* mod's register set address */
- unsigned long more_pgs; /* number of additional I/O pgs */
-};
-#endif
-
-struct pdc_tod {
- unsigned long tod_sec;
- unsigned long tod_usec;
-};
-
-/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
-
-struct pdc_hpmc_pim_11 { /* PDC_PIM */
- __u32 gr[32];
- __u32 cr[32];
- __u32 sr[8];
- __u32 iasq_back;
- __u32 iaoq_back;
- __u32 check_type;
- __u32 cpu_state;
- __u32 rsvd1;
- __u32 cache_check;
- __u32 tlb_check;
- __u32 bus_check;
- __u32 assists_check;
- __u32 rsvd2;
- __u32 assist_state;
- __u32 responder_addr;
- __u32 requestor_addr;
- __u32 path_info;
- __u64 fr[32];
-};
-
-/*
- * architected results from PDC_PIM/transfer hpmc on a PA2.0 machine
- *
- * Note that PDC_PIM doesn't care whether or not wide mode was enabled
- * so the results are different on PA1.1 vs. PA2.0 when in narrow mode.
- *
- * Note also that there are unarchitected results available, which
- * are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since
- * the firmware is probably the best way of printing hversion dependent
- * data.
- */
-
-struct pdc_hpmc_pim_20 { /* PDC_PIM */
- __u64 gr[32];
- __u64 cr[32];
- __u64 sr[8];
- __u64 iasq_back;
- __u64 iaoq_back;
- __u32 check_type;
- __u32 cpu_state;
- __u32 cache_check;
- __u32 tlb_check;
- __u32 bus_check;
- __u32 assists_check;
- __u32 assist_state;
- __u32 path_info;
- __u64 responder_addr;
- __u64 requestor_addr;
- __u64 fr[32];
-};
-
void pdc_console_init(void); /* in pdc_console.c */
void pdc_console_restart(void);
diff --git a/arch/parisc/include/asm/topology.h b/arch/parisc/include/asm/topology.h
new file mode 100644
index 000000000000..6f0750c74e47
--- /dev/null
+++ b/arch/parisc/include/asm/topology.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_PARISC_TOPOLOGY_H
+#define _ASM_PARISC_TOPOLOGY_H
+
+#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_parisc {
+ int thread_id;
+ int core_id;
+ int socket_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cputopo_parisc cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_PARISC_TOPOLOGY_H */
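A minimal sketch of how these accessors compose with the generic cpumask API; the dump_topology() helper is hypothetical and only exercises the macros defined above.

	#include <linux/cpumask.h>
	#include <linux/printk.h>
	#include <asm/topology.h>

	/* Hypothetical consumer: log package/core ids and core siblings. */
	static void dump_topology(void)
	{
		unsigned int cpu, sib;

		for_each_online_cpu(cpu) {
			pr_info("cpu%u: package %d, core %d\n", cpu,
				topology_physical_package_id(cpu),
				topology_core_id(cpu));
			for_each_cpu(sib, topology_core_cpumask(cpu))
				pr_info("cpu%u: core sibling %u\n", cpu, sib);
		}
	}
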
diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h
index 0ad117617f1a..593eeb573138 100644
--- a/arch/parisc/include/uapi/asm/pdc.h
+++ b/arch/parisc/include/uapi/asm/pdc.h
@@ -16,6 +16,7 @@
#define PDC_ERROR -3 /* Call could not complete without an error */
#define PDC_NE_MOD -5 /* Module not found */
#define PDC_NE_CELL_MOD -7 /* Cell module not found */
+#define PDC_NE_BOOTDEV -9 /* Cannot locate a console device or boot device */
#define PDC_INVALID_ARG -10 /* Called with an invalid argument */
#define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
#define PDC_NOT_NARROW -17 /* Narrow mode not supported */
@@ -340,9 +341,6 @@
#if !defined(__ASSEMBLY__)
-#include <linux/types.h>
-
-
/* flags of the device_path */
#define PF_AUTOBOOT 0x80
#define PF_AUTOSEARCH 0x40
@@ -418,9 +416,255 @@ struct zeropage {
int pad430[116];
/* [0x600] processor dependent */
- __u32 pad600[1];
- __u32 proc_sti; /* pointer to STI ROM */
- __u32 pad608[126];
+ unsigned int pad600[1];
+ unsigned int proc_sti; /* pointer to STI ROM */
+ unsigned int pad608[126];
+};
+
+struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
+ unsigned long actcnt; /* actual number of bytes returned */
+ unsigned long maxcnt; /* maximum number of bytes that could be returned */
+};
+
+struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */
+ unsigned long ccr_functional;
+ unsigned long ccr_present;
+ unsigned long revision;
+ unsigned long model;
+};
+
+struct pdc_model { /* for PDC_MODEL */
+ unsigned long hversion;
+ unsigned long sversion;
+ unsigned long hw_id;
+ unsigned long boot_id;
+ unsigned long sw_id;
+ unsigned long sw_cap;
+ unsigned long arch_rev;
+ unsigned long pot_key;
+ unsigned long curr_key;
+};
+
+struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
+ unsigned long
+#ifdef __LP64__
+ cc_padW:32,
+#endif
+ cc_alias: 4, /* alias boundaries for virtual addresses */
+ cc_block: 4, /* to determine most efficient stride */
+ cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
+ cc_shift: 2, /* how much to shift cc_block left */
+ cc_wt : 1, /* 0 = WT-Dcache, 1 = WB-Dcache */
+ cc_sh : 2, /* 0 = separate I/D-cache, else shared I/D-cache */
+ cc_cst : 3, /* 0 = incoherent D-cache, 1=coherent D-cache */
+ cc_pad1 : 10, /* reserved */
+ cc_hv : 3; /* hversion dependent */
+};
+
+struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */
+ unsigned long tc_pad0:12, /* reserved */
+#ifdef __LP64__
+ tc_padW:32,
+#endif
+ tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */
+ tc_hv : 1, /* HV */
+ tc_page : 1, /* 0 = 2K page-size-machine, 1 = 4k page size */
+ tc_cst : 3, /* 0 = incoherent operations, else coherent operations */
+ tc_aid : 5, /* ITLB: width of access ids of processor (encoded!) */
+ tc_sr : 8; /* ITLB: width of space-registers (encoded) */
+};
+
+struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */
+ /* I-cache */
+ unsigned long ic_size; /* size in bytes */
+ struct pdc_cache_cf ic_conf; /* configuration */
+ unsigned long ic_base; /* base-addr */
+ unsigned long ic_stride;
+ unsigned long ic_count;
+ unsigned long ic_loop;
+ /* D-cache */
+ unsigned long dc_size; /* size in bytes */
+ struct pdc_cache_cf dc_conf; /* configuration */
+ unsigned long dc_base; /* base-addr */
+ unsigned long dc_stride;
+ unsigned long dc_count;
+ unsigned long dc_loop;
+ /* Instruction-TLB */
+ unsigned long it_size; /* number of entries in I-TLB */
+ struct pdc_tlb_cf it_conf; /* I-TLB-configuration */
+ unsigned long it_sp_base;
+ unsigned long it_sp_stride;
+ unsigned long it_sp_count;
+ unsigned long it_off_base;
+ unsigned long it_off_stride;
+ unsigned long it_off_count;
+ unsigned long it_loop;
+ /* data-TLB */
+ unsigned long dt_size; /* number of entries in D-TLB */
+ struct pdc_tlb_cf dt_conf; /* D-TLB-configuration */
+ unsigned long dt_sp_base;
+ unsigned long dt_sp_stride;
+ unsigned long dt_sp_count;
+ unsigned long dt_off_base;
+ unsigned long dt_off_stride;
+ unsigned long dt_off_count;
+ unsigned long dt_loop;
+};
+
+/* Might need adjustment to work with 64-bit firmware */
+struct pdc_iodc { /* PDC_IODC */
+ unsigned char hversion_model;
+ unsigned char hversion;
+ unsigned char spa;
+ unsigned char type;
+ unsigned int sversion_rev:4;
+ unsigned int sversion_model:19;
+ unsigned int sversion_opt:8;
+ unsigned char rev;
+ unsigned char dep;
+ unsigned char features;
+ unsigned char pad1;
+ unsigned int checksum:16;
+ unsigned int length:16;
+ unsigned int pad[15];
+} __attribute__((aligned(8))) ;
+
+/* no BLTBs in pa2.0 processors */
+struct pdc_btlb_info_range {
+ unsigned char res00;
+ unsigned char num_i;
+ unsigned char num_d;
+ unsigned char num_comb;
+};
+
+struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
+ unsigned int min_size; /* minimum size of BTLB in pages */
+ unsigned int max_size; /* maximum size of BTLB in pages */
+ struct pdc_btlb_info_range fixed_range_info;
+ struct pdc_btlb_info_range variable_range_info;
+};
+
+struct pdc_mem_retinfo { /* PDC_MEM/PDC_MEM_MEMINFO (return info) */
+ unsigned long pdt_size;
+ unsigned long pdt_entries;
+ unsigned long pdt_status;
+ unsigned long first_dbe_loc;
+ unsigned long good_mem;
+};
+
+struct pdc_mem_read_pdt { /* PDC_MEM/PDC_MEM_READ_PDT (return info) */
+ unsigned long pdt_entries;
+};
+
+#ifdef __LP64__
+struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
+ unsigned long entries_returned;
+ unsigned long entries_total;
+};
+
+struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
+ unsigned long paddr;
+ unsigned int pages;
+ unsigned int reserved;
+};
+#endif /* __LP64__ */
+
+struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
+ unsigned long mod_addr;
+ unsigned long mod_pgs;
+ unsigned long add_addrs;
+};
+
+struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
+ unsigned long mod_addr;
+ unsigned long mod_pgs;
+};
+
+struct pdc_initiator { /* PDC_INITIATOR */
+ int host_id;
+ int factor;
+ int width;
+ int mode;
+};
+
+struct hardware_path {
+ char flags; /* see bit definitions below */
+ char bc[6]; /* Bus Converter routing info to a specific */
+ /* I/O adaptor (< 0 means none, > 63 resvd) */
+ char mod; /* fixed field of specified module */
+};
+
+/*
+ * Device path specifications used by PDC.
+ */
+struct pdc_module_path {
+ struct hardware_path path;
+ unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
+};
+
+/* Only used on some pre-PA2.0 boxes */
+struct pdc_memory_map { /* PDC_MEMORY_MAP */
+ unsigned long hpa; /* mod's register set address */
+ unsigned long more_pgs; /* number of additional I/O pgs */
+};
+
+struct pdc_tod {
+ unsigned long tod_sec;
+ unsigned long tod_usec;
+};
+
+/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
+
+struct pdc_hpmc_pim_11 { /* PDC_PIM */
+ unsigned int gr[32];
+ unsigned int cr[32];
+ unsigned int sr[8];
+ unsigned int iasq_back;
+ unsigned int iaoq_back;
+ unsigned int check_type;
+ unsigned int cpu_state;
+ unsigned int rsvd1;
+ unsigned int cache_check;
+ unsigned int tlb_check;
+ unsigned int bus_check;
+ unsigned int assists_check;
+ unsigned int rsvd2;
+ unsigned int assist_state;
+ unsigned int responder_addr;
+ unsigned int requestor_addr;
+ unsigned int path_info;
+ unsigned long long fr[32];
+};
+
+/*
+ * architected results from PDC_PIM/transfer hpmc on a PA2.0 machine
+ *
+ * Note that PDC_PIM doesn't care whether or not wide mode was enabled
+ * so the results are different on PA1.1 vs. PA2.0 when in narrow mode.
+ *
+ * Note also that there are unarchitected results available, which
+ * are hversion dependent. Do a "ser pim 0 hpmc" after rebooting, since
+ * the firmware is probably the best way of printing hversion dependent
+ * data.
+ */
+
+struct pdc_hpmc_pim_20 { /* PDC_PIM */
+ unsigned long long gr[32];
+ unsigned long long cr[32];
+ unsigned long long sr[8];
+ unsigned long long iasq_back;
+ unsigned long long iaoq_back;
+ unsigned int check_type;
+ unsigned int cpu_state;
+ unsigned int cache_check;
+ unsigned int tlb_check;
+ unsigned int bus_check;
+ unsigned int assists_check;
+ unsigned int assist_state;
+ unsigned int path_info;
+ unsigned long long responder_addr;
+ unsigned long long requestor_addr;
+ unsigned long long fr[32];
};
#endif /* !defined(__ASSEMBLY__) */
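Note the conversions that accompany the move into uapi: __u32/__u64 become plain unsigned types so <linux/types.h> can be dropped, and CONFIG_64BIT becomes __LP64__, since kernel config symbols are invisible to userspace. A trivial stand-alone check of the compiler-provided macro; the program itself is illustrative only:

	#include <stdio.h>

	int main(void)
	{
	#ifdef __LP64__
		puts("LP64 ABI: the 64-bit-only PDC_MEM_TABLE structs are visible");
	#else
		puts("ILP32 ABI: the 64-bit-only PDC_MEM_TABLE structs are hidden");
	#endif
		return 0;
	}
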
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 649dc3eda448..eafd06ab59ef 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -9,8 +9,7 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
- process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
- topology.o
+ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -30,5 +29,6 @@ obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
+obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e120d63c1b28..45cc65902fce 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -184,6 +184,9 @@ static int __init processor_probe(struct parisc_device *dev)
p->txn_addr = txn_addr; /* save CPU IRQ address */
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
+
+ store_cpu_topology(cpuid);
+
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
@@ -325,6 +328,8 @@ int __init init_per_cpu(int cpunum)
set_firmware_width();
ret = pdc_coproc_cfg(&coproc_cfg);
+ store_cpu_topology(cpunum);
+
if(ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
@@ -388,6 +393,14 @@ show_cpuinfo (struct seq_file *m, void *v)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
+#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+ seq_printf(m, "physical id\t: %d\n",
+ topology_physical_package_id(cpu));
+ seq_printf(m, "siblings\t: %d\n",
+ cpumask_weight(topology_core_cpumask(cpu)));
+ seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
+#endif
+
seq_printf(m, "capabilities\t:");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
seq_puts(m, " os32");
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index f7d0c3b33d70..0e9675f857a5 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -408,6 +408,8 @@ void __init start_parisc(void)
cpunum = smp_processor_id();
+ init_cpu_topology();
+
set_firmware_width_unlocked();
ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index f2a4038e275b..342073f44d3f 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -93,7 +93,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
- compat_sigset_t compat_set;
struct compat_rt_sigframe __user * compat_frame;
if (is_compat_task())
@@ -114,9 +113,8 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
if (is_compat_task()) {
DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
- if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
+ if (get_compat_sigset(&set, &compat_frame->uc.uc_sigmask))
goto give_sigsegv;
- sigset_32to64(&set,&compat_set);
} else
#endif
{
@@ -238,7 +236,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
int err = 0;
#ifdef CONFIG_64BIT
struct compat_rt_sigframe __user * compat_frame;
- compat_sigset_t compat_set;
#endif
usp = (regs->gr[30] & ~(0x01UL));
@@ -261,8 +258,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs, in_syscall);
- sigset_64to32(&compat_set,set);
- err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
+ err |= put_compat_sigset(&compat_frame->uc.uc_sigmask, set,
+ sizeof(compat_sigset_t));
} else
#endif
{
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 9e0cb6a577d6..41afa9cd1f55 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -46,19 +46,6 @@
#define DBG(LEVEL, ...)
#endif
-inline void
-sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
-{
- s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
-}
-
-inline void
-sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
-{
- s32->sig[0] = s64->sig[0] & 0xffffffffUL;
- s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
-}
-
long
restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs)
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index af51d4ccee42..719e7417732c 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -79,8 +79,6 @@ struct compat_rt_sigframe {
#define FUNCTIONCALLFRAME32 48
#define PARISC_RT_SIGFRAME_SIZE32 (((sizeof(struct compat_rt_sigframe) + FUNCTIONCALLFRAME32) + SIGFRAME32) & -SIGFRAME32)
-void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
-void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
long restore_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 41e60a9c7db2..e775f80ae28c 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -690,15 +690,15 @@ cas_action:
/* ELF32 Process entry path */
lws_compare_and_swap_2:
#ifdef CONFIG_64BIT
- /* Clip the input registers */
+ /* Clip the input registers. We don't need to clip %r23 as we
+ only use it for word operations */
depdi 0, 31, 32, %r26
depdi 0, 31, 32, %r25
depdi 0, 31, 32, %r24
- depdi 0, 31, 32, %r23
#endif
/* Check the validity of the size pointer */
- subi,>>= 4, %r23, %r0
+ subi,>>= 3, %r23, %r0
b,n lws_exit_nosys
/* Jump to the functions which will load the old and new values into
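With %r23 used only as a word-sized size index, the clip is unnecessary and the bound check tightens from 4 to 3. A C model of the new check, assuming (as the surrounding code suggests) that %r23 carries log2 of the operand size, 0..3 for 1/2/4/8-byte operations:

	#include <assert.h>
	#include <errno.h>

	/* Model of "subi,>>= 3, %r23, %r0 ; b,n lws_exit_nosys":
	 * any size index above 3 falls through to the nosys exit.
	 */
	static int lws_size_index_ok(unsigned long r23)
	{
		return r23 <= 3 ? 0 : -ENOSYS;
	}

	int main(void)
	{
		assert(lws_size_index_ok(3) == 0);
		assert(lws_size_index_ok(4) == -ENOSYS);
		return 0;
	}
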
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index f5159381fdd6..0a10e4ddc528 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -1,37 +1,142 @@
/*
- * arch/parisc/kernel/topology.c - Populate sysfs with topology information
+ * arch/parisc/kernel/topology.c
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2017 Helge Deller <deller@gmx.de>
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
+ * based on arch/arm/kernel/topology.c
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/sched/topology.h>
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#include <asm/topology.h>
-static int __init topology_init(void)
+/*
+ * cpu topology table
+ */
+struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
{
- int num;
+ return &cpu_topology[cpu].core_sibling;
+}
+
+static void update_siblings_masks(unsigned int cpuid)
+{
+ struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->socket_id != cpu_topo->socket_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+ smp_wmb();
+}
+
+static int dualcores_found __initdata;
+
+/*
+ * store_cpu_topology is called at boot when only one cpu is running,
+ * and with the cpu_hotplug.lock mutex held once several cpus have booted,
+ * which prevents simultaneous write access to the cpu_topology array
+ */
+void __init store_cpu_topology(unsigned int cpuid)
+{
+ struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
+ struct cpuinfo_parisc *p;
+ int max_socket = -1;
+ unsigned long cpu;
+
+ /* If the cpu topology has been already set, just return */
+ if (cpuid_topo->core_id != -1)
+ return;
- for_each_present_cpu(num) {
- register_cpu(&per_cpu(cpu_devices, num), num);
+ /* create cpu topology mapping */
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = 0;
+
+ p = &per_cpu(cpu_data, cpuid);
+ for_each_online_cpu(cpu) {
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
+
+ if (cpu == cpuid) /* ignore current cpu */
+ continue;
+
+ if (cpuinfo->cpu_loc == p->cpu_loc) {
+ cpuid_topo->core_id = cpu_topology[cpu].core_id;
+ if (p->cpu_loc) {
+ cpuid_topo->core_id++;
+ cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
+ dualcores_found = 1;
+ continue;
+ }
+ }
+
+ if (cpuid_topo->socket_id == -1)
+ max_socket = max(max_socket, cpu_topology[cpu].socket_id);
}
- return 0;
+
+ if (cpuid_topo->socket_id == -1)
+ cpuid_topo->socket_id = max_socket + 1;
+
+ update_siblings_masks(cpuid);
+
+ pr_info("CPU%u: thread %d, cpu %d, socket %d\n",
+ cpuid, cpu_topology[cpuid].thread_id,
+ cpu_topology[cpuid].core_id,
+ cpu_topology[cpuid].socket_id);
}
-subsys_initcall(topology_init);
+static struct sched_domain_topology_level parisc_mc_topology[] = {
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevents simultaneous write access to the cpu_topology array
+ */
+void __init init_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ /* init core mask and capacity */
+ for_each_possible_cpu(cpu) {
+ struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->socket_id = -1;
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ }
+ smp_wmb();
+
+ /* Set scheduler topology descriptor */
+ if (dualcores_found)
+ set_sched_topology(parisc_mc_topology);
+}
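A compact userspace model of the sibling-mask update, assuming the same matching rules as update_siblings_masks() above (a socket match makes a core sibling; a socket and core match makes a thread sibling). The names and the 4-CPU layout are illustrative:

	#include <assert.h>
	#include <stdio.h>

	#define NCPUS 4

	struct topo { int socket_id, core_id; unsigned core_sib, thread_sib; };
	static struct topo topo[NCPUS] = {
		{ 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 },	/* two dual-core sockets */
	};

	static void update_siblings(int cpuid)
	{
		for (int cpu = 0; cpu < NCPUS; cpu++) {
			if (topo[cpu].socket_id != topo[cpuid].socket_id)
				continue;
			topo[cpu].core_sib |= 1u << cpuid;
			topo[cpuid].core_sib |= 1u << cpu;
			if (topo[cpu].core_id != topo[cpuid].core_id)
				continue;
			topo[cpu].thread_sib |= 1u << cpuid;
			topo[cpuid].thread_sib |= 1u << cpu;
		}
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NCPUS; cpu++)
			update_siblings(cpu);
		assert(topo[0].core_sib == 0x3);	/* cpus 0,1 share socket 0 */
		assert(topo[0].thread_sib == 0x1);	/* no SMT in this layout */
		for (int cpu = 0; cpu < NCPUS; cpu++)
			printf("cpu%d: core_sib=%#x thread_sib=%#x\n",
			       cpu, topo[cpu].core_sib, topo[cpu].thread_sib);
		return 0;
	}
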
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index a035b1e5dfa7..8a2aecfe9b02 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -185,7 +185,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 16d16583cf11..9ffd73296f64 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -94,40 +94,13 @@
*/
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
- compat_sigset_t cset;
-
- switch (_NSIG_WORDS) {
- case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
- cset.sig[7] = set->sig[3] >> 32;
- case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
- cset.sig[5] = set->sig[2] >> 32;
- case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
- cset.sig[3] = set->sig[1] >> 32;
- case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
- cset.sig[1] = set->sig[0] >> 32;
- }
- return copy_to_user(uset, &cset, sizeof(*uset));
+ return put_compat_sigset(uset, set, sizeof(*uset));
}
static inline int get_sigset_t(sigset_t *set,
const compat_sigset_t __user *uset)
{
- compat_sigset_t s32;
-
- if (copy_from_user(&s32, uset, sizeof(*uset)))
- return -EFAULT;
-
- /*
- * Swap the 2 words of the 64-bit sigset_t (they are stored
- * in the "wrong" endian in 32-bit user storage).
- */
- switch (_NSIG_WORDS) {
- case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
- case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
- case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
- case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
- }
- return 0;
+ return get_compat_sigset(set, uset);
}
#define to_user_ptr(p) ptr_to_compat(p)
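Three architectures in this series (parisc, powerpc, s390) drop the same open-coded conversion in favour of the generic get_compat_sigset()/put_compat_sigset() helpers. A userspace model of the word split they all performed, matching the removed code above (low 32 bits in sig[0], high 32 bits in sig[1]):

	#include <assert.h>
	#include <stdint.h>

	static void put_compat_words(uint32_t out[2], uint64_t set)
	{
		out[0] = (uint32_t)set;		/* low word first */
		out[1] = (uint32_t)(set >> 32);
	}

	static uint64_t get_compat_words(const uint32_t in[2])
	{
		return (uint64_t)in[0] | ((uint64_t)in[1] << 32);
	}

	int main(void)
	{
		uint32_t w[2];

		put_compat_words(w, 0x123456789abcdef0ULL);
		assert(get_compat_words(w) == 0x123456789abcdef0ULL);
		return 0;
	}
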
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 863a62a6de3c..829c67986db7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -148,6 +148,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GCC_PLUGINS
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -158,6 +159,8 @@ config S390
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
@@ -538,22 +541,6 @@ config ARCH_RANDOM
If unsure, say Y.
-config ALTERNATIVES
- def_bool y
- prompt "Patch optimized instructions for running CPU type"
- help
- When enabled the kernel code is compiled with additional
- alternative instructions blocks optimized for newer CPU types.
- These alternative instructions blocks are patched at kernel boot
- time when running CPU supports them. This mechanism is used to
- optimize some critical code paths (i.e. spinlocks) for newer CPUs
- even if kernel is build to support older machine generations.
-
- This mechanism could be disabled by appending "noaltinstr"
- option to the kernel command line.
-
- If unsure, say Y.
-
endmenu
menu "Memory setup"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 84eccc88c065..5af8458951cf 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -629,6 +629,7 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_DMA_API_DEBUG=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -637,14 +638,12 @@ CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -660,13 +659,11 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index f7202358e6d7..d52eafe57ae8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -587,7 +587,6 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -605,13 +604,10 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 03100fe74ea8..20ed149e1137 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -585,7 +585,6 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -603,13 +602,10 @@ CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index 6c268f6a51d3..a72002056b54 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -15,14 +15,9 @@ struct alt_instr {
u8 replacementlen; /* length of new instruction */
} __packed;
-#ifdef CONFIG_ALTERNATIVES
-extern void apply_alternative_instructions(void);
-extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
-#else
-static inline void apply_alternative_instructions(void) {};
-static inline void apply_alternatives(struct alt_instr *start,
- struct alt_instr *end) {};
-#endif
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
/*
* |661: |662: |6620 |663:
* +-----------+---------------------+
@@ -109,7 +104,6 @@ static inline void apply_alternatives(struct alt_instr *start,
b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
INSTR_LEN_SANITY_CHECK(altinstr_len(num))
-#ifdef CONFIG_ALTERNATIVES
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, altinstr, facility) \
".pushsection .altinstr_replacement, \"ax\"\n" \
@@ -130,14 +124,6 @@ static inline void apply_alternatives(struct alt_instr *start,
ALTINSTR_ENTRY(facility1, 1) \
ALTINSTR_ENTRY(facility2, 2) \
".popsection\n"
-#else
-/* Alternative instructions are disabled, let's put just oldinstr in */
-#define ALTERNATIVE(oldinstr, altinstr, facility) \
- oldinstr "\n"
-
-#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
- oldinstr "\n"
-#endif
/*
* Alternative instructions for different CPU types or capabilities.
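With CONFIG_ALTERNATIVES gone, the macro is unconditionally live and call sites are unchanged. A representative sketch of inline-asm usage; the facility number and the replacement opcode are illustrative values, not taken from this diff:

	/* Sketch: execute the 4-byte replacement only when facility 49
	 * is installed on the running machine; otherwise keep the nop.
	 */
	static inline void relax_hint(void)
	{
		asm volatile(ALTERNATIVE("nop", ".long 0xb2fa0070", 49)
			     : : : "memory");
	}
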
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 1b60eb3676d5..5e6a63641a5f 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -263,7 +263,6 @@ typedef struct compat_siginfo {
#define si_overrun _sifields._timer._overrun
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 05480e4cc5ca..792cda339af1 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -144,6 +144,12 @@ struct hws_trailer_entry {
unsigned long long progusage2; /* */
} __packed;
+/* Load program parameter */
+static inline void lpp(void *pp)
+{
+ asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory");
+}
+
/* Query counter information */
static inline int qctri(struct cpumf_ctr_info *info)
{
@@ -167,7 +173,7 @@ static inline int lcctl(u64 ctl)
" .insn s,0xb2840000,%1\n"
" ipm %0\n"
" srl %0,28\n"
- : "=d" (cc) : "m" (ctl) : "cc");
+ : "=d" (cc) : "Q" (ctl) : "cc");
return cc;
}
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 9b5a3469fed9..5e97a4353147 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -26,9 +26,9 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
int oldval = 0, newval, ret;
+ mm_segment_t old_fs;
- load_kernel_asce();
-
+ old_fs = enable_sacf_uaccess();
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -55,6 +55,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
ret = -ENOSYS;
}
pagefault_enable();
+ disable_sacf_uaccess(old_fs);
if (!ret)
*oval = oldval;
@@ -65,9 +66,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ mm_segment_t old_fs;
int ret;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
"0: cs %1,%4,0(%5)\n"
@@ -77,6 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory");
+ disable_sacf_uaccess(old_fs);
*uval = oldval;
return ret;
}
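Both futex helpers now follow the same bracket: enable the sacf-based uaccess mode, perform the in-atomic user access, then restore the previous segment. A sketch of the pattern with a hypothetical caller name:

	/* Hypothetical caller demonstrating the enable/disable pairing. */
	static inline int poke_user_word(u32 __user *uaddr, u32 val)
	{
		mm_segment_t old_fs;
		int ret;

		old_fs = enable_sacf_uaccess();
		pagefault_disable();
		ret = __put_user(val, uaddr);
		pagefault_enable();
		disable_sacf_uaccess(old_fs);
		return ret;
	}
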
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 9eb36a1592c7..ec6592e8ba36 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -115,33 +115,28 @@ struct lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0378 */
__u64 user_asce; /* 0x0380 */
+ __u64 vdso_asce; /* 0x0388 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0388 */
- __u32 current_pid; /* 0x038c */
+ __u32 lpp; /* 0x0390 */
+ __u32 current_pid; /* 0x0394 */
/* SMP info area */
- __u32 cpu_nr; /* 0x0390 */
- __u32 softirq_pending; /* 0x0394 */
- __u64 percpu_offset; /* 0x0398 */
- __u64 vdso_per_cpu_data; /* 0x03a0 */
- __u64 machine_flags; /* 0x03a8 */
- __u32 preempt_count; /* 0x03b0 */
- __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
- __u64 gmap; /* 0x03b8 */
- __u32 spinlock_lockval; /* 0x03c0 */
- __u32 spinlock_index; /* 0x03c4 */
- __u32 fpu_flags; /* 0x03c8 */
- __u8 pad_0x03cc[0x0400-0x03cc]; /* 0x03cc */
-
- /* Per cpu primary space access list */
- __u32 paste[16]; /* 0x0400 */
-
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ __u32 cpu_nr; /* 0x0398 */
+ __u32 softirq_pending; /* 0x039c */
+ __u32 preempt_count; /* 0x03a0 */
+ __u32 spinlock_lockval; /* 0x03a4 */
+ __u32 spinlock_index; /* 0x03a8 */
+ __u32 fpu_flags; /* 0x03ac */
+ __u64 percpu_offset; /* 0x03b0 */
+ __u64 vdso_per_cpu_data; /* 0x03b8 */
+ __u64 machine_flags; /* 0x03c0 */
+ __u64 gmap; /* 0x03c8 */
+ __u8 pad_0x03d0[0x0e00-0x03d0]; /* 0x03d0 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -193,14 +188,14 @@ extern struct lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address)
{
- asm volatile("spx %0" : : "m" (address) : "memory");
+ asm volatile("spx %0" : : "Q" (address) : "memory");
}
static inline __u32 store_prefix(void)
{
__u32 address;
- asm volatile("stpx %0" : "=m" (address));
+ asm volatile("stpx %0" : "=Q" (address));
return address;
}
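The reshuffle packs the six 32-bit fields between the new vdso_asce and percpu_offset, keeping everything naturally aligned. A stand-alone model that re-checks the commented offsets; the struct fragment below is an excerpt rebuilt from the hunk, not the full lowcore:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct lowcore_frag {			/* fields from 0x0378 onward */
		uint64_t kernel_asce;		/* 0x0378 */
		uint64_t user_asce;		/* 0x0380 */
		uint64_t vdso_asce;		/* 0x0388 */
		uint32_t lpp;			/* 0x0390 */
		uint32_t current_pid;		/* 0x0394 */
		uint32_t cpu_nr;		/* 0x0398 */
		uint32_t softirq_pending;	/* 0x039c */
		uint32_t preempt_count;		/* 0x03a0 */
		uint32_t spinlock_lockval;	/* 0x03a4 */
		uint32_t spinlock_index;	/* 0x03a8 */
		uint32_t fpu_flags;		/* 0x03ac */
		uint64_t percpu_offset;		/* 0x03b0 */
	};

	int main(void)
	{
		assert(offsetof(struct lowcore_frag, vdso_asce) == 0x388 - 0x378);
		assert(offsetof(struct lowcore_frag, lpp) == 0x390 - 0x378);
		assert(offsetof(struct lowcore_frag, percpu_offset) == 0x3b0 - 0x378);
		return 0;
	}
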
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index cf4c1cb17dcd..f4a07f788f78 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -73,41 +73,38 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
S390_lowcore.user_asce = mm->context.asce;
- if (current->thread.mm_segment.ar4)
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- set_cpu_flag(CIF_ASCE_PRIMARY);
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
}
static inline void clear_user_asce(void)
{
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
-
- __ctl_load(S390_lowcore.user_asce, 1, 1);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
-}
-
-static inline void load_kernel_asce(void)
-{
- unsigned long asce;
-
- __ctl_store(asce, 1, 1);
- if (asce != S390_lowcore.kernel_asce)
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
set_cpu_flag(CIF_ASCE_PRIMARY);
}
+mm_segment_t enable_sacf_uaccess(void);
+void disable_sacf_uaccess(mm_segment_t old_fs);
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
+ S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
- /* Clear old ASCE by loading the kernel ASCE. */
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ /* Clear previous user-ASCE from CR1 and CR7 */
+ if (!test_cpu_flag(CIF_ASCE_PRIMARY)) {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (test_cpu_flag(CIF_ASCE_SECONDARY)) {
+ __ctl_load(S390_lowcore.vdso_asce, 7, 7);
+ clear_cpu_flag(CIF_ASCE_SECONDARY);
+ }
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -117,7 +114,6 @@ static inline void finish_arch_post_lock_switch(void)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- load_kernel_asce();
if (mm) {
preempt_disable();
while (atomic_read(&mm->context.flush_count))
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 79aa6421fedb..d6c9d1e0dc2d 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -64,27 +64,10 @@ struct perf_sf_sde_regs {
#define REG_OVERFLOW 1
#define OVERFLOW_REG(hwc) ((hwc)->extra_reg.config)
#define SFB_ALLOC_REG(hwc) ((hwc)->extra_reg.alloc)
-#define RAWSAMPLE_REG(hwc) ((hwc)->config)
#define TEAR_REG(hwc) ((hwc)->last_tag)
#define SAMPL_RATE(hwc) ((hwc)->event_base)
#define SAMPL_FLAGS(hwc) ((hwc)->config_base)
#define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
-/* Structure for sampling data entries to be passed as perf raw sample data
- * to user space. Note that raw sample data must be aligned and, thus, might
- * be padded with zeros.
- */
-struct sf_raw_sample {
-#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE
-#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE
- u64 format;
- u32 size; /* Size of sf_raw_sample */
- u16 bsdes; /* Basic-sampling data entry size */
- u16 dsdes; /* Diagnostic-sampling data entry size */
- struct hws_basic_entry basic; /* Basic-sampling data entry */
- struct hws_diag_entry diag; /* Diagnostic-sampling data entry */
- u8 padding[]; /* Padding to next multiple of 8 */
-} __packed;
-
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index f25bfe888933..bfbfad482289 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -109,9 +109,7 @@ extern void execve_tail(void);
#define HAVE_ARCH_PICK_MMAP_LAYOUT
-typedef struct {
- __u32 ar4;
-} mm_segment_t;
+typedef unsigned int mm_segment_t;
/*
* Thread structure
@@ -247,7 +245,7 @@ static inline unsigned short stap(void)
{
unsigned short cpu_address;
- asm volatile("stap %0" : "=m" (cpu_address));
+ asm volatile("stap %0" : "=Q" (cpu_address));
return cpu_address;
}
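stap, spx/stpx and lcctl are all S-format instructions: they encode a base register plus displacement only, with no index register. The s390 "Q" constraint tells the compiler exactly that, whereas "m"/"=m" may hand it an indexed address the encoding cannot express; that is a hedged reading of the constraint swap, not something the diff states. A minimal sketch mirroring stap():

	/* Sketch: "=Q" restricts the output operand to base+displacement
	 * addressing, as required by the S-format stap instruction.
	 */
	static inline unsigned short model_stap(void)
	{
		unsigned short cpu_address;

		asm volatile("stap %0" : "=Q" (cpu_address));
		return cpu_address;
	}
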
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 2f84e77f1f1b..a3788dafc0e1 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -13,10 +13,12 @@
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
#define PIF_SYSCALL_RESTART 2 /* restart the current system call */
+#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
#define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART)
+#define _PIF_GUEST_FAULT _BITUL(PIF_GUEST_FAULT)
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 8bc87dcb10eb..2eb0c8a7b664 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -36,7 +36,7 @@
#define MACHINE_FLAG_SCC _BITUL(17)
#define LPP_MAGIC _BITUL(31)
-#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
+#define LPP_PID_MASK _AC(0xffffffff, UL)
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cdd0f0d999e2..ad6b91013a05 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -16,7 +16,7 @@
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
-
+#include <asm/facility.h>
/*
* The fs value determines whether argument validity checking should be
@@ -26,27 +26,16 @@
* For historical reasons, these macros are grossly misnamed.
*/
-#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
-
-
-#define KERNEL_DS MAKE_MM_SEG(0)
-#define USER_DS MAKE_MM_SEG(1)
+#define KERNEL_DS (0)
+#define KERNEL_DS_SACF (1)
+#define USER_DS (2)
+#define USER_DS_SACF (3)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
-#define segment_eq(a,b) ((a).ar4 == (b).ar4)
+#define segment_eq(a,b) (((a) & 2) == ((b) & 2))
-static inline void set_fs(mm_segment_t fs)
-{
- current->thread.mm_segment = fs;
- if (uaccess_kernel()) {
- set_cpu_flag(CIF_ASCE_SECONDARY);
- __ctl_load(S390_lowcore.kernel_asce, 7, 7);
- } else {
- clear_cpu_flag(CIF_ASCE_SECONDARY);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- }
-}
+void set_fs(mm_segment_t fs);
static inline int __range_ok(unsigned long addr, unsigned long size)
{
@@ -95,7 +84,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x810000UL;
+ unsigned long spec = 0x010000UL;
int rc;
switch (size) {
@@ -125,7 +114,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- unsigned long spec = 0x81UL;
+ unsigned long spec = 0x01UL;
int rc;
switch (size) {
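The new encoding packs two bits into mm_segment_t: bit 1 selects kernel vs. user address space, bit 0 flags an active sacf override. segment_eq() masks with 2, so a segment compares equal to its _SACF variant. A stand-alone model of exactly the definitions above:

	#include <assert.h>

	#define KERNEL_DS	(0)
	#define KERNEL_DS_SACF	(1)
	#define USER_DS		(2)
	#define USER_DS_SACF	(3)
	#define segment_eq(a, b) (((a) & 2) == ((b) & 2))

	int main(void)
	{
		assert(segment_eq(KERNEL_DS, KERNEL_DS_SACF));
		assert(segment_eq(USER_DS, USER_DS_SACF));
		assert(!segment_eq(KERNEL_DS, USER_DS));
		return 0;
	}
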
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index ae6261ef97d5..169d7604eb80 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -46,6 +46,7 @@ struct vdso_per_cpu_data {
};
extern struct vdso_data *vdso_data;
+extern struct vdso_data boot_vdso_data;
void vdso_alloc_boot_cpu(struct lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
diff --git a/arch/s390/include/uapi/asm/perf_regs.h b/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..7c8564f98205
--- /dev/null
+++ b/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+ PERF_REG_S390_R0,
+ PERF_REG_S390_R1,
+ PERF_REG_S390_R2,
+ PERF_REG_S390_R3,
+ PERF_REG_S390_R4,
+ PERF_REG_S390_R5,
+ PERF_REG_S390_R6,
+ PERF_REG_S390_R7,
+ PERF_REG_S390_R8,
+ PERF_REG_S390_R9,
+ PERF_REG_S390_R10,
+ PERF_REG_S390_R11,
+ PERF_REG_S390_R12,
+ PERF_REG_S390_R13,
+ PERF_REG_S390_R14,
+ PERF_REG_S390_R15,
+ PERF_REG_S390_FP0,
+ PERF_REG_S390_FP1,
+ PERF_REG_S390_FP2,
+ PERF_REG_S390_FP3,
+ PERF_REG_S390_FP4,
+ PERF_REG_S390_FP5,
+ PERF_REG_S390_FP6,
+ PERF_REG_S390_FP7,
+ PERF_REG_S390_FP8,
+ PERF_REG_S390_FP9,
+ PERF_REG_S390_FP10,
+ PERF_REG_S390_FP11,
+ PERF_REG_S390_FP12,
+ PERF_REG_S390_FP13,
+ PERF_REG_S390_FP14,
+ PERF_REG_S390_FP15,
+ PERF_REG_S390_MASK,
+ PERF_REG_S390_PC,
+
+ PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
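A hedged sketch of how userspace might consume the new enumeration: request user-register sampling with a mask built from these values. perf_event_open() has no libc wrapper, so the raw syscall is used; the event choice and period are arbitrary, and error handling is elided.

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Values from the new uapi header, inlined here so the example
	 * builds without an s390 sysroot.
	 */
	#define PERF_REG_S390_R15	15
	#define PERF_REG_S390_PC	33

	int open_cycle_sampler(void)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 100000,
			.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER,
			.sample_regs_user = (1ULL << PERF_REG_S390_PC) |
					    (1ULL << PERF_REG_S390_R15),
		};

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
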
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 83bc82001c06..909bce65cb2b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -59,7 +59,7 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
-obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o
+obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
extra-y += head.o head64.o vmlinux.lds
@@ -77,10 +77,9 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
-obj-$(CONFIG_ALTERNATIVES) += alternative.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
-obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 33ec80df7ed4..587b195b588d 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -171,6 +171,7 @@ int main(void)
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
+ OFFSET(__LC_VDSO_ASCE, lowcore, vdso_asce);
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
@@ -178,7 +179,6 @@ int main(void)
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
- OFFSET(__LC_PASTE, lowcore, paste);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a4a1208e3df3..ef246940b44c 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -50,19 +50,6 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
-static inline void sigset_to_sigset32(unsigned long *set64,
- compat_sigset_word *set32)
-{
- set32[0] = (compat_sigset_word) set64[0];
- set32[1] = (compat_sigset_word)(set64[0] >> 32);
-}
-
-static inline void sigset32_to_sigset(compat_sigset_word *set32,
- unsigned long *set64)
-{
- set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
-}
-
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -294,12 +281,10 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
- compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
goto badframe;
- sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
save_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
@@ -317,12 +302,10 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
- compat_sigset_t cset;
sigset_t set;
- if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
+ if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
goto badframe;
- sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -372,7 +355,6 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
{
int sig = ksig->sig;
sigframe32 __user *frame;
- struct sigcontext32 sc;
unsigned long restorer;
size_t frame_size;
@@ -394,9 +376,10 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Create struct sigcontext32 on the signal stack */
- sigset_to_sigset32(set->sig, sc.oldmask);
- sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
- if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
+ if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
+ set, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ if (__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs))
return -EFAULT;
/* Store registers needed to create the signal frame */
@@ -455,7 +438,6 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
- compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -502,12 +484,11 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();
/* Create ucontext on the signal stack. */
- sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
+ put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index b811d3a8417d..3be829721cf9 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -480,7 +480,7 @@ void show_code(struct pt_regs *regs)
{
char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
mm_segment_t old_fs;
unsigned long addr;
int start, end, opsize, hops, i;
@@ -543,7 +543,7 @@ void show_code(struct pt_regs *regs)
start += opsize;
pr_cont("%s", buffer);
ptr = buffer;
- ptr += sprintf(ptr, "\n ");
+ ptr += sprintf(ptr, "\n\t ");
hops++;
}
pr_cont("\n");
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f498d201f98d..a316cd6999ad 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -379,13 +379,21 @@ ENTRY(system_call)
jg s390_handle_mcck # TIF bit will be cleared by handler
#
-# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
+# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
+ lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
+ jz .Lsysc_return
+#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
+ tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
+ jnz .Lsysc_set_fs_fixup
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
- jz .Lsysc_return
+ j .Lsysc_return
+.Lsysc_set_fs_fixup:
+#endif
larl %r14,.Lsysc_return
jg set_fs_fixup
@@ -518,6 +526,7 @@ ENTRY(pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
+ lghi %r11,0
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
@@ -532,6 +541,7 @@ ENTRY(pgm_check_handler)
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
+ lghi %r11,_PIF_GUEST_FAULT
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault
@@ -549,13 +559,14 @@ ENTRY(pgm_check_handler)
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: stg %r10,__THREAD_last_break(%r14)
-4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+4: lgr %r13,%r11
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ stg %r13,__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 5f
@@ -738,10 +749,18 @@ ENTRY(io_int_handler)
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lio_asce:
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_SECONDARY
+ lctlg %c7,%c7,__LC_VDSO_ASCE # load secondary asce
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_PRIMARY
+ jz .Lio_return
+#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
+ tm __LC_STFLE_FAC_LIST+3,0x10 # has MVCOS ?
+ jnz .Lio_set_fs_fixup
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
- jz .Lio_return
+ j .Lio_return
+.Lio_set_fs_fixup:
+#endif
larl %r14,.Lio_return
jg set_fs_fixup
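A C rendering of the new .Lsysc_asce/.Lio_asce exit path, as read from the assembly above: CR7 always gets the vdso ASCE back, while CR1 is reloaded inline only on machines without MVCOS; otherwise set_fs_fixup() finishes the job. The load_cr*() and machine_has_mvcos() helpers are hypothetical stand-ins for the control-register loads and the facility test:

	/* Model only; mirrors the branch structure of .Lsysc_asce. */
	static void sysc_asce_model(void)
	{
		clear_cpu_flag(CIF_ASCE_SECONDARY);
		load_cr7(S390_lowcore.vdso_asce);	/* hypothetical helper */

		if (!test_cpu_flag(CIF_ASCE_PRIMARY))
			return;				/* .Lsysc_return */

		if (!machine_has_mvcos()) {		/* hypothetical test */
			clear_cpu_flag(CIF_ASCE_PRIMARY);
			load_cr1(S390_lowcore.user_asce);
			return;
		}
		set_fs_fixup();				/* via %r14 in the asm */
	}
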
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 172002da7075..38a973ccf501 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -28,7 +28,7 @@ ENTRY(startup_continue)
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
- lghi %r0,__LC_PASTE
+ larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 6d9f73bb4142..7b87991416fd 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -433,16 +433,13 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *s;
char *secstrings;
- if (IS_ENABLED(CONFIG_ALTERNATIVES)) {
- secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
- if (!strcmp(".altinstructions",
- secstrings + s->sh_name)) {
- /* patch .altinstructions */
- void *aseg = (void *)s->sh_addr;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".altinstructions", secstrings + s->sh_name)) {
+ /* patch .altinstructions */
+ void *aseg = (void *)s->sh_addr;
- apply_alternatives(aseg, aseg + s->sh_size);
- }
+ apply_alternatives(aseg, aseg + s->sh_size);
}
}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f3cda41f32a..6ff169253cae 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -191,7 +191,6 @@ static int notrace s390_check_registers(union mci mci, int umode)
{
union ctlreg2 cr2;
int kill_task;
- void *fpt_save_area;
kill_task = 0;
@@ -224,7 +223,6 @@ static int notrace s390_check_registers(union mci mci, int umode)
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index bd4bbf61aaf3..227b38bd82c9 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -15,6 +15,7 @@
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
+#include <linux/pid.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -77,6 +78,15 @@ struct sf_buffer {
unsigned long *tail; /* last sample-data-block-table */
};
+struct aux_buffer {
+ struct sf_buffer sfb;
+ unsigned long head; /* index of SDB of buffer head */
+ unsigned long alert_mark; /* index of SDB of alert request position */
+ unsigned long empty_mark; /* mark of SDB not marked full */
+ unsigned long *sdb_index; /* SDB address for fast lookup */
+ unsigned long *sdbt_index; /* SDBT address for fast lookup */
+};
+
struct cpu_hw_sf {
/* CPU-measurement sampling information block */
struct hws_qsi_info_block qsi;
@@ -85,6 +95,7 @@ struct cpu_hw_sf {
struct sf_buffer sfb; /* Sampling buffer */
unsigned int flags; /* Status flags */
struct perf_event *event; /* Scheduled perf event */
+ struct perf_output_handle handle; /* AUX buffer output handle */
};
static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
@@ -341,22 +352,6 @@ static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
sfb_account_allocs(num, hwc);
}
-static size_t event_sample_size(struct hw_perf_event *hwc)
-{
- struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
- size_t sample_size;
-
- /* The sample size depends on the sampling function: The basic-sampling
- * function must be always enabled, diagnostic-sampling function is
- * optional.
- */
- sample_size = sfr->bsdes;
- if (SAMPL_DIAG_MODE(hwc))
- sample_size += sfr->dsdes;
-
- return sample_size;
-}
-
static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
{
if (cpuhw->sfb.sdbt)
@@ -366,35 +361,7 @@ static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
unsigned long n_sdb, freq, factor;
- size_t sfr_size, sample_size;
- struct sf_raw_sample *sfr;
-
- /* Allocate raw sample buffer
- *
- * The raw sample buffer is used to temporarily store sampling data
- * entries for perf raw sample processing. The buffer size mainly
- * depends on the size of diagnostic-sampling data entries which is
- * machine-specific. The exact size calculation includes:
- * 1. The first 4 bytes of diagnostic-sampling data entries are
- * already reflected in the sf_raw_sample structure. Subtract
- * these bytes.
- * 2. The perf raw sample data must be 8-byte aligned (u64) and
- * perf's internal data size must be considered too. So add
- * an additional u32 for correct alignment and subtract before
- * allocating the buffer.
- * 3. Store the raw sample buffer pointer in the perf event
- * hardware structure.
- */
- sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) +
- sizeof(u32), sizeof(u64));
- sfr_size -= sizeof(u32);
- sfr = kzalloc(sfr_size, GFP_KERNEL);
- if (!sfr)
- return -ENOMEM;
- sfr->size = sfr_size;
- sfr->bsdes = cpuhw->qsi.bsdes;
- sfr->dsdes = cpuhw->qsi.dsdes;
- RAWSAMPLE_REG(hwc) = (unsigned long) sfr;
+ size_t sample_size;
/* Calculate sampling buffers using 4K pages
*
@@ -420,7 +387,7 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
* ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
* to 511 SDBs).
*/
- sample_size = event_sample_size(hwc);
+ sample_size = sizeof(struct hws_basic_entry);
freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
factor = 1;
n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
@@ -619,10 +586,6 @@ static int reserve_pmc_hardware(void)
static void hw_perf_event_destroy(struct perf_event *event)
{
- /* Free raw sample buffer */
- if (RAWSAMPLE_REG(&event->hw))
- kfree((void *) RAWSAMPLE_REG(&event->hw));
-
/* Release PMC if this is the last perf event */
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
@@ -642,15 +605,8 @@ static void hw_init_period(struct hw_perf_event *hwc, u64 period)
static void hw_reset_registers(struct hw_perf_event *hwc,
unsigned long *sdbt_origin)
{
- struct sf_raw_sample *sfr;
-
/* (Re)set to first sample-data-block-table */
TEAR_REG(hwc) = (unsigned long) sdbt_origin;
-
- /* (Re)set raw sampling buffer register */
- sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
- memset(&sfr->basic, 0, sizeof(sfr->basic));
- memset(&sfr->diag, 0, sfr->dsdes);
}
static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
@@ -660,6 +616,67 @@ static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
si->min_sampl_rate, si->max_sampl_rate);
}
+static u32 cpumsf_pid_type(struct perf_event *event,
+ u32 pid, enum pid_type type)
+{
+ struct task_struct *tsk;
+
+ /* Idle process */
+ if (!pid)
+ goto out;
+
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ pid = -1;
+ if (tsk) {
+ /*
+ * Only top level events contain the pid namespace in which
+ * they are created.
+ */
+ if (event->parent)
+ event = event->parent;
+ pid = __task_pid_nr_ns(tsk, type, event->ns);
+ /*
+ * See also 1d953111b648
+ * "perf/core: Don't report zero PIDs for exiting tasks".
+ */
+ if (!pid && !pid_alive(tsk))
+ pid = -1;
+ }
+out:
+ return pid;
+}
+
+static void cpumsf_output_event_pid(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ u32 pid;
+ struct perf_event_header header;
+ struct perf_output_handle handle;
+
+ /*
+ * Obtain the PID from the basic-sampling data entry and
+ * correct the data->tid_entry.pid value.
+ */
+ pid = data->tid_entry.pid;
+
+ /* Protect callchain buffers, tasks */
+ rcu_read_lock();
+
+ perf_prepare_sample(&header, data, event, regs);
+ if (perf_output_begin(&handle, event, header.size))
+ goto out;
+
+ /* Update the process ID (see also kernel/events/core.c) */
+ data->tid_entry.pid = cpumsf_pid_type(event, pid, __PIDTYPE_TGID);
+ data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
+
+ perf_output_sample(&handle, &header, data, event);
+ perf_output_end(&handle);
+out:
+ rcu_read_unlock();
+}
+
static int __hw_perf_event_init(struct perf_event *event)
{
struct cpu_hw_sf *cpuhw;
@@ -770,6 +787,10 @@ static int __hw_perf_event_init(struct perf_event *event)
hwc->extra_reg.reg = REG_OVERFLOW;
OVERFLOW_REG(hwc) = 0;
+ /* Use AUX buffer. No need to allocate it ourselves */
+ if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
+ return 0;
+
/* Allocate the per-CPU sampling buffer using the CPU information
* from the event. If the event is not pinned to a particular
* CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
@@ -789,6 +810,14 @@ static int __hw_perf_event_init(struct perf_event *event)
break;
}
}
+
+ /* If PID/TID sampling is active, replace the default overflow
+ * handler to extract and resolve the PIDs from the basic-sampling
+ * data entries.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_TID)
+ if (is_default_overflow_handler(event))
+ event->overflow_handler = cpumsf_output_event_pid;
out:
return err;
}
@@ -866,10 +895,15 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
*/
if (cpuhw->event) {
hwc = &cpuhw->event->hw;
- /* Account number of overflow-designated buffer extents */
- sfb_account_overflows(cpuhw, hwc);
- if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
- extend_sampling_buffer(&cpuhw->sfb, hwc);
+ if (!(SAMPL_DIAG_MODE(hwc))) {
+ /*
+ * Account number of overflow-designated
+ * buffer extents
+ */
+ sfb_account_overflows(cpuhw, hwc);
+ if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
+ extend_sampling_buffer(&cpuhw->sfb, hwc);
+ }
}
/* (Re)enable the PMU and sampling facility */
@@ -884,6 +918,9 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
return;
}
+ /* Load current program parameter */
+ lpp(&S390_lowcore.lpp);
+
debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
"tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
cpuhw->lsctl.ed, cpuhw->lsctl.cd,
@@ -967,22 +1004,16 @@ static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
*
* Return non-zero if an event overflow occurred.
*/
-static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
+static int perf_push_sample(struct perf_event *event,
+ struct hws_basic_entry *basic)
{
int overflow;
struct pt_regs regs;
struct perf_sf_sde_regs *sde_regs;
struct perf_sample_data data;
- struct perf_raw_record raw = {
- .frag = {
- .size = sfr->size,
- .data = sfr,
- },
- };
/* Setup perf sample */
perf_sample_data_init(&data, 0, event->hw.last_period);
- data.raw = &raw;
/* Setup pt_regs to look like a CPU-measurement external interrupt
* using the Program Request Alert code. The regs.int_parm_long
@@ -994,11 +1025,11 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
regs.int_parm = CPU_MF_INT_SF_PRA;
sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
- psw_bits(regs.psw).ia = sfr->basic.ia;
- psw_bits(regs.psw).dat = sfr->basic.T;
- psw_bits(regs.psw).wait = sfr->basic.W;
- psw_bits(regs.psw).pstate = sfr->basic.P;
- psw_bits(regs.psw).as = sfr->basic.AS;
+ psw_bits(regs.psw).ia = basic->ia;
+ psw_bits(regs.psw).dat = basic->T;
+ psw_bits(regs.psw).wait = basic->W;
+ psw_bits(regs.psw).pstate = basic->P;
+ psw_bits(regs.psw).as = basic->AS;
/*
* Use the hardware provided configuration level to decide if the
@@ -1011,7 +1042,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
* If the value differs from 0xffff (the host value), we assume to
* be a KVM guest.
*/
- switch (sfr->basic.CL) {
+ switch (basic->CL) {
case 1: /* logical partition */
sde_regs->in_guest = 0;
break;
@@ -1019,11 +1050,17 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
sde_regs->in_guest = 1;
break;
default: /* old machine, use heuristics */
- if (sfr->basic.gpp || sfr->basic.prim_asn != 0xffff)
+ if (basic->gpp || basic->prim_asn != 0xffff)
sde_regs->in_guest = 1;
break;
}
+ /*
+ * Store the PID value from the sample-data-entry to be
+ * processed and resolved by cpumsf_output_event_pid().
+ */
+ data.tid_entry.pid = basic->hpp & LPP_PID_MASK;
+
overflow = 0;
if (perf_exclude_event(event, &regs, sde_regs))
goto out;
@@ -1041,75 +1078,12 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
local64_add(count, &event->count);
}
-static int sample_format_is_valid(struct hws_combined_entry *sample,
- unsigned int flags)
-{
- if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
- /* Only basic-sampling data entries with data-entry-format
- * version of 0x0001 can be processed.
- */
- if (sample->basic.def != 0x0001)
- return 0;
- if (flags & PERF_CPUM_SF_DIAG_MODE)
- /* The data-entry-format number of diagnostic-sampling data
- * entries can vary. Because diagnostic data is just passed
- * through, do only a sanity check on the DEF.
- */
- if (sample->diag.def < 0x8001)
- return 0;
- return 1;
-}
-
-static int sample_is_consistent(struct hws_combined_entry *sample,
- unsigned long flags)
-{
- /* This check applies only to basic-sampling data entries of potentially
- * combined-sampling data entries. Invalid entries cannot be processed
- * by the PMU and, thus, do not deliver an associated
- * diagnostic-sampling data entry.
- */
- if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE)))
- return 0;
- /*
- * Samples are skipped, if they are invalid or for which the
- * instruction address is not predictable, i.e., the wait-state bit is
- * set.
- */
- if (sample->basic.I || sample->basic.W)
- return 0;
- return 1;
-}
-
-static void reset_sample_slot(struct hws_combined_entry *sample,
- unsigned long flags)
-{
- if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
- sample->basic.def = 0;
- if (flags & PERF_CPUM_SF_DIAG_MODE)
- sample->diag.def = 0;
-}
-
-static void sfr_store_sample(struct sf_raw_sample *sfr,
- struct hws_combined_entry *sample)
-{
- if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE))
- sfr->basic = sample->basic;
- if (sfr->format & PERF_CPUM_SF_DIAG_MODE)
- memcpy(&sfr->diag, &sample->diag, sfr->dsdes);
-}
-
-static void debug_sample_entry(struct hws_combined_entry *sample,
- struct hws_trailer_entry *te,
- unsigned long flags)
+static void debug_sample_entry(struct hws_basic_entry *sample,
+ struct hws_trailer_entry *te)
{
debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
- "sampling data entry: te->f=%i basic.def=%04x (%p)"
- " diag.def=%04x (%p)\n", te->f,
- sample->basic.def, &sample->basic,
- (flags & PERF_CPUM_SF_DIAG_MODE)
- ? sample->diag.def : 0xFFFF,
- (flags & PERF_CPUM_SF_DIAG_MODE)
- ? &sample->diag : NULL);
+ "sampling data entry: te->f=%i basic.def=%04x (%p)\n",
+ te->f, sample->def, sample);
}
/* hw_collect_samples() - Walk through a sample-data-block and collect samples
@@ -1135,44 +1109,37 @@ static void debug_sample_entry(struct hws_combined_entry *sample,
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
unsigned long long *overflow)
{
- unsigned long flags = SAMPL_FLAGS(&event->hw);
- struct hws_combined_entry *sample;
struct hws_trailer_entry *te;
- struct sf_raw_sample *sfr;
- size_t sample_size;
-
- /* Prepare and initialize raw sample data */
- sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw);
- sfr->format = flags & PERF_CPUM_SF_MODE_MASK;
+ struct hws_basic_entry *sample;
- sample_size = event_sample_size(&event->hw);
te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
- sample = (struct hws_combined_entry *) *sdbt;
+ sample = (struct hws_basic_entry *) *sdbt;
while ((unsigned long *) sample < (unsigned long *) te) {
/* Check for an empty sample */
- if (!sample->basic.def)
+ if (!sample->def)
break;
/* Update perf event period */
perf_event_count_update(event, SAMPL_RATE(&event->hw));
- /* Check sampling data entry */
- if (sample_format_is_valid(sample, flags)) {
+ /* Check whether sample is valid */
+ if (sample->def == 0x0001) {
/* If an event overflow occurred, the PMU is stopped to
* throttle event delivery. Remaining sample data is
* discarded.
*/
if (!*overflow) {
- if (sample_is_consistent(sample, flags)) {
+ /* Check whether sample is consistent */
+ if (sample->I == 0 && sample->W == 0) {
/* Deliver sample data to perf */
- sfr_store_sample(sfr, sample);
- *overflow = perf_push_sample(event, sfr);
+ *overflow = perf_push_sample(event,
+ sample);
}
} else
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sample_entry(sample, te, flags);
+ debug_sample_entry(sample, te);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1188,8 +1155,8 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
}
/* Reset sample slot and advance to next sample */
- reset_sample_slot(sample, flags);
- sample += sample_size;
+ sample->def = 0;
+ sample++;
}
}
@@ -1215,6 +1182,13 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
int done;
+ /*
+ * The AUX buffer is used in diagnostic-sampling mode;
+ * no perf events/samples are created here.
+ */
+ if (SAMPL_DIAG_MODE(&event->hw))
+ return;
+
if (flush_all && SDB_FULL_BLOCKS(hwc))
flush_all = 0;
@@ -1291,6 +1265,439 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow, event_overflow);
}
+#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
+#define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0)
+#define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
+#define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)
+
+/*
+ * Get trailer entry by index of SDB.
+ */
+static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
+ unsigned long index)
+{
+ unsigned long sdb;
+
+ index = AUX_SDB_INDEX(aux, index);
+ sdb = aux->sdb_index[index];
+ return (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+}
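aux->head, aux->alert_mark and aux->empty_mark grow monotonically; only AUX_SDB_INDEX reduces an index modulo the buffer size. A small worked example of the macros above, with assumed values:

/*
 * Illustrative: with aux->sfb.num_sdb == 8, aux->head == 10 and
 * aux->empty_mark == 13:
 *   AUX_SDB_INDEX(aux, 10)   == 2   (10 % 8)
 *   AUX_SDB_NUM(aux, 10, 13) == 4   (SDBs 10..13, inclusive)
 *   AUX_SDB_NUM_EMPTY(aux)   == 4
 */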
+
+/*
+ * Finish sampling on the CPU. Called by cpumsf_pmu_del() with the pmu
+ * disabled. Collect the full SDBs in the AUX buffer which have not yet
+ * reached the alert indicator; SDBs which are not full are ignored.
+ *
+ * 1. Scan the SDBs to see how much data is there and consume it.
+ * 2. Remove the alert indicator from the buffer.
+ */
+static void aux_output_end(struct perf_output_handle *handle)
+{
+ unsigned long i, range_scan, idx;
+ struct aux_buffer *aux;
+ struct hws_trailer_entry *te;
+
+ aux = perf_get_aux(handle);
+ if (!aux)
+ return;
+
+ range_scan = AUX_SDB_NUM_ALERT(aux);
+ for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
+ break;
+ }
+ /* i is the number of SDBs which are full */
+ perf_aux_output_end(handle, i << PAGE_SHIFT);
+
+ /* Remove alert indicators in the buffer */
+ te = aux_sdb_trailer(aux, aux->alert_mark);
+ te->flags &= ~SDB_TE_ALERT_REQ_MASK;
+
+ debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+}
+
+/*
+ * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event
+ * is first added to the CPU or rescheduled to it again. It is called
+ * with the pmu disabled.
+ *
+ * 1. Reset the trailers of the SDBs to get ready for new data.
+ * 2. Tell the hardware where to put the data by resetting the SDB
+ * buffer head (TEAR/DEAR).
+ */
+static int aux_output_begin(struct perf_output_handle *handle,
+ struct aux_buffer *aux,
+ struct cpu_hw_sf *cpuhw)
+{
+ unsigned long range;
+ unsigned long i, range_scan, idx;
+ unsigned long head, base, offset;
+ struct hws_trailer_entry *te;
+
+ if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
+ return -EINVAL;
+
+ aux->head = handle->head >> PAGE_SHIFT;
+ range = (handle->size + 1) >> PAGE_SHIFT;
+ if (range <= 1)
+ return -ENOMEM;
+
+ /*
+ * SDBs between aux->head and aux->empty_mark are already ready
+ * for new data. range_scan is the number of SDBs not within them.
+ */
+ if (range > AUX_SDB_NUM_EMPTY(aux)) {
+ range_scan = range - AUX_SDB_NUM_EMPTY(aux);
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+ te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+ te->overflow = 0;
+ }
+ /* Save the position of empty SDBs */
+ aux->empty_mark = aux->head + range - 1;
+ }
+
+ /* Set alert indicator */
+ aux->alert_mark = aux->head + range/2 - 1;
+ te = aux_sdb_trailer(aux, aux->alert_mark);
+ te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
+
+ /* Reset hardware buffer head */
+ head = AUX_SDB_INDEX(aux, aux->head);
+ base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
+ offset = head % CPUM_SF_SDB_PER_TABLE;
+ cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
+ cpuhw->lsctl.dear = aux->sdb_index[head];
+
+ debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
+ "head->alert_mark->empty_mark (num_alert, range)"
+ "[%lx -> %lx -> %lx] (%lx, %lx) "
+ "tear index %lx, tear %lx dear %lx\n",
+ aux->head, aux->alert_mark, aux->empty_mark,
+ AUX_SDB_NUM_ALERT(aux), range,
+ head / CPUM_SF_SDB_PER_TABLE,
+ cpuhw->lsctl.tear,
+ cpuhw->lsctl.dear);
+
+ return 0;
+}
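One SDBT manages up to 511 SDBs (CPUM_SF_SDB_PER_TABLE, see the sizing comment earlier), so the hardware buffer head splits into an SDBT index and an entry offset within that table. A worked example with assumed values:

/*
 * Illustrative: with CPUM_SF_SDB_PER_TABLE == 511 and head == 600
 * (a buffer of more than 600 SDBs):
 *   base   = aux->sdbt_index[600 / 511]   -> the second SDBT
 *   offset = 600 % 511                    -> entry 89
 *   tear   = base + 89 * sizeof(unsigned long)
 *   dear   = aux->sdb_index[600]
 */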
+
+/*
+ * Set alert indicator on SDB at index @alert_index while sampler is running.
+ *
+ * Return true on success.
+ * Return false if the full indicator was already set by the hardware sampler.
+ */
+static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ unsigned long long *overflow)
+{
+ unsigned long long orig_overflow, orig_flags, new_flags;
+ struct hws_trailer_entry *te;
+
+ te = aux_sdb_trailer(aux, alert_index);
+ do {
+ orig_flags = te->flags;
+ orig_overflow = te->overflow;
+ *overflow = orig_overflow;
+ if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ /*
+ * The SDB was already marked full by the
+ * hardware. Abort and try to set the alert
+ * somewhere behind it.
+ */
+ return false;
+ }
+ new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
+ } while (!cmpxchg_double(&te->flags, &te->overflow,
+ orig_flags, orig_overflow,
+ new_flags, 0ULL));
+ return true;
+}
+
+/*
+ * aux_reset_buffer() - Scan and setup SDBs for new samples
+ * @aux: The AUX buffer to set
+ * @range: The range of SDBs to scan started from aux->head
+ * @overflow: Set to overflow count
+ *
+ * Set the alert indicator on the SDB at index aux->alert_mark. If this SDB is
+ * marked as empty, check whether it has already been set full by the hardware
+ * sampler. If so, new data arrived before the alert indicator could be set,
+ * and the caller should try to set the alert indicator at some position
+ * behind it.
+ *
+ * Scan the SDBs in the AUX buffer from behind aux->empty_mark. They were used
+ * previously and have already been consumed by user space. Reset these SDBs
+ * (clear the full and alert indicators) for new data.
+ * If aux->alert_mark falls within this area, just set it. The overflow count
+ * is accumulated while scanning.
+ *
+ * SDBs between aux->head and aux->empty_mark were already reset last time and
+ * are ready for new samples, so scanning that area can be skipped.
+ *
+ * Return true if the alert indicator was set successfully and false if not.
+ */
+static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
+ unsigned long long *overflow)
+{
+ unsigned long long orig_overflow, orig_flags, new_flags;
+ unsigned long i, range_scan, idx;
+ struct hws_trailer_entry *te;
+
+ if (range <= AUX_SDB_NUM_EMPTY(aux))
+ /*
+ * No need to scan. All SDBs in range are marked as empty.
+ * Just set the alert indicator, guarding against a race
+ * with the hardware sampler.
+ */
+ return aux_set_alert(aux, aux->alert_mark, overflow);
+
+ if (aux->alert_mark <= aux->empty_mark)
+ /*
+ * Set the alert indicator on an empty SDB, guarding
+ * against a race with the hardware sampler.
+ */
+ if (!aux_set_alert(aux, aux->alert_mark, overflow))
+ return false;
+
+ /*
+ * Scan the SDBs to clear the full and alert indicators used previously.
+ * Start scanning from one SDB behind empty_mark. If the new alert
+ * indicator falls into this range, set it.
+ */
+ range_scan = range - AUX_SDB_NUM_EMPTY(aux);
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+ do {
+ orig_flags = te->flags;
+ orig_overflow = te->overflow;
+ new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
+ if (idx == aux->alert_mark)
+ new_flags |= SDB_TE_ALERT_REQ_MASK;
+ else
+ new_flags &= ~SDB_TE_ALERT_REQ_MASK;
+ } while (!cmpxchg_double(&te->flags, &te->overflow,
+ orig_flags, orig_overflow,
+ new_flags, 0ULL));
+ *overflow += orig_overflow;
+ }
+
+ /* Update empty_mark to new position */
+ aux->empty_mark = aux->head + range - 1;
+
+ return true;
+}
+
+/*
+ * Measurement alert handler for diagnostic mode sampling.
+ */
+static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
+{
+ struct aux_buffer *aux;
+ int done = 0;
+ unsigned long range = 0, size;
+ unsigned long long overflow = 0;
+ struct perf_output_handle *handle = &cpuhw->handle;
+ unsigned long num_sdb;
+
+ aux = perf_get_aux(handle);
+ if (WARN_ON_ONCE(!aux))
+ return;
+
+ /* Inform user space that new data has arrived */
+ size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
+ perf_aux_output_end(handle, size);
+ num_sdb = aux->sfb.num_sdb;
+
+ while (!done) {
+ /* Get an output handle */
+ aux = perf_aux_output_begin(handle, cpuhw->event);
+ if (handle->size == 0) {
+ pr_err("The AUX buffer with %lu pages for the "
+ "diagnostic-sampling mode is full\n",
+ num_sdb);
+ debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
+ break;
+ }
+ if (WARN_ON_ONCE(!aux))
+ return;
+
+ /* Update head and alert_mark to new position */
+ aux->head = handle->head >> PAGE_SHIFT;
+ range = (handle->size + 1) >> PAGE_SHIFT;
+ if (range == 1)
+ aux->alert_mark = aux->head;
+ else
+ aux->alert_mark = aux->head + range/2 - 1;
+
+ if (aux_reset_buffer(aux, range, &overflow)) {
+ if (!overflow) {
+ done = 1;
+ break;
+ }
+ size = range << PAGE_SHIFT;
+ perf_aux_output_end(&cpuhw->handle, size);
+ pr_err("Sample data caused the AUX buffer with %lu "
+ "pages to overflow\n", num_sdb);
+ debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
+ "overflow %llx\n",
+ aux->head, range, overflow);
+ } else {
+ size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
+ perf_aux_output_end(&cpuhw->handle, size);
+ debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+ "already full, try another\n",
+ aux->head, aux->alert_mark);
+ }
+ }
+
+ if (done)
+ debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
+ "[%lx -> %lx -> %lx] (%lx, %lx)\n",
+ aux->head, aux->alert_mark, aux->empty_mark,
+ AUX_SDB_NUM_ALERT(aux), range);
+}
+
+/*
+ * Callback when freeing AUX buffers.
+ */
+static void aux_buffer_free(void *data)
+{
+ struct aux_buffer *aux = data;
+ unsigned long i, num_sdbt;
+
+ if (!aux)
+ return;
+
+ /* Free the SDBTs. The SDBs are freed by the caller */
+ num_sdbt = aux->sfb.num_sdbt;
+ for (i = 0; i < num_sdbt; i++)
+ free_page(aux->sdbt_index[i]);
+
+ kfree(aux->sdbt_index);
+ kfree(aux->sdb_index);
+ kfree(aux);
+
+ debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
+ "%lu SDBTs\n", num_sdbt);
+}
+
+/*
+ * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
+ * @cpu: The CPU to allocate the buffer on; -1 means the current CPU
+ * @pages: Array of pointers to buffer pages passed from perf core
+ * @nr_pages: Total pages
+ * @snapshot: Flag for snapshot mode
+ *
+ * This is the callback invoked when setting up an event that uses the AUX
+ * buffer. The perf tool can trigger it with an additional mmap() call on the
+ * event. Unlike the buffer for basic samples, the AUX buffer belongs to the
+ * event and, for a per-thread event, is scheduled with the task among the
+ * online CPUs.
+ *
+ * Return the private AUX buffer structure on success or NULL on failure.
+ */
+static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ struct sf_buffer *sfb;
+ struct aux_buffer *aux;
+ unsigned long *new, *tail;
+ int i, n_sdbt;
+
+ if (!nr_pages || !pages)
+ return NULL;
+
+ if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
+ pr_err("AUX buffer size (%i pages) is larger than the "
+ "maximum sampling buffer limit\n",
+ nr_pages);
+ return NULL;
+ } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
+ pr_err("AUX buffer size (%i pages) is less than the "
+ "minimum sampling buffer limit\n",
+ nr_pages);
+ return NULL;
+ }
+
+ /* Allocate aux_buffer struct for the event */
+ aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ if (!aux)
+ goto no_aux;
+ sfb = &aux->sfb;
+
+ /* Allocate sdbt_index for fast reference */
+ n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE;
+ aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
+ if (!aux->sdbt_index)
+ goto no_sdbt_index;
+
+ /* Allocate sdb_index for fast reference */
+ aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
+ if (!aux->sdb_index)
+ goto no_sdb_index;
+
+ /* Allocate the first SDBT */
+ sfb->num_sdbt = 0;
+ sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!sfb->sdbt)
+ goto no_sdbt;
+ aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
+ tail = sfb->tail = sfb->sdbt;
+
+ /*
+ * Link the provided pages of the AUX buffer into the SDBTs,
+ * allocating additional SDBTs as needed.
+ */
+ for (i = 0; i < nr_pages; i++, tail++) {
+ if (require_table_link(tail)) {
+ new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
+ if (!new)
+ goto no_sdbt;
+ aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
+ /* Link current page to tail of chain */
+ *tail = (unsigned long)(void *) new + 1;
+ tail = new;
+ }
+ /* Tail is the entry in an SDBT */
+ *tail = (unsigned long)pages[i];
+ aux->sdb_index[i] = (unsigned long)pages[i];
+ }
+ sfb->num_sdb = nr_pages;
+
+ /* Link the last entry in the SDBT to the first SDBT */
+ *tail = (unsigned long) sfb->sdbt + 1;
+ sfb->tail = tail;
+
+ /*
+ * Initially all SDBs are zeroed. Mark them as empty so
+ * there is no need to clear the full indicator when this
+ * event is first added.
+ */
+ aux->empty_mark = sfb->num_sdb - 1;
+
+ debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
+ " and %lu SDBs\n",
+ sfb->num_sdbt, sfb->num_sdb);
+
+ return aux;
+
+no_sdbt:
+ /* SDBs (AUX buffer pages) are freed by caller */
+ for (i = 0; i < sfb->num_sdbt; i++)
+ free_page(aux->sdbt_index[i]);
+ kfree(aux->sdb_index);
+no_sdb_index:
+ kfree(aux->sdbt_index);
+no_sdbt_index:
+ kfree(aux);
+no_aux:
+ return NULL;
+}
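An SDBT entry that chains to the next SDBT is tagged by setting its low bit (the "+ 1" above); entries holding SDB origins keep bit 0 clear. A minimal sketch of how a walker can tell the two apart, assuming that tagging convention:

/* Sketch only; assumes bit 0 marks an SDBT-link entry. */
static inline int is_link_entry(unsigned long *entry)
{
	return *entry & 0x1;
}

static inline unsigned long *get_next_sdbt(unsigned long *entry)
{
	return (unsigned long *)(*entry & ~0x1UL);
}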
+
static void cpumsf_pmu_read(struct perf_event *event)
{
/* Nothing to do ... updates are interrupt-driven */
@@ -1342,12 +1749,13 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
+ struct aux_buffer *aux;
int err;
if (cpuhw->flags & PMU_F_IN_USE)
return -EAGAIN;
- if (!cpuhw->sfb.sdbt)
+ if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
return -EINVAL;
err = 0;
@@ -1362,10 +1770,12 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
*/
cpuhw->lsctl.s = 0;
cpuhw->lsctl.h = 1;
- cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
- cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
- hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+ if (!SAMPL_DIAG_MODE(&event->hw)) {
+ cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
+ cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
+ hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+ }
/* Ensure sampling functions are in the disabled state. If disabled,
* switch on sampling enable control. */
@@ -1373,9 +1783,18 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
err = -EAGAIN;
goto out;
}
- cpuhw->lsctl.es = 1;
- if (SAMPL_DIAG_MODE(&event->hw))
+ if (SAMPL_DIAG_MODE(&event->hw)) {
+ aux = perf_aux_output_begin(&cpuhw->handle, event);
+ if (!aux) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
+ if (err)
+ goto out;
cpuhw->lsctl.ed = 1;
+ }
+ cpuhw->lsctl.es = 1;
/* Set in_use flag and store event */
cpuhw->event = event;
@@ -1401,6 +1820,8 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
cpuhw->flags &= ~PMU_F_IN_USE;
cpuhw->event = NULL;
+ if (SAMPL_DIAG_MODE(&event->hw))
+ aux_output_end(&cpuhw->handle);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
}
@@ -1448,6 +1869,9 @@ static struct pmu cpumf_sampling = {
.read = cpumsf_pmu_read,
.attr_groups = cpumsf_pmu_attr_groups,
+
+ .setup_aux = aux_buffer_setup,
+ .free_aux = aux_buffer_free,
};
static void cpumf_measurement_alert(struct ext_code ext_code,
@@ -1471,7 +1895,10 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Program alert request */
if (alert & CPU_MF_INT_SF_PRA) {
if (cpuhw->flags & PMU_F_IN_USE)
- hw_perf_event_update(cpuhw->event, 0);
+ if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
+ hw_collect_aux(cpuhw);
+ else
+ hw_perf_event_update(cpuhw->event, 0);
else
WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
}
@@ -1590,6 +2017,9 @@ static int __init init_cpum_sampling_pmu(void)
return -ENODEV;
}
+ if (!si.as && !si.ad)
+ return -ENODEV;
+
if (si.bsdes != sizeof(struct hws_basic_entry)) {
pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
return -EINVAL;
diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c
new file mode 100644
index 000000000000..f8603ebed669
--- /dev/null
+++ b/arch/s390/kernel/perf_regs.c
@@ -0,0 +1,70 @@
+#include <linux/perf_event.h>
+#include <linux/perf_regs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <asm/ptrace.h>
+#include <asm/fpu/api.h>
+#include <asm/fpu/types.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ freg_t fp;
+
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_S390_MAX))
+ return 0;
+
+ if (idx >= PERF_REG_S390_R0 && idx <= PERF_REG_S390_R15)
+ return regs->gprs[idx];
+
+ if (idx >= PERF_REG_S390_FP0 && idx <= PERF_REG_S390_FP15) {
+ if (!user_mode(regs))
+ return 0;
+
+ idx -= PERF_REG_S390_FP0;
+ fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx)
+ : current->thread.fpu.fprs[idx];
+ return fp.ui;
+ }
+
+ if (idx == PERF_REG_S390_MASK)
+ return regs->psw.mask;
+ if (idx == PERF_REG_S390_PC)
+ return regs->psw.addr;
+
+ return regs->gprs[idx];
+}
+
+#define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return PERF_SAMPLE_REGS_ABI_32;
+
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ /*
+ * Use the regs from the first interruption and let
+ * perf_sample_regs_intr() handle interrupts (regs == get_irq_regs()).
+ *
+ * Also save FPU registers for user-space tasks only.
+ */
+ regs_user->regs = task_pt_regs(current);
+ if (user_mode(regs_user->regs))
+ save_fpu_regs();
+ regs_user->abi = perf_reg_abi(current);
+}
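With perf_regs.c in place, user-space register sampling can be requested through the usual perf interface; a minimal sketch, assuming the PERF_REG_S390_* constants from the new uapi perf_regs.h:

struct perf_event_attr attr = { 0 };

attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER;
/* mask must be non-zero and pass perf_reg_validate() */
attr.sample_regs_user = (1ULL << PERF_REG_S390_R14) |
			(1ULL << PERF_REG_S390_PC);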
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0520854a4dab..39a218703c50 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -158,16 +158,9 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
struct vdso_per_cpu_data *vd;
- u32 *psal, *aste;
- int i;
-
- lowcore->vdso_per_cpu_data = __LC_PASTE;
-
- if (!vdso_enabled)
- return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
- page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ page_table = get_zeroed_page(GFP_KERNEL);
page_frame = get_zeroed_page(GFP_KERNEL);
if (!segment_table || !page_table || !page_frame)
goto out;
@@ -179,25 +172,15 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
vd->cpu_nr = lowcore->cpu_nr;
vd->node_id = cpu_to_node(vd->cpu_nr);
- /* Set up access register mode page table */
+ /* Set up page table for the vdso address space */
memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
- psal = (u32 *) (page_table + 256*sizeof(unsigned long));
- aste = psal + 32;
-
- for (i = 4; i < 32; i += 4)
- psal[i] = 0x80000000;
-
- lowcore->paste[4] = (u32)(addr_t) psal;
- psal[0] = 0x02000000;
- psal[2] = (u32)(addr_t) aste;
- *(unsigned long *) (aste + 2) = segment_table +
+ lowcore->vdso_asce = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
- aste[4] = (u32)(addr_t) psal;
lowcore->vdso_per_cpu_data = page_frame;
return 0;
@@ -212,14 +195,8 @@ out:
void vdso_free_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
- u32 *psal, *aste;
-
- if (!vdso_enabled)
- return;
- psal = (u32 *)(addr_t) lowcore->paste[4];
- aste = (u32 *)(addr_t) psal[2];
- segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
+ segment_table = lowcore->vdso_asce & PAGE_MASK;
page_table = *(unsigned long *) segment_table;
page_frame = *(unsigned long *) page_table;
@@ -228,16 +205,6 @@ void vdso_free_per_cpu(struct lowcore *lowcore)
free_pages(segment_table, SEGMENT_ORDER);
}
-static void vdso_init_cr5(void)
-{
- unsigned long cr5;
-
- if (!vdso_enabled)
- return;
- cr5 = offsetof(struct lowcore, paste);
- __ctl_load(cr5, 5, 5);
-}
-
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
@@ -314,8 +281,6 @@ static int __init vdso_init(void)
{
int i;
- if (!vdso_enabled)
- return 0;
vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
/* Calculate the size of the 32 bit vDSO */
@@ -354,7 +319,6 @@ static int __init vdso_init(void)
vdso64_pagelist[vdso64_pages] = NULL;
if (vdso_alloc_per_cpu(&S390_lowcore))
BUG();
- vdso_init_cr5();
get_page(virt_to_page(vdso_data));
diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S
index 6e30769dd017..5477a2c112fb 100644
--- a/arch/s390/kernel/vdso32/getcpu.S
+++ b/arch/s390/kernel/vdso32/getcpu.S
@@ -15,23 +15,11 @@
.type __kernel_getcpu,@function
__kernel_getcpu:
.cfi_startproc
- ear %r1,%a4
- lhi %r4,1
- sll %r4,24
- sar %a4,%r4
la %r4,0
- epsw %r0,0
- sacf 512
+ sacf 256
l %r5,__VDSO_CPU_NR(%r4)
l %r4,__VDSO_NODE_ID(%r4)
- tml %r0,0x4000
- jo 1f
- tml %r0,0x8000
- jno 0f
- sacf 256
- j 1f
-0: sacf 0
-1: sar %a4,%r1
+ sacf 0
ltr %r2,%r2
jz 2f
st %r5,0(%r2)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 9c3b12626dba..5d7b56b49458 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -114,23 +114,12 @@ __kernel_clock_gettime:
br %r14
/* CPUCLOCK_VIRT for this thread */
-9: icm %r0,15,__VDSO_ECTG_OK(%r5)
+9: lghi %r4,0
+ icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
- ear %r2,%a4
- llilh %r4,0x0100
- sar %a4,%r4
- lghi %r4,0
- epsw %r5,0
- sacf 512 /* Magic ectg instruction */
+ sacf 256 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- tml %r5,0x4000
- jo 11f
- tml %r5,0x8000
- jno 10f
- sacf 256
- j 11f
-10: sacf 0
-11: sar %a4,%r2
+ sacf 0
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
index 43983764b959..e9c34364d97b 100644
--- a/arch/s390/kernel/vdso64/getcpu.S
+++ b/arch/s390/kernel/vdso64/getcpu.S
@@ -15,22 +15,11 @@
.type __kernel_getcpu,@function
__kernel_getcpu:
.cfi_startproc
- ear %r1,%a4
- llilh %r4,0x0100
- sar %a4,%r4
la %r4,0
- epsw %r0,0
- sacf 512
+ sacf 256
l %r5,__VDSO_CPU_NR(%r4)
l %r4,__VDSO_NODE_ID(%r4)
- tml %r0,0x4000
- jo 1f
- tml %r0,0x8000
- jno 0f
- sacf 256
- j 1f
-0: sacf 0
-1: sar %a4,%r1
+ sacf 0
ltgr %r2,%r2
jz 2f
st %r5,0(%r2)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 84c0faeaf7ea..30a7c8c29964 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -78,7 +78,7 @@ static inline int arch_load_niai4(int *lock)
ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
- return owner;
+ return owner;
}
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
@@ -226,9 +226,10 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
/* Try to get the lock if it is free. */
if (!owner) {
new = (old & _Q_TAIL_MASK) | lockval;
- if (arch_cmpxchg_niai8(&lp->lock, old, new))
+ if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
/* Got the lock */
- return;
+ return;
+ }
continue;
}
if (count-- >= 0)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 802903c50de1..cae5a1e16cbd 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -40,10 +40,67 @@ static inline int copy_with_mvcos(void)
}
#endif
+void set_fs(mm_segment_t fs)
+{
+ current->thread.mm_segment = fs;
+ if (fs == USER_DS) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ } else {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ if (fs & 1) {
+ if (fs == USER_DS_SACF)
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+ else
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ set_cpu_flag(CIF_ASCE_SECONDARY);
+ }
+}
+EXPORT_SYMBOL(set_fs);
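The fs == USER_DS and fs & 1 tests rely on the low bit of the segment value marking the SACF variants. A sketch of the assumed encoding (the actual definitions live in the s390 headers, not shown in this diff):

/* Assumed encoding; illustrative only. */
#define KERNEL_DS	(0)
#define KERNEL_DS_SACF	(1)
#define USER_DS		(2)
#define USER_DS_SACF	(3)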
+
+mm_segment_t enable_sacf_uaccess(void)
+{
+ mm_segment_t old_fs;
+ unsigned long asce, cr;
+
+ old_fs = current->thread.mm_segment;
+ if (old_fs & 1)
+ return old_fs;
+ current->thread.mm_segment |= 1;
+ asce = S390_lowcore.kernel_asce;
+ if (likely(old_fs == USER_DS)) {
+ __ctl_store(cr, 1, 1);
+ if (cr != S390_lowcore.kernel_asce) {
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ asce = S390_lowcore.user_asce;
+ }
+ __ctl_store(cr, 7, 7);
+ if (cr != asce) {
+ __ctl_load(asce, 7, 7);
+ set_cpu_flag(CIF_ASCE_SECONDARY);
+ }
+ return old_fs;
+}
+EXPORT_SYMBOL(enable_sacf_uaccess);
+
+void disable_sacf_uaccess(mm_segment_t old_fs)
+{
+ if (old_fs == USER_DS && test_facility(27)) {
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ clear_cpu_flag(CIF_ASCE_PRIMARY);
+ }
+ current->thread.mm_segment = old_fs;
+}
+EXPORT_SYMBOL(disable_sacf_uaccess);
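Every non-MVCOS primitive below brackets its inline assembly with this helper pair; the usage pattern:

mm_segment_t old_fs;

old_fs = enable_sacf_uaccess();	/* secondary ASCE -> user or kernel space */
/* ... sacf 256, access memory via mvcp/mvcs/xc/srst, sacf 0 ... */
disable_sacf_uaccess(old_fs);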
+
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x81UL;
+ register unsigned long reg0 asm("0") = 0x01UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -74,8 +131,9 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size)
{
unsigned long tmp1, tmp2;
+ mm_segment_t old_fs;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -102,6 +160,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -116,7 +175,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
+ register unsigned long reg0 asm("0") = 0x010000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -147,8 +206,9 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size)
{
unsigned long tmp1, tmp2;
+ mm_segment_t old_fs;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
@@ -175,6 +235,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -189,7 +250,7 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810081UL;
+ register unsigned long reg0 asm("0") = 0x010001UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -212,9 +273,10 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
unsigned long size)
{
+ mm_segment_t old_fs;
unsigned long tmp1;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -238,6 +300,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -251,7 +314,7 @@ EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
- register unsigned long reg0 asm("0") = 0x810000UL;
+ register unsigned long reg0 asm("0") = 0x010000UL;
unsigned long tmp1, tmp2;
tmp1 = -4096UL;
@@ -279,9 +342,10 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
+ mm_segment_t old_fs;
unsigned long tmp1, tmp2;
- load_kernel_asce();
+ old_fs = enable_sacf_uaccess();
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
@@ -310,6 +374,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
+ disable_sacf_uaccess(old_fs);
return size;
}
@@ -345,10 +410,15 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
+ mm_segment_t old_fs;
+ unsigned long len;
+
if (unlikely(!size))
return 0;
- load_kernel_asce();
- return strnlen_user_srst(src, size);
+ old_fs = enable_sacf_uaccess();
+ len = strnlen_user_srst(src, size);
+ disable_sacf_uaccess(old_fs);
+ return len;
}
EXPORT_SYMBOL(__strnlen_user);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 242b78c0a9ec..93faeca52284 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -50,6 +50,13 @@
#define VM_FAULT_SIGNAL 0x080000
#define VM_FAULT_PFAULT 0x100000
+enum fault_type {
+ KERNEL_FAULT,
+ USER_FAULT,
+ VDSO_FAULT,
+ GMAP_FAULT,
+};
+
static unsigned long store_indication __read_mostly;
static int __init fault_init(void)
@@ -99,27 +106,34 @@ void bust_spinlocks(int yes)
}
/*
- * Returns the address space associated with the fault.
- * Returns 0 for kernel space and 1 for user space.
+ * Find out which address space caused the exception.
+ * Access register mode is impossible, ignore space == 3.
*/
-static inline int user_space_fault(struct pt_regs *regs)
+static inline enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
- /*
- * The lowest two bits of the translation exception
- * identification indicate which paging table was used.
- */
trans_exc_code = regs->int_parm_long & 3;
- if (trans_exc_code == 3) /* home space -> kernel */
- return 0;
- if (user_mode(regs))
- return 1;
- if (trans_exc_code == 2) /* secondary space -> set_fs */
- return current->thread.mm_segment.ar4;
- if (current->flags & PF_VCPU)
- return 1;
- return 0;
+ if (likely(trans_exc_code == 0)) {
+ /* primary space exception */
+ if (IS_ENABLED(CONFIG_PGSTE) &&
+ test_pt_regs_flag(regs, PIF_GUEST_FAULT))
+ return GMAP_FAULT;
+ if (current->thread.mm_segment == USER_DS)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ if (trans_exc_code == 2) {
+ /* secondary space exception */
+ if (current->thread.mm_segment & 1) {
+ if (current->thread.mm_segment == USER_DS_SACF)
+ return USER_FAULT;
+ return KERNEL_FAULT;
+ }
+ return VDSO_FAULT;
+ }
+ /* home space exception -> access via kernel ASCE */
+ return KERNEL_FAULT;
}
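For quick reference, the mapping implemented above (the two low bits of the translation-exception identification select the address space that was in use):

/*
 * trans_exc_code & 3:
 *   0 -> primary space:   USER_FAULT, KERNEL_FAULT or GMAP_FAULT,
 *                         depending on context
 *   2 -> secondary space: VDSO_FAULT, or USER_FAULT/KERNEL_FAULT
 *                         during SACF uaccess
 *   otherwise             handled as a home space (KERNEL_FAULT)
 */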
static int bad_address(void *p)
@@ -204,20 +218,23 @@ static void dump_fault_info(struct pt_regs *regs)
break;
}
pr_cont("mode while using ");
- if (!user_space_fault(regs)) {
- asce = S390_lowcore.kernel_asce;
- pr_cont("kernel ");
- }
-#ifdef CONFIG_PGSTE
- else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
- struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
- asce = gmap->asce;
- pr_cont("gmap ");
- }
-#endif
- else {
+ switch (get_fault_type(regs)) {
+ case USER_FAULT:
asce = S390_lowcore.user_asce;
pr_cont("user ");
+ break;
+ case VDSO_FAULT:
+ asce = S390_lowcore.vdso_asce;
+ pr_cont("vdso ");
+ break;
+ case GMAP_FAULT:
+ asce = ((struct gmap *) S390_lowcore.gmap)->asce;
+ pr_cont("gmap ");
+ break;
+ case KERNEL_FAULT:
+ asce = S390_lowcore.kernel_asce;
+ pr_cont("kernel ");
+ break;
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -273,7 +290,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- if (!user_space_fault(regs))
+ if (get_fault_type(regs) == KERNEL_FAULT)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" in virtual kernel address space\n");
else
@@ -395,12 +412,11 @@ static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
*/
static inline int do_exception(struct pt_regs *regs, int access)
{
-#ifdef CONFIG_PGSTE
struct gmap *gmap;
-#endif
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
+ enum fault_type type;
unsigned long trans_exc_code;
unsigned long address;
unsigned int flags;
@@ -425,8 +441,19 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
+ type = get_fault_type(regs);
+ switch (type) {
+ case KERNEL_FAULT:
+ goto out;
+ case VDSO_FAULT:
+ fault = VM_FAULT_BADMAP;
goto out;
+ case USER_FAULT:
+ case GMAP_FAULT:
+ if (faulthandler_disabled() || !mm)
+ goto out;
+ break;
+ }
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
@@ -437,10 +464,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
-#ifdef CONFIG_PGSTE
- gmap = (current->flags & PF_VCPU) ?
- (struct gmap *) S390_lowcore.gmap : NULL;
- if (gmap) {
+ gmap = NULL;
+ if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
+ gmap = (struct gmap *) S390_lowcore.gmap;
current->thread.gmap_addr = address;
current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
current->thread.gmap_int_code = regs->int_code & 0xffff;
@@ -452,7 +478,6 @@ static inline int do_exception(struct pt_regs *regs, int access)
if (gmap->pfault_enabled)
flags |= FAULT_FLAG_RETRY_NOWAIT;
}
-#endif
retry:
fault = VM_FAULT_BADMAP;
@@ -507,15 +532,14 @@ retry:
regs, address);
}
if (fault & VM_FAULT_RETRY) {
-#ifdef CONFIG_PGSTE
- if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
+ (flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
* mmap_sem has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
-#endif
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~(FAULT_FLAG_ALLOW_RETRY |
@@ -525,8 +549,7 @@ retry:
goto retry;
}
}
-#ifdef CONFIG_PGSTE
- if (gmap) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
address);
if (address == -EFAULT) {
@@ -538,7 +561,6 @@ retry:
goto out_up;
}
}
-#endif
fault = 0;
out_up:
up_read(&mm->mmap_sem);
@@ -706,7 +728,7 @@ static void pfault_interrupt(struct ext_code ext_code,
return;
inc_irq_stat(IRQEXT_PFL);
/* Get the token (= pid of the affected task). */
- pid = param64 & LPP_PFAULT_PID_MASK;
+ pid = param64 & LPP_PID_MASK;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2f66290c9b92..b2c140193b0a 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1187,12 +1187,11 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
unsigned long *sgt)
{
- unsigned long asce, *pgt;
+ unsigned long *pgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
@@ -1245,12 +1244,11 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
unsigned long *r3t)
{
- unsigned long asce, *sgt;
+ unsigned long *sgt;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
@@ -1303,12 +1301,11 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
unsigned long *r2t)
{
- unsigned long asce, *r3t;
+ unsigned long *r3t;
struct page *page;
int i;
BUG_ON(!gmap_is_shadow(sg));
- asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 817c9e16e83e..671535e64aba 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -95,6 +95,7 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 4ad4c4f77b4d..434a9564917b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -71,10 +71,8 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
- if (current->active_mm == mm) {
- clear_user_asce();
+ if (current->active_mm == mm)
set_user_asce(mm);
- }
__tlb_flush_local();
}
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 2ebf2872cc16..2e70e25de07a 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -21,4 +21,4 @@ include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
include/generated/dis.h: $(obj)/gen_opcode_table FORCE
- $(call filechk,dis.h,__FUN)
+ $(call filechk,dis.h)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 4e83f950713e..987a57502909 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -96,9 +96,6 @@ config ARCH_PROC_KCORE_TEXT
config CPU_BIG_ENDIAN
def_bool y
-config CPU_BIG_ENDIAN
- def_bool y
-
config ARCH_ATU
bool
default y if SPARC64
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 977c3f280ba1..fa38c78de0f0 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -209,7 +209,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 5c572de64c74..54a6159b9cd8 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -249,7 +249,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
compat_uptr_t fpu_save;
compat_uptr_t rwin_save;
sigset_t set;
- compat_sigset_t seta;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
@@ -312,7 +311,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
err |= __get_user(fpu_save, &sf->fpu_save);
if (!err && fpu_save)
err |= restore_fpu_state(regs, compat_ptr(fpu_save));
- err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
+ err |= get_compat_sigset(&set, &sf->mask);
err |= compat_restore_altstack(&sf->stack);
if (err)
goto segv;
@@ -323,7 +322,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
goto segv;
}
- set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
set_current_blocked(&set);
return;
segv:
@@ -555,7 +553,6 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
void __user *tail;
int sigframe_size;
u32 psr;
- compat_sigset_t seta;
/* 1. Make sure everything is clean */
synchronize_user_stack();
@@ -625,9 +622,7 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
/* Setup sigaltstack */
err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
- seta.sig[1] = (oldset->sig[0] >> 32);
- seta.sig[0] = oldset->sig[0];
- err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
+ err |= put_compat_sigset(&sf->mask, oldset, sizeof(compat_sigset_t));
if (!wsaved) {
err |= copy_in_user((u32 __user *)sf,
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index b4e1478413a1..6d964bdefbaa 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -160,7 +160,6 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
{
struct k_sigaction new_ka, old_ka;
int ret;
- compat_sigset_t set32;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
@@ -172,8 +171,7 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
- ret |= copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
- sigset_from_compat(&new_ka.sa.sa_mask, &set32);
+ ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
@@ -184,9 +182,9 @@ COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- sigset_to_compat(&set32, &old_ka.sa.sa_mask);
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
- ret |= copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
+ ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
+ sizeof(oact->sa_mask));
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
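The sigset conversions above replace hand-rolled 32-bit word splitting with the get_compat_sigset()/put_compat_sigset() helpers. As a standalone sketch of the arithmetic the removed lines performed by hand (hypothetical types, compiled outside the kernel, not the helpers' actual implementation):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for compat_sigset_t (two 32-bit words) and
 * sigset_t (one 64-bit word) on a 64-bit kernel with 32-bit compat. */
typedef struct { uint32_t sig[2]; } compat_sigset32;
typedef struct { uint64_t sig[1]; } sigset64;

/* What "set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32)" computed:
 * fold the low and high 32-bit halves into one 64-bit mask. */
static sigset64 sigset_from_compat32(compat_sigset32 s)
{
        sigset64 out;

        out.sig[0] = (uint64_t)s.sig[0] | ((uint64_t)s.sig[1] << 32);
        return out;
}

int main(void)
{
        compat_sigset32 s = { { 0x89abcdefu, 0x01234567u } };

        /* Prints 0123456789abcdef. */
        printf("%016llx\n",
               (unsigned long long)sigset_from_compat32(s).sig[0]);
        return 0;
}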
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c14e36f008c8..62a7b83025dd 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -173,7 +173,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index a600a6cda9ec..2cbd75dd2fd3 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -210,7 +210,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;
#define COMPAT_OFF_T_MAX 0x7fffffff
-#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 90cb82dbba57..570e8bb1f386 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -22,7 +22,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
-obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
+obj-y += aperfmperf.o
obj-y += cpuid-deps.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 957813e0180d..7eba34df54c3 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -14,6 +14,8 @@
#include <linux/percpu.h>
#include <linux/smp.h>
+#include "cpu.h"
+
struct aperfmperf_sample {
unsigned int khz;
ktime_t time;
@@ -24,7 +26,7 @@ struct aperfmperf_sample {
static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
#define APERFMPERF_CACHE_THRESHOLD_MS 10
-#define APERFMPERF_REFRESH_DELAY_MS 20
+#define APERFMPERF_REFRESH_DELAY_MS 10
#define APERFMPERF_STALE_THRESHOLD_MS 1000
/*
@@ -38,8 +40,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
u64 aperf, aperf_delta;
u64 mperf, mperf_delta;
struct aperfmperf_sample *s = this_cpu_ptr(&samples);
- ktime_t now = ktime_get();
- s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags;
local_irq_save(flags);
@@ -57,38 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy)
if (mperf_delta == 0)
return;
- s->time = now;
+ s->time = ktime_get();
s->aperf = aperf;
s->mperf = mperf;
-
- /* If the previous iteration was too long ago, discard it. */
- if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
- s->khz = 0;
- else
- s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
+ s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
}
-unsigned int arch_freq_get_on_cpu(int cpu)
+static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait)
{
- s64 time_delta;
- unsigned int khz;
+ s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu));
+
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return true;
+
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait);
+
+ /* Return false if the previous iteration was too long ago. */
+ return time_delta <= APERFMPERF_STALE_THRESHOLD_MS;
+}
+unsigned int aperfmperf_get_khz(int cpu)
+{
if (!cpu_khz)
return 0;
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
- /* Don't bother re-computing within the cache threshold time. */
- time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
- khz = per_cpu(samples.khz, cpu);
- if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return khz;
+ aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
+ return per_cpu(samples.khz, cpu);
+}
- smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
- khz = per_cpu(samples.khz, cpu);
- if (khz)
- return khz;
+void arch_freq_prepare_all(void)
+{
+ ktime_t now = ktime_get();
+ bool wait = false;
+ int cpu;
+
+ if (!cpu_khz)
+ return;
+
+ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ return;
+
+ for_each_online_cpu(cpu)
+ if (!aperfmperf_snapshot_cpu(cpu, now, false))
+ wait = true;
+
+ if (wait)
+ msleep(APERFMPERF_REFRESH_DELAY_MS);
+}
+
+unsigned int arch_freq_get_on_cpu(int cpu)
+{
+ if (!cpu_khz)
+ return 0;
+
+ if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ return 0;
+
+ if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
+ return per_cpu(samples.khz, cpu);
msleep(APERFMPERF_REFRESH_DELAY_MS);
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
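For context on the math above: the effective frequency is the base frequency scaled by the APERF/MPERF ratio over the sampling window. A minimal sketch of that computation (the MSR deltas are assumed to come from elsewhere; this is not the kernel function itself):

#include <stdint.h>
#include <stdio.h>

/* Mirrors s->khz = div64_u64(cpu_khz * aperf_delta, mperf_delta):
 * cpu_khz is the base (TSC) frequency in kHz; the deltas are the changes
 * in IA32_APERF and IA32_MPERF over the sampling window. The product can
 * overflow 64 bits for very long windows, just as in the kernel. */
static uint64_t effective_khz(uint64_t cpu_khz, uint64_t aperf_delta,
                              uint64_t mperf_delta)
{
        if (mperf_delta == 0)   /* same guard as aperfmperf_snapshot_khz() */
                return 0;
        return cpu_khz * aperf_delta / mperf_delta;
}

int main(void)
{
        /* A CPU based at 2400000 kHz that retired 3 APERF ticks per
         * 2 MPERF ticks was effectively running at 3600000 kHz. */
        printf("%llu\n", (unsigned long long)effective_khz(2400000, 3, 2));
        return 0;
}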
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index f52a370b6c00..e806b11a99af 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -47,4 +47,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+
+unsigned int aperfmperf_get_khz(int cpu);
+
#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 6b7e17bf0b71..e7ecedafa1c8 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -5,6 +5,8 @@
#include <linux/seq_file.h>
#include <linux/cpufreq.h>
+#include "cpu.h"
+
/*
* Get CPU information for use by the procfs.
*/
@@ -78,9 +80,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = cpufreq_quick_get(cpu);
+ unsigned int freq = aperfmperf_get_khz(cpu);
if (!freq)
+ freq = cpufreq_quick_get(cpu);
+ if (!freq)
freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
freq / 1000, (freq % 1000));
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index ceefb9a706d6..da1525ec4c87 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -24,7 +24,7 @@
#include "bfq-iosched.h"
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/* bfqg stats flags */
enum bfqg_stats_flags {
@@ -152,6 +152,57 @@ void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
bfqg_stats_update_group_wait_time(stats);
}
+void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
+ unsigned int op)
+{
+ blkg_rwstat_add(&bfqg->stats.queued, op, 1);
+ bfqg_stats_end_empty_time(&bfqg->stats);
+ if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
+ bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
+}
+
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
+{
+ blkg_rwstat_add(&bfqg->stats.queued, op, -1);
+}
+
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
+{
+ blkg_rwstat_add(&bfqg->stats.merged, op, 1);
+}
+
+void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
+ uint64_t io_start_time, unsigned int op)
+{
+ struct bfqg_stats *stats = &bfqg->stats;
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, io_start_time))
+ blkg_rwstat_add(&stats->service_time, op,
+ now - io_start_time);
+ if (time_after64(io_start_time, start_time))
+ blkg_rwstat_add(&stats->wait_time, op,
+ io_start_time - start_time);
+}
+
+#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
+ unsigned int op) { }
+void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
+void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
+void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
+ uint64_t io_start_time, unsigned int op) { }
+void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
+void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
+void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
+void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
+void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
+
+#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+
/*
* blk-cgroup policy-related handlers
* The following functions help in converting between blk-cgroup
@@ -229,42 +280,10 @@ void bfqg_and_blkg_put(struct bfq_group *bfqg)
blkg_put(bfqg_to_blkg(bfqg));
}
-void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
- unsigned int op)
-{
- blkg_rwstat_add(&bfqg->stats.queued, op, 1);
- bfqg_stats_end_empty_time(&bfqg->stats);
- if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
- bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
-}
-
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
-{
- blkg_rwstat_add(&bfqg->stats.queued, op, -1);
-}
-
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
-{
- blkg_rwstat_add(&bfqg->stats.merged, op, 1);
-}
-
-void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
- uint64_t io_start_time, unsigned int op)
-{
- struct bfqg_stats *stats = &bfqg->stats;
- unsigned long long now = sched_clock();
-
- if (time_after64(now, io_start_time))
- blkg_rwstat_add(&stats->service_time, op,
- now - io_start_time);
- if (time_after64(io_start_time, start_time))
- blkg_rwstat_add(&stats->wait_time, op,
- io_start_time - start_time);
-}
-
/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
+#ifdef CONFIG_DEBUG_BLK_CGROUP
/* queued stats shouldn't be cleared */
blkg_rwstat_reset(&stats->merged);
blkg_rwstat_reset(&stats->service_time);
@@ -276,6 +295,7 @@ static void bfqg_stats_reset(struct bfqg_stats *stats)
blkg_stat_reset(&stats->group_wait_time);
blkg_stat_reset(&stats->idle_time);
blkg_stat_reset(&stats->empty_time);
+#endif
}
/* @to += @from */
@@ -284,6 +304,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
if (!to || !from)
return;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
/* queued stats shouldn't be cleared */
blkg_rwstat_add_aux(&to->merged, &from->merged);
blkg_rwstat_add_aux(&to->service_time, &from->service_time);
@@ -296,6 +317,7 @@ static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
blkg_stat_add_aux(&to->idle_time, &from->idle_time);
blkg_stat_add_aux(&to->empty_time, &from->empty_time);
+#endif
}
/*
@@ -342,6 +364,7 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
static void bfqg_stats_exit(struct bfqg_stats *stats)
{
+#ifdef CONFIG_DEBUG_BLK_CGROUP
blkg_rwstat_exit(&stats->merged);
blkg_rwstat_exit(&stats->service_time);
blkg_rwstat_exit(&stats->wait_time);
@@ -353,10 +376,12 @@ static void bfqg_stats_exit(struct bfqg_stats *stats)
blkg_stat_exit(&stats->group_wait_time);
blkg_stat_exit(&stats->idle_time);
blkg_stat_exit(&stats->empty_time);
+#endif
}
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
+#ifdef CONFIG_DEBUG_BLK_CGROUP
if (blkg_rwstat_init(&stats->merged, gfp) ||
blkg_rwstat_init(&stats->service_time, gfp) ||
blkg_rwstat_init(&stats->wait_time, gfp) ||
@@ -371,6 +396,7 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
bfqg_stats_exit(stats);
return -ENOMEM;
}
+#endif
return 0;
}
@@ -887,6 +913,7 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}
+#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -991,6 +1018,7 @@ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
0, false);
return 0;
}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
@@ -1029,15 +1057,6 @@ struct cftype bfq_blkcg_legacy_files[] = {
/* statistics, covers only the tasks in the bfqg */
{
- .name = "bfq.time",
- .private = offsetof(struct bfq_group, stats.time),
- .seq_show = bfqg_print_stat,
- },
- {
- .name = "bfq.sectors",
- .seq_show = bfqg_print_stat_sectors,
- },
- {
.name = "bfq.io_service_bytes",
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_bytes,
@@ -1047,6 +1066,16 @@ struct cftype bfq_blkcg_legacy_files[] = {
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_ios,
},
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "bfq.time",
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat,
+ },
+ {
+ .name = "bfq.sectors",
+ .seq_show = bfqg_print_stat_sectors,
+ },
{
.name = "bfq.io_service_time",
.private = offsetof(struct bfq_group, stats.service_time),
@@ -1067,18 +1096,10 @@ struct cftype bfq_blkcg_legacy_files[] = {
.private = offsetof(struct bfq_group, stats.queued),
.seq_show = bfqg_print_rwstat,
},
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
/* the same statistics which cover the bfqg and its descendants */
{
- .name = "bfq.time_recursive",
- .private = offsetof(struct bfq_group, stats.time),
- .seq_show = bfqg_print_stat_recursive,
- },
- {
- .name = "bfq.sectors_recursive",
- .seq_show = bfqg_print_stat_sectors_recursive,
- },
- {
.name = "bfq.io_service_bytes_recursive",
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_bytes_recursive,
@@ -1088,6 +1109,16 @@ struct cftype bfq_blkcg_legacy_files[] = {
.private = (unsigned long)&blkcg_policy_bfq,
.seq_show = blkg_print_stat_ios_recursive,
},
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "bfq.time_recursive",
+ .private = offsetof(struct bfq_group, stats.time),
+ .seq_show = bfqg_print_stat_recursive,
+ },
+ {
+ .name = "bfq.sectors_recursive",
+ .seq_show = bfqg_print_stat_sectors_recursive,
+ },
{
.name = "bfq.io_service_time_recursive",
.private = offsetof(struct bfq_group, stats.service_time),
@@ -1132,6 +1163,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
.private = offsetof(struct bfq_group, stats.dequeue),
.seq_show = bfqg_print_stat,
},
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
{ } /* terminate */
};
@@ -1147,18 +1179,6 @@ struct cftype bfq_blkg_files[] = {
#else /* CONFIG_BFQ_GROUP_IOSCHED */
-void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
- unsigned int op) { }
-void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
-void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
-void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
- uint64_t io_start_time, unsigned int op) { }
-void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
-void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
-void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
-void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
-void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
-
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_group *bfqg) {}
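A reduced sketch of the stubbing pattern used above, where the same declarations compile either to real updaters or to empty bodies depending on two config symbols (illustrative names only, not the bfq code itself):

/* Illustrative pattern, not the bfq code. */
struct foo { unsigned long events; };

#if defined(CONFIG_FOO_SCHED) && defined(CONFIG_FOO_DEBUG)

void foo_stats_update(struct foo *f)
{
        f->events++;            /* real bookkeeping */
}

#else /* CONFIG_FOO_SCHED && CONFIG_FOO_DEBUG */

void foo_stats_update(struct foo *f) { }        /* compiles to nothing */

#endif /* CONFIG_FOO_SCHED && CONFIG_FOO_DEBUG */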
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 889a8549d97f..bcb6d21baf12 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1359,7 +1359,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfqq->ttime.last_end_request +
bfqd->bfq_slice_idle * 3;
- bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
/*
* bfqq deserves to be weight-raised if:
@@ -1633,7 +1632,6 @@ static void bfq_remove_request(struct request_queue *q,
if (rq->cmd_flags & REQ_META)
bfqq->meta_pending--;
- bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
@@ -1746,6 +1744,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
bfqq->next_rq = rq;
bfq_remove_request(q, next);
+ bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
spin_unlock_irq(&bfqq->bfqd->lock);
end:
@@ -2229,7 +2228,6 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
if (bfqq) {
- bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
bfq_clear_bfqq_fifo_expire(bfqq);
bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
@@ -3470,7 +3468,6 @@ check_queue:
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
- bfqg_stats_update_idle_time(bfqq_group(bfqq));
}
goto keep_queue;
}
@@ -3696,12 +3693,67 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct request *rq;
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ struct bfq_queue *in_serv_queue, *bfqq;
+ bool waiting_rq, idle_timer_disabled;
+#endif
spin_lock_irq(&bfqd->lock);
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ in_serv_queue = bfqd->in_service_queue;
+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+ rq = __bfq_dispatch_request(hctx);
+
+ idle_timer_disabled =
+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+#else
rq = __bfq_dispatch_request(hctx);
+#endif
spin_unlock_irq(&bfqd->lock);
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ bfqq = rq ? RQ_BFQQ(rq) : NULL;
+ if (!idle_timer_disabled && !bfqq)
+ return rq;
+
+ /*
+ * rq and bfqq are guaranteed to exist until this function
+ * ends, for the following reasons. First, rq can be
+ * dispatched to the device, and then can be completed and
+ * freed, only after this function ends. Second, rq cannot be
+ * merged (and thus freed because of a merge) any longer,
+ * because it has already started. Thus rq cannot be freed
+ * before this function ends, and, since rq has a reference to
+ * bfqq, the same guarantee holds for bfqq too.
+ *
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+ spin_lock_irq(hctx->queue->queue_lock);
+ if (idle_timer_disabled)
+ /*
+ * Since the idle timer has been disabled,
+ * in_serv_queue contained some request when
+ * __bfq_dispatch_request was invoked above, which
+ * implies that rq was picked exactly from
+ * in_serv_queue. Thus in_serv_queue == bfqq, and is
+ * therefore guaranteed to exist because of the above
+ * arguments.
+ */
+ bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
+ if (bfqq) {
+ struct bfq_group *bfqg = bfqq_group(bfqq);
+
+ bfqg_stats_update_avg_queue_size(bfqg);
+ bfqg_stats_set_start_empty_time(bfqg);
+ bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
+ }
+ spin_unlock_irq(hctx->queue->queue_lock);
+#endif
+
return rq;
}
@@ -4159,7 +4211,6 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
- bfqg_stats_update_idle_time(bfqq_group(bfqq));
/*
* The queue is not empty, because a new request just
@@ -4174,10 +4225,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
}
-static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
+/* returns true if it causes the idle timer to be disabled */
+static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq),
*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
+ bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
@@ -4211,12 +4264,16 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bfqq = new_bfqq;
}
+ waiting = bfqq && bfq_bfqq_wait_request(bfqq);
bfq_add_request(rq);
+ idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo);
bfq_rq_enqueued(bfqd, bfqq, rq);
+
+ return idle_timer_disabled;
}
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
@@ -4224,6 +4281,11 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+ bool idle_timer_disabled = false;
+ unsigned int cmd_flags;
+#endif
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4242,7 +4304,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
} else {
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+ * in __bfq_insert_request, then rq has been
+ * redirected into a new queue.
+ */
+ bfqq = RQ_BFQQ(rq);
+#else
__bfq_insert_request(bfqd, rq);
+#endif
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
@@ -4251,7 +4323,35 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
}
}
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ /*
+ * Cache cmd_flags before releasing scheduler lock, because rq
+ * may disappear afterwards (for example, because of a request
+ * merge).
+ */
+ cmd_flags = rq->cmd_flags;
+#endif
spin_unlock_irq(&bfqd->lock);
+
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+ if (!bfqq)
+ return;
+ /*
+ * bfqq still exists, because it can disappear only after
+ * either it is merged with another queue, or the process it
+ * is associated with exits. But both actions must be taken by
+ * the same process currently executing this flow of
+ * instructions.
+ *
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+ spin_lock_irq(q->queue_lock);
+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+ if (idle_timer_disabled)
+ bfqg_stats_update_idle_time(bfqq_group(bfqq));
+ spin_unlock_irq(q->queue_lock);
+#endif
}
static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -4428,8 +4528,11 @@ static void bfq_finish_request(struct request *rq)
* lock is held.
*/
- if (!RB_EMPTY_NODE(&rq->rb_node))
+ if (!RB_EMPTY_NODE(&rq->rb_node)) {
bfq_remove_request(rq->q, rq);
+ bfqg_stats_update_io_remove(bfqq_group(bfqq),
+ rq->cmd_flags);
+ }
bfq_put_rq_priv_body(bfqq);
}
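The dispatch and insert paths above follow one locking discipline: sample whatever the statistics need while holding the scheduler lock, drop it, then update the blkg counters under the request_queue lock, never holding both. A user-space analogue of that two-lock pattern (pthread mutexes standing in for the kernel spinlocks; a sketch, not the driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long io_stats;

struct request { unsigned int cmd_flags; };

static void finish_request(struct request *rq)
{
        unsigned int flags;

        pthread_mutex_lock(&sched_lock);
        flags = rq->cmd_flags;  /* cache: rq may be freed once we unlock */
        pthread_mutex_unlock(&sched_lock);

        pthread_mutex_lock(&queue_lock);        /* stats object valid here */
        io_stats += flags;
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        struct request rq = { .cmd_flags = 1 };

        finish_request(&rq);
        printf("%lu\n", io_stats);
        return 0;
}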
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ac0809c72c98..91c4390903a1 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -689,7 +689,7 @@ enum bfqq_expiration {
};
struct bfqg_stats {
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/* number of ios merged */
struct blkg_rwstat merged;
/* total time spent on device in ns, may not be accurate w/ queueing */
@@ -717,7 +717,7 @@ struct bfqg_stats {
uint64_t start_idle_time;
uint64_t start_empty_time;
uint16_t flags;
-#endif /* CONFIG_BFQ_GROUP_IOSCHED */
+#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
};
#ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 414ba686a847..e495d3f9b4b0 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -843,7 +843,6 @@ void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
st->vtime += bfq_delta(served, st->wsum);
bfq_forget_idle(st);
}
- bfqg_stats_set_start_empty_time(bfqq_group(bfqq));
bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
}
diff --git a/block/bio.c b/block/bio.c
index b94a802f8ba3..228229f3bb76 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -597,6 +597,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
* so we don't set nor calculate new physical/hw segment counts here
*/
bio->bi_disk = bio_src->bi_disk;
+ bio->bi_partno = bio_src->bi_partno;
bio_set_flag(bio, BIO_CLONED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
@@ -1061,14 +1062,21 @@ struct bio_map_data {
struct iovec iov[];
};
-static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
+static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
gfp_t gfp_mask)
{
- if (iov_count > UIO_MAXIOV)
+ struct bio_map_data *bmd;
+ if (data->nr_segs > UIO_MAXIOV)
return NULL;
- return kmalloc(sizeof(struct bio_map_data) +
- sizeof(struct iovec) * iov_count, gfp_mask);
+ bmd = kmalloc(sizeof(struct bio_map_data) +
+ sizeof(struct iovec) * data->nr_segs, gfp_mask);
+ if (!bmd)
+ return NULL;
+ memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
+ bmd->iter = *data;
+ bmd->iter.iov = bmd->iov;
+ return bmd;
}
/**
@@ -1079,7 +1087,7 @@ static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
* Copy all pages from iov_iter to bio.
* Returns 0 on success, or error on failure.
*/
-static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
+static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
int i;
struct bio_vec *bvec;
@@ -1090,9 +1098,9 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
ret = copy_page_from_iter(bvec->bv_page,
bvec->bv_offset,
bvec->bv_len,
- &iter);
+ iter);
- if (!iov_iter_count(&iter))
+ if (!iov_iter_count(iter))
break;
if (ret < bvec->bv_len)
@@ -1186,40 +1194,18 @@ int bio_uncopy_user(struct bio *bio)
*/
struct bio *bio_copy_user_iov(struct request_queue *q,
struct rq_map_data *map_data,
- const struct iov_iter *iter,
+ struct iov_iter *iter,
gfp_t gfp_mask)
{
struct bio_map_data *bmd;
struct page *page;
struct bio *bio;
- int i, ret;
- int nr_pages = 0;
+ int i = 0, ret;
+ int nr_pages;
unsigned int len = iter->count;
unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
- for (i = 0; i < iter->nr_segs; i++) {
- unsigned long uaddr;
- unsigned long end;
- unsigned long start;
-
- uaddr = (unsigned long) iter->iov[i].iov_base;
- end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- start = uaddr >> PAGE_SHIFT;
-
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
-
- nr_pages += end - start;
- }
-
- if (offset)
- nr_pages++;
-
- bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
+ bmd = bio_alloc_map_data(iter, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);
@@ -1229,9 +1215,10 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
* shortlived one.
*/
bmd->is_our_pages = map_data ? 0 : 1;
- memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- bmd->iter = *iter;
- bmd->iter.iov = bmd->iov;
+
+ nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ if (nr_pages > BIO_MAX_PAGES)
+ nr_pages = BIO_MAX_PAGES;
ret = -ENOMEM;
bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1280,17 +1267,24 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (ret)
goto cleanup;
+ if (map_data)
+ map_data->offset += bio->bi_iter.bi_size;
+
/*
* success
*/
if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
- ret = bio_copy_from_iter(bio, *iter);
+ ret = bio_copy_from_iter(bio, iter);
if (ret)
goto cleanup;
+ } else {
+ iov_iter_advance(iter, bio->bi_iter.bi_size);
}
bio->bi_private = bmd;
+ if (map_data && map_data->null_mapped)
+ bio_set_flag(bio, BIO_NULL_MAPPED);
return bio;
cleanup:
if (!map_data)
@@ -1311,111 +1305,74 @@ out_bmd:
* device. Returns an error pointer in case of error.
*/
struct bio *bio_map_user_iov(struct request_queue *q,
- const struct iov_iter *iter,
+ struct iov_iter *iter,
gfp_t gfp_mask)
{
int j;
- int nr_pages = 0;
- struct page **pages;
struct bio *bio;
- int cur_page = 0;
- int ret, offset;
- struct iov_iter i;
- struct iovec iov;
+ int ret;
struct bio_vec *bvec;
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
- unsigned long len = iov.iov_len;
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
-
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
-
- nr_pages += end - start;
- /*
- * buffer must be aligned to at least logical block size for now
- */
- if (uaddr & queue_dma_alignment(q))
- return ERR_PTR(-EINVAL);
- }
-
- if (!nr_pages)
+ if (!iov_iter_count(iter))
return ERR_PTR(-EINVAL);
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
if (!bio)
return ERR_PTR(-ENOMEM);
- ret = -ENOMEM;
- pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
- if (!pages)
- goto out;
+ while (iov_iter_count(iter)) {
+ struct page **pages;
+ ssize_t bytes;
+ size_t offs, added = 0;
+ int npages;
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
- unsigned long len = iov.iov_len;
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int local_nr_pages = end - start;
- const int page_limit = cur_page + local_nr_pages;
-
- ret = get_user_pages_fast(uaddr, local_nr_pages,
- (iter->type & WRITE) != WRITE,
- &pages[cur_page]);
- if (unlikely(ret < local_nr_pages)) {
- for (j = cur_page; j < page_limit; j++) {
- if (!pages[j])
- break;
- put_page(pages[j]);
- }
- ret = -EFAULT;
+ bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+ if (unlikely(bytes <= 0)) {
+ ret = bytes ? bytes : -EFAULT;
goto out_unmap;
}
- offset = offset_in_page(uaddr);
- for (j = cur_page; j < page_limit; j++) {
- unsigned int bytes = PAGE_SIZE - offset;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
+ npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
-
- /*
- * sorry...
- */
- if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
- bytes)
- break;
+ if (unlikely(offs & queue_dma_alignment(q))) {
+ ret = -EINVAL;
+ j = 0;
+ } else {
+ for (j = 0; j < npages; j++) {
+ struct page *page = pages[j];
+ unsigned int n = PAGE_SIZE - offs;
+ unsigned short prev_bi_vcnt = bio->bi_vcnt;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(pages[j]);
+ if (n > bytes)
+ n = bytes;
- len -= bytes;
- offset = 0;
- }
+ if (!bio_add_pc_page(q, bio, page, n, offs))
+ break;
- cur_page = j;
+ /*
+ * check if vector was merged with previous
+ * drop page reference if needed
+ */
+ if (bio->bi_vcnt == prev_bi_vcnt)
+ put_page(page);
+
+ added += n;
+ bytes -= n;
+ offs = 0;
+ }
+ iov_iter_advance(iter, added);
+ }
/*
* release the pages we didn't map into the bio, if any
*/
- while (j < page_limit)
+ while (j < npages)
put_page(pages[j++]);
+ kvfree(pages);
+ /* couldn't stuff something into bio? */
+ if (bytes)
+ break;
}
- kfree(pages);
-
bio_set_flag(bio, BIO_USER_MAPPED);
/*
@@ -1431,8 +1388,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bio_for_each_segment_all(bvec, bio, j) {
put_page(bvec->bv_page);
}
- out:
- kfree(pages);
bio_put(bio);
return ERR_PTR(ret);
}
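The page-count computation above replaces a per-iovec loop with a single ceiling division over the whole transfer. Worked out numerically (standalone; PAGE_SIZE hardcoded purely for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* A 10000-byte buffer starting 100 bytes into its first page
         * spans DIV_ROUND_UP(100 + 10000, 4096) = 3 pages. */
        unsigned int offset = 100, len = 10000;

        printf("%u\n", DIV_ROUND_UP(offset + len, PAGE_SIZE));
        return 0;
}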
diff --git a/block/blk-core.c b/block/blk-core.c
index 7c54c195e79e..1038706edd87 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -637,8 +637,8 @@ void blk_set_queue_dying(struct request_queue *q)
spin_lock_irq(q->queue_lock);
blk_queue_for_each_rl(rl, q) {
if (rl->rq_pool) {
- wake_up(&rl->wait[BLK_RW_SYNC]);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
+ wake_up_all(&rl->wait[BLK_RW_SYNC]);
+ wake_up_all(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
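wake_up_all() matters here because every sleeper on a dying queue must observe the new state and bail out; waking only one would leave the rest blocked forever. The user-space analogue is broadcast versus signal (a sketch under that analogy, not the block-layer code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
static bool dying;

void set_queue_dying(void)
{
        pthread_mutex_lock(&lock);
        dying = true;
        /* Broadcast (wake_up_all): all waiters re-check "dying" and exit.
         * Signal (wake_up) would release only one of them. */
        pthread_cond_broadcast(&waiters);
        pthread_mutex_unlock(&lock);
}

int wait_for_request(void)
{
        pthread_mutex_lock(&lock);
        while (!dying)
                pthread_cond_wait(&waiters, &lock);
        pthread_mutex_unlock(&lock);
        return -1;              /* queue is gone */
}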
diff --git a/block/blk-map.c b/block/blk-map.c
index d5251edcc0dd..b21f8e86f120 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -67,13 +67,6 @@ static int __blk_rq_map_user_iov(struct request *rq,
bio->bi_opf &= ~REQ_OP_MASK;
bio->bi_opf |= req_op(rq);
- if (map_data && map_data->null_mapped)
- bio_set_flag(bio, BIO_NULL_MAPPED);
-
- iov_iter_advance(iter, bio->bi_iter.bi_size);
- if (map_data)
- map_data->offset += bio->bi_iter.bi_size;
-
orig_bio = bio;
/*
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0a9e5979aaa9..9d49a1acebe3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -355,6 +355,7 @@ acpi_evaluate_reference(acpi_handle handle,
}
if (package->package.count > ACPI_MAX_HANDLES) {
+ kfree(package);
return AE_NO_MEMORY;
}
list->count = package->package.count;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2362b9e9701e..027d159ac381 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1101,29 +1101,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out;
}
- if (dev->power.runtime_status == status)
+ if (dev->power.runtime_status == status || !parent)
goto out_set;
if (status == RPM_SUSPENDED) {
- /*
- * It is invalid to suspend a device with an active child,
- * unless it has been set to ignore its children.
- */
- if (!dev->power.ignore_children &&
- atomic_read(&dev->power.child_count)) {
- dev_err(dev, "runtime PM trying to suspend device but active child\n");
- error = -EBUSY;
- goto out;
- }
-
- if (parent) {
- atomic_add_unless(&parent->power.child_count, -1, 0);
- notify_parent = !parent->power.ignore_children;
- }
- goto out_set;
- }
-
- if (parent) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ } else {
spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
@@ -1307,6 +1291,13 @@ void pm_runtime_enable(struct device *dev)
else
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ WARN(!dev->power.disable_depth &&
+ dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0,
+ "Enabling runtime PM for inactive device (%s) with active children\n",
+ dev_name(dev));
+
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
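The new WARN() encodes an invariant: a device being enabled for runtime PM must not be in the suspended state while it still has active children it is not set to ignore. A reduced model of that condition (field names mirror struct dev_pm_info, but this is only a standalone sketch):

#include <stdbool.h>
#include <stdio.h>

struct pm_state {
        int disable_depth;
        bool suspended;
        bool ignore_children;
        int child_count;
};

/* True exactly when the kernel's new WARN() would fire. */
static bool invalid_enable(const struct pm_state *p)
{
        return !p->disable_depth && p->suspended &&
               !p->ignore_children && p->child_count > 0;
}

int main(void)
{
        struct pm_state p = { 0, true, false, 2 };

        if (invalid_enable(&p))
                puts("inactive device with active children");
        return 0;
}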
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 4e3fb9f104af..e5aa62fcf5a8 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -146,6 +146,7 @@ static struct amiga_floppy_struct unit[FD_MAX_UNITS];
static struct timer_list flush_track_timer[FD_MAX_UNITS];
static struct timer_list post_write_timer;
+static unsigned long post_write_timer_drive;
static struct timer_list motor_on_timer;
static struct timer_list motor_off_timer[FD_MAX_UNITS];
static int on_attempts;
@@ -323,7 +324,7 @@ static void fd_deselect (int drive)
}
-static void motor_on_callback(unsigned long ignored)
+static void motor_on_callback(struct timer_list *unused)
{
if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
complete_all(&motor_on_completion);
@@ -355,7 +356,7 @@ static int fd_motor_on(int nr)
on_attempts = -1;
#if 0
printk (KERN_ERR "motor_on failed, turning motor off\n");
- fd_motor_off (nr);
+ fd_motor_off (motor_off_timer + nr);
return 0;
#else
printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n");
@@ -365,20 +366,17 @@ static int fd_motor_on(int nr)
return 1;
}
-static void fd_motor_off(unsigned long drive)
+static void fd_motor_off(struct timer_list *timer)
{
- long calledfromint;
-#ifdef MODULE
- long decusecount;
+ unsigned long drive = ((unsigned long)timer -
+ (unsigned long)&motor_off_timer[0]) /
+ sizeof(motor_off_timer[0]);
- decusecount = drive & 0x40000000;
-#endif
- calledfromint = drive & 0x80000000;
drive&=3;
- if (calledfromint && !try_fdc(drive)) {
+ if (!try_fdc(drive)) {
/* We would be blocked in an interrupt, so try again later */
- motor_off_timer[drive].expires = jiffies + 1;
- add_timer(motor_off_timer + drive);
+ timer->expires = jiffies + 1;
+ add_timer(timer);
return;
}
unit[drive].motor = 0;
@@ -392,8 +390,6 @@ static void floppy_off (unsigned int nr)
int drive;
drive = nr & 3;
- /* called this way it is always from interrupt */
- motor_off_timer[drive].data = nr | 0x80000000;
mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
}
@@ -435,7 +431,7 @@ static int fd_calibrate(int drive)
break;
if (--n == 0) {
printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
- fd_motor_off (drive);
+ fd_motor_off (motor_off_timer + drive);
unit[drive].track = -1;
rel_fdc();
return 0;
@@ -564,7 +560,7 @@ static irqreturn_t fd_block_done(int irq, void *dummy)
if (block_flag == 2) { /* writing */
writepending = 2;
post_write_timer.expires = jiffies + 1; /* at least 2 ms */
- post_write_timer.data = selected;
+ post_write_timer_drive = selected;
add_timer(&post_write_timer);
}
else { /* reading */
@@ -651,6 +647,10 @@ static void post_write (unsigned long drive)
rel_fdc(); /* corresponds to get_fdc() in raw_write */
}
+static void post_write_callback(struct timer_list *timer)
+{
+ post_write(post_write_timer_drive);
+}
/*
* The following functions are to convert the block contents into raw data
@@ -1244,8 +1244,12 @@ static void dos_write(int disk)
/* FIXME: this assumes the drive is still spinning -
* which is only true if we complete writing a track within three seconds
*/
-static void flush_track_callback(unsigned long nr)
+static void flush_track_callback(struct timer_list *timer)
{
+ unsigned long nr = ((unsigned long)timer -
+ (unsigned long)&flush_track_timer[0]) /
+ sizeof(flush_track_timer[0]);
+
nr&=3;
writefromint = 1;
if (!try_fdc(nr)) {
@@ -1649,8 +1653,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
fd_ref[drive] = 0;
}
#ifdef MODULE
-/* the mod_use counter is handled this way */
- floppy_off (drive | 0x40000000);
+ floppy_off (drive);
#endif
mutex_unlock(&amiflop_mutex);
}
@@ -1791,27 +1794,19 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
floppy_find, NULL, NULL);
/* initialize variables */
- init_timer(&motor_on_timer);
+ timer_setup(&motor_on_timer, motor_on_callback, 0);
motor_on_timer.expires = 0;
- motor_on_timer.data = 0;
- motor_on_timer.function = motor_on_callback;
for (i = 0; i < FD_MAX_UNITS; i++) {
- init_timer(&motor_off_timer[i]);
+ timer_setup(&motor_off_timer[i], fd_motor_off, 0);
motor_off_timer[i].expires = 0;
- motor_off_timer[i].data = i|0x80000000;
- motor_off_timer[i].function = fd_motor_off;
- init_timer(&flush_track_timer[i]);
+ timer_setup(&flush_track_timer[i], flush_track_callback, 0);
flush_track_timer[i].expires = 0;
- flush_track_timer[i].data = i;
- flush_track_timer[i].function = flush_track_callback;
unit[i].track = -1;
}
- init_timer(&post_write_timer);
+ timer_setup(&post_write_timer, post_write_callback, 0);
post_write_timer.expires = 0;
- post_write_timer.data = 0;
- post_write_timer.function = post_write;
for (i = 0; i < 128; i++)
mfmdecode[i]=255;
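With timer_setup(), the callback receives the timer pointer instead of an opaque ->data value, so per-unit timers kept in an array can recover their index by pointer arithmetic, as fd_motor_off() and flush_track_callback() do above. A standalone sketch of that trick (the patch's byte-arithmetic form, dividing by sizeof, is equivalent to plain pointer subtraction):

#include <stdio.h>

struct timer { int pending; };  /* stand-in for struct timer_list */

#define FD_MAX_UNITS 4
static struct timer motor_off_timer[FD_MAX_UNITS];

static void fd_motor_off(struct timer *timer)
{
        unsigned long drive = timer - motor_off_timer; /* array index */

        printf("motor off for drive %lu\n", drive);
}

int main(void)
{
        fd_motor_off(&motor_off_timer[2]);      /* prints drive 2 */
        return 0;
}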
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index dc43254e05a4..55ab25f79a08 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -744,7 +744,7 @@ count_targets(struct aoedev *d, int *untainted)
}
static void
-rexmit_timer(ulong vp)
+rexmit_timer(struct timer_list *timer)
{
struct aoedev *d;
struct aoetgt *t;
@@ -758,7 +758,7 @@ rexmit_timer(ulong vp)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = (struct aoedev *) vp;
+ d = from_timer(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
@@ -1429,7 +1429,7 @@ aoecmd_ata_id(struct aoedev *d)
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
- d->timer.function = rexmit_timer;
+ d->timer.function = (TIMER_FUNC_TYPE)rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index b28fefb90391..697f735b07a4 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include "aoe.h"
-static void dummy_timer(ulong);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -146,11 +145,11 @@ aoedev_put(struct aoedev *d)
}
static void
-dummy_timer(ulong vp)
+dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = (struct aoedev *)vp;
+ d = from_timer(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -466,9 +465,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
- init_timer(&d->timer);
- d->timer.data = (ulong) d;
- d->timer.function = dummy_timer;
+ timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
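The from_timer() used above is container_of() specialized for timers: given the timer pointer the core passes to the callback, it recovers the enclosing structure. A standalone sketch of the mechanism (simplified types; the real macro lives in the kernel headers):

#include <stddef.h>
#include <stdio.h>

struct timer { int pending; };  /* stand-in for struct timer_list */

struct aoedev_like {
        int flags;
        struct timer timer;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void dummy_timer(struct timer *t)
{
        struct aoedev_like *d = container_of(t, struct aoedev_like, timer);

        printf("flags=%d\n", d->flags);
}

int main(void)
{
        struct aoedev_like d = { .flags = 7 };

        dummy_timer(&d.timer);
        return 0;
}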
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a54183935aa1..eae484acfbbc 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -903,10 +903,14 @@ static void unlock_fdc(void)
}
/* switches the motor off after a given timeout */
-static void motor_off_callback(unsigned long nr)
+static void motor_off_callback(struct timer_list *t)
{
+ unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
+ if (WARN_ON_ONCE(nr >= N_DRIVE))
+ return;
+
set_dor(FDC(nr), mask, 0);
}
@@ -3047,7 +3051,7 @@ static void raw_cmd_done(int flag)
else
raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
- motor_off_callback(current_drive);
+ motor_off_callback(&motor_off_timer[current_drive]);
if (raw_cmd->next &&
(!(raw_cmd->flags & FD_RAW_FAILURE) ||
@@ -4542,7 +4546,7 @@ static int __init do_floppy_init(void)
disks[drive]->fops = &floppy_fops;
sprintf(disks[drive]->disk_name, "fd%d", drive);
- setup_timer(&motor_off_timer[drive], motor_off_callback, drive);
+ timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 9f931f8f6b4c..e620e423102b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -239,10 +239,10 @@ static unsigned short write_postamble[] = {
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void act(struct floppy_state *fs);
-static void scan_timeout(unsigned long data);
-static void seek_timeout(unsigned long data);
-static void settle_timeout(unsigned long data);
-static void xfer_timeout(unsigned long data);
+static void scan_timeout(struct timer_list *t);
+static void seek_timeout(struct timer_list *t);
+static void settle_timeout(struct timer_list *t);
+static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
@@ -392,13 +392,12 @@ static void do_fd_request(struct request_queue * q)
}
static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long))
+ void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
- fs->timeout.function = proc;
- fs->timeout.data = (unsigned long) fs;
+ fs->timeout.function = (TIMER_FUNC_TYPE)proc;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
@@ -569,9 +568,9 @@ static void act(struct floppy_state *fs)
}
}
-static void scan_timeout(unsigned long data)
+static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -594,9 +593,9 @@ static void scan_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void seek_timeout(unsigned long data)
+static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -614,9 +613,9 @@ static void seek_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void settle_timeout(unsigned long data)
+static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -644,9 +643,9 @@ static void settle_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void xfer_timeout(unsigned long data)
+static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
@@ -1182,7 +1181,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
return -EBUSY;
}
- init_timer(&fs->timeout);
+ timer_setup(&fs->timeout, NULL, 0);
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 70d434bc1cbf..c4ef73c6f455 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -204,9 +204,6 @@ static ssize_t bt_bmc_read(struct file *file, char __user *buf,
ssize_t ret = 0;
ssize_t nread;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
if (wait_event_interruptible(bt_bmc->queue,
@@ -277,9 +274,6 @@ static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
if (count < 5)
return -EINVAL;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
/*
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 9a302799040e..5d101c4053e0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -27,7 +27,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 26682454a446..e8af1f5e8a79 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 7fc42e077770..78d609123420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -4,13 +4,19 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -133,4 +139,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cbcb6a153aba..5afaf6016b4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -66,6 +66,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
+extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
@@ -1535,6 +1538,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -1590,6 +1594,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -1653,6 +1660,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
/*
* Registers read & write functions.
*/
@@ -1911,5 +1921,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev);
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
+
#include "amdgpu_object.h"
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a7afe553e0a1..f2b72c7c6857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
struct cgs_mode_info *mode_info;
if (info == NULL)
@@ -928,30 +924,43 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- mode_info = NULL;
+ if (!amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct drm_device *ddev = adev->ddev;
+ struct drm_crtc *crtc;
+ uint32_t line_time_us, vblank_lines;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled) {
+ info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ info->display_count++;
+ }
+ if (mode_info != NULL &&
+ crtc->enabled && amdgpu_crtc->enabled &&
+ amdgpu_crtc->hw_mode.clock) {
+ line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+ amdgpu_crtc->hw_mode.clock;
+ vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2);
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
+ }
}
}
+ } else {
+ info->display_count = adev->pm.pm_display_cfg.num_display;
+ if (mode_info != NULL) {
+ mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
+ mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
}
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index efcacb827de7..2d792cdc094c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -2046,6 +2047,52 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+ switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+ return amdgpu_dc != 0;
+#endif
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ return amdgpu_dc > 0;
+ case CHIP_VEGA10:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+#endif
+ return amdgpu_dc != 0;
+#endif
+ default:
+ return false;
+ }
+}
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2100,7 +2147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
-
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -2242,7 +2288,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_atombios_i2c_init(adev);
}
/* Fence driver */
@@ -2378,7 +2425,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -2429,12 +2477,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
amdgpu_amdkfd_suspend(adev);
@@ -2577,13 +2627,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ drm_modeset_unlock_all(dev);
+ } else {
+ /*
+ * There is no equivalent atomic helper to turn on
+ * display, so we defined our own function for this.
+ * Once suspend/resume is supported by the atomic
+ * framework, this will be reworked.
+ */
+ amdgpu_dm_display_resume(adev);
}
- drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
@@ -2600,7 +2662,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
- drm_helper_hpd_irq_event(dev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
@@ -2900,6 +2965,7 @@ give_up_reset:
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
+ struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
@@ -2913,6 +2979,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3029,7 +3098,11 @@ out:
}
}
- drm_helper_resume_force_mode(adev->ddev);
+ if (amdgpu_device_has_dc_support(adev)) {
+ r = drm_atomic_helper_resume(adev->ddev, state);
+ amdgpu_dm_display_resume(adev);
+ } else
+ drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6ad243293a78..138beb550a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-static void amdgpu_output_poll_changed(struct drm_device *dev)
+void amdgpu_output_poll_changed(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
amdgpu_fb_output_poll_changed(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
new file mode 100644
index 000000000000..3cc0ef0c055e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+void amdgpu_output_poll_changed(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 7279fb5c3abc..56caaeee6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -433,7 +433,7 @@ struct amdgpu_pm {
uint32_t fw_version;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+ struct amd_pp_display_configuration pm_display_cfg; /* set by dc */
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index dd2f060d62a8..ec96bb1f9eaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -106,6 +106,8 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
+int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
@@ -211,6 +213,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
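Both parameters are registered with permission 0444, so they show up read-only in sysfs and must be set at load time. A usage sketch (not part of the patch):

    # kernel command line
    amdgpu.dc=1 amdgpu.dc_log=1

    # or, equivalently, in /etc/modprobe.d/amdgpu.conf
    options amdgpu dc=1 dc_log=1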
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
@@ -518,15 +526,15 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
- {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 562930b17a6d..90fa8e8bc6fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -42,11 +42,6 @@
this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
static int
amdgpufb_open(struct fb_info *info, int user)
@@ -353,7 +348,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(adev->ddev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_disable_unused_functions(adev->ddev);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 538e5f27d120..47c5ce9807db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
- adev->ddev->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
-
/* enable msi */
adev->irq.msi_enabled = false;
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev->ddev->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* pre DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6f0b26dae3b0..720139e182a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1030,7 +1030,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2af2678ddaf6..ffde1e9666e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -53,9 +57,13 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
+
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
+ /* it is used to enter or exit into free sync mode */
+ int (*notify_freesync)(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ /* it is used to allow enablement of freesync mode */
+ int (*set_freesync_property)(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+};
+
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+
+ /* caching for later use */
+ uint64_t address;
+};
+
+struct amdgpu_fbdev {
+ struct drm_fb_helper helper;
+ struct amdgpu_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ const enum drm_plane_type *plane_type;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -400,6 +434,14 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
+
+ int otg_inst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct amdgpu_plane {
+ struct drm_plane base;
+ enum drm_plane_type plane_type;
};
struct amdgpu_encoder_atom_dig {
@@ -489,6 +531,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+ struct i2c_adapter base;
+
+ struct ddc_service *ddc_service;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -500,6 +555,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+ const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +573,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+ struct semaphore mst_sem;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /* freesync caps */
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start to use this struct and remove same field from base one */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a59e04f3eeba..d6df5728df7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1466,7 +1466,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
+ if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 567c4a5cf90c..793b1470284d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
@@ -1928,6 +1937,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
@@ -1943,6 +1956,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 3ca9d114f630..4e67fe1e7955 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -532,6 +532,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -545,6 +551,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f3cfef48aa99..3a4c2fa7e36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1502,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1518,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1536,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1550,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1567,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
new file mode 100644
index 000000000000..ec3285f65517
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -0,0 +1,45 @@
+menu "Display Engine Configuration"
+ depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+ bool "AMD DC - Enable new display engine"
+ default y
+ help
+ Choose this option if you want to use the new display engine
+ support for AMDGPU. This adds required support for Vega and
+ Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+ bool "DC support for Polaris and older ASICs"
+ default n
+ help
+ Choose this option to enable the new DC support for older ASICs
+ by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+ and Hawaii.
+
+config DRM_AMD_DC_FBC
+ bool "AMD FBC - Enable Frame Buffer Compression"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to use frame buffer compression
+ support.
+ This is a power optimisation feature, check its availability
+ on your hardware before enabling this option.
+
+
+config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
+ help
+ Choose this option if you want to enable DCN 1.0 support
+ for the Raven (RV) family display engine.
+
+config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to hit
+ kgdb_break in assert.
+
+endmenu
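For reference, a configuration enabling everything this menu offers would look roughly like the following .config fragment (a sketch; actual defaults depend on the rest of the configuration):

    CONFIG_DRM_AMD_DC=y
    CONFIG_DRM_AMD_DC_PRE_VEGA=y
    CONFIG_DRM_AMD_DC_FBC=y
    CONFIG_DRM_AMD_DC_DCN1_0=y
    # CONFIG_DEBUG_KERNEL_DC is not set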
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
new file mode 100644
index 000000000000..8ba37dd9cf7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the DAL (Display Abstraction Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display-related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
new file mode 100644
index 000000000000..46464678f2b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -0,0 +1,107 @@
+===============================================================================
+TODOs
+===============================================================================
+
+1. Base this on drm-next - WIP
+
+
+2. Cleanup commit history
+
+
+3. WIP - Drop page flip helper and use DRM's version
+
+
+4. DONE - Flatten all DC objects
+ * dc_stream/core_stream/stream should just be dc_stream
+ * Same for other DC objects
+
+ "Is there any major reason to keep all those abstractions?
+
+ Could you collapse everything into struct dc_stream?
+
+ I haven't looked recently but I didn't get the impression there was a
+ lot of design around what was public/protected, more whatever needed
+ to be used by someone else was in public."
+ ~ Dave Airlie
+
+
+5. DONE - Rename DC objects to align more with DRM
+ * dc_surface -> dc_plane_state
+ * dc_stream -> dc_stream_state
+
+
+6. DONE - Per-plane and per-stream validation
+
+
+7. WIP - Per-plane and per-stream commit
+
+
+8. WIP - Split pipe_ctx into plane and stream resource structs
+
+
+9. Attach plane and stream resources to state object instead of validate_context
+
+
+10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
+ * Use drm_display_info instead
+ * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
+
+ "Making sure you use the sink-specific helper libraries and kernel
+ subsystems, since there's really no good reason to have 2nd
+ implementation of those in the kernel. Looks likes that's done for mst
+ and edid parsing. There's still a bit a midlayer feeling to the edid
+ parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
+ think it'd be much better if you convert that over to reading stuff
+ from drm_display_info and if needed, push stuff into the core). Also,
+ I can't come up with a good reason why DC needs all this (except to
+ reimplement half of our edid quirk table, which really isn't a good
+ idea). Might be good if you put this onto the list of things to fix
+ long-term, but imo not a blocker. Definitely make sure new stuff
+ doesn't slip in (i.e. if you start adding edid quirks to DC instead of
+ the drm core, refactoring to use the core edid stuff was pointless)."
+ ~ Daniel Vetter
+
+
+11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
+overly complicated HW programming function for sending and receiving i2c/aux
+commands. We can greatly simplify that and move it into dc/dceXYZ like other
+HW blocks.
+
+12. drm_modeset_lock in MST should no longer be needed in recent kernels
+ * Adopt appropriate locking scheme
+
+13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
+a few indirections, and consider removing entirely and using the
+drm_atomic_helper_best_encoder default behaviour.
+
+14. core/dc_debug.c, consider switching to the atomic state debug helpers and
+moving all your driver state printing into the various atomic_print_state
+callbacks. There's also plans to expose this stuff in a standard way across all
+drivers, to make debugging userspace compositors easier across different hw.
+
+15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
+dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
+
+16. Move to core SCDC helpers (I think those are new since initial DC review).
+
+17. There's still a pretty massive layer cake around dp aux and DPCD handling,
+with like 3 levels of abstraction and using your own structures instead of the
+stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
+incompatible styles, just means more reasons not to add a third (or well third
+one gets to do the cleanup refactor).
+
+18. There's a pile of sink handling code, both for DP and HDMI where I didn't
+immediately recognize the standard. I think long term it'd be best for the drm
+subsystem if we try to move as much of that into helpers/core as possible, and
+share it with drivers. But that's a very long term goal, and by far not just an
+issue with DC - other drivers, especially around DP sink handling, are equally
+guilty.
+
+19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+stuff just isn't up to the challenges either. We need to figure out something
+that integrates better with DRM and linux debug printing, while not being
+useless with filtering output. dynamic debug printing might be an option.
+
+20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+retimer that we need to program to pass PHY compliance. Currently that's
+bypassing the i2c device and goes directly to HW. This should be changed.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
new file mode 100644
index 000000000000..4699e47aa76b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..889ed24084e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,4925 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "dc/inc/core_types.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_pm.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "dm_services_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+#include "modules/inc/mod_freesync.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "soc15_common.h"
+#endif
+
+#include "modules/inc/mod_freesync.h"
+
+#include "i2caux_interface.h"
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs);
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t link_index);
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ uint32_t link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+};
+
+static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
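These per-ASIC tables are consumed through the mode_info.plane_type pointer added to amdgpu_mode.h earlier in this patch: plane creation walks the table and instantiates one plane per entry via the amdgpu_dm_plane_init() prototype declared above. A rough sketch of that loop, where num_planes and possible_crtcs are placeholders for values chosen during init:

    struct amdgpu_mode_info *mode_info = &adev->mode_info;
    int i;

    for (i = 0; i < num_planes; i++) {
        struct amdgpu_plane *plane = kzalloc(sizeof(*plane), GFP_KERNEL);

        if (!plane)
            return -ENOMEM;
        plane->plane_type = mode_info->plane_type[i]; /* from the per-ASIC table */
        if (amdgpu_dm_plane_init(dm, plane, possible_crtcs))
            return -EINVAL;
        mode_info->planes[i] = plane;
    }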
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int crtc - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc_state->stream);
+ }
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc_state->stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+ }
+
+ return 0;
+}
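Both out-parameters pack two 16-bit fields into one register-style word, as the TODO above notes. A consumer decodes them with the inverse of the packing used here (a sketch):

    u32 v_position = position & 0xffff;
    u32 h_position = position >> 16;
    u32 v_blank_start = vbl & 0xffff;
    u32 v_blank_end = vbl >> 16;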
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ /*
+ * The following check is inherited from both functions where this one is
+ * now used. It still needs to be determined why this case can happen.
+ */
+ if (otg_inst == -1) {
+ WARN_ON(1);
+ return adev->mode_info.crtcs[0];
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ unsigned long flags;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur during the initial stage */
+ /* TODO: work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* wake up userspace */
+ if (amdgpu_crtc->event) {
+ /* Update to correct count/ts if racing with vblank irq */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->event = NULL;
+
+ } else
+ WARN_ON(1);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+ __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ uint8_t crtc_index = 0;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (acrtc)
+ crtc_index = acrtc->crtc_id;
+
+ drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+ struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+ struct drm_device *dev = dm->ddev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dal_asic_id.h"
+/* Allocate memory for FBC compressed data */
+/* TODO: Dynamic allocation */
+#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
+
+static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+{
+ int r;
+ struct dm_comressor_info *compressor = &adev->dm.compressor;
+
+ if (!compressor->bo_ptr) {
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize fbc\n");
+ }
+
+}
+#endif
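For scale, AMDGPU_FBC_SIZE works out to 3840 * 2160 * 4 = 33,177,600 bytes (about 31.6 MiB) of VRAM, i.e. one uncompressed 4K ARGB frame, reserved once at init and kept until the dynamic-allocation TODO above is addressed.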
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ adev->dm.ddev = adev->ddev;
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+
+ /* initialize DAL's lock (for SYNC context use) */
+ spin_lock_init(&adev->dm.dal_lock);
+
+ /* initialize DAL's mutex */
+ mutex_init(&adev->dm.dal_mutex);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+ init_data.asic_id.vram_width = adev->mc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ adev->dm.dal = NULL;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ if (amdgpu_dc_log)
+ init_data.log_mask = DC_DEFAULT_LOG_MASK;
+ else
+ init_data.log_mask = DC_MIN_LOG_MASK;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (adev->family == FAMILY_CZ)
+ amdgpu_dm_initialize_fbc(adev);
+ init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
+#endif
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core initialized!\n");
+ } else {
+ DRM_INFO("Display Core failed to initialize!\n");
+ goto error;
+ }
+
+ INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+ } else
+ DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ adev->dm.freesync_module);
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -1;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+ /*
+ * TODO: pageflip, vblank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ return;
+}
+
+static int dm_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+ return ret;
+ }
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
+
+ return detect_mst_link_for_all_connectors(dev);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ !aconnector->mst_port) {
+
+ if (suspend)
+ drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+ else
+ drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ s3_handle_mst(adev->ddev, true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ return ret;
+}
+
+static struct amdgpu_dm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ uint32_t i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return to_amdgpu_dm_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ return 0;
+}
+
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+
+ int ret = 0;
+ int i;
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+ /*
+ * enable HPD Rx IRQ early; this should be done before setting the mode,
+ * as short-pulse interrupts are used for MST
+ */
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* Do detection */
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * this is the case when traversing through already created
+ * MST connectors; those should be skipped
+ */
+ if (aconnector->mst_port)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+
+ drm_atomic_state_put(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ return ret;
+}
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+
+static struct drm_atomic_state *
+dm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ if (drm_atomic_state_init(dev, &state->base) < 0)
+ goto fail;
+
+ return &state->base;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+
+static void
+dm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state->context) {
+ dc_release_state(dm_state->context);
+ dm_state->context = NULL;
+ }
+
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+dm_atomic_state_alloc_free(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ drm_atomic_state_default_release(state);
+ kfree(dm_state);
+}
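The three callbacks above implement the standard DRM pattern for subclassing drm_atomic_state so that driver-private state (here DC's dc_state context) travels with each atomic transaction. The backing struct and cast live in amdgpu_dm.h elsewhere in this patch; roughly:

    struct dm_atomic_state {
        struct drm_atomic_state base; /* embedded DRM base state */
        struct dc_state *context;     /* DC's validated global state */
    };

    #define to_dm_atomic_state(x) \
        container_of(x, struct dm_atomic_state, base)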
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_user_framebuffer_create,
+ .output_poll_changed = amdgpu_output_poll_changed,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_state_alloc = dm_atomic_state_alloc,
+ .atomic_state_clear = dm_atomic_state_clear,
+ .atomic_state_free = dm_atomic_state_alloc_free
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+};
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+
+ sink = aconnector->dc_link->local_sink;
+
+ /* EDID-managed connectors get their first update only in the mode_valid
+ * hook; the connector sink is then set to either the fake or the physical
+ * sink, depending on link status. Don't do this here during boot.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+ /* For headless S3 resume, use the emulated sink to fake the stream
+ * because on resume connector->sink is set to NULL
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+ /* retain and release below are used to
+ * bump up the refcount for the sink because the link doesn't point
+ * to it anymore after disconnect, so on the next crtc-to-connector
+ * reshuffle by UMD we would otherwise get an unwanted dc_sink release
+ */
+ if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ if (!aconnector->dc_sink)
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ else if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_retain(aconnector->dc_sink);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard while looking for a proper fix;
+ * if this sink is an MST sink, we should not do anything
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return;
+
+ if (aconnector->dc_sink == sink) {
+ /* We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing!! */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do */
+ if (sink) {
+ /* TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here. */
+ if (aconnector->dc_sink)
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+
+ aconnector->dc_sink = sink;
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ } else {
+ aconnector->edid =
+ (struct edid *) sink->dc_edid.raw_edid;
+
+
+ drm_mode_connector_update_edid_property(connector,
+ aconnector->edid);
+ }
+ amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+
+ /* In case of failure or MST there is no need to update the connector status
+ * or notify the OS, since (for the MST case) MST does this in its own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ int dpcd_bytes_to_read;
+
+ const int max_process_count = 30;
+ int process_count = 0;
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ while (dret == dpcd_bytes_to_read &&
+ process_count < max_process_count) {
+ uint8_t retry;
+ dret = 0;
+
+ process_count++;
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ /* handle HPD short pulse irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq(
+ &aconnector->mst_mgr,
+ esi,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify downstream */
+ const int ack_dpcd_bytes_to_write =
+ dpcd_bytes_to_read - 1;
+
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t wret;
+
+ wret = drm_dp_dpcd_write(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ &esi[1],
+ ack_dpcd_bytes_to_write);
+ if (wret == ack_dpcd_bytes_to_write)
+ break;
+ }
+
+ /* check if there is a new irq to be handled */
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+	/* TODO: Temporarily take this mutex so the hpd interrupt does not
+	 * conflict on the gpio; once the i2c helper is implemented, this
+	 * mutex should be retired.
+	 */
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+ }
+ if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (dc_link->type == dc_connection_mst_branch))
+ dm_handle_hpd_rx_irq(aconnector);
+
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
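+/* Walk the connector list and hook each connector's HPD (and, where
+ * present, HPD RX) DC interrupt source up to the handlers above, to be
+ * run in low IRQ context.
+ */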
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN)
+ client_id = AMDGPU_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling. */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ * */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
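+/* Set up the DRM mode_config limits, helpers and driver-private
+ * properties used by the display manager.
+ */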
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ /* indicate support of immediate flip */
+ adev->ddev->mode_config.async_page_flip = true;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
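+/* Backlight control is routed through DC: update_status forwards the
+ * requested brightness to dc_link_set_backlight_level() and reports
+ * failure with a non-zero return.
+ */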
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ if (dc_link_set_backlight_level(dm->backlight_link,
+ bd->props.brightness, 0, 0))
+ return 0;
+ else
+ return 1;
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+ char bl_name[16];
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ dm->adev->ddev->primary->index);
+
+ dm->backlight_dev = backlight_device_register(bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
+
+ if (IS_ERR(dm->backlight_dev))
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+ unsigned long possible_crtcs;
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -1;
+ }
+
+ for (i = 0; i < dm->dc->caps.max_planes; i++) {
+ struct amdgpu_plane *plane;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[i] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ goto fail;
+ }
+ plane->base.type = mode_info->plane_type[i];
+
+		/*
+		 * HACK: IGT tests expect that each plane can only have
+		 * one possible CRTC. For now, set one CRTC for each
+		 * plane that is not an underlay, but still allow multiple
+		 * CRTCs for underlay planes.
+		 */
+ possible_crtcs = 1 << i;
+ if (i >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
+ DETECT_REASON_BOOT))
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGA10:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ /*
+ * Temporary disable until pplib/smu interaction is implemented
+ */
+ dm->dc->debug.disable_stutter = true;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+ for (i = 0; i < dm->dc->caps.max_planes; i++)
+ kfree(mode_info->planes[i]);
+ return -1;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_mode_config_cleanup(dm->ddev);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+ u8 level)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+}
+
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ return 0;
+}
+
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct mod_freesync_params freesync_params;
+ uint8_t num_streams;
+ uint8_t i;
+
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0;
+
+ /* Get freesync enable flag from DRM */
+
+ num_streams = dc_get_current_stream_count(adev->dm.dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream;
+ stream = dc_get_stream_at_index(adev->dm.dc, i);
+
+ mod_freesync_update_state(adev->dm.freesync_module,
+ &stream, 1, &freesync_params);
+ }
+
+ return r;
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .vblank_wait = NULL,
+ .backlight_set_level =
+ dm_set_backlight_level,/* called unconditionally */
+ .backlight_get_level =
+ dm_get_backlight_level,/* called unconditionally */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+ .notify_freesync = amdgpu_notify_freesync,
+
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ amdgpu_dm_display_resume(adev);
+ drm_kms_helper_hotplug_event(adev->ddev);
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
+ amdgpu_dm_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_carizzo;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_stoney;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_VEGA10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /* Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev->ddev->dev,
+ &dev_attr_s3_debug);
+#endif
+
+ return 0;
+}
+
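+/* DM subclasses drm_connector_state to carry its private scaling and
+ * underscan settings; to_dm_connector_state() recovers the wrapper from
+ * the embedded base state via container_of().
+ */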
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
+
+static bool modeset_required(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_stream_state *old_stream)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ if (!crtc_state->enable)
+ return false;
+
+ return crtc_state->active;
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ return !crtc_state->enable || !crtc_state->active;
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
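+/* Translate the DRM plane state into DC src/dst/clip rects and rotation.
+ * Note that the DRM source coordinates (src_x/y/w/h) are 16.16 fixed
+ * point, hence the >> 16 below.
+ */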
+static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+ struct dc_plane_state *plane_state)
+{
+ plane_state->src_rect.x = state->src_x >> 16;
+ plane_state->src_rect.y = state->src_y >> 16;
+	/* We ignore the mantissa for now and do not deal with fractional pixels :( */
+ plane_state->src_rect.width = state->src_w >> 16;
+
+ if (plane_state->src_rect.width == 0)
+ return false;
+
+ plane_state->src_rect.height = state->src_h >> 16;
+ if (plane_state->src_rect.height == 0)
+ return false;
+
+ plane_state->dst_rect.x = state->crtc_x;
+ plane_state->dst_rect.y = state->crtc_y;
+
+ if (state->crtc_w == 0)
+ return false;
+
+ plane_state->dst_rect.width = state->crtc_w;
+
+ if (state->crtc_h == 0)
+ return false;
+
+ plane_state->dst_rect.height = state->crtc_h;
+
+ plane_state->clip_rect = plane_state->dst_rect;
+
+ switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_state->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_state->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_state->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ return true;
+}
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags,
+ uint64_t *fb_location)
+{
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+		/* Don't show an error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ if (fb_location)
+ *fb_location = amdgpu_bo_gpu_offset(rbo);
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
+static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+ struct dc_plane_state *plane_state,
+ const struct amdgpu_framebuffer *amdgpu_fb,
+ bool addReq)
+{
+ uint64_t tiling_flags;
+ uint64_t fb_location = 0;
+ uint64_t chroma_addr = 0;
+ unsigned int awidth;
+ const struct drm_framebuffer *fb = &amdgpu_fb->base;
+ int ret = 0;
+ struct drm_format_name_buf format_name;
+
+ ret = get_fb_info(
+ amdgpu_fb,
+ &tiling_flags,
+		addReq ? &fb_location : NULL);
+
+ if (ret)
+ return ret;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
+ return -EINVAL;
+ }
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
+ plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
+ plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
+ plane_state->plane_size.grph.surface_size.x = 0;
+ plane_state->plane_size.grph.surface_size.y = 0;
+ plane_state->plane_size.grph.surface_size.width = fb->width;
+ plane_state->plane_size.grph.surface_size.height = fb->height;
+ plane_state->plane_size.grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_SRGB;
+
+ } else {
+ awidth = ALIGN(fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(fb_location);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(fb_location);
+ chroma_addr = fb_location + (u64)(awidth * fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ plane_state->plane_size.video.luma_size.x = 0;
+ plane_state->plane_size.video.luma_size.y = 0;
+ plane_state->plane_size.video.luma_size.width = awidth;
+ plane_state->plane_size.video.luma_size.height = fb->height;
+ /* TODO: unhardcode */
+ plane_state->plane_size.video.luma_pitch = awidth;
+
+ plane_state->plane_size.video.chroma_size.x = 0;
+ plane_state->plane_size.video.chroma_size.y = 0;
+ plane_state->plane_size.video.chroma_size.width = awidth;
+ plane_state->plane_size.video.chroma_size.height = fb->height;
+ plane_state->plane_size.video.chroma_pitch = awidth / 2;
+
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_YCBCR709;
+ }
+
+ memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
+
+ /* Fill GFX8 params */
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ /* XXX fix me for VI */
+ plane_state->tiling_info.gfx8.num_banks = num_banks;
+ plane_state->tiling_info.gfx8.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ plane_state->tiling_info.gfx8.tile_split = tile_split;
+ plane_state->tiling_info.gfx8.bank_width = bankw;
+ plane_state->tiling_info.gfx8.bank_height = bankh;
+ plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
+ plane_state->tiling_info.gfx8.tile_mode =
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+ == DC_ARRAY_1D_TILED_THIN1) {
+ plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ }
+
+ plane_state->tiling_info.gfx8.pipe_config =
+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN) {
+ /* Fill GFX9 params */
+ plane_state->tiling_info.gfx9.num_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+ plane_state->tiling_info.gfx9.num_banks =
+ adev->gfx.config.gb_addr_config_fields.num_banks;
+ plane_state->tiling_info.gfx9.pipe_interleave =
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+ plane_state->tiling_info.gfx9.num_shader_engines =
+ adev->gfx.config.gb_addr_config_fields.num_se;
+ plane_state->tiling_info.gfx9.max_compressed_frags =
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+ plane_state->tiling_info.gfx9.num_rb_per_se =
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+ plane_state->tiling_info.gfx9.swizzle =
+ AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+ plane_state->tiling_info.gfx9.shaderEnable = 1;
+ }
+
+ plane_state->visible = true;
+ plane_state->scaling_quality.h_taps_c = 0;
+ plane_state->scaling_quality.v_taps_c = 0;
+
+ /* is this needed? is plane_state zeroed at allocation? */
+ plane_state->scaling_quality.h_taps = 0;
+ plane_state->scaling_quality.v_taps = 0;
+ plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ return ret;
+
+}
+
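+/* Convert the legacy 256-entry DRM gamma LUT attached to the CRTC state
+ * into a DC GAMMA_RGB_256 gamma object, one fixed-point entry per color
+ * channel.
+ */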
+static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *plane_state)
+{
+ int i;
+ struct dc_gamma *gamma;
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) crtc_state->gamma_lut->data;
+
+ gamma = dc_create_gamma();
+
+ if (gamma == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+ gamma->type = GAMMA_RGB_256;
+ gamma->num_entries = GAMMA_RGB_256_ENTRIES;
+ for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
+ gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
+ gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
+ gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
+ }
+
+ plane_state->gamma_correction = gamma;
+}
+
+static int fill_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state,
+ bool addrReq)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ const struct drm_crtc *crtc = plane_state->crtc;
+ struct dc_transfer_func *input_tf;
+ int ret = 0;
+
+ if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ return -EINVAL;
+
+ ret = fill_plane_attributes_from_fb(
+ crtc->dev->dev_private,
+ dc_plane_state,
+ amdgpu_fb,
+ addrReq);
+
+ if (ret)
+ return ret;
+
+ input_tf = dc_create_transfer_func();
+
+ if (input_tf == NULL)
+ return -ENOMEM;
+
+ input_tf->type = TF_TYPE_PREDEFINED;
+ input_tf->tf = TRANSFER_FUNCTION_SRGB;
+
+ dc_plane_state->in_transfer_func = input_tf;
+
+ /* In case of gamma set, update gamma value */
+ if (crtc_state->gamma_lut)
+ fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+ struct rect src = { 0 }; /* viewport in composition space*/
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector)
+{
+ uint32_t bpc = connector->display_info.bpc;
+
+	/* Limit color depth to 8 bpc for now.
+	 * TODO: still need to handle deep color.
+	 */
+ if (bpc > 8)
+ bpc = 8;
+
+ switch (bpc) {
+ case 0:
+		/* Temporary workaround: DRM doesn't parse the color depth
+		 * for EDID revisions before 1.4.
+		 * TODO: Fix EDID parsing
+		 */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
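+/* Classify the mode as 16:9 or 4:3 without floating point:
+ * hdisplay / vdisplay == 16 / 9 exactly when hdisplay * 9 == vdisplay * 16,
+ * so compare the cross products with a small tolerance.
+ */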
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ int32_t width = mode_in->crtc_hdisplay * 9;
+ int32_t height = mode_in->crtc_vdisplay * 16;
+
+ if ((width - height) < 10 && (width - height) > -10)
+ return ASPECT_RATIO_16_9;
+ else
+ return ASPECT_RATIO_4_3;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (dc_crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR420:
+ {
+		/*
+		 * According to the HDMI spec, 27030 kHz is the separation
+		 * point between HDTV and SDTV; we use YCbCr709 and YCbCr601
+		 * respectively.
+		 */
+ if (dc_crtc_timing->pix_clk_khz > 27030) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+
+ }
+ break;
+ case PIXEL_ENCODING_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return color_space;
+}
+
+/*****************************************************************************/
+
+static void
+fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+
+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+
+ if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+ timing_out->vic = drm_match_cea_mode(mode_in);
+
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width =
+ mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch =
+ mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch =
+ mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width =
+ mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_khz = mode_in->crtc_clock;
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ stream->output_color_space = get_output_color_space(timing_out);
+
+	{
+		struct dc_transfer_func *tf = dc_create_transfer_func();
+
+		/* Guard against allocation failure */
+		if (tf) {
+			tf->type = TF_TYPE_PREDEFINED;
+			tf->tf = TRANSFER_FUNCTION_SRGB;
+			stream->out_transfer_func = tf;
+		}
+	}
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strncpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check for progressive mode; check for interlaced mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+ /* no scaling nor amdgpu inserted, no need to patch */
+ }
+}
+
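+/* Create a virtual sink on the link so a stream can still be built when
+ * no real sink has been detected; the connector is then flagged
+ * fake_enable.
+ */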
+static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *sink = NULL;
+ struct dc_sink_init_data sink_init_data = { 0 };
+
+ sink_init_data.link = aconnector->dc_link;
+ sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return -ENOMEM;
+ }
+
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+ aconnector->fake_enable = true;
+
+ aconnector->dc_sink = sink;
+ aconnector->dc_link->local_sink = sink;
+
+ return 0;
+}
+
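+/* Build a dc_stream_state for the connector's sink from the requested
+ * DRM mode: fall back to a fake sink for non-MST connectors without one,
+ * decide the CRTC timing against the preferred (native) mode, then fill
+ * in timing, scaling and audio info.
+ */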
+static struct dc_stream_state *
+create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (aconnector == NULL) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ if (dm_state == NULL) {
+ DRM_ERROR("dm_state is NULL!\n");
+ goto dm_state_null;
+ }
+
+ drm_connector = &aconnector->base;
+
+ if (!aconnector->dc_sink) {
+ /*
+ * Exclude MST from creating fake_sink
+ * TODO: need to enable MST into fake_sink feature
+ */
+ if (aconnector->mst_port)
+ goto stream_create_fail;
+
+ if (create_fake_sink(aconnector))
+ goto stream_create_fail;
+ }
+
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+
+ if (preferred_mode == NULL) {
+		/* This may not be an error: the use case is when we have no
+		 * usermode calls to reset and set the mode upon hotplug. In
+		 * this case, we call set mode ourselves to restore the
+		 * previous mode, and the mode list may not be filled in yet.
+		 */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+ }
+
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base);
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+stream_create_fail:
+dm_state_null:
+drm_connector_null:
+ return stream;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+	/* TODO: destroy dc_stream objects once the stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(state);
+}
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+
+}
+
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	cur = to_dm_crtc_state(crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+	/* TODO: duplicate dc_stream once the stream object is flattened */
+
+ return &state->base;
+}
+
+/* Implement only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/* Notes:
+	 * 1. This interface is NOT called in the context of the HPD irq.
+	 * 2. This interface *is called* in the context of a user-mode ioctl,
+	 *    which makes it a bad place for *any* MST-related activity.
+	 */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+ return ret;
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
+ }
+
+ }
+#endif
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+ connector->state = &state->base;
+ connector->state->connector = connector;
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (new_state) {
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &new_state->base);
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_DRIVER("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+	struct edid *edid;
+
+	if (!aconnector->base.edid_blob_ptr ||
+		!aconnector->base.edid_blob_ptr->data) {
+		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
+			aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ aconnector->base.override_edid = false;
+ return;
+ }
+
+	/* Only dereference edid_blob_ptr after the NULL check above */
+	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+	aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+ /* In case of headless boot with force on for DP managed connector
+ * Those settings have to be != 0 to get initial modeset
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+ aconnector->base.override_edid = true;
+ create_eml_sink(aconnector);
+}
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+	/* Only run this the first time mode_valid is called to initialize
+	 * EDID mgmt
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ stream = dc_create_stream_for_sink(dc_sink);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+ fill_stream_properties_from_drm_display_mode(stream, mode, connector);
+
+ stream->src.width = mode->hdisplay;
+ stream->src.height = mode->vdisplay;
+ stream->dst = stream->src;
+
+ if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+ result = MODE_OK;
+
+ dc_stream_release(stream);
+
+fail:
+ /* TODO: error handling*/
+ return result;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	/*
+	 * If a second, bigger display is hotplugged in fbcon mode, the
+	 * higher-resolution modes will be filtered out by
+	 * drm_mode_validate_size() and will be missing once the user starts
+	 * lightdm. So we need to renew the mode list in the get_modes
+	 * callback, not just return the mode count.
+	 */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = best_encoder
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
+
+ if (unlikely(!dm_crtc_state->stream &&
+ modeset_required(state, NULL, dm_crtc_state->stream))) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
+
+ return ret;
+}
+
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+ .disable = dm_crtc_helper_disable,
+ .atomic_check = dm_crtc_helper_atomic_check,
+ .mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+ struct dm_plane_state *amdgpu_state = NULL;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+ WARN_ON(amdgpu_state == NULL);
+
+ if (amdgpu_state) {
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ }
+}
+
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+ if (old_dm_plane_state->dc_state) {
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+ dc_plane_state_retain(dm_plane_state->dc_state);
+ }
+
+ return &dm_plane_state->base;
+}
+
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (dm_plane_state->dc_state)
+ dc_plane_state_release(dm_plane_state->dc_state);
+
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static const struct drm_plane_funcs dm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = dm_drm_plane_reset,
+ .atomic_duplicate_state = dm_drm_plane_duplicate_state,
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
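+/* Pin the framebuffer BO into VRAM so scanout has a stable GPU address,
+ * then program that address (with a luma/chroma split for video formats)
+ * into the new dc_plane_state.
+ */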
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t chroma_addr = 0;
+ int r;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+
+ if (!new_state->fb) {
+ DRM_DEBUG_DRIVER("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+
+ obj = afb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ return r;
+ }
+
+ amdgpu_bo_ref(rbo);
+
+ if (dm_plane_state_new->dc_state &&
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+ plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(afb->address);
+ chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ }
+ }
+
+	/* This is a hack for S3: the 4.9 kernel filters out the cursor
+	 * buffer prepare and cleanup in drm_atomic_helper_prepare_planes
+	 * and drm_atomic_helper_cleanup_planes because the fb is not there
+	 * in S3. In the 4.10 kernel this code should be removed, and
+	 * amdgpu_device_suspend code that touches frame buffers should be
+	 * avoided for DC.
+	 */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+ acrtc->cursor_bo = obj;
+ }
+ return 0;
+}
+
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct amdgpu_bo *rbo;
+ struct amdgpu_framebuffer *afb;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+ afb = to_amdgpu_framebuffer(old_state->fb);
+ rbo = gem_to_amdgpu_bo(afb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
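+/*
+ * Let DC validate the plane state; planes without a DC plane state
+ * (e.g. the cursor plane) are accepted as-is.
+ */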
+static int dm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (!dm_plane_state->dc_state)
+ return 0;
+
+ if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ .prepare_fb = dm_plane_helper_prepare_fb,
+ .cleanup_fb = dm_plane_helper_cleanup_fb,
+ .atomic_check = dm_plane_atomic_check,
+};
+
+/*
+ * TODO: these are currently initialized to RGB formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so the internal
+ * drm check will succeed, and let DC implement a proper check.
+ */
+static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+static const uint32_t yuv_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
+};
+
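+/*
+ * Register a universal plane with the format list matching its type:
+ * RGB formats for the primary plane, YUV formats for overlays and
+ * ARGB8888 for the cursor.
+ */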
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs)
+{
+ int res = -EPERM;
+
+ switch (aplane->base.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ aplane->base.format_default = true;
+
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+ ARRAY_SIZE(rgb_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+ ARRAY_SIZE(yuv_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ }
+
+ drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (aplane->base.funcs->reset)
+ aplane->base.funcs->reset(&aplane->base);
+
+
+ return res;
+}
+
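+/*
+ * Allocate a dedicated cursor plane and initialize the CRTC with the
+ * given primary plane and that cursor plane attached.
+ */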
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t crtc_index)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_plane *cursor_plane;
+
+ int res = -ENOMEM;
+
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
+
+ cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail;
+
+ res = drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+ &cursor_plane->base,
+ &amdgpu_dm_crtc_funcs, NULL);
+
+ if (res)
+ goto fail;
+
+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+ /* Create (reset) the crtc state */
+ if (acrtc->base.funcs->reset)
+ acrtc->base.funcs->reset(&acrtc->base);
+
+ acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+ acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
+
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+ return 0;
+
+fail:
+ kfree(acrtc);
+ kfree(cursor_plane);
+ return res;
+}
+
+
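+/* Translate a DC signal type into the matching DRM connector type. */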
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = helper->best_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
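+/*
+ * Duplicate the native mode, resize it to the requested dimensions and
+ * clear the PREFERRED flag, so it can be offered as an additional mode.
+ */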
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+
+}
+
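+/*
+ * Offer a set of well-known modes, derived from the native mode, on
+ * connectors whose native mode is large enough; modes that were already
+ * probed are skipped.
+ */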
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ drm_edid_to_eld(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ return amdgpu_dm_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+ mutex_init(&aconnector->hpd_lock);
+
+ /* Configure HPD hot-plug support. The default value of
+ * connector->polled is 0, which means HPD hot plug is not supported.
+ */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+}
+
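+/*
+ * i2c master_xfer hook: translate the i2c_msg array into a DC
+ * i2c_command and submit it through the link's DDC service.
+ */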
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dal_i2caux_submit_i2c_command(
+ ddc_service->ctx->i2caux,
+ ddc_service->ddc_pin,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.class = I2C_CLASS_DDC;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ link->priv = aconnector;
+
+ DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens after. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ /* Even if registration failed, we should continue with
+ * DM initialization, because not having backlight control
+ * is better than a black screen.
+ */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
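+/* Return a bitmask with one bit set per available CRTC. */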
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
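+/*
+ * Enable or disable the vblank/pageflip interrupts for one CRTC; used
+ * around modesets so the ISRs never see a half-updated stream pointer.
+ */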
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * This is not a correct translation, but it works as long as the
+ * VBLANK constant is the same as the PFLIP one.
+ */
+ int irq_type =
+ amdgpu_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
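+/*
+ * Compare the scaling/underscan configuration of two connector states;
+ * returns true when a stream scaling update is required.
+ */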
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+ if (adev->dm.freesync_module)
+ mod_freesync_remove_stream(adev->dm.freesync_module, stream);
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
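+/*
+ * Compute the cursor position for DC, clamping negative coordinates
+ * into the hotspot so the cursor can move partially off-screen.
+ */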
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+ if (!crtc || !plane->state->fb) {
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+ return 0;
+ }
+
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+ __func__,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ return -EINVAL;
+ }
+
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+ /* avivo cursors are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+ position->enable = true;
+ position->x = x;
+ position->y = y;
+ position->x_hotspot = xorigin;
+ position->y_hotspot = yorigin;
+
+ return 0;
+}
+
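+/*
+ * Program the cursor attributes and position through DC, or disable the
+ * cursor when no fb is bound or it has moved fully off-screen.
+ */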
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position;
+ struct dc_cursor_attributes attributes;
+ int ret;
+
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+ DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+
+ ret = get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream)
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ return;
+ }
+
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
+
+ attributes.pitch = attributes.width;
+
+ if (crtc_state->stream) {
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
+
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+ }
+}
+
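+/*
+ * Hand the pending pageflip event over to the CRTC and mark the flip as
+ * submitted; must be called with the event_lock held.
+ */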
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+/*
+ * Executes a page flip.
+ *
+ * Waits on all of the BO's fences and for the proper vblank count.
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ uint32_t target,
+ struct dc_state *state)
+{
+ unsigned long flags;
+ uint32_t target_vblank;
+ int r, vpos, hpos;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+ target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+ /* TODO: This might fail and hence is better not used; wait
+ * explicitly on the fences instead. In general, for a blocking
+ * commit this should be handled by the framework helpers.
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve buffer before flip\n");
+ WARN_ON(1);
+ }
+
+ /* Wait for all fences on this FB */
+ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
+
+ amdgpu_bo_unreserve(abo);
+
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+ /* Flip */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
+
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
+
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+ addr.flip_immediate = async_flip;
+
+
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+ acrtc_state->stream,
+ NULL,
+ &surface_updates->surface,
+ state);
+
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
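+/*
+ * Commit the planes of one CRTC: cursor updates are handled directly,
+ * full (modeset) updates are collected and committed to the stream in a
+ * single call, and plain page flips go through amdgpu_dm_do_flip().
+ */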
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool *wait_for_vblank)
+{
+ uint32_t i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dc_stream_state *dc_stream_attach;
+ struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ int planes_count = 0;
+ unsigned long flags;
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool pflip_needed;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ pflip_needed = !state->allow_modeset;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("%s: acrtc %d, already busy\n",
+ __func__,
+ acrtc_attach->crtc_id);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (!pflip_needed) {
+ WARN_ON(!dm_new_plane_state->dc_state);
+
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
+
+ } else if (new_crtc_state->planes_changed) {
+ /* Assume that even ONE crtc with an immediate flip means the
+ * entire commit can't wait for VBLANK.
+ * TODO: Check whether this is correct.
+ */
+ *wait_for_vblank =
+ !(new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
+
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
+
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+ drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dm_state->context);
+ }
+
+ }
+
+ if (planes_count) {
+ unsigned long flags;
+
+ if (new_pcrtc_state->event) {
+
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (!dc_commit_planes_to_stream(dm->dc,
+ plane_states_constructed,
+ planes_count,
+ dc_stream_attach,
+ dm_state->context))
+ dm_error("%s: Failed to attach plane!\n", __func__);
+ } else {
+ /* TODO BUG: Disabling of the planes on the CRTC should go here. */
+ }
+}
+
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ /*
+ * We evade vblank and pflip interrupts on CRTCs that are about to
+ * change. We do it here, to flush & disable the interrupts before
+ * drm_swap_state is called in drm_atomic_helper_commit, since that
+ * updates the crtc->dm_crtc_state->stream pointer which is used in
+ * the ISRs.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
+ }
+ /* Add a check here for SoCs that support a hardware cursor plane, to
+ * unset legacy_cursor_update */
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+ /* TODO: Handle EINTR, re-enable IRQ */
+}
+
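+/*
+ * Commit tail: apply the new DC state, re-enable interrupts on CRTCs
+ * whose mode was set, update scaling/underscan, commit planes per CRTC,
+ * and finally deliver any pending events and signal completion.
+ */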
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ uint32_t i, j;
+ uint32_t new_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+ struct dc_stream_state *new_stream = NULL;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ dm_state = to_dm_atomic_state(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+ /*
+ * This could happen because of issues with the
+ * delivery of userspace notifications.
+ * In this case userspace tries to set a mode on
+ * a display which is in fact disconnected, and
+ * the aconnector's dc_sink is NULL.
+ * We expect a mode reset to come soon.
+ *
+ * This can also happen when an unplug is done
+ * while the resume sequence is still running.
+ *
+ * In this case, we want to pretend we still
+ * have a sink, to keep the pipe running so that
+ * the hw state is consistent with the sw state.
+ */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+
+ /*
+ * Save the CRTCs whose mode is being set; we need to enable
+ * vblanks on them once all resources have been acquired in dc,
+ * after dc_commit_state.
+ */
+
+ /* TODO: move all of this into dm_crtc_state, get rid of the
+ * new_crtcs array and use the old and new atomic states
+ * instead.
+ */
+ new_crtcs[new_crtcs_count] = acrtc;
+ new_crtcs_count++;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
+ } /* for_each_crtc_in_state() */
+
+ /*
+ * Add streams only after the required new and replaced streams
+ * have been removed from the freesync module.
+ */
+ if (adev->dm.freesync_module) {
+ for (i = 0; i < new_crtcs_count; i++) {
+ struct amdgpu_dm_connector *aconnector = NULL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ &new_crtcs[i]->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ new_stream = dm_new_crtc_state->stream;
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+ state,
+ &new_crtcs[i]->base);
+ if (!aconnector) {
+ DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
+ "skipping freesync init\n",
+ new_crtcs[i]->crtc_id);
+ continue;
+ }
+
+ mod_freesync_add_stream(adev->dm.freesync_module,
+ new_stream, &aconnector->caps);
+ }
+ }
+
+ if (dm_state->context)
+ WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+
+ /* Handle scaling and underscan changes*/
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_stream_status *status = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ /* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->plane_count);
+
+ /* TODO: How does this work with MPO? */
+ if (!dc_commit_planes_to_stream(
+ dm->dc,
+ status->plane_states,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ dm_state->context))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
+
+ for (i = 0; i < new_crtcs_count; i++) {
+ /*
+ * loop to enable interrupts on newly arrived crtc
+ */
+ struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (adev->dm.freesync_module)
+ mod_freesync_notify_mode_change(
+ adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ }
+
+
+ /*
+ * send vblank event on all events not handled in flip and
+ * mark consumed event for drm_atomic_helper_commit_hw_done
+ */
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+
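+/*
+ * Build a minimal atomic state (connector, CRTC and primary plane) with
+ * mode_changed forced, and commit it to restore the previous display
+ * configuration.
+ */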
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto err;
+
+ /* Attach crtc to drm_atomic_state*/
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto err;
+
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return 0;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a set mode does not come upon hotplug.
+ * This includes when the same display is unplugged and then plugged back into
+ * the same port, and when we are running without usermode desktop manager
+ * support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+
+ if (!disconnected_acrtc || !acrtc_state->stream)
+ return;
+
+ /*
+ * If the previous sink is not released and is different from the
+ * current one, we deduce we are in a state where we cannot rely on a
+ * usermode call to turn on the display, so we do it here.
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for the completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+ /* Adding all modeset locks to acquire_ctx will
+ * ensure that when the framework releases it, the
+ * extra locks we are locking here will get released too.
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /* Make sure all pending HW programming has completed and
+ * all page flips are done.
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
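+/*
+ * Walk the CRTC states and, depending on 'enable', either remove old
+ * streams from or add new streams to the DC context. Called twice from
+ * atomic check: first with enable == false, then with enable == true.
+ */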
+static int dm_update_crtcs_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /* TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *new_con_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+ /* Make sure a fake sink is created in the plug-in scenario */
+ new_con_state = drm_atomic_get_connector_state(state,
+ &aconnector->base);
+
+ if (IS_ERR(new_con_state)) {
+ ret = PTR_ERR_OR_ZERO(new_con_state);
+ break;
+ }
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_conn_state);
+
+ /*
+ * We can have no stream on ACTION_SET if a display
+ * was disconnected during S3; in this case it is not an
+ * error. The OS will be updated after detection and will
+ * do the right thing on the next atomic commit.
+ */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+ }
+
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+
+ new_crtc_state->mode_changed = false;
+
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_remove_stream_from_ctx(
+ dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent a NULL pointer on new_stream when newly
+ * added MST connectors are not found in the existing crtc_state
+ * in the chained mode.
+ * TODO: need to dig out the root cause of this.
+ */
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ goto next_crtc;
+
+ if (modereset_required(new_crtc_state))
+ goto next_crtc;
+
+ if (modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ dm_new_crtc_state->stream = new_stream;
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_add_stream_to_ctx(
+ dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+next_crtc:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+ }
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
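+/*
+ * Counterpart of dm_update_crtcs_state() for planes: remove modified
+ * planes from the DC context on the disable pass, and create, fill and
+ * add them on the enable pass. Skipped entirely for pure page flips.
+ */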
+static int dm_update_planes_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ int i;
+ /* TODO return page_flip_needed() function */
+ bool pflip_needed = !state->allow_modeset;
+ int ret = 0;
+
+ if (pflip_needed)
+ return ret;
+
+ /* Add new planes */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ /* TODO: Implement an atomic check for the cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+
+ if (!old_plane_crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ continue;
+
+ DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ if (!dc_remove_plane_from_context(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ continue;
+
+ if (!new_plane_crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ if (!dm_new_plane_state->dc_state) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = fill_plane_attributes(
+ new_plane_crtc->dev->dev_private,
+ dm_new_plane_state->dc_state,
+ new_plane_state,
+ new_crtc_state,
+ false);
+ if (ret)
+ return ret;
+
+
+ if (!dc_add_plane_to_context(
+ dc,
+ dm_new_crtc_state->stream,
+ dm_new_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+
+ return ret;
+}
+
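+/*
+ * Atomic check: build a DC context from the current state, replay the
+ * requested stream and plane removals/additions into it, and have DC
+ * validate the result. The global lock is only taken when a full
+ * (non-fast) update is detected.
+ */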
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+ int ret;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+
+ /*
+ * This bool will be set to true for any modeset/reset
+ * or plane update which implies a non-fast surface update.
+ */
+ bool lock_and_validation_needed = false;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ goto fail;
+
+ /*
+ * legacy_cursor_update should be made false for SoCs that have
+ * a dedicated hardware cursor plane, in amdgpu_dm_atomic_commit();
+ * otherwise, for a software cursor plane,
+ * we should not add it to the list of affected planes.
+ */
+ if (state->legacy_cursor_update) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->color_mgmt_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ } else {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+
+ dm_state->context = dc_create_state();
+ ASSERT(dm_state->context);
+ dc_resource_state_copy_construct_current(dc, dm_state->context);
+
+ /* Remove existing planes if they are modified */
+ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
+ if (ret)
+ goto fail;
+
+ /* Disable all crtcs which require disable */
+ ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
+ if (ret)
+ goto fail;
+
+ /* Enable all crtcs which require enable */
+ ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
+ if (ret)
+ goto fail;
+
+ /* Add new/modified planes */
+ ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
+ if (ret)
+ goto fail;
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ goto fail;
+
+ /* Check scaling and underscan changes */
+ /* TODO: Scaling-change validation was removed due to the inability to
+ * commit a new stream into the context w/o causing a full reset. Need
+ * to decide how to handle this.
+ */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+ /* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /*
+ * For the full-update case, when removing/adding/updating
+ * streams on one CRTC while flipping on another CRTC, acquiring
+ * the global lock guarantees that any such full update commit
+ * will wait for the completion of any outstanding flips using
+ * DRM's synchronization events.
+ */
+
+ if (lock_and_validation_needed) {
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+ goto fail;
+
+ if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+
+ return ret;
+}
+
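+/*
+ * Read the DPCD to check whether the sink can ignore the MSA timing
+ * parameters, which is a prerequisite for freesync over DP.
+ */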
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ uint8_t dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+ capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
+ }
+
+ return capable;
+}
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i;
+ uint64_t val_capable;
+ bool edid_check_required;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ edid_check_required = false;
+ if (!amdgpu_dm_connector->dc_sink) {
+ DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+ return;
+ }
+ if (!adev->dm.freesync_module)
+ return;
+ /*
+ * If the EDID is non-NULL, restrict freesync to DP and eDP only.
+ */
+ if (edid) {
+ if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ edid_check_required = is_dp_capable_without_timing_msa(
+ adev->dm.dc,
+ amdgpu_dm_connector);
+ }
+ }
+ val_capable = 0;
+ if (edid_check_required && (edid->version > 1 ||
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information is provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported.
+ */
+ if (range->flags != 1)
+ continue;
+
+ amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+ amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+ amdgpu_dm_connector->caps.supported = true;
+ amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+ amdgpu_dm_connector->min_vfreq * 1000000;
+ amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+ amdgpu_dm_connector->max_vfreq * 1000000;
+ val_capable = 1;
+ }
+ }
+
+ /*
+ * TODO figure out how to notify user-mode or DRM of freesync caps
+ * once we figure out how to deal with freesync in an upstreamable
+ * fashion
+ */
+
+}
+
+void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
+{
+ /*
+ * TODO fill in once we figure out how to deal with freesync in
+ * an upstreamable fashion
+ */
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..117521c6a6ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include "dc.h"
+
+/*
+ * This file contains the definition of amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality,
+ * and it is the only component that calls the DAL API.
+ * The API contained here is intended for amdgpu driver use.
+ * The API that is called directly from the KMS framework is located
+ * in the amdgpu_dm_kms.h file.
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+struct amdgpu_dm_prev_state {
+ struct drm_framebuffer *fb;
+ int32_t x;
+ int32_t y;
+ struct drm_display_mode mode;
+};
+
+struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dc_irq_source irq_src;
+};
+
+struct irq_list_head {
+ struct list_head head;
+ /* In case this interrupt needs post-processing, 'work' will be queued */
+ struct work_struct work;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+struct dm_comressor_info {
+ void *cpu_addr;
+ struct amdgpu_bo *bo_ptr;
+ uint64_t gpu_addr;
+};
+#endif
+
+
+struct amdgpu_display_manager {
+ struct dal *dal;
+ struct dc *dc;
+ struct cgs_device *cgs_device;
+ /* lock to be used when DAL is called from SYNC IRQ context */
+ spinlock_t dal_lock;
+
+ struct amdgpu_device *adev; /*AMD base driver*/
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+ struct amdgpu_dm_prev_state prev_state;
+
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+ *
+ * Each IRQ source may need to be handled at different contexts.
+ * By 'context' we mean, for example:
+ * - The ISR context, which is the direct interrupt handler.
+ * - The 'deferred' context - this is the post-processing of the
+ * interrupt, but at a lower priority.
+ *
+ * Note that handlers are called in the same order as they were
+ * registered (FIFO).
+ */
+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ struct common_irq_params
+ pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+ struct common_irq_params
+ vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+
+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
+ spinlock_t irq_handler_list_table_lock;
+
+ /* Timer-related data. */
+ struct list_head timer_handler_list;
+ struct workqueue_struct *timer_workqueue;
+
+ /* Use dal_mutex for any activity which is NOT synchronized by
+ * the DRM mode setting locks.
+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+ * the DRM mode setting locks being acquired. This is where dal_mutex
+ * is acquired before calling into DAL.
+ */
+ struct mutex dal_mutex;
+
+ struct backlight_device *backlight_dev;
+
+ const struct dc_link *backlight_link;
+
+ struct work_struct mst_hotplug_work;
+
+ struct mod_freesync *freesync_module;
+
+ /**
+ * Caches device atomic state for suspend/resume
+ */
+ struct drm_atomic_state *cached_state;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct dm_comressor_info compressor;
+#endif
+};
+
+struct amdgpu_dm_connector {
+
+ struct drm_connector base;
+ uint32_t connector_id;
+
+ /* we need to keep the EDID between detect
+ * and get_modes, due to analog/digital/tvencoder */
+ struct edid *edid;
+
+ /* shared with amdgpu */
+ struct amdgpu_hpd hpd;
+
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+
+ /* DM only */
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_dm_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /*freesync caps*/
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
+ bool fake_enable;
+};
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_plane_state;
+
+struct dm_plane_state {
+ struct drm_plane_state base;
+ struct dc_plane_state *dc_state;
+};
+
+struct dm_crtc_state {
+ struct drm_crtc_state base;
+ struct dc_stream_state *stream;
+};
+
+#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+
+struct dm_atomic_state {
+ struct drm_atomic_state base;
+
+ struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index);
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector);
+
+void amdgpu_dm_add_si