Kernel bump from 4.1.3-rt to 4.1.7-rt. 75/2375/1
author     José Pekkarinen <jose.pekkarinen@nokia.com>
           Fri, 9 Oct 2015 05:42:44 +0000 (08:42 +0300)
committer  José Pekkarinen <jose.pekkarinen@nokia.com>
           Fri, 9 Oct 2015 05:52:35 +0000 (08:52 +0300)
These changes bring in a vanilla kernel from kernel.org; the RT patch
applied on top is patch-4.1.7-rt8.patch. No further changes were needed.

Change-Id: Id8dd03c2ddd971e4d1d69b905f3069737053b700
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
628 files changed:
kernel/Documentation/ABI/testing/ima_policy
kernel/Documentation/ABI/testing/sysfs-ata
kernel/Documentation/ABI/testing/sysfs-bus-iio
kernel/Documentation/devicetree/bindings/clock/keystone-pll.txt
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
kernel/Documentation/devicetree/bindings/usb/atmel-usb.txt
kernel/Documentation/hwmon/nct7904
kernel/Documentation/input/alps.txt
kernel/Documentation/kbuild/makefiles.txt
kernel/Documentation/kernel-parameters.txt
kernel/Makefile
kernel/arch/arc/Makefile
kernel/arch/arc/include/asm/bitops.h
kernel/arch/arc/include/asm/ptrace.h
kernel/arch/arm/boot/dts/am57xx-beagle-x15.dts
kernel/arch/arm/boot/dts/at91-sama5d4ek.dts
kernel/arch/arm/boot/dts/at91sam9g45.dtsi
kernel/arch/arm/boot/dts/at91sam9x5.dtsi
kernel/arch/arm/boot/dts/dra7-evm.dts
kernel/arch/arm/boot/dts/dra7.dtsi
kernel/arch/arm/boot/dts/dra72-evm.dts
kernel/arch/arm/boot/dts/imx23.dtsi
kernel/arch/arm/boot/dts/imx35.dtsi
kernel/arch/arm/boot/dts/imx6qdl.dtsi
kernel/arch/arm/boot/dts/k2e-clocks.dtsi
kernel/arch/arm/boot/dts/k2hk-clocks.dtsi
kernel/arch/arm/boot/dts/k2l-clocks.dtsi
kernel/arch/arm/boot/dts/omap2430.dtsi
kernel/arch/arm/boot/dts/omap4.dtsi
kernel/arch/arm/boot/dts/omap5.dtsi
kernel/arch/arm/boot/dts/sama5d3.dtsi
kernel/arch/arm/boot/dts/sama5d4.dtsi
kernel/arch/arm/kernel/smp.c
kernel/arch/arm/mach-bcm/Makefile
kernel/arch/arm/mach-bcm/brcmstb.h [deleted file]
kernel/arch/arm/mach-bcm/headsmp-brcmstb.S [deleted file]
kernel/arch/arm/mach-bcm/platsmp-brcmstb.c
kernel/arch/arm/mach-berlin/headsmp.S
kernel/arch/arm/mach-berlin/platsmp.c
kernel/arch/arm/mach-dove/include/mach/irqs.h
kernel/arch/arm/mach-dove/irq.c
kernel/arch/arm/mach-hisi/Makefile
kernel/arch/arm/mach-hisi/core.h
kernel/arch/arm/mach-hisi/headsmp.S [deleted file]
kernel/arch/arm/mach-hisi/platsmp.c
kernel/arch/arm/mach-imx/gpc.c
kernel/arch/arm/mach-imx/headsmp.S
kernel/arch/arm/mach-mvebu/headsmp-a9.S
kernel/arch/arm/mach-omap2/omap-wakeupgen.c
kernel/arch/arm/mach-omap2/omap_hwmod.c
kernel/arch/arm/mach-prima2/headsmp.S
kernel/arch/arm/mach-pxa/capc7117.c
kernel/arch/arm/mach-pxa/cm-x2xx.c
kernel/arch/arm/mach-pxa/cm-x300.c
kernel/arch/arm/mach-pxa/colibri-pxa270.c
kernel/arch/arm/mach-pxa/em-x270.c
kernel/arch/arm/mach-pxa/icontrol.c
kernel/arch/arm/mach-pxa/trizeps4.c
kernel/arch/arm/mach-pxa/vpac270.c
kernel/arch/arm/mach-pxa/zeus.c
kernel/arch/arm/mach-rockchip/core.h
kernel/arch/arm/mach-rockchip/headsmp.S
kernel/arch/arm/mach-rockchip/platsmp.c
kernel/arch/arm/mach-shmobile/common.h
kernel/arch/arm/mach-shmobile/headsmp-scu.S
kernel/arch/arm/mach-shmobile/headsmp.S
kernel/arch/arm/mach-shmobile/platsmp-apmu.c
kernel/arch/arm/mach-socfpga/core.h
kernel/arch/arm/mach-socfpga/headsmp.S
kernel/arch/arm/mach-socfpga/platsmp.c
kernel/arch/arm/mach-tegra/Makefile
kernel/arch/arm/mach-tegra/headsmp.S [deleted file]
kernel/arch/arm/mach-tegra/reset.c
kernel/arch/arm/mach-tegra/reset.h
kernel/arch/arm/mach-zynq/common.h
kernel/arch/arm/mach-zynq/headsmp.S
kernel/arch/arm/mach-zynq/platsmp.c
kernel/arch/arm/mm/dma-mapping.c
kernel/arch/arm/mm/proc-v7.S
kernel/arch/arm/vdso/Makefile
kernel/arch/arm/vdso/vdsomunge.c
kernel/arch/arm64/kernel/efi.c
kernel/arch/arm64/kernel/perf_event.c
kernel/arch/arm64/kernel/signal32.c
kernel/arch/arm64/kernel/smp.c
kernel/arch/arm64/kvm/inject_fault.c
kernel/arch/arm64/mm/hugetlbpage.c
kernel/arch/arm64/net/bpf_jit.h
kernel/arch/arm64/net/bpf_jit_comp.c
kernel/arch/avr32/mach-at32ap/clock.c
kernel/arch/m68k/Kconfig.cpu
kernel/arch/m68k/include/asm/coldfire.h
kernel/arch/mips/Kconfig
kernel/arch/mips/ath79/setup.c
kernel/arch/mips/include/asm/fpu.h
kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h [deleted file]
kernel/arch/mips/include/asm/pgtable.h
kernel/arch/mips/include/asm/smp.h
kernel/arch/mips/include/asm/stackframe.h
kernel/arch/mips/kernel/mips-mt-fpaff.c
kernel/arch/mips/kernel/relocate_kernel.S
kernel/arch/mips/kernel/scall64-64.S
kernel/arch/mips/kernel/scall64-n32.S
kernel/arch/mips/kernel/signal32.c
kernel/arch/mips/kernel/smp.c
kernel/arch/mips/kernel/traps.c
kernel/arch/mips/kernel/unaligned.c
kernel/arch/mips/lantiq/irq.c
kernel/arch/mips/math-emu/cp1emu.c
kernel/arch/mips/mm/c-r4k.c
kernel/arch/mips/mti-malta/malta-time.c
kernel/arch/mips/mti-sead3/sead3-time.c
kernel/arch/mips/pistachio/time.c
kernel/arch/mips/ralink/irq.c
kernel/arch/openrisc/Kconfig
kernel/arch/parisc/include/asm/pgalloc.h
kernel/arch/parisc/include/asm/pgtable.h
kernel/arch/parisc/include/asm/tlbflush.h
kernel/arch/parisc/kernel/cache.c
kernel/arch/parisc/kernel/entry.S
kernel/arch/parisc/kernel/traps.c
kernel/arch/powerpc/kernel/idle_power7.S
kernel/arch/powerpc/kernel/signal_32.c
kernel/arch/s390/include/asm/ctl_reg.h
kernel/arch/s390/kernel/cache.c
kernel/arch/s390/kernel/nmi.c
kernel/arch/s390/kernel/process.c
kernel/arch/s390/kernel/sclp.S
kernel/arch/s390/net/bpf_jit_comp.c
kernel/arch/sparc/include/asm/visasm.h
kernel/arch/sparc/lib/NG4memcpy.S
kernel/arch/sparc/lib/VISsave.S
kernel/arch/sparc/lib/ksyms.c
kernel/arch/tile/kernel/compat_signal.c
kernel/arch/tile/kernel/setup.c
kernel/arch/x86/boot/compressed/eboot.c
kernel/arch/x86/include/asm/kasan.h
kernel/arch/x86/include/asm/mmu_context.h
kernel/arch/x86/include/asm/sigcontext.h
kernel/arch/x86/include/uapi/asm/sigcontext.h
kernel/arch/x86/kernel/apic/apic.c
kernel/arch/x86/kernel/cpu/perf_event_intel_cqm.c
kernel/arch/x86/kernel/dumpstack_32.c
kernel/arch/x86/kernel/dumpstack_64.c
kernel/arch/x86/kernel/entry_64.S
kernel/arch/x86/kernel/head64.c
kernel/arch/x86/kernel/head_64.S
kernel/arch/x86/kernel/nmi.c
kernel/arch/x86/kernel/process.c
kernel/arch/x86/kernel/signal.c
kernel/arch/x86/kvm/lapic.h
kernel/arch/x86/mm/kasan_init_64.c
kernel/arch/x86/mm/mmap.c
kernel/arch/x86/mm/mpx.c
kernel/arch/x86/mm/tlb.c
kernel/arch/x86/platform/efi/efi.c
kernel/arch/x86/xen/Kconfig
kernel/arch/x86/xen/Makefile
kernel/arch/x86/xen/enlighten.c
kernel/arch/x86/xen/xen-ops.h
kernel/block/bio-integrity.c
kernel/block/bio.c
kernel/block/blk-cgroup.c
kernel/block/blk-mq.c
kernel/block/blk-settings.c
kernel/crypto/asymmetric_keys/asymmetric_keys.h
kernel/crypto/asymmetric_keys/asymmetric_type.c
kernel/crypto/asymmetric_keys/x509_public_key.c
kernel/drivers/acpi/acpi_lpss.c
kernel/drivers/acpi/acpica/aclocal.h
kernel/drivers/acpi/acpica/tbfadt.c
kernel/drivers/acpi/acpica/tbutils.c
kernel/drivers/acpi/acpica/tbxfload.c
kernel/drivers/acpi/acpica/utxfinit.c
kernel/drivers/acpi/osl.c
kernel/drivers/acpi/resource.c
kernel/drivers/ata/libata-core.c
kernel/drivers/ata/libata-eh.c
kernel/drivers/ata/libata-pmp.c
kernel/drivers/ata/libata-scsi.c
kernel/drivers/ata/libata-transport.c
kernel/drivers/ata/libata.h
kernel/drivers/base/firmware_class.c
kernel/drivers/base/power/clock_ops.c
kernel/drivers/base/regmap/regcache-rbtree.c
kernel/drivers/block/loop.c
kernel/drivers/block/loop.h
kernel/drivers/block/rbd.c
kernel/drivers/block/xen-blkback/blkback.c
kernel/drivers/block/xen-blkfront.c
kernel/drivers/bluetooth/btbcm.c
kernel/drivers/bluetooth/btusb.c
kernel/drivers/bus/arm-ccn.c
kernel/drivers/char/agp/intel-gtt.c
kernel/drivers/char/hw_random/core.c
kernel/drivers/char/i8k.c
kernel/drivers/char/tpm/tpm-chip.c
kernel/drivers/char/tpm/tpm_crb.c
kernel/drivers/char/tpm/tpm_ibmvtpm.c
kernel/drivers/clk/clk.c
kernel/drivers/clk/keystone/pll.c
kernel/drivers/clk/pxa/clk-pxa3xx.c
kernel/drivers/clk/qcom/clk-rcg2.c
kernel/drivers/clk/st/clk-flexgen.c
kernel/drivers/clk/st/clkgen-fsyn.c
kernel/drivers/clk/st/clkgen-mux.c
kernel/drivers/clk/ti/clk-dra7-atl.c
kernel/drivers/clocksource/exynos_mct.c
kernel/drivers/cpufreq/intel_pstate.c
kernel/drivers/crypto/caam/caamhash.c
kernel/drivers/crypto/ixp4xx_crypto.c
kernel/drivers/crypto/nx/nx-aes-ccm.c
kernel/drivers/crypto/nx/nx-aes-ctr.c
kernel/drivers/crypto/nx/nx-aes-gcm.c
kernel/drivers/crypto/nx/nx-aes-xcbc.c
kernel/drivers/crypto/nx/nx-sha256.c
kernel/drivers/crypto/nx/nx-sha512.c
kernel/drivers/crypto/nx/nx.c
kernel/drivers/crypto/nx/nx.h
kernel/drivers/crypto/omap-des.c
kernel/drivers/crypto/qat/qat_common/qat_algs.c
kernel/drivers/dma/at_xdmac.c
kernel/drivers/dma/mv_xor.c
kernel/drivers/dma/mv_xor.h
kernel/drivers/dma/pl330.c
kernel/drivers/edac/octeon_edac-l2c.c
kernel/drivers/edac/octeon_edac-lmc.c
kernel/drivers/edac/octeon_edac-pc.c
kernel/drivers/edac/ppc4xx_edac.c
kernel/drivers/firmware/dmi_scan.c
kernel/drivers/firmware/efi/cper.c
kernel/drivers/firmware/efi/efi.c
kernel/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
kernel/drivers/gpu/drm/bridge/ptn3460.c
kernel/drivers/gpu/drm/drm_crtc.c
kernel/drivers/gpu/drm/drm_dp_mst_topology.c
kernel/drivers/gpu/drm/drm_ioc32.c
kernel/drivers/gpu/drm/i915/i915_drv.h
kernel/drivers/gpu/drm/i915/i915_gem.c
kernel/drivers/gpu/drm/i915/i915_gem_gtt.c
kernel/drivers/gpu/drm/i915/i915_gem_tiling.c
kernel/drivers/gpu/drm/i915/i915_ioc32.c
kernel/drivers/gpu/drm/i915/i915_irq.c
kernel/drivers/gpu/drm/i915/i915_reg.h
kernel/drivers/gpu/drm/i915/intel_display.c
kernel/drivers/gpu/drm/i915/intel_dp.c
kernel/drivers/gpu/drm/i915/intel_drv.h
kernel/drivers/gpu/drm/i915/intel_lrc.c
kernel/drivers/gpu/drm/i915/intel_panel.c
kernel/drivers/gpu/drm/i915/intel_ringbuffer.h
kernel/drivers/gpu/drm/i915/intel_uncore.c
kernel/drivers/gpu/drm/nouveau/nouveau_drm.c
kernel/drivers/gpu/drm/nouveau/nv04_fbcon.c
kernel/drivers/gpu/drm/nouveau/nv50_display.c
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
kernel/drivers/gpu/drm/qxl/qxl_cmd.c
kernel/drivers/gpu/drm/qxl/qxl_ioctl.c
kernel/drivers/gpu/drm/radeon/ci_dpm.c
kernel/drivers/gpu/drm/radeon/cik.c
kernel/drivers/gpu/drm/radeon/cik_sdma.c
kernel/drivers/gpu/drm/radeon/dce6_afmt.c
kernel/drivers/gpu/drm/radeon/evergreen.c
kernel/drivers/gpu/drm/radeon/r600.c
kernel/drivers/gpu/drm/radeon/radeon_audio.c
kernel/drivers/gpu/drm/radeon/radeon_audio.h
kernel/drivers/gpu/drm/radeon/radeon_combios.c
kernel/drivers/gpu/drm/radeon/radeon_connectors.c
kernel/drivers/gpu/drm/radeon/radeon_cursor.c
kernel/drivers/gpu/drm/radeon/radeon_device.c
kernel/drivers/gpu/drm/radeon/radeon_fb.c
kernel/drivers/gpu/drm/radeon/radeon_gart.c
kernel/drivers/gpu/drm/radeon/radeon_gem.c
kernel/drivers/gpu/drm/radeon/radeon_irq_kms.c
kernel/drivers/gpu/drm/radeon/radeon_mode.h
kernel/drivers/gpu/drm/radeon/radeon_object.c
kernel/drivers/gpu/drm/radeon/si.c
kernel/drivers/gpu/drm/radeon/si_dpm.c
kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
kernel/drivers/gpu/drm/tegra/dpaux.c
kernel/drivers/gpu/drm/vgem/vgem_drv.c
kernel/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
kernel/drivers/hid/hid-cp2112.c
kernel/drivers/hid/hid-input.c
kernel/drivers/hid/hid-uclogic.c
kernel/drivers/hwmon/mcp3021.c
kernel/drivers/hwmon/nct7802.c
kernel/drivers/hwmon/nct7904.c
kernel/drivers/i2c/busses/i2c-at91.c
kernel/drivers/i2c/i2c-mux.c
kernel/drivers/i2c/muxes/i2c-mux-pca9541.c
kernel/drivers/i2c/muxes/i2c-mux-pca954x.c
kernel/drivers/iio/accel/bmc150-accel.c
kernel/drivers/iio/adc/Kconfig
kernel/drivers/iio/adc/at91_adc.c
kernel/drivers/iio/adc/rockchip_saradc.c
kernel/drivers/iio/adc/twl4030-madc.c
kernel/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
kernel/drivers/iio/dac/ad5624r_spi.c
kernel/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
kernel/drivers/iio/light/cm3323.c
kernel/drivers/iio/light/tcs3414.c
kernel/drivers/iio/proximity/sx9500.c
kernel/drivers/iio/temperature/tmp006.c
kernel/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
kernel/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
kernel/drivers/input/keyboard/gpio_keys_polled.c
kernel/drivers/input/mouse/alps.c
kernel/drivers/input/mouse/synaptics.c
kernel/drivers/input/touchscreen/usbtouchscreen.c
kernel/drivers/iommu/intel-iommu.c
kernel/drivers/irqchip/irq-crossbar.c
kernel/drivers/irqchip/irq-gic-v3-its.c
kernel/drivers/md/bitmap.c
kernel/drivers/md/dm-cache-policy-cleaner.c
kernel/drivers/md/dm-cache-policy-internal.h
kernel/drivers/md/dm-cache-policy-mq.c
kernel/drivers/md/dm-cache-policy.h
kernel/drivers/md/dm-cache-target.c
kernel/drivers/md/dm-stats.c
kernel/drivers/md/dm-thin-metadata.c
kernel/drivers/md/dm-thin.c
kernel/drivers/md/dm.c
kernel/drivers/md/md.c
kernel/drivers/md/persistent-data/dm-btree-remove.c
kernel/drivers/md/persistent-data/dm-btree.c
kernel/drivers/md/persistent-data/dm-space-map-metadata.c
kernel/drivers/md/raid1.c
kernel/drivers/media/dvb-frontends/af9013.c
kernel/drivers/media/dvb-frontends/cx24116.c
kernel/drivers/media/dvb-frontends/cx24117.c
kernel/drivers/media/dvb-frontends/s5h1420.c
kernel/drivers/media/pci/cx18/cx18-streams.c
kernel/drivers/media/pci/saa7164/saa7164-encoder.c
kernel/drivers/media/pci/saa7164/saa7164-vbi.c
kernel/drivers/media/usb/dvb-usb/dib0700_core.c
kernel/drivers/media/usb/dvb-usb/dib0700_devices.c
kernel/drivers/media/v4l2-core/videobuf2-core.c
kernel/drivers/mfd/arizona-core.c
kernel/drivers/misc/cxl/context.c
kernel/drivers/misc/cxl/main.c
kernel/drivers/misc/mei/main.c
kernel/drivers/mmc/card/block.c
kernel/drivers/mmc/host/omap_hsmmc.c
kernel/drivers/mmc/host/sdhci-esdhc.h
kernel/drivers/mmc/host/sdhci-pxav3.c
kernel/drivers/mmc/host/sdhci.c
kernel/drivers/net/can/c_can/c_can.c
kernel/drivers/net/can/dev.c
kernel/drivers/net/can/rcar_can.c
kernel/drivers/net/can/slcan.c
kernel/drivers/net/can/spi/mcp251x.c
kernel/drivers/net/can/usb/peak_usb/pcan_usb.c
kernel/drivers/net/can/usb/peak_usb/pcan_usb_core.c
kernel/drivers/net/can/usb/peak_usb/pcan_usb_core.h
kernel/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
kernel/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
kernel/drivers/net/can/vcan.c
kernel/drivers/net/ethernet/intel/e1000e/82571.c
kernel/drivers/net/ethernet/intel/e1000e/ich8lan.c
kernel/drivers/net/ethernet/intel/e1000e/netdev.c
kernel/drivers/net/wireless/ath/ath10k/pci.c
kernel/drivers/net/wireless/ath/ath9k/htc.h
kernel/drivers/net/wireless/ath/ath9k/main.c
kernel/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
kernel/drivers/net/wireless/iwlwifi/mvm/debugfs.c
kernel/drivers/net/wireless/iwlwifi/mvm/mac80211.c
kernel/drivers/net/wireless/iwlwifi/mvm/mvm.h
kernel/drivers/net/wireless/iwlwifi/mvm/time-event.c
kernel/drivers/net/wireless/iwlwifi/mvm/tx.c
kernel/drivers/net/wireless/iwlwifi/pcie/trans.c
kernel/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
kernel/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
kernel/drivers/net/wireless/rtlwifi/core.c
kernel/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
kernel/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
kernel/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
kernel/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
kernel/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
kernel/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
kernel/drivers/nfc/st21nfcb/i2c.c
kernel/drivers/nfc/st21nfcb/st21nfcb.c
kernel/drivers/of/address.c
kernel/drivers/of/base.c
kernel/drivers/pci/Kconfig
kernel/drivers/phy/phy-berlin-usb.c
kernel/drivers/phy/phy-twl4030-usb.c
kernel/drivers/pinctrl/freescale/pinctrl-imx1-core.c
kernel/drivers/pinctrl/mvebu/pinctrl-armada-370.c
kernel/drivers/pinctrl/mvebu/pinctrl-armada-375.c
kernel/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
kernel/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
kernel/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
kernel/drivers/pinctrl/pinctrl-zynq.c
kernel/drivers/platform/x86/dell-laptop.c
kernel/drivers/platform/x86/ideapad-laptop.c
kernel/drivers/pnp/system.c
kernel/drivers/regulator/s2mps11.c
kernel/drivers/rtc/rtc-snvs.c
kernel/drivers/scsi/fnic/fnic.h
kernel/drivers/scsi/fnic/fnic_scsi.c
kernel/drivers/scsi/ipr.c
kernel/drivers/scsi/ipr.h
kernel/drivers/scsi/libfc/fc_exch.c
kernel/drivers/scsi/libfc/fc_fcp.c
kernel/drivers/scsi/libiscsi.c
kernel/drivers/scsi/qla2xxx/qla_dbg.c
kernel/drivers/scsi/qla2xxx/qla_init.c
kernel/drivers/scsi/qla2xxx/qla_target.c
kernel/drivers/scsi/scsi_error.c
kernel/drivers/scsi/scsi_lib.c
kernel/drivers/scsi/scsi_pm.c
kernel/drivers/scsi/scsi_sysfs.c
kernel/drivers/scsi/sd.c
kernel/drivers/scsi/st.c
kernel/drivers/spi/spi-img-spfi.c
kernel/drivers/spi/spi-imx.c
kernel/drivers/staging/comedi/drivers/cb_pcimdas.c
kernel/drivers/staging/lustre/lustre/obdclass/debug.c
kernel/drivers/staging/rtl8712/rtl8712_recv.c
kernel/drivers/staging/vt6655/device_main.c
kernel/drivers/staging/vt6656/main_usb.c
kernel/drivers/target/iscsi/iscsi_target.c
kernel/drivers/target/iscsi/iscsi_target_login.c
kernel/drivers/target/iscsi/iscsi_target_login.h
kernel/drivers/target/iscsi/iscsi_target_nego.c
kernel/drivers/thermal/samsung/exynos_tmu.c
kernel/drivers/tty/n_tty.c
kernel/drivers/tty/serial/Kconfig
kernel/drivers/tty/serial/atmel_serial.c
kernel/drivers/tty/serial/imx.c
kernel/drivers/tty/serial/serial_core.c
kernel/drivers/tty/sysrq.c
kernel/drivers/usb/chipidea/core.c
kernel/drivers/usb/chipidea/host.c
kernel/drivers/usb/chipidea/host.h
kernel/drivers/usb/core/devio.c
kernel/drivers/usb/core/hcd.c
kernel/drivers/usb/core/hub.c
kernel/drivers/usb/core/usb.h
kernel/drivers/usb/dwc3/ep0.c
kernel/drivers/usb/dwc3/gadget.c
kernel/drivers/usb/gadget/composite.c
kernel/drivers/usb/gadget/function/f_fs.c
kernel/drivers/usb/gadget/function/f_mass_storage.c
kernel/drivers/usb/gadget/function/f_uac2.c
kernel/drivers/usb/gadget/udc/mv_udc_core.c
kernel/drivers/usb/gadget/udc/udc-core.c
kernel/drivers/usb/host/ohci-q.c
kernel/drivers/usb/host/xhci-hub.c
kernel/drivers/usb/host/xhci-mem.c
kernel/drivers/usb/host/xhci-ring.c
kernel/drivers/usb/host/xhci.c
kernel/drivers/usb/host/xhci.h
kernel/drivers/usb/musb/musb_virthub.c
kernel/drivers/usb/phy/phy-mxs-usb.c
kernel/drivers/usb/serial/cp210x.c
kernel/drivers/usb/serial/option.c
kernel/drivers/usb/serial/qcserial.c
kernel/drivers/usb/serial/sierra.c
kernel/drivers/usb/serial/usb-serial.c
kernel/drivers/usb/storage/unusual_devs.h
kernel/drivers/vhost/vhost.c
kernel/drivers/w1/slaves/w1_therm.c
kernel/drivers/watchdog/omap_wdt.c
kernel/drivers/xen/gntdev.c
kernel/drivers/xen/xenbus/xenbus_client.c
kernel/fs/9p/vfs_inode.c
kernel/fs/9p/vfs_inode_dotl.c
kernel/fs/btrfs/inode-map.c
kernel/fs/btrfs/ioctl.c
kernel/fs/btrfs/transaction.c
kernel/fs/btrfs/tree-log.c
kernel/fs/dcache.c
kernel/fs/ext4/extents.c
kernel/fs/ext4/indirect.c
kernel/fs/ext4/inode.c
kernel/fs/ext4/mballoc.c
kernel/fs/ext4/migrate.c
kernel/fs/ext4/super.c
kernel/fs/fuse/inode.c
kernel/fs/hpfs/super.c
kernel/fs/jbd2/checkpoint.c
kernel/fs/jbd2/journal.c
kernel/fs/namespace.c
kernel/fs/nfs/flexfilelayout/flexfilelayout.c
kernel/fs/nfs/flexfilelayout/flexfilelayoutdev.c
kernel/fs/nfs/inode.c
kernel/fs/nfs/nfs3xdr.c
kernel/fs/nfs/nfs4proc.c
kernel/fs/nfs/nfs4state.c
kernel/fs/nfs/pagelist.c
kernel/fs/nfs/pnfs.c
kernel/fs/nfs/write.c
kernel/fs/nfsd/nfs4state.c
kernel/fs/nfsd/nfs4xdr.c
kernel/fs/notify/mark.c
kernel/fs/ocfs2/aops.c
kernel/fs/ocfs2/dlmglue.c
kernel/fs/overlayfs/readdir.c
kernel/fs/pnode.h
kernel/fs/signalfd.c
kernel/fs/xfs/libxfs/xfs_attr_remote.c
kernel/fs/xfs/xfs_attr_inactive.c
kernel/fs/xfs/xfs_inode.c
kernel/fs/xfs/xfs_inode.h
kernel/fs/xfs/xfs_log_recover.c
kernel/fs/xfs/xfs_symlink.c
kernel/include/acpi/acpixf.h
kernel/include/acpi/actypes.h
kernel/include/drm/drm_atomic.h
kernel/include/drm/drm_crtc.h
kernel/include/drm/drm_dp_mst_helper.h
kernel/include/drm/drm_pciids.h
kernel/include/linux/acpi.h
kernel/include/linux/ata.h
kernel/include/linux/buffer_head.h
kernel/include/linux/can/skb.h
kernel/include/linux/compiler-intel.h
kernel/include/linux/cper.h
kernel/include/linux/ftrace.h
kernel/include/linux/gpio/consumer.h
kernel/include/linux/hid-sensor-hub.h
kernel/include/linux/highmem.h
kernel/include/linux/interrupt.h
kernel/include/linux/irq.h
kernel/include/linux/jbd2.h
kernel/include/linux/libata.h
kernel/include/linux/mtd/nand.h
kernel/include/linux/nfs_xdr.h
kernel/include/linux/of.h
kernel/include/scsi/scsi_eh.h
kernel/include/target/iscsi/iscsi_target_core.h
kernel/include/uapi/drm/i915_drm.h
kernel/include/uapi/linux/pci_regs.h
kernel/ipc/mqueue.c
kernel/ipc/sem.c
kernel/kernel/cpuset.c
kernel/kernel/events/core.c
kernel/kernel/events/ring_buffer.c
kernel/kernel/irq/chip.c
kernel/kernel/irq/manage.c
kernel/kernel/irq/resend.c
kernel/kernel/power/Kconfig
kernel/kernel/printk/printk.c
kernel/kernel/signal.c
kernel/kernel/trace/ftrace.c
kernel/kernel/trace/trace.h
kernel/kernel/trace/trace_branch.c
kernel/kernel/trace/trace_events_filter.c
kernel/kernel/trace/trace_irqsoff.c
kernel/lib/bitmap.c
kernel/lib/dma-debug.c
kernel/lib/dump_stack.c
kernel/localversion-rt
kernel/mm/hugetlb.c
kernel/mm/memory-failure.c
kernel/mm/memory.c
kernel/mm/vmscan.c
kernel/net/9p/client.c
kernel/net/bluetooth/hci_sock.c
kernel/net/bluetooth/smp.c
kernel/net/can/af_can.c
kernel/net/can/bcm.c
kernel/net/can/raw.c
kernel/net/ceph/osdmap.c
kernel/net/ieee802154/socket.c
kernel/net/mac80211/cfg.c
kernel/net/mac80211/debugfs_netdev.c
kernel/net/mac80211/ibss.c
kernel/net/mac80211/main.c
kernel/net/mac80211/mesh.c
kernel/net/mac80211/rc80211_minstrel.c
kernel/net/rds/ib_rdma.c
kernel/net/sunrpc/backchannel_rqst.c
kernel/net/wireless/util.c
kernel/samples/trace_events/trace-events-sample.h
kernel/scripts/kconfig/streamline_config.pl
kernel/security/integrity/evm/evm_main.c
kernel/security/integrity/ima/ima.h
kernel/security/integrity/ima/ima_fs.c
kernel/security/integrity/ima/ima_policy.c
kernel/security/integrity/ima/ima_template_lib.c
kernel/security/keys/keyring.c
kernel/security/selinux/hooks.c
kernel/security/selinux/ss/ebitmap.c
kernel/sound/core/pcm_native.c
kernel/sound/firewire/amdtp.c
kernel/sound/firewire/amdtp.h
kernel/sound/firewire/fireworks/fireworks.c
kernel/sound/firewire/fireworks/fireworks.h
kernel/sound/firewire/fireworks/fireworks_stream.c
kernel/sound/pci/hda/hda_codec.c
kernel/sound/pci/hda/hda_generic.c
kernel/sound/pci/hda/hda_intel.c
kernel/sound/pci/hda/patch_cirrus.c
kernel/sound/pci/hda/patch_conexant.c
kernel/sound/pci/hda/patch_hdmi.c
kernel/sound/pci/hda/patch_realtek.c
kernel/sound/pci/hda/patch_sigmatel.c
kernel/sound/soc/codecs/max98925.c
kernel/sound/soc/codecs/pcm1681.c
kernel/sound/soc/codecs/rt5645.c
kernel/sound/soc/codecs/ssm4567.c
kernel/sound/soc/codecs/tas2552.c
kernel/sound/soc/codecs/wm5102.c
kernel/sound/soc/codecs/wm5110.c
kernel/sound/soc/codecs/wm8737.c
kernel/sound/soc/codecs/wm8903.h
kernel/sound/soc/codecs/wm8955.c
kernel/sound/soc/codecs/wm8960.c
kernel/sound/soc/codecs/wm8997.c
kernel/sound/soc/fsl/imx-wm8962.c
kernel/sound/soc/intel/atom/sst/sst_drv_interface.c
kernel/sound/soc/omap/Kconfig
kernel/sound/soc/qcom/Kconfig
kernel/sound/soc/soc-dapm.c
kernel/sound/usb/card.c
kernel/sound/usb/line6/pcm.c
kernel/sound/usb/mixer_maps.c
kernel/sound/usb/quirks-table.h
kernel/sound/usb/quirks.c
kernel/tools/perf/ui/browsers/hists.c
kernel/tools/perf/util/cloexec.c
kernel/tools/perf/util/symbol.c
kernel/tools/perf/util/symbol.h

kernel/Documentation/ABI/testing/ima_policy
index d0d0c57..0a378a8 100644
@@ -20,17 +20,19 @@ Description:
                action: measure | dont_measure | appraise | dont_appraise | audit
                condition:= base | lsm  [option]
                        base:   [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
-                                [fowner]]
+                               [euid=] [fowner=]]
                        lsm:    [[subj_user=] [subj_role=] [subj_type=]
                                 [obj_user=] [obj_role=] [obj_type=]]
                        option: [[appraise_type=]] [permit_directio]
 
                base:   func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
                                [FIRMWARE_CHECK]
-                       mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
+                       mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
+                              [[^]MAY_EXEC]
                        fsmagic:= hex value
                        fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
                        uid:= decimal value
+                       euid:= decimal value
                        fowner:=decimal value
                lsm:    are LSM specific
                option: appraise_type:= [imasig]
@@ -49,11 +51,25 @@ Description:
                        dont_measure fsmagic=0x01021994
                        dont_appraise fsmagic=0x01021994
                        # RAMFS_MAGIC
-                       dont_measure fsmagic=0x858458f6
                        dont_appraise fsmagic=0x858458f6
+                       # DEVPTS_SUPER_MAGIC
+                       dont_measure fsmagic=0x1cd1
+                       dont_appraise fsmagic=0x1cd1
+                       # BINFMTFS_MAGIC
+                       dont_measure fsmagic=0x42494e4d
+                       dont_appraise fsmagic=0x42494e4d
                        # SECURITYFS_MAGIC
                        dont_measure fsmagic=0x73636673
                        dont_appraise fsmagic=0x73636673
+                       # SELINUX_MAGIC
+                       dont_measure fsmagic=0xf97cff8c
+                       dont_appraise fsmagic=0xf97cff8c
+                       # CGROUP_SUPER_MAGIC
+                       dont_measure fsmagic=0x27e0eb
+                       dont_appraise fsmagic=0x27e0eb
+                       # NSFS_MAGIC
+                       dont_measure fsmagic=0x6e736673
+                       dont_appraise fsmagic=0x6e736673
 
                        measure func=BPRM_CHECK
                        measure func=FILE_MMAP mask=MAY_EXEC
@@ -70,10 +86,6 @@ Description:
                Examples of LSM specific definitions:
 
                SELinux:
-                       # SELINUX_MAGIC
-                       dont_measure fsmagic=0xf97cff8c
-                       dont_appraise fsmagic=0xf97cff8c
-
                        dont_measure obj_type=var_log_t
                        dont_appraise obj_type=var_log_t
                        dont_measure obj_type=auditd_log_t
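
               For reference, the extended grammar above (the new euid= condition and
               the "^" mask prefix) can be exercised with a custom policy. A minimal
               sketch, assuming securityfs is mounted at /sys/kernel/security, the
               kernel accepts a replacement policy, and the /tmp path and rule set are
               illustrative only:

                   # Measure everything root opens for read, skipping tmpfs and
                   # securityfs, using the euid= and "^"-prefixed mask keywords
                   # documented above.
                   printf '%s\n' \
                       'dont_measure fsmagic=0x01021994' \
                       'dont_measure fsmagic=0x73636673' \
                       'measure func=FILE_CHECK mask=^MAY_READ euid=0' > /tmp/ima-policy
                   cat /tmp/ima-policy > /sys/kernel/security/ima/policy
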
kernel/Documentation/ABI/testing/sysfs-ata
index 0a93215..9231dae 100644
@@ -90,6 +90,17 @@ gscr
        130:    SATA_PMP_GSCR_SII_GPIO
        Only valid if the device is a PM.
 
+trim
+
+       Shows the DSM TRIM mode currently used by the device. Valid
+       values are:
+       unsupported:            Drive does not support DSM TRIM
+       unqueued:               Drive supports unqueued DSM TRIM only
+       queued:                 Drive supports queued DSM TRIM
+       forced_unqueued:        Drive's unqueued DSM support is known to be
+                               buggy and only unqueued TRIM commands
+                               are sent
+
 spdn_cnt
 
        Number of time libata decided to lower the speed of link due to errors.
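
       The new attribute is easy to inspect from userspace. A hedged sketch,
       assuming it sits under the ata_device class objects (devX.Y) that this
       ABI file describes:

           # Print the DSM TRIM mode reported for every ATA device on the system.
           for dev in /sys/class/ata_device/dev*; do
               printf '%s: %s\n' "${dev##*/}" "$(cat "$dev"/trim 2>/dev/null)"
           done
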
kernel/Documentation/ABI/testing/sysfs-bus-iio
index 3befcb1..1fbdd79 100644
@@ -1165,10 +1165,8 @@ Description:
                object is near the sensor, usually be observing
                reflectivity of infrared or ultrasound emitted.
                Often these sensors are unit less and as such conversion
-               to SI units is not possible.  Where it is, the units should
-               be meters.  If such a conversion is not possible, the reported
-               values should behave in the same way as a distance, i.e. lower
-               values indicate something is closer to the sensor.
+               to SI units is not possible. Higher proximity measurements
+               indicate closer objects, and vice versa.
 
 What:          /sys/.../iio:deviceX/in_illuminance_input
 What:          /sys/.../iio:deviceX/in_illuminance_raw
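
               As the reworded text states, larger proximity values simply mean a
               closer object. A small illustrative check, assuming a proximity sensor
               registered as iio:device0 and the standard in_proximity_raw attribute:

                   # Watch the raw reading climb as something approaches the sensor.
                   watch -n 0.5 cat /sys/bus/iio/devices/iio:device0/in_proximity_raw
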
kernel/Documentation/devicetree/bindings/clock/keystone-pll.txt
index 225990f..47570d2 100644
@@ -15,8 +15,8 @@ Required properties:
 - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
 - clocks : parent clock phandle
 - reg - pll control0 and pll multipler registers
-- reg-names : control and multiplier. The multiplier is applicable only for
-               main pll clock
+- reg-names : control, multiplier and post-divider. The multiplier and
+               post-divider registers are applicable only for main pll clock
 - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
                for postdiv
 
@@ -25,8 +25,8 @@ Example:
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
                fixed-postdiv = <2>;
        };
 
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
index adda2a8..e357b02 100644
@@ -92,5 +92,5 @@ mpp61         61       gpo, dev(wen1), uart1(txd), audio(rclk)
 mpp62         62       gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
                        audio(mclk), uart0(cts)
 mpp63         63       gpo, spi0(sck), tclk
-mpp64         64       gpio, spi0(miso), spi0-1(cs1)
-mpp65         65       gpio, spi0(mosi), spi0-1(cs2)
+mpp64         64       gpio, spi0(miso), spi0(cs1)
+mpp65         65       gpio, spi0(mosi), spi0(cs2)
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
index 7de0cda..bedbe42 100644
@@ -22,8 +22,8 @@ mpp5          5        gpio, dev(ad7), spi0(cs2), spi1(cs2)
 mpp6          6        gpio, dev(ad0), led(p1), audio(rclk)
 mpp7          7        gpio, dev(ad1), ptp(clk), led(p2), audio(extclk)
 mpp8          8        gpio, dev (bootcs), spi0(cs0), spi1(cs0)
-mpp9          9        gpio, nf(wen), spi0(sck), spi1(sck)
-mpp10        10        gpio, nf(ren), dram(vttctrl), led(c1)
+mpp9          9        gpio, spi0(sck), spi1(sck), nand(we)
+mpp10        10        gpio, dram(vttctrl), led(c1), nand(re)
 mpp11        11        gpio, dev(a0), led(c2), audio(sdo)
 mpp12        12        gpio, dev(a1), audio(bclk)
 mpp13        13        gpio, dev(readyn), pcie0(rstoutn), pcie1(rstoutn)
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
index b17c968..4ac138a 100644
@@ -27,15 +27,15 @@ mpp8          8        gpio, ge0(txd1), dev(ad10)
 mpp9          9        gpio, ge0(txd2), dev(ad11)
 mpp10         10       gpio, ge0(txd3), dev(ad12)
 mpp11         11       gpio, ge0(txctl), dev(ad13)
-mpp12         12       gpio, ge0(rxd0), pcie0(rstout), pcie1(rstout) [1], spi0(cs1), dev(ad14)
-mpp13         13       gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15)
-mpp14         14       gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1)
-mpp15         15       gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi), pcie1(rstout) [1]
-mpp16         16       gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq)
+mpp12         12       gpio, ge0(rxd0), pcie0(rstout), spi0(cs1), dev(ad14), pcie3(clkreq)
+mpp13         13       gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15), pcie2(clkreq)
+mpp14         14       gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1), pcie3(clkreq)
+mpp15         15       gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi)
+mpp16         16       gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq), pcie1(clkreq) [1]
 mpp17         17       gpio, ge0(rxclk), ptp(clk), ua1(rxd), spi0(sck), sata1(prsnt)
-mpp18         18       gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0), pcie1(rstout) [1]
-mpp19         19       gpio, ge0(col), ptp(event_req), pcie0(clkreq), sata1(prsnt), ua0(cts)
-mpp20         20       gpio, ge0(txclk), ptp(clk), pcie1(rstout) [1], sata0(prsnt), ua0(rts)
+mpp18         18       gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0)
+mpp19         19       gpio, ge0(col), ptp(event_req), ge0(txerr), sata1(prsnt), ua0(cts)
+mpp20         20       gpio, ge0(txclk), ptp(clk), sata0(prsnt), ua0(rts)
 mpp21         21       gpio, spi0(cs1), ge1(rxd0), sata0(prsnt), sd0(cmd), dev(bootcs)
 mpp22         22       gpio, spi0(mosi), dev(ad0)
 mpp23         23       gpio, spi0(sck), dev(ad2)
@@ -58,23 +58,23 @@ mpp39         39       gpio, i2c1(sck), ge1(rxd2), ua0(cts), sd0(d1), dev(a2)
 mpp40         40       gpio, i2c1(sda), ge1(rxd3), ua0(rts), sd0(d2), dev(ad6)
 mpp41         41       gpio, ua1(rxd), ge1(rxctl), ua0(cts), spi1(cs3), dev(burst/last)
 mpp42         42       gpio, ua1(txd), ua0(rts), dev(ad7)
-mpp43         43       gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), pcie0(rstout), dev(clkout)
-mpp44         44       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3], pcie0(rstout)
-mpp45         45       gpio, ref(clk_out0), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
-mpp46         46       gpio, ref(clk_out1), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
-mpp47         47       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], spi1(cs2), sata3(prsnt) [2]
-mpp48         48       gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4)
-mpp49         49       gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5)
-mpp50         50       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(drx), audio(extclk), sd0(cmd)
+mpp43         43       gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), spi1(cs2), dev(clkout)
+mpp44         44       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3]
+mpp45         45       gpio, ref(clk_out0), pcie0(rstout)
+mpp46         46       gpio, ref(clk_out1), pcie0(rstout)
+mpp47         47       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [2]
+mpp48         48       gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4), pcie0(clkreq)
+mpp49         49       gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5), pcie1(clkreq)
+mpp50         50       gpio, pcie0(rstout), tdm2c(drx), audio(extclk), sd0(cmd)
 mpp51         51       gpio, tdm2c(dtx), audio(sdo), m(decc_err)
-mpp52         52       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(intn), audio(sdi), sd0(d6)
+mpp52         52       gpio, pcie0(rstout), tdm2c(intn), audio(sdi), sd0(d6)
 mpp53         53       gpio, sata1(prsnt), sata0(prsnt), tdm2c(rstn), audio(bclk), sd0(d7)
-mpp54         54       gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), pcie1(rstout) [1], sd0(d3)
+mpp54         54       gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), ge0(txerr), sd0(d3)
 mpp55         55       gpio, ua1(cts), ge(mdio), pcie1(clkreq) [1], spi1(cs1), sd0(d0)
 mpp56         56       gpio, ua1(rts), ge(mdc), m(decc_err), spi1(mosi)
 mpp57         57       gpio, spi1(sck), sd0(clk)
 mpp58         58       gpio, pcie1(clkreq) [1], i2c1(sck), pcie2(clkreq), spi1(miso), sd0(d1)
-mpp59         59       gpio, pcie0(rstout), i2c1(sda), pcie1(rstout) [1], spi1(cs0), sd0(d2)
+mpp59         59       gpio, pcie0(rstout), i2c1(sda), spi1(cs0), sd0(d2)
 
 [1]: only available on 88F6820 and 88F6828
 [2]: only available on 88F6828
kernel/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
index 373dbcc..96e7744 100644
@@ -42,15 +42,15 @@ mpp20         20       gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
 mpp21         21       gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
 mpp22         22       gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
 mpp23         23       gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
-mpp24         24       gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
-mpp25         25       gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
-mpp26         26       gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
+mpp24         24       gpio, lcd(hsync), sata1(prsnt), tdm(rst)
+mpp25         25       gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
+mpp26         26       gpio, lcd(clk), tdm(fsync)
 mpp27         27       gpio, lcd(e), tdm(dtx), ptp(trig)
 mpp28         28       gpio, lcd(pwm), tdm(drx), ptp(evreq)
-mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
+mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk)
 mpp30         30       gpio, tdm(int1), sd0(clk)
-mpp31         31       gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
-mpp32         32       gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
+mpp31         31       gpio, tdm(int2), sd0(cmd)
+mpp32         32       gpio, tdm(int3), sd0(d0)
 mpp33         33       gpio, tdm(int4), sd0(d1), mem(bat)
 mpp34         34       gpio, tdm(int5), sd0(d2), sata0(prsnt)
 mpp35         35       gpio, tdm(int6), sd0(d3), sata1(prsnt)
@@ -58,21 +58,18 @@ mpp36         36       gpio, spi(mosi)
 mpp37         37       gpio, spi(miso)
 mpp38         38       gpio, spi(sck)
 mpp39         39       gpio, spi(cs0)
-mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
-                       pcie(clkreq0)
+mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
 mpp41         41       gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
                        pcie(clkreq1)
-mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
-                       vdd(cpu0-pd)
-mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
-                       vdd(cpu2-3-pd){1}
+mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
+mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
 mpp44         44       gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
                        mem(bat)
 mpp45         45       gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
 mpp46         46       gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
 mpp47         47       gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
                        ref(clkout)
-mpp48         48       gpio, tclk, dev(burst/last)
+mpp48         48       gpio, dev(clkout), dev(burst/last)
 
 * Marvell Armada XP (mv78260 and mv78460 only)
 
@@ -84,9 +81,9 @@ mpp51         51       gpio, dev(ad16)
 mpp52         52       gpio, dev(ad17)
 mpp53         53       gpio, dev(ad18)
 mpp54         54       gpio, dev(ad19)
-mpp55         55       gpio, dev(ad20), vdd(cpu0-pd)
-mpp56         56       gpio, dev(ad21), vdd(cpu1-pd)
-mpp57         57       gpio, dev(ad22), vdd(cpu2-3-pd){1}
+mpp55         55       gpio, dev(ad20)
+mpp56         56       gpio, dev(ad21)
+mpp57         57       gpio, dev(ad22)
 mpp58         58       gpio, dev(ad23)
 mpp59         59       gpio, dev(ad24)
 mpp60         60       gpio, dev(ad25)
@@ -96,6 +93,3 @@ mpp63         63       gpio, dev(ad28)
 mpp64         64       gpio, dev(ad29)
 mpp65         65       gpio, dev(ad30)
 mpp66         66       gpio, dev(ad31)
-
-Notes:
-* {1} vdd(cpu2-3-pd) only available on mv78460.
kernel/Documentation/devicetree/bindings/usb/atmel-usb.txt
index e180d56..de773a0 100644
@@ -60,9 +60,9 @@ Atmel High-Speed USB device controller
 
 Required properties:
  - compatible: Should be one of the following
-              "at91sam9rl-udc"
-              "at91sam9g45-udc"
-              "sama5d3-udc"
+              "atmel,at91sam9rl-udc"
+              "atmel,at91sam9g45-udc"
+              "atmel,sama5d3-udc"
  - reg: Address and length of the register set for the device
  - interrupts: Should contain usba interrupt
  - ep childnode: To specify the number of endpoints and their properties.
kernel/Documentation/hwmon/nct7904
index 014f112..57fffe3 100644
@@ -35,11 +35,11 @@ temp1_input         Local temperature (1/1000 degree,
 temp[2-9]_input                CPU temperatures (1/1000 degree,
                        0.125 degree resolution)
 
-fan[1-4]_mode          R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable                R/W, 1/2 for manual or SmartFan mode
                        Setting SmartFan mode is supported only if it has been
                        previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm           R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]               R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
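
       The rename above maps the driver onto the standard hwmon pwm interface.
       A hedged usage sketch; the hwmon index is board-specific and writes only
       work when the BIOS/EEPROM has enabled the corresponding fan control:

           # Switch fan 1 to manual mode and set roughly 50% duty cycle.
           cd /sys/class/hwmon/hwmon0      # index 0 is an assumption
           echo 1 > pwm1_enable            # 1 = manual, 2 = SmartFan (see table above)
           echo 128 > pwm1                 # writable only in manual mode
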
kernel/Documentation/input/alps.txt
index c86f2f1..1fec113 100644
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
kernel/Documentation/kbuild/makefiles.txt
index 74b6c6d..d2b1c40 100644
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
        mode) if this option is supported by $(AR).
 
+    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS   Overrides the kbuild defaults
+
+       These variables are appended to the KBUILD_CPPFLAGS,
+       KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+       top-level Makefile has set any other flags. This provides a
+       means for an architecture to override the defaults.
+
+
 --- 6.2 Add prerequisites to archheaders:
 
        The archheaders: rule is used to generate header files that
kernel/Documentation/kernel-parameters.txt
index 6726139..cd03a0f 100644
@@ -1398,7 +1398,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The list of supported hash algorithms is defined
                        in crypto/hash_info.h.
 
-       ima_tcb         [IMA]
+       ima_policy=     [IMA]
+                       The builtin measurement policy to load during IMA
+                       setup.  Specyfing "tcb" as the value, measures all
+                       programs exec'd, files mmap'd for exec, and all files
+                       opened with the read mode bit set by either the
+                       effective uid (euid=0) or uid=0.
+                       Format: "tcb"
+
+       ima_tcb         [IMA] Deprecated.  Use ima_policy= instead.
                        Load a policy which meets the needs of the Trusted
                        Computing Base.  This means IMA will measure all
                        programs exec'd, files mmap'd for exec, and all files
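
                       A short, hedged example of the new parameter in use; the
                       measurement-list path assumes securityfs is mounted at
                       /sys/kernel/security:

                           # Boot with ima_policy=tcb on the kernel command line,
                           # then verify the policy took effect:
                           grep -o 'ima_policy=[^ ]*' /proc/cmdline
                           head /sys/kernel/security/ima/ascii_runtime_measurements
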
kernel/Makefile
index e3cdec4..b8591e5 100644
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 1
-SUBLEVEL = 3
+SUBLEVEL = 7
 EXTRAVERSION =
 NAME = Series 4800
 
@@ -783,10 +783,11 @@ endif
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(KCPPFLAGS)
-KBUILD_AFLAGS += $(KAFLAGS)
-KBUILD_CFLAGS += $(KCFLAGS)
+# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
+# last assignments
+KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
+KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
 
 # Use --build-id when available.
 LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
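
The reordering above keeps user-supplied flags as the final word, now appended
after any ARCH_* overrides an architecture Makefile sets. A minimal sketch of
the unchanged user-facing invocation:

    # KCFLAGS still lands after ARCH_CFLAGS, so it can override arch defaults.
    make -j"$(nproc)" KCFLAGS="-fno-omit-frame-pointer"
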
kernel/arch/arc/Makefile
index db72fec..2f21e1e 100644
@@ -43,7 +43,8 @@ endif
 
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
-cflags-y  += -O3
+# Note: No need to add to cflags-y as that happens anyways
+ARCH_CFLAGS += -O3
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
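
A hedged way to confirm that -O3 still reaches ARC compiles through the new
ARCH_CFLAGS route; the arc-linux- toolchain prefix and the default defconfig
target are assumptions about the build environment:

    # Configure for ARC and check a single object build for the -O3 flag.
    make ARCH=arc CROSS_COMPILE=arc-linux- defconfig
    make ARCH=arc CROSS_COMPILE=arc-linux- V=1 init/main.o 2>&1 | grep -c -- '-O3'
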
kernel/arch/arc/include/asm/bitops.h
index 624a9d0..dae03e6 100644
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       /*
-        * ARC ISA micro-optimization:
-        *
-        * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-        * e.g (x << 33) is handled like (x << 1) by ASL instruction
-        *  (mem pointer still needs adjustment to point to next word)
-        *
-        * Hence the masking to clamp @nr arg can be elided in general.
-        *
-        * However if @nr is a constant (above assumed it in a register),
-        * and greater than 31, gcc can optimize away (x << 33) to 0,
-        * as overflow, given the 32-bit ISA. Thus masking needs to be done
-        * for constant @nr, but no code is generated due to const prop.
-        */
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bset    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bclr    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b      \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
 
-       __asm__ __volatile__(
-       "1:     llock   %0, [%1]        \n"
-       "       bxor    %0, %0, %2      \n"
-       "       scond   %0, [%1]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned int temp;                                              \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       /*                                                              \
+        * ARC ISA micro-optimization:                                  \
+        *                                                              \
+        * Instructions dealing with bitpos only consider lower 5 bits  \
+        * e.g (x << 33) is handled like (x << 1) by ASL instruction    \
+        *  (mem pointer still needs adjustment to point to next word)  \
+        *                                                              \
+        * Hence the masking to clamp @nr arg can be elided in general. \
+        *                                                              \
+        * However if @nr is a constant (above assumed in a register),  \
+        * and greater than 31, gcc can optimize away (x << 33) to 0,   \
+        * as overflow, given the 32-bit ISA. Thus masking needs to be  \
+        * done for const @nr, but no code is generated due to gcc      \
+        * const prop.                                                  \
+        */                                                             \
+       nr &= 0x1f;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%1]            \n"                     \
+       "       " #asm_op " %0, %0, %2  \n"                             \
+       "       scond       %0, [%1]            \n"                     \
+       "       bnz         1b                  \n"                     \
+       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
+       : "r"(m),       /* Not "m": llock only supports reg direct addr mode */ \
+         "ir"(nr)                                                      \
+       : "cc");                                                        \
 }
 
 /*
@@ -108,91 +74,37 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
  * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally
  * and the old value of bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       /*
-        * Explicit full memory barrier needed before/after as
-        * LLOCK/SCOND themselves don't provide any such semantics
-        */
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bset    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bclr    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned int old, temp;
-
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "1:     llock   %0, [%2]        \n"
-       "       bxor    %1, %0, %3      \n"
-       "       scond   %1, [%2]        \n"
-       "       bnz     1b              \n"
-       : "=&r"(old), "=&r"(temp)
-       : "r"(m), "ir"(nr)
-       : "cc");
-
-       smp_mb();
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, temp;                                        \
+                                                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       nr &= 0x1f;                                                     \
+                                                                       \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND themselves don't provide any such smenatic       \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock       %0, [%2]    \n"                             \
+       "       " #asm_op " %1, %0, %3  \n"                             \
+       "       scond       %1, [%2]    \n"                             \
+       "       bnz         1b          \n"                             \
+       : "=&r"(old), "=&r"(temp)                                       \
+       : "r"(m), "ir"(nr)                                              \
+       : "cc");                                                        \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return (old & (1 << nr)) != 0;                                  \
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
-#include <asm/smp.h>
-
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
@@ -209,111 +121,37 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  *             at compile time)
  */
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp | (1UL << nr);
-
-       bitops_unlock(flags);
+#define BIT_OP(op, c_op, asm_op)                                       \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long temp, flags;                                      \
+       m += nr >> 5;                                                   \
+                                                                       \
+       /*                                                              \
+        * spin lock/unlock provide the needed smp_mb() before/after    \
+        */                                                             \
+       bitops_lock(flags);                                             \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << (nr & 0x1f));                                    \
+                                                                       \
+       bitops_unlock(flags);                                           \
 }
 
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-
-       bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       /*
-        * spin lock/unlock provide the needed smp_mb() before/after
-        */
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old, flags;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       bitops_lock(flags);
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       bitops_unlock(flags);
-
-       return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)                                        \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old, flags;                                       \
+       m += nr >> 5;                                                   \
+                                                                       \
+       bitops_lock(flags);                                             \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
+                                                                       \
+       bitops_unlock(flags);                                           \
+                                                                       \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
@@ -322,86 +160,45 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp | (1UL << nr);
+#define __BIT_OP(op, c_op, asm_op)                                     \
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)   \
+{                                                                      \
+       unsigned long temp;                                             \
+       m += nr >> 5;                                                   \
+                                                                       \
+       temp = *m;                                                      \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
 }
 
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp & ~(1UL << nr);
+#define __TEST_N_BIT_OP(op, c_op, asm_op)                              \
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{                                                                      \
+       unsigned long old;                                              \
+       m += nr >> 5;                                                   \
+                                                                       \
+       old = *m;                                                       \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
+                                                                       \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long temp;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       temp = *m;
-       *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old | (1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old & ~(1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-       unsigned long old;
-       m += nr >> 5;
-
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       old = *m;
-       *m = old ^ (1 << nr);
-
-       return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op)                                      \
+                                                                       \
+       /* set_bit(), clear_bit(), change_bit() */                      \
+       BIT_OP(op, c_op, asm_op)                                        \
+                                                                       \
+       /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+       TEST_N_BIT_OP(op, c_op, asm_op)                                 \
+                                                                       \
+       /* __set_bit(), __clear_bit(), __change_bit() */                \
+       __BIT_OP(op, c_op, asm_op)                                      \
+                                                                       \
+       /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+       __TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
 
 /*
  * This routine doesn't need to be atomic.
@@ -413,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
 
        addr += nr >> 5;
 
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       mask = 1 << nr;
+       mask = 1UL << (nr & 0x1f);
 
        return ((mask & *addr) != 0);
 }
index 1bfeec2..2a58af7 100644 (file)
@@ -63,7 +63,7 @@ struct callee_regs {
        long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)      ((regs)->ret)
+#define instruction_pointer(regs)      (unsigned long)((regs)->ret)
 #define profile_pc(regs)               instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
index 7128fad..c9df40e 100644 (file)
        phy-supply = <&ldousb_reg>;
 };
 
+&usb2_phy2 {
+       phy-supply = <&ldousb_reg>;
+};
+
 &usb1 {
        dr_mode = "host";
        pinctrl-names = "default";
index 89ef4a5..45e7761 100644 (file)
                        mmc0: mmc@f8000000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_cd>;
-                               slot@1 {
-                                       reg = <1>;
+                               slot@0 {
+                                       reg = <0>;
                                        bus-width = <4>;
                                        cd-gpios = <&pioE 5 0>;
                                };
index 70e59c5..e544211 100644 (file)
                        usb2: gadget@fff78000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "atmel,at91sam9rl-udc";
+                               compatible = "atmel,at91sam9g45-udc";
                                reg = <0x00600000 0x80000
                                       0xfff78000 0x400>;
                                interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
index 3aa56ae..3314a73 100644 (file)
                        usb2: gadget@f803c000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "atmel,at91sam9rl-udc";
+                               compatible = "atmel,at91sam9g45-udc";
                                reg = <0x00500000 0x80000
                                       0xf803c000 0x400>;
                                interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
index aa46590..096f68b 100644 (file)
 
 &dcan1 {
        status = "ok";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&dcan1_pins_default>;
+       pinctrl-names = "default", "sleep", "active";
+       pinctrl-0 = <&dcan1_pins_sleep>;
        pinctrl-1 = <&dcan1_pins_sleep>;
+       pinctrl-2 = <&dcan1_pins_default>;
 };
index f03a091..dfcc0dd 100644 (file)
                                ranges = <0 0x2000 0x2000>;
 
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index ce0390f..6b05f6a 100644 (file)
 
 &dcan1 {
        status = "ok";
-       pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&dcan1_pins_default>;
+       pinctrl-names = "default", "sleep", "active";
+       pinctrl-0 = <&dcan1_pins_sleep>;
        pinctrl-1 = <&dcan1_pins_sleep>;
+       pinctrl-2 = <&dcan1_pins_default>;
 };
 
 &qspi {
index bbcfb5a..0cb8b0b 100644 (file)
                                interrupts = <36 37 38 39 40 41 42 43 44>;
                                status = "disabled";
                                clocks = <&clks 26>;
+                               #io-channel-cells = <1>;
                        };
 
                        spdif@80054000 {
index b6478e9..e6540b5 100644 (file)
                        can1: can@53fe4000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe4000 0x1000>;
-                               clocks = <&clks 33>;
-                               clock-names = "ipg";
+                               clocks = <&clks 33>, <&clks 33>;
+                               clock-names = "ipg", "per";
                                interrupts = <43>;
                                status = "disabled";
                        };
                        can2: can@53fe8000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe8000 0x1000>;
-                               clocks = <&clks 34>;
-                               clock-names = "ipg";
+                               clocks = <&clks 34>, <&clks 34>;
+                               clock-names = "ipg", "per";
                                interrupts = <44>;
                                status = "disabled";
                        };
index f74a8de..38c7860 100644 (file)
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
                                 <&clks IMX6QDL_CLK_LVDS1_GATE>,
                                 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
index 4773d6a..d56d68f 100644 (file)
@@ -13,9 +13,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index d5adee3..af9b719 100644 (file)
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index eb1e3e2..ef8464b 100644 (file)
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
index 11a7963..2390f38 100644 (file)
@@ -51,7 +51,8 @@
                                };
 
                                scm_conf: scm_conf@270 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x270 0x240>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index f884d6a..84be9da 100644 (file)
                                };
 
                                omap4_padconf_global: omap4_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0x170>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 7d24ae0..874a26f 100644 (file)
                                };
 
                                omap5_padconf_global: omap5_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0xec>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 57ab858..37e6182 100644 (file)
                usb0: gadget@00500000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       compatible = "atmel,at91sam9rl-udc";
+                       compatible = "atmel,sama5d3-udc";
                        reg = <0x00500000 0x100000
                               0xf8030000 0x4000>;
                        interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
index 6b1bb58..a5f5f40 100644 (file)
                usb0: gadget@00400000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       compatible = "atmel,at91sam9rl-udc";
+                       compatible = "atmel,sama5d3-udc";
                        reg = <0x00400000 0x100000
                               0xfc02c000 0x4000>;
                        interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>;
                                compatible = "atmel,at91sam9g46-aes";
                                reg = <0xfc044000 0x100>;
                                interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
-                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-                                       AT91_XDMAC_DT_PERID(41)>,
-                                      <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-                                       AT91_XDMAC_DT_PERID(40)>;
+                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+                                       | AT91_XDMAC_DT_PERID(41))>,
+                                      <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+                                       | AT91_XDMAC_DT_PERID(40))>;
                                dma-names = "tx", "rx";
                                clocks = <&aes_clk>;
                                clock-names = "aes_clk";
                                compatible = "atmel,at91sam9g46-tdes";
                                reg = <0xfc04c000 0x100>;
                                interrupts = <14 IRQ_TYPE_LEVEL_HIGH 0>;
-                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-                                       AT91_XDMAC_DT_PERID(42)>,
-                                      <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-                                       AT91_XDMAC_DT_PERID(43)>;
+                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+                                       | AT91_XDMAC_DT_PERID(42))>,
+                                      <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+                                       | AT91_XDMAC_DT_PERID(43))>;
                                dma-names = "tx", "rx";
                                clocks = <&tdes_clk>;
                                clock-names = "tdes_clk";
                                compatible = "atmel,at91sam9g46-sha";
                                reg = <0xfc050000 0x100>;
                                interrupts = <15 IRQ_TYPE_LEVEL_HIGH 0>;
-                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-                                       AT91_XDMAC_DT_PERID(44)>;
+                               dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
+                                       | AT91_XDMAC_DT_PERID(44))>;
                                dma-names = "tx";
                                clocks = <&sha_clk>;
                                clock-names = "sha_clk";
index cca5b87..e561aef 100644 (file)
@@ -213,8 +213,6 @@ int __cpu_disable(void)
        flush_cache_louis();
        local_flush_tlb_all();
 
-       clear_tasks_mm_cpumask(cpu);
-
        return 0;
 }
 
@@ -230,6 +228,9 @@ void __cpu_die(unsigned int cpu)
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
+
+       clear_tasks_mm_cpumask(cpu);
+
        pr_notice("CPU%u: shutdown\n", cpu);
 
        /*
@@ -576,7 +577,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
-               trace_ipi_entry(ipi_types[ipinr]);
+               trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }
 
@@ -635,7 +636,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        }
 
        if ((unsigned)ipinr < NR_IPI)
-               trace_ipi_exit(ipi_types[ipinr]);
+               trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }
 
index 4c38674..54d274d 100644 (file)
@@ -43,5 +43,5 @@ obj-$(CONFIG_ARCH_BCM_63XX)   := bcm63xx.o
 ifeq ($(CONFIG_ARCH_BRCMSTB),y)
 CFLAGS_platsmp-brcmstb.o       += -march=armv7-a
 obj-y                          += brcmstb.o
-obj-$(CONFIG_SMP)              += headsmp-brcmstb.o platsmp-brcmstb.o
+obj-$(CONFIG_SMP)              += platsmp-brcmstb.o
 endif
diff --git a/kernel/arch/arm/mach-bcm/brcmstb.h b/kernel/arch/arm/mach-bcm/brcmstb.h
deleted file mode 100644 (file)
index ec0c3d1..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2013-2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __BRCMSTB_H__
-#define __BRCMSTB_H__
-
-void brcmstb_secondary_startup(void);
-
-#endif /* __BRCMSTB_H__ */
diff --git a/kernel/arch/arm/mach-bcm/headsmp-brcmstb.S b/kernel/arch/arm/mach-bcm/headsmp-brcmstb.S
deleted file mode 100644 (file)
index 199c1ea..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * SMP boot code for secondary CPUs
- * Based on arch/arm/mach-tegra/headsmp.S
- *
- * Copyright (C) 2010 NVIDIA, Inc.
- * Copyright (C) 2013-2014 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <asm/assembler.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-        .section ".text.head", "ax"
-
-ENTRY(brcmstb_secondary_startup)
-        /*
-         * Ensure CPU is in a sane state by disabling all IRQs and switching
-         * into SVC mode.
-         */
-        setmode        PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0
-
-        bl      v7_invalidate_l1
-        b       secondary_startup
-ENDPROC(brcmstb_secondary_startup)
index e209e6f..44d6bdd 100644 (file)
@@ -30,8 +30,6 @@
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
 
-#include "brcmstb.h"
-
 enum {
        ZONE_MAN_CLKEN_MASK             = BIT(0),
        ZONE_MAN_RESET_CNTL_MASK        = BIT(1),
@@ -153,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
         * Set the reset vector to point to the secondary_startup
         * routine
         */
-       cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));
+       cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));
 
        /* Unhalt the cpu */
        cpu_rst_cfg_set(cpu, 0);
index 4a4c56a..dc82a34 100644 (file)
 #include <linux/init.h>
 #include <asm/assembler.h>
 
-ENTRY(berlin_secondary_startup)
- ARM_BE8(setend be)
-       bl      v7_invalidate_l1
-       b       secondary_startup
-ENDPROC(berlin_secondary_startup)
-
 /*
  * If the following instruction is set in the reset exception vector, CPUs
  * will fetch the value of the software reset address vector when being
index 702e798..34a3753 100644 (file)
@@ -22,7 +22,6 @@
 #define RESET_VECT             0x00
 #define SW_RESET_ADDR          0x94
 
-extern void berlin_secondary_startup(void);
 extern u32 boot_inst;
 
 static void __iomem *cpu_ctrl;
@@ -85,7 +84,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
         * Write the secondary startup address into the SW reset address
         * vector. This is used by boot_inst.
         */
-       writel(virt_to_phys(berlin_secondary_startup), vectors_base + SW_RESET_ADDR);
+       writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR);
 
        iounmap(vectors_base);
 unmap_scu:
index 03d401d..3f29e6b 100644 (file)
 /*
  * Dove Low Interrupt Controller
  */
-#define IRQ_DOVE_BRIDGE                0
-#define IRQ_DOVE_H2C           1
-#define IRQ_DOVE_C2H           2
-#define IRQ_DOVE_NAND          3
-#define IRQ_DOVE_PDMA          4
-#define IRQ_DOVE_SPI1          5
-#define IRQ_DOVE_SPI0          6
-#define IRQ_DOVE_UART_0                7
-#define IRQ_DOVE_UART_1                8
-#define IRQ_DOVE_UART_2                9
-#define IRQ_DOVE_UART_3                10
-#define IRQ_DOVE_I2C           11
-#define IRQ_DOVE_GPIO_0_7      12
-#define IRQ_DOVE_GPIO_8_15     13
-#define IRQ_DOVE_GPIO_16_23    14
-#define IRQ_DOVE_PCIE0_ERR     15
-#define IRQ_DOVE_PCIE0         16
-#define IRQ_DOVE_PCIE1_ERR     17
-#define IRQ_DOVE_PCIE1         18
-#define IRQ_DOVE_I2S0          19
-#define IRQ_DOVE_I2S0_ERR      20
-#define IRQ_DOVE_I2S1          21
-#define IRQ_DOVE_I2S1_ERR      22
-#define IRQ_DOVE_USB_ERR       23
-#define IRQ_DOVE_USB0          24
-#define IRQ_DOVE_USB1          25
-#define IRQ_DOVE_GE00_RX       26
-#define IRQ_DOVE_GE00_TX       27
-#define IRQ_DOVE_GE00_MISC     28
-#define IRQ_DOVE_GE00_SUM      29
-#define IRQ_DOVE_GE00_ERR      30
-#define IRQ_DOVE_CRYPTO                31
+#define IRQ_DOVE_BRIDGE                (1 + 0)
+#define IRQ_DOVE_H2C           (1 + 1)
+#define IRQ_DOVE_C2H           (1 + 2)
+#define IRQ_DOVE_NAND          (1 + 3)
+#define IRQ_DOVE_PDMA          (1 + 4)
+#define IRQ_DOVE_SPI1          (1 + 5)
+#define IRQ_DOVE_SPI0          (1 + 6)
+#define IRQ_DOVE_UART_0                (1 + 7)
+#define IRQ_DOVE_UART_1                (1 + 8)
+#define IRQ_DOVE_UART_2                (1 + 9)
+#define IRQ_DOVE_UART_3                (1 + 10)
+#define IRQ_DOVE_I2C           (1 + 11)
+#define IRQ_DOVE_GPIO_0_7      (1 + 12)
+#define IRQ_DOVE_GPIO_8_15     (1 + 13)
+#define IRQ_DOVE_GPIO_16_23    (1 + 14)
+#define IRQ_DOVE_PCIE0_ERR     (1 + 15)
+#define IRQ_DOVE_PCIE0         (1 + 16)
+#define IRQ_DOVE_PCIE1_ERR     (1 + 17)
+#define IRQ_DOVE_PCIE1         (1 + 18)
+#define IRQ_DOVE_I2S0          (1 + 19)
+#define IRQ_DOVE_I2S0_ERR      (1 + 20)
+#define IRQ_DOVE_I2S1          (1 + 21)
+#define IRQ_DOVE_I2S1_ERR      (1 + 22)
+#define IRQ_DOVE_USB_ERR       (1 + 23)
+#define IRQ_DOVE_USB0          (1 + 24)
+#define IRQ_DOVE_USB1          (1 + 25)
+#define IRQ_DOVE_GE00_RX       (1 + 26)
+#define IRQ_DOVE_GE00_TX       (1 + 27)
+#define IRQ_DOVE_GE00_MISC     (1 + 28)
+#define IRQ_DOVE_GE00_SUM      (1 + 29)
+#define IRQ_DOVE_GE00_ERR      (1 + 30)
+#define IRQ_DOVE_CRYPTO                (1 + 31)
 
 /*
  * Dove High Interrupt Controller
  */
-#define IRQ_DOVE_AC97          32
-#define IRQ_DOVE_PMU           33
-#define IRQ_DOVE_CAM           34
-#define IRQ_DOVE_SDIO0         35
-#define IRQ_DOVE_SDIO1         36
-#define IRQ_DOVE_SDIO0_WAKEUP  37
-#define IRQ_DOVE_SDIO1_WAKEUP  38
-#define IRQ_DOVE_XOR_00                39
-#define IRQ_DOVE_XOR_01                40
-#define IRQ_DOVE_XOR0_ERR      41
-#define IRQ_DOVE_XOR_10                42
-#define IRQ_DOVE_XOR_11                43
-#define IRQ_DOVE_XOR1_ERR      44
-#define IRQ_DOVE_LCD_DCON      45
-#define IRQ_DOVE_LCD1          46
-#define IRQ_DOVE_LCD0          47
-#define IRQ_DOVE_GPU           48
-#define IRQ_DOVE_PERFORM_MNTR  49
-#define IRQ_DOVE_VPRO_DMA1     51
-#define IRQ_DOVE_SSP_TIMER     54
-#define IRQ_DOVE_SSP           55
-#define IRQ_DOVE_MC_L2_ERR     56
-#define IRQ_DOVE_CRYPTO_ERR    59
-#define IRQ_DOVE_GPIO_24_31    60
-#define IRQ_DOVE_HIGH_GPIO     61
-#define IRQ_DOVE_SATA          62
+#define IRQ_DOVE_AC97          (1 + 32)
+#define IRQ_DOVE_PMU           (1 + 33)
+#define IRQ_DOVE_CAM           (1 + 34)
+#define IRQ_DOVE_SDIO0         (1 + 35)
+#define IRQ_DOVE_SDIO1         (1 + 36)
+#define IRQ_DOVE_SDIO0_WAKEUP  (1 + 37)
+#define IRQ_DOVE_SDIO1_WAKEUP  (1 + 38)
+#define IRQ_DOVE_XOR_00                (1 + 39)
+#define IRQ_DOVE_XOR_01                (1 + 40)
+#define IRQ_DOVE_XOR0_ERR      (1 + 41)
+#define IRQ_DOVE_XOR_10                (1 + 42)
+#define IRQ_DOVE_XOR_11                (1 + 43)
+#define IRQ_DOVE_XOR1_ERR      (1 + 44)
+#define IRQ_DOVE_LCD_DCON      (1 + 45)
+#define IRQ_DOVE_LCD1          (1 + 46)
+#define IRQ_DOVE_LCD0          (1 + 47)
+#define IRQ_DOVE_GPU           (1 + 48)
+#define IRQ_DOVE_PERFORM_MNTR  (1 + 49)
+#define IRQ_DOVE_VPRO_DMA1     (1 + 51)
+#define IRQ_DOVE_SSP_TIMER     (1 + 54)
+#define IRQ_DOVE_SSP           (1 + 55)
+#define IRQ_DOVE_MC_L2_ERR     (1 + 56)
+#define IRQ_DOVE_CRYPTO_ERR    (1 + 59)
+#define IRQ_DOVE_GPIO_24_31    (1 + 60)
+#define IRQ_DOVE_HIGH_GPIO     (1 + 61)
+#define IRQ_DOVE_SATA          (1 + 62)
 
 /*
  * DOVE General Purpose Pins
  */
-#define IRQ_DOVE_GPIO_START    64
+#define IRQ_DOVE_GPIO_START    65
 #define NR_GPIO_IRQS           64
 
 /*
index 4a5a7ae..df0223f 100644 (file)
@@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs)
        stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF);
        stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF);
        if (stat) {
-               unsigned int hwirq = __fls(stat);
+               unsigned int hwirq = 1 + __fls(stat);
                handle_IRQ(hwirq, regs);
                return;
        }
        stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF);
        stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF);
        if (stat) {
-               unsigned int hwirq = 32 + __fls(stat);
+               unsigned int hwirq = 33 + __fls(stat);
                handle_IRQ(hwirq, regs);
                return;
        }
@@ -144,8 +144,8 @@ void __init dove_init_irq(void)
 {
        int i;
 
-       orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
-       orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
+       orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
+       orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        set_handle_irq(dove_legacy_handle_irq);
index 6b7b303..659db19 100644 (file)
@@ -6,4 +6,4 @@ CFLAGS_platmcpm.o       := -march=armv7-a
 
 obj-y  += hisilicon.o
 obj-$(CONFIG_MCPM)             += platmcpm.o
-obj-$(CONFIG_SMP)              += platsmp.o hotplug.o headsmp.o
+obj-$(CONFIG_SMP)              += platsmp.o hotplug.o
index 92a682d..c7648ef 100644 (file)
@@ -12,7 +12,6 @@ extern void hi3xxx_cpu_die(unsigned int cpu);
 extern int hi3xxx_cpu_kill(unsigned int cpu);
 extern void hi3xxx_set_cpu(int cpu, bool enable);
 
-extern void hisi_secondary_startup(void);
 extern struct smp_operations hix5hd2_smp_ops;
 extern void hix5hd2_set_cpu(int cpu, bool enable);
 extern void hix5hd2_cpu_die(unsigned int cpu);
diff --git a/kernel/arch/arm/mach-hisi/headsmp.S b/kernel/arch/arm/mach-hisi/headsmp.S
deleted file mode 100644 (file)
index 81e35b1..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- *  Copyright (c) 2014 Hisilicon Limited.
- *  Copyright (c) 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-       __CPUINIT
-
-ENTRY(hisi_secondary_startup)
-       bl      v7_invalidate_l1
-       b       secondary_startup
index 8880c8e..5174412 100644 (file)
@@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        phys_addr_t jumpaddr;
 
-       jumpaddr = virt_to_phys(hisi_secondary_startup);
+       jumpaddr = virt_to_phys(secondary_startup);
        hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
        hix5hd2_set_cpu(cpu, true);
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
        struct device_node *node;
 
 
-       jumpaddr = virt_to_phys(hisi_secondary_startup);
+       jumpaddr = virt_to_phys(secondary_startup);
        hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);
 
        node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
index 6d0893a..78b6fd0 100644 (file)
@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
        }
 }
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS
-
 static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
 {
        int iso, iso2sw;
@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
 static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
 {
        struct clk *clk;
-       bool is_off;
        int i;
 
        imx6q_pu_domain.reg = pu_reg;
@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
        }
        imx6q_pu_domain.num_clks = i;
 
-       is_off = IS_ENABLED(CONFIG_PM);
-       if (is_off) {
-               _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
-       } else {
-               /*
-                * Enable power if compiled without CONFIG_PM in case the
-                * bootloader disabled it.
-                */
-               imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
-       }
+       /* Enable power always in case bootloader disabled it. */
+       imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
+       if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+               return 0;
 
-       pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
+       pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
        return of_genpd_add_provider_onecell(dev->of_node,
                                             &imx_gpc_onecell_data);
 
@@ -437,13 +429,6 @@ clk_err:
        return -EINVAL;
 }
 
-#else
-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
-{
-       return 0;
-}
-#endif /* CONFIG_PM_GENERIC_DOMAINS */
-
 static int imx_gpc_probe(struct platform_device *pdev)
 {
        struct regulator *pu_reg;
index de5047c..b5e9768 100644 (file)
@@ -25,7 +25,6 @@ diag_reg_offset:
        .endm
 
 ENTRY(v7_secondary_startup)
-       bl      v7_invalidate_l1
        set_diag_reg
        b       secondary_startup
 ENDPROC(v7_secondary_startup)
index 08d5ed4..48e4c4b 100644 (file)
@@ -21,7 +21,6 @@
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
 ARM_BE8(setend be)
-       bl      v7_invalidate_l1
        bl      armada_38x_scu_power_up
        b       secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
index 3b56722..6833df4 100644 (file)
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
        .irq_mask               = wakeupgen_mask,
        .irq_unmask             = wakeupgen_unmask,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
index 752969f..5286e77 100644 (file)
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
  * registers.  This address is needed early so the OCP registers that
  * are part of the device's address space can be ioremapped properly.
  *
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
  * -ENXIO on absent or invalid register target address space.
  */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
        _save_mpu_port_index(oh);
 
+       /* if we don't need sysc access we don't need to ioremap */
+       if (!oh->class->sysc)
+               return 0;
+
+       /* we can't continue without MPU PORT if we need sysc access */
        if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
                return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
                         oh->name);
 
                /* Extract the IO space from device tree blob */
-               if (!np)
+               if (!np) {
+                       pr_err("omap_hwmod: %s: no dt node\n", oh->name);
                        return -ENXIO;
+               }
 
                va_start = of_iomap(np, index + oh->mpu_rt_idx);
        } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                                oh->name, np->name);
        }
 
-       if (oh->class->sysc) {
-               r = _init_mpu_rt_base(oh, NULL, index, np);
-               if (r < 0) {
-                       WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-                            oh->name);
-                       return 0;
-               }
+       r = _init_mpu_rt_base(oh, NULL, index, np);
+       if (r < 0) {
+               WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+                    oh->name);
+               return 0;
        }
 
        r = _init_clocks(oh, NULL);
index d86fe33..209d9fc 100644 (file)
@@ -15,7 +15,6 @@
  * ready for them to initialise.
  */
 ENTRY(sirfsoc_secondary_startup)
-       bl v7_invalidate_l1
         mrc     p15, 0, r0, c0, c0, 5
         and     r0, r0, #15
         adr     r4, 1f
index c092730..bf366b3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ata_platform.h>
 #include <linux/serial_8250.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
 
        capc7117_uarts_init();
        capc7117_ide_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(CAPC7117,
index bb99f59..a17a91e 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <linux/dm9000.h>
 #include <linux/leds.h>
@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
        cmx2xx_init_ac97();
        cmx2xx_init_touchscreen();
        cmx2xx_init_leds();
+
+       regulator_has_full_constraints();
 }
 
 static void __init cmx2xx_init_irq(void)
index 4d3588d..5851f4c 100644 (file)
@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
        cm_x300_init_ac97();
        cm_x300_init_wi2wi();
        cm_x300_init_bl();
+
+       regulator_has_full_constraints();
 }
 
 static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
index 5f9d930..3503826 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/machine.h>
 #include <linux/ucb1400.h>
 
 #include <asm/mach/arch.h>
@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
                printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
                                colibri_pxa270_baseboard);
        }
+
+       regulator_has_full_constraints();
 }
 
 /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
index 51531ec..9d7072b 100644 (file)
@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
        em_x270_init_i2c();
        em_x270_init_camera();
        em_x270_userspace_consumers_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(EM_X270, "Compulab EM-X270")
index c98511c..9b0eb02 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/machine.h>
 
 #include "generic.h"
 
@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
        mxm_8x10_mmc_init();
 
        icontrol_can_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
index 872dcb2..066e3a2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/dm9000.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/partitions.h>
+#include <linux/regulator/machine.h>
 #include <linux/i2c/pxa-i2c.h>
 
 #include <asm/types.h>
@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
 
        BCR_writew(trizeps_conxs_bcr);
        board_backlight_power(1);
+
+       regulator_has_full_constraints();
 }
 
 static void __init trizeps4_map_io(void)
index aa89488..54122a9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/dm9000.h>
 #include <linux/ucb1400.h>
 #include <linux/ata_platform.h>
+#include <linux/regulator/machine.h>
 #include <linux/regulator/max1586.h>
 #include <linux/i2c/pxa-i2c.h>
 
@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
        vpac270_ts_init();
        vpac270_rtc_init();
        vpac270_ide_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(VPAC270, "Voipac PXA270")
index ac2ae5c..6158566 100644 (file)
@@ -868,6 +868,8 @@ static void __init zeus_init(void)
        i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
        pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
        spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
+
+       regulator_has_full_constraints();
 }
 
 static struct map_desc zeus_io_desc[] __initdata = {
index 39bca96..492c048 100644 (file)
@@ -17,4 +17,3 @@ extern char rockchip_secondary_trampoline;
 extern char rockchip_secondary_trampoline_end;
 
 extern unsigned long rockchip_boot_fn;
-extern void rockchip_secondary_startup(void);
index 46c22de..d69708b 100644 (file)
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-ENTRY(rockchip_secondary_startup)
-       mrc     p15, 0, r0, c0, c0, 0   @ read main ID register
-       ldr     r1, =0x00000c09         @ Cortex-A9 primary part number
-       teq     r0, r1
-       beq     v7_invalidate_l1
-       b       secondary_startup
-ENDPROC(rockchip_secondary_startup)
-
 ENTRY(rockchip_secondary_trampoline)
        ldr     pc, 1f
 ENDPROC(rockchip_secondary_trampoline)
index 5b4ca3c..2e6ab67 100644 (file)
@@ -149,8 +149,7 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
                 * sram_base_addr + 8: start address for pc
                 * */
                udelay(10);
-               writel(virt_to_phys(rockchip_secondary_startup),
-                       sram_base_addr + 8);
+               writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
                writel(0xDEADBEAF, sram_base_addr + 4);
                dsb_sev();
        }
@@ -189,7 +188,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
        }
 
        /* set the boot function for the sram code */
-       rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);
+       rockchip_boot_fn = virt_to_phys(secondary_startup);
 
        /* copy the trampoline to sram, that runs during startup of the core */
        memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
index afc60ba..476092b 100644 (file)
@@ -14,7 +14,6 @@ extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
                              unsigned long arg);
 extern int shmobile_smp_cpu_disable(unsigned int cpu);
-extern void shmobile_invalidate_start(void);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
index 69df8bf..fa5248c 100644 (file)
@@ -22,7 +22,7 @@
  * Boot code for secondary CPUs.
  *
  * First we turn on L1 cache coherency for our CPU. Then we jump to
- * shmobile_invalidate_start that invalidates the cache and hands over control
+ * secondary_startup that invalidates the cache and hands over control
  * to the common ARM startup code.
  */
 ENTRY(shmobile_boot_scu)
@@ -36,7 +36,7 @@ ENTRY(shmobile_boot_scu)
        bic     r2, r2, r3              @ Clear bits of our CPU (Run Mode)
        str     r2, [r0, #8]            @ write back
 
-       b       shmobile_invalidate_start
+       b       secondary_startup
 ENDPROC(shmobile_boot_scu)
 
        .text
index 50c4915..330c1fc 100644 (file)
 #include <asm/assembler.h>
 #include <asm/memory.h>
 
-#ifdef CONFIG_SMP
-ENTRY(shmobile_invalidate_start)
-       bl      v7_invalidate_l1
-       b       secondary_startup
-ENDPROC(shmobile_invalidate_start)
-#endif
-
 /*
  * Reset vector for secondary CPUs.
  * This will be mapped at address 0 by SBAR register.
index f483b56..b0790fc 100644 (file)
@@ -133,7 +133,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
 int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        /* For this particular CPU register boot vector */
-       shmobile_smp_hook(cpu, virt_to_phys(shmobile_invalidate_start), 0);
+       shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0);
 
        return apmu_wrap(cpu, apmu_power_on);
 }
index a0f3b1c..767c09e 100644 (file)
@@ -31,7 +31,6 @@
 
 #define RSTMGR_MPUMODRST_CPU1          0x2     /* CPU1 Reset */
 
-extern void socfpga_secondary_startup(void);
 extern void __iomem *socfpga_scu_base_addr;
 
 extern void socfpga_init_clocks(void);
index f65ea0a..5bb0164 100644 (file)
@@ -30,8 +30,3 @@ ENTRY(secondary_trampoline)
 1:     .long   .
        .long   socfpga_cpu1start_addr
 ENTRY(secondary_trampoline_end)
-
-ENTRY(socfpga_secondary_startup)
-       bl      v7_invalidate_l1
-       b       secondary_startup
-ENDPROC(socfpga_secondary_startup)
index c64d89b..79c5336 100644 (file)
@@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
                memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
 
-               writel(virt_to_phys(socfpga_secondary_startup),
+               writel(virt_to_phys(secondary_startup),
                       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
 
                flush_cache_all();
index e48a744..fffad24 100644 (file)
@@ -19,7 +19,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC)               += pm-tegra30.o
 ifeq ($(CONFIG_CPU_IDLE),y)
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)                += cpuidle-tegra30.o
 endif
-obj-$(CONFIG_SMP)                      += platsmp.o headsmp.o
+obj-$(CONFIG_SMP)                      += platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)               += hotplug.o
 
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += sleep-tegra30.o
diff --git a/kernel/arch/arm/mach-tegra/headsmp.S b/kernel/arch/arm/mach-tegra/headsmp.S
deleted file mode 100644 (file)
index 2072e73..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include "sleep.h"
-
-        .section ".text.head", "ax"
-
-ENTRY(tegra_secondary_startup)
-        check_cpu_part_num 0xc09, r8, r9
-        bleq    v7_invalidate_l1
-        b       secondary_startup
-ENDPROC(tegra_secondary_startup)
index 894c5c4..6fd9db5 100644 (file)
@@ -94,7 +94,7 @@ void __init tegra_cpu_reset_handler_init(void)
        __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
                *((u32 *)cpu_possible_mask);
        __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
-               virt_to_phys((void *)tegra_secondary_startup);
+               virt_to_phys((void *)secondary_startup);
 #endif
 
 #ifdef CONFIG_PM_SLEEP
index 29c3dec..9c479c7 100644 (file)
@@ -37,7 +37,6 @@ void __tegra_cpu_reset_handler_start(void);
 void __tegra_cpu_reset_handler(void);
 void __tegra20_cpu1_resettable_status_offset(void);
 void __tegra_cpu_reset_handler_end(void);
-void tegra_secondary_startup(void);
 
 #ifdef CONFIG_PM_SLEEP
 #define tegra_cpu_lp1_mask \
index 382c60e..7038cae 100644 (file)
@@ -17,8 +17,6 @@
 #ifndef __MACH_ZYNQ_COMMON_H__
 #define __MACH_ZYNQ_COMMON_H__
 
-void zynq_secondary_startup(void);
-
 extern int zynq_slcr_init(void);
 extern int zynq_early_slcr_init(void);
 extern void zynq_slcr_system_reset(void);
index dd8c071..045c727 100644 (file)
@@ -22,8 +22,3 @@ zynq_secondary_trampoline_jump:
 .globl zynq_secondary_trampoline_end
 zynq_secondary_trampoline_end:
 ENDPROC(zynq_secondary_trampoline)
-
-ENTRY(zynq_secondary_startup)
-       bl      v7_invalidate_l1
-       b       secondary_startup
-ENDPROC(zynq_secondary_startup)
index 52d768f..f66816c 100644 (file)
@@ -87,10 +87,9 @@ int zynq_cpun_start(u32 address, int cpu)
 }
 EXPORT_SYMBOL(zynq_cpun_start);
 
-static int zynq_boot_secondary(unsigned int cpu,
-                                               struct task_struct *idle)
+static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-       return zynq_cpun_start(virt_to_phys(zynq_secondary_startup), cpu);
+       return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
 }
 
 /*
index 7e7583d..6e4b9ff 100644 (file)
@@ -1953,7 +1953,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 {
        int next_bitmap;
 
-       if (mapping->nr_bitmaps > mapping->extensions)
+       if (mapping->nr_bitmaps >= mapping->extensions)
                return -EINVAL;
 
        next_bitmap = mapping->nr_bitmaps;
index 3d1054f..7911f14 100644 (file)
@@ -268,7 +268,10 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
        mov     r10, #0
-1:
+1:     adr     r12, __v7_setup_stack           @ the local stack
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
+       bl      v7_invalidate_l1
+       ldmia   r12, {r0-r5, lr}
 #ifdef CONFIG_SMP
        ALT_SMP(mrc     p15, 0, r0, c1, c0, 1)
        ALT_UP(mov      r0, #(1 << 6))          @ fake it for UP
@@ -277,7 +280,7 @@ __v7_ca17mp_setup:
        orreq   r0, r0, r10                     @ Enable CPU-specific SMP bits
        mcreq   p15, 0, r0, c1, c0, 1
 #endif
-       b       __v7_setup
+       b       __v7_setup_cont
 
 __v7_pj4b_setup:
 #ifdef CONFIG_CPU_PJ4B
@@ -335,10 +338,11 @@ __v7_pj4b_setup:
 
 __v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
-       stmia   r12, {r0-r5, r7, r9, r11, lr}
-       bl      v7_flush_dcache_louis
-       ldmia   r12, {r0-r5, r7, r9, r11, lr}
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
+       bl      v7_invalidate_l1
+       ldmia   r12, {r0-r5, lr}
 
+__v7_setup_cont:
        mrc     p15, 0, r0, c0, c0, 0           @ read main ID register
        and     r10, r0, #0xff000000            @ ARM?
        teq     r10, #0x41000000
@@ -460,7 +464,7 @@ ENDPROC(__v7_setup)
 
        .align  2
 __v7_setup_stack:
-       .space  4 * 11                          @ 11 registers
+       .space  4 * 7                           @ 7 registers
 
        __INITDATA
 
index 8aa7910..1160434 100644 (file)
@@ -6,9 +6,15 @@ obj-vdso := vgettimeofday.o datapage.o
 targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
-ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
-ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+
+VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
+VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared
+VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
@@ -40,10 +46,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 
 # Actual build commands
 quiet_cmd_vdsold = VDSO    $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
-                   $(call cc-ldoption, -Wl$(comma)--build-id) \
-                   -Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
-                   -Wl,-z,common-page-size=4096 -o $@
+      cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
+                   -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 
 quiet_cmd_vdsomunge = MUNGE   $@
       cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
index 9005b07..aedec81 100644 (file)
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+       va_list ap;
+
+       failed = 1;
+       fprintf(stderr, "%s: ", argv0);
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-       if (error_message_count > 0 && outfile != NULL)
+       if (failed && outfile != NULL)
                unlink(outfile);
 }
 
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
        int infd;
 
        atexit(cleanup);
+       argv0 = argv[0];
 
        if (argc != 3)
-               error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+               fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
        infile = argv[1];
        outfile = argv[2];
 
        infd = open(infile, O_RDONLY);
        if (infd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+               fail("Cannot open %s: %s\n", infile, strerror(errno));
 
        if (fstat(infd, &stat) != 0)
-               error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+               fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
        inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
        if (inbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+               fail("Failed to map %s: %s\n", infile, strerror(errno));
 
        close(infd);
 
        inhdr = inbuf;
 
        if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-               error(EXIT_FAILURE, 0, "Not an ELF file");
+               fail("Not an ELF file\n");
 
        if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-               error(EXIT_FAILURE, 0, "Unsupported ELF class");
+               fail("Unsupported ELF class\n");
 
        swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
        if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-               error(EXIT_FAILURE, 0, "Not a shared object");
+               fail("Not a shared object\n");
 
-       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-               error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-                     inhdr->e_machine);
-       }
+       if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+               fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
        e_flags = read_elf_word(inhdr->e_flags, swap);
 
        if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-               error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-                     EF_ARM_EABI_VERSION(e_flags));
+               fail("Unsupported EABI version %#x\n",
+                    EF_ARM_EABI_VERSION(e_flags));
        }
 
        if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-               error(EXIT_FAILURE, 0,
-                     "Unexpected hard-float flag set in e_flags");
+               fail("Unexpected hard-float flag set in e_flags\n");
 
        clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
        outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
        if (outfd < 0)
-               error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+               fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
        if (ftruncate(outfd, stat.st_size) != 0)
-               error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+               fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
        outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      outfd, 0);
        if (outbuf == MAP_FAILED)
-               error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+               fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
        close(outfd);
 
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
        }
 
        if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-               error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+               fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
        return EXIT_SUCCESS;
 }
index ab21e0d..352962b 100644 (file)
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
 
        /* Show what we know for posterity */
        c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-                            sizeof(vendor));
+                            sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';
-               early_memunmap(c16, sizeof(vendor));
+               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }
 
        pr_info("EFI v%u.%.02u by %s\n",
index 702591f..fd26e57 100644 (file)
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
        /* Don't bother with PPIs; they're already affine */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0 && irq_is_percpu(irq))
-               return 0;
+               goto out;
 
        irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
        if (!irqs)
@@ -1355,6 +1355,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
        else
                kfree(irqs);
 
+out:
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index d26fcd4..c0cff34 100644 (file)
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
                break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
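
The first hunk above tightens the si_addr_lsb copy: BUS_MCEERR_AR/BUS_MCEERR_AO are only meaningful when si_signo is SIGBUS, and their numeric values collide with codes of other signals. A user-space model of the guard (the fallback #defines mirror the uapi values and are only there to keep the sketch self-contained):

#include <signal.h>
#include <stdio.h>
#include <string.h>

#ifndef BUS_MCEERR_AR                   /* values from the Linux uapi headers */
#define BUS_MCEERR_AR   4
#define BUS_MCEERR_AO   5
#endif

/* si_code values are only meaningful together with si_signo, so the
 * memory-error codes must be qualified by SIGBUS before si_addr_lsb is
 * considered valid. */
static int has_addr_lsb(const siginfo_t *si)
{
        return si->si_signo == SIGBUS &&
               (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO);
}

int main(void)
{
        siginfo_t si;

        memset(&si, 0, sizeof(si));
        si.si_signo = SIGTRAP;
        si.si_code = BUS_MCEERR_AR;     /* same numeric value, different meaning */
        printf("SIGTRAP, code 4: copy lsb? %d\n", has_addr_lsb(&si));

        si.si_signo = SIGBUS;
        printf("SIGBUS,  code 4: copy lsb? %d\n", has_addr_lsb(&si));
        return 0;
}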
index 2cb0081..d3a202b 100644 (file)
@@ -569,7 +569,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        if ((unsigned)ipinr < NR_IPI) {
-               trace_ipi_entry(ipi_types[ipinr]);
+               trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
        }
 
@@ -612,7 +612,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        }
 
        if ((unsigned)ipinr < NR_IPI)
-               trace_ipi_exit(ipi_types[ipinr]);
+               trace_ipi_exit_rcuidle(ipi_types[ipinr]);
        set_irq_regs(old_regs);
 }
 
index f02530e..85c5715 100644 (file)
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_abt32(vcpu, false, addr);
-
-       inject_abt64(vcpu, false, addr);
+       else
+               inject_abt64(vcpu, false, addr);
 }
 
 /**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_abt32(vcpu, true, addr);
-
-       inject_abt64(vcpu, true, addr);
+       else
+               inject_abt64(vcpu, true, addr);
 }
 
 /**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
        if (!(vcpu->arch.hcr_el2 & HCR_RW))
                inject_undef32(vcpu);
-
-       inject_undef64(vcpu);
+       else
+               inject_undef64(vcpu);
 }
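
All three KVM hunks above fix the same fall-through: without the else, a 32-bit guest had the 64-bit exception injected on top of the 32-bit one. A condensed user-space analogue of the control-flow bug and its fix:

#include <stdio.h>
#include <stdbool.h>

static void inject32(void) { puts("32-bit exception injected"); }
static void inject64(void) { puts("64-bit exception injected"); }

/* Buggy shape: a 32-bit guest also gets the 64-bit injection. */
static void inject_buggy(bool is32)
{
        if (is32)
                inject32();

        inject64();
}

/* Fixed shape, matching the hunks above. */
static void inject_fixed(bool is32)
{
        if (is32)
                inject32();
        else
                inject64();
}

int main(void)
{
        inject_buggy(true);     /* prints both lines */
        inject_fixed(true);     /* prints only the 32-bit line */
        return 0;
}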
index 2de9d2e..0eeb4f0 100644 (file)
@@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 
 int pmd_huge(pmd_t pmd)
 {
-       return !(pmd_val(pmd) & PMD_TABLE_BIT);
+       return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
 
 int pud_huge(pud_t pud)
 {
 #ifndef __PAGETABLE_PMD_FOLDED
-       return !(pud_val(pud) & PUD_TABLE_BIT);
+       return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
 #else
        return 0;
 #endif
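
The hugetlb hunk stops treating an empty (pmd_none/pud_none) entry as huge: a cleared descriptor also has the TABLE bit clear, so the old test misreported it. A small model of the before/after predicates (the bit position and descriptor value are illustrative only):

#include <stdio.h>

#define PMD_TABLE_BIT   (1UL << 1)      /* bit position is illustrative */

/* Old test: a zero (none) entry also lacks the table bit, so it was
 * wrongly reported as a huge mapping. */
static int pmd_huge_old(unsigned long pmd) { return !(pmd & PMD_TABLE_BIT); }

/* Fixed test from the hunk above: the entry must also be non-zero. */
static int pmd_huge_new(unsigned long pmd) { return pmd && !(pmd & PMD_TABLE_BIT); }

int main(void)
{
        unsigned long none = 0, block = 0x705;  /* arbitrary non-table descriptor */

        printf("none:  old=%d new=%d\n", pmd_huge_old(none), pmd_huge_new(none));
        printf("block: old=%d new=%d\n", pmd_huge_old(block), pmd_huge_new(block));
        return 0;
}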
index de0a81a..98a26ce 100644 (file)
 /* Rd = Rn >> shift; signed */
 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
 
+/* Zero extend */
+#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
+#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+
 /* Move wide (immediate) */
 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
        aarch64_insn_gen_movewide(Rd, imm16, shift, \
index dc6a484..c047598 100644 (file)
@@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                 const struct jit_ctx *ctx)
 {
-       int to = ctx->offset[bpf_to + 1];
+       int to = ctx->offset[bpf_to];
        /* -1 to account for the Branch instruction */
-       int from = ctx->offset[bpf_from + 1] - 1;
+       int from = ctx->offset[bpf_from] - 1;
 
        return to - from;
 }
@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_ALU | BPF_END | BPF_FROM_BE:
 #ifdef CONFIG_CPU_BIG_ENDIAN
                if (BPF_SRC(code) == BPF_FROM_BE)
-                       break;
+                       goto emit_bswap_uxt;
 #else /* !CONFIG_CPU_BIG_ENDIAN */
                if (BPF_SRC(code) == BPF_FROM_LE)
-                       break;
+                       goto emit_bswap_uxt;
 #endif
                switch (imm) {
                case 16:
                        emit(A64_REV16(is64, dst, dst), ctx);
+                       /* zero-extend 16 bits into 64 bits */
+                       emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
                        emit(A64_REV32(is64, dst, dst), ctx);
+                       /* upper 32 bits already cleared */
                        break;
                case 64:
                        emit(A64_REV64(dst, dst), ctx);
                        break;
                }
                break;
+emit_bswap_uxt:
+               switch (imm) {
+               case 16:
+                       /* zero-extend 16 bits into 64 bits */
+                       emit(A64_UXTH(is64, dst, dst), ctx);
+                       break;
+               case 32:
+                       /* zero-extend 32 bits into 64 bits */
+                       emit(A64_UXTW(is64, dst, dst), ctx);
+                       break;
+               case 64:
+                       /* nop */
+                       break;
+               }
+               break;
        /* dst = imm */
        case BPF_ALU | BPF_MOV | BPF_K:
        case BPF_ALU64 | BPF_MOV | BPF_K:
@@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;
 
+               ret = build_insn(insn, ctx);
+
                if (ctx->image == NULL)
                        ctx->offset[i] = ctx->idx;
 
-               ret = build_insn(insn, ctx);
                if (ret > 0) {
                        i++;
                        continue;
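
In the BPF JIT hunks above, the endian ops gain explicit zero-extension (A64_UXTH/A64_UXTW) because REV16 only permutes bytes and leaves the rest of the 64-bit register populated; the from-BE/from-LE cases that skip the swap still need the truncation. A user-space model of why the UXTH step is required after a 16-bit swap:

#include <stdio.h>
#include <stdint.h>

/* Byte-swap each 16-bit halfword, like the A64 REV16 instruction the
 * JIT emits for a 16-bit endian conversion. */
static uint64_t rev16(uint64_t x)
{
        return ((x & 0x00ff00ff00ff00ffULL) << 8) |
               ((x & 0xff00ff00ff00ff00ULL) >> 8);
}

int main(void)
{
        uint64_t reg = 0x1122334455667788ULL;

        uint64_t swapped = rev16(reg);          /* junk remains above bit 15 */
        uint64_t uxth    = swapped & 0xffffULL; /* A64_UXTH: keep the low halfword */

        printf("rev16 only:   %#llx\n", (unsigned long long)swapped);
        printf("rev16 + uxth: %#llx\n", (unsigned long long)uxth);
        return 0;
}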
index 23b1a97..52c179b 100644 (file)
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (IS_ERR_OR_NULL(clk))
+               return;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
        unsigned long flags;
        unsigned long rate;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        rate = clk->get_rate(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long flags, actual_rate;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        unsigned long flags;
        long ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
        unsigned long flags;
        int ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_parent)
                return -ENOSYS;
 
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
 
 struct clk *clk_get_parent(struct clk *clk)
 {
-       return clk->parent;
+       return !clk ? NULL : clk->parent;
 }
 EXPORT_SYMBOL(clk_get_parent);
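
The clk hunks above make the AVR32 clock API tolerate a NULL struct clk, which consumers use as a harmless dummy clock, instead of dereferencing it. A minimal user-space sketch of the same convention (the struct layout is invented for the example):

#include <stdio.h>
#include <stddef.h>

struct clk {
        unsigned long rate;
};

/* NULL-tolerant accessors, mirroring the pattern added above: a NULL
 * clock is treated as a valid "dummy" clock rather than dereferenced. */
static int clk_enable(struct clk *clk)
{
        if (!clk)
                return 0;
        /* ... hardware enable would go here ... */
        return 0;
}

static unsigned long clk_get_rate(struct clk *clk)
{
        return clk ? clk->rate : 0;
}

int main(void)
{
        struct clk uart = { .rate = 24000000 };

        clk_enable(NULL);               /* no crash: treated as a no-op */
        printf("%lu %lu\n", clk_get_rate(&uart), clk_get_rate(NULL));
        return 0;
}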
 
index 33013df..5c68c85 100644 (file)
@@ -125,6 +125,13 @@ endif # M68KCLASSIC
 
 if COLDFIRE
 
+choice
+       prompt "ColdFire SoC type"
+       default M520x
+       help
+         Select the type of ColdFire System-on-Chip (SoC) that you want
+         to build for.
+
 config M5206
        bool "MCF5206"
        depends on !MMU
@@ -174,9 +181,6 @@ config M525x
        help
          Freescale (Motorola) Coldfire 5251/5253 processor support.
 
-config M527x
-       bool
-
 config M5271
        bool "MCF5271"
        depends on !MMU
@@ -223,9 +227,6 @@ config M5307
        help
          Motorola ColdFire 5307 processor support.
 
-config M53xx
-       bool
-
 config M532x
        bool "MCF532x"
        depends on !MMU
@@ -251,9 +252,6 @@ config M5407
        help
          Motorola ColdFire 5407 processor support.
 
-config M54xx
-       bool
-
 config M547x
        bool "MCF547x"
        select M54xx
@@ -280,6 +278,17 @@ config M5441x
        help
          Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
 
+endchoice
+
+config M527x
+       bool
+
+config M53xx
+       bool
+
+config M54xx
+       bool
+
 endif # COLDFIRE
 
 
@@ -416,22 +425,10 @@ config HAVE_MBAR
 config HAVE_IPSBAR
        bool
 
-config CLOCK_SET
-       bool "Enable setting the CPU clock frequency"
-       depends on COLDFIRE
-       default n
-       help
-         On some CPU's you do not need to know what the core CPU clock
-         frequency is. On these you can disable clock setting. On some
-         traditional 68K parts, and on all ColdFire parts you need to set
-         the appropriate CPU clock frequency. On these devices many of the
-         onboard peripherals derive their timing from the master CPU clock
-         frequency.
-
 config CLOCK_FREQ
        int "Set the core clock frequency"
        default "66666666"
-       depends on CLOCK_SET
+       depends on COLDFIRE
        help
          Define the CPU clock frequency in use. This is the core clock
          frequency, it may or may not be the same as the external clock
index c94557b..50aa4da 100644 (file)
@@ -19,7 +19,7 @@
  *     in any case new boards come along from time to time that have yet
  *     another different clocking frequency.
  */
-#ifdef CONFIG_CLOCK_SET
+#ifdef CONFIG_CLOCK_FREQ
 #define        MCF_CLK         CONFIG_CLOCK_FREQ
 #else
 #error "Don't know what your ColdFire CPU clock frequency is??"
index db1f416..9b95c9b 100644 (file)
@@ -1417,6 +1417,7 @@ config CPU_MIPS64_R6
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_MSA
        select GENERIC_CSUM
+       select MIPS_O32_FP64_SUPPORT if MIPS32_O32
        help
          Choose this option to build a kernel for release 6 or later of the
          MIPS64 architecture.  New MIPS processors, starting with the Warrior
index 7fc8397..fd2a36a 100644 (file)
@@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
 {
        return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 084780b..1b06251 100644 (file)
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
                goto fr_common;
 
        case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
       || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
diff --git a/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644 (file)
index 11d3b57..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
index 819af9d..70f6e7f 100644 (file)
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
+#ifdef CONFIG_SMP
+               /*
+                * For SMP, multiple CPUs can race, so we need to do
+                * this atomically.
+                */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+               unsigned long page_global = _PAGE_GLOBAL;
+               unsigned long tmp;
+
+               __asm__ __volatile__ (
+                       "       .set    push\n"
+                       "       .set    noreorder\n"
+                       "1:     " LL_INSN "     %[tmp], %[buddy]\n"
+                       "       bnez    %[tmp], 2f\n"
+                       "        or     %[tmp], %[tmp], %[global]\n"
+                       "       " SC_INSN "     %[tmp], %[buddy]\n"
+                       "       beqz    %[tmp], 1b\n"
+                       "        nop\n"
+                       "2:\n"
+                       "       .set pop"
+                       : [buddy] "+m" (buddy->pte),
+                         [tmp] "=&r" (tmp)
+                       : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
        }
 #endif
 }
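
The set_pte() hunk replaces a plain read-modify-write of the buddy PTE with an LL/SC loop, so two CPUs faulting on the pair cannot lose the _PAGE_GLOBAL update. A user-space analogue using a compare-and-swap loop in place of lld/scd (the bit value is illustrative):

#include <stdio.h>

#define _PAGE_GLOBAL    (1UL << 0)      /* illustrative bit position */

/* Mark the buddy PTE global only if it is still "none" (zero), without
 * letting another CPU's update slip in between the load and the store. */
static void buddy_set_global(unsigned long *buddy)
{
        unsigned long expected = 0;

        /* a weak CAS in a loop mirrors the sc-retry behaviour */
        while (!__atomic_compare_exchange_n(buddy, &expected, _PAGE_GLOBAL,
                                            1, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
                if (expected != 0)      /* buddy no longer none: nothing to do */
                        return;
        }
}

int main(void)
{
        unsigned long buddy = 0;

        buddy_set_global(&buddy);
        printf("buddy = %#lx\n", buddy);
        return 0;
}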
index 2b25d1b..16f1ea9 100644 (file)
@@ -23,6 +23,7 @@
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
index 28d6d93..a71da57 100644 (file)
                .set    noreorder
                bltz    k0, 8f
                 move   k1, sp
+#ifdef CONFIG_EVA
+               /*
+                * Flush interAptiv's Return Prediction Stack (RPS) by writing
+                * EntryHi. Toggling Config7.RPS is slower and less portable.
+                *
+                * The RPS isn't automatically flushed when exceptions are
+                * taken, which can result in kernel mode speculative accesses
+                * to user addresses if the RPS mispredicts. That's harmless
+                * when user and kernel share the same address space, but with
+                * EVA the same user segments may be unmapped to kernel mode,
+                * even containing sensitive MMIO regions or invalid memory.
+                *
+                * This can happen when the kernel sets the return address to
+                * ret_from_* and jr's to the exception handler, which looks
+                * more like a tail call than a function call. If nested calls
+                * don't evict the last user address in the RPS, it will
+                * mispredict the return and fetch from a user controlled
+                * address into the icache.
+                *
+                * More recent EVA-capable cores with MAAR to restrict
+                * speculative accesses aren't affected.
+                */
+               MFC0    k0, CP0_ENTRYHI
+               MTC0    k0, CP0_ENTRYHI
+#endif
                .set    reorder
                /* Called from user mode, new stack. */
                get_saved_sp
index 3e4491a..789d7bf 100644 (file)
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                                      unsigned long __user *user_mask_ptr)
 {
        unsigned int real_len;
-       cpumask_t mask;
+       cpumask_t allowed, mask;
        int retval;
        struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
        if (retval)
                goto out_unlock;
 
-       cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+       cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+       cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
        read_unlock(&tasklist_lock);
index 74bab9d..c6bbf21 100644 (file)
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 
 process_entry:
        PTR_L           s2, (s0)
-       PTR_ADD         s0, s0, SZREG
+       PTR_ADDIU       s0, s0, SZREG
 
        /*
         * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
        /* copy page word by word */
        REG_L           s5, (s2)
        REG_S           s5, (s4)
-       PTR_ADD         s4, s4, SZREG
-       PTR_ADD         s2, s2, SZREG
-       LONG_SUB        s6, s6, 1
+       PTR_ADDIU       s4, s4, SZREG
+       PTR_ADDIU       s2, s2, SZREG
+       LONG_ADDIU      s6, s6, -1
        beq             s6, zero, process_entry
        b               copy_word
        b               process_entry
index ad4d446..a6f6b76 100644 (file)
@@ -80,7 +80,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_64_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 446cc65..4b20106 100644 (file)
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_N32_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 19a7705..5d7f263 100644 (file)
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index faa46eb..d0744cc 100644 (file)
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
        }
 }
 
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+       int i, k, core_present;
+       cpumask_t temp_foreign_map;
+
+       /* Re-calculate the mask */
+       for_each_online_cpu(i) {
+               core_present = 0;
+               for_each_cpu(k, &temp_foreign_map)
+                       if (cpu_data[i].package == cpu_data[k].package &&
+                           cpu_data[i].core == cpu_data[k].core)
+                               core_present = 1;
+               if (!core_present)
+                       cpumask_set_cpu(i, &temp_foreign_map);
+       }
+
+       cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
 struct plat_smp_ops *mp_ops;
 EXPORT_SYMBOL(mp_ops);
 
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);
 
+       calculate_cpu_foreign_map();
+
        cpumask_set_cpu(cpu, &cpu_callin_map);
 
        synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
 static void stop_this_cpu(void *dummy)
 {
        /*
-        * Remove this CPU:
+        * Remove this CPU. Be a bit slow here and
+        * set the bits for every online CPU so we don't miss
+        * any IPI whilst taking this VPE down.
         */
+
+       cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+       /* Make it visible to every other CPU */
+       smp_mb();
+
        set_cpu_online(smp_processor_id(), false);
+       calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
 }
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
+       calculate_cpu_foreign_map();
 #ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
 #endif
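
cpu_foreign_map, introduced above, keeps one representative VPE per (package, core) pair and is recomputed whenever CPUs come or go; a later hunk uses it to limit index-based cache-op IPIs. A user-space model of the mask construction:

#include <stdio.h>
#include <stdbool.h>

struct cpu_info { int package; int core; };

/* Keep exactly one CPU per (package, core) pair so cross-core IPIs are
 * not sent to sibling VPEs of a core that is already covered. */
static unsigned long build_foreign_map(const struct cpu_info *cpu, int ncpus)
{
        unsigned long map = 0;
        int i, k;

        for (i = 0; i < ncpus; i++) {
                bool core_present = false;

                for (k = 0; k < ncpus; k++)
                        if ((map & (1UL << k)) &&
                            cpu[i].package == cpu[k].package &&
                            cpu[i].core == cpu[k].core)
                                core_present = true;
                if (!core_present)
                        map |= 1UL << i;
        }
        return map;
}

int main(void)
{
        /* two cores, two VPEs each */
        struct cpu_info cpus[] = { {0, 0}, {0, 0}, {0, 1}, {0, 1} };

        printf("foreign map: %#lx\n", build_foreign_map(cpus, 4));     /* 0x5 */
        return 0;
}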
index d2d1c19..5f5f44e 100644 (file)
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
        struct pt_regs regs;
+       mm_segment_t old_fs = get_fs();
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                        prepare_frametrace(&regs);
                }
        }
+       /*
+        * show_stack() deals exclusively with kernel mode, so be sure to access
+        * the stack in the kernel (not user) address space.
+        */
+       set_fs(KERNEL_DS);
        show_stacktrace(task, &regs);
+       set_fs(old_fs);
 }
 
 static void show_code(unsigned int __user *pc)
@@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;
+       mm_segment_t old_fs = get_fs();
 
        prev_state = exception_enter();
        show_regs(regs);
@@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
                dump_tlb_all();
        }
 
+       if (!user_mode(regs))
+               set_fs(KERNEL_DS);
+
        show_code((unsigned int __user *) regs->cp0_epc);
 
+       set_fs(old_fs);
+
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
index af84bef..eb3efd1 100644 (file)
@@ -438,7 +438,7 @@ do {                                                        \
                : "memory");                                \
 } while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
 do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
index 6ab1057..d01ade6 100644 (file)
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
        return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 22b9b2c..6983fcd 100644 (file)
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        /* Fall through */
                case jr_op:
                        /* For R6, JR already emulated in jalr_op */
-                       if (NO_R6EMU && insn.r_format.opcode == jr_op)
+                       if (NO_R6EMU && insn.r_format.func == jr_op)
                                break;
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
index 2e03ab1..dca0efc 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
 #include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
        preempt_disable();
 
-#ifndef CONFIG_MIPS_MT_SMP
-       smp_call_function(func, info, 1);
-#endif
+       /*
+        * The Coherent Manager propagates address-based cache ops to other
+        * cores but not index-based ops. However, r4k_on_each_cpu is used
+        * in both cases so there is no easy way to tell what kind of op is
+        * executed to the other cores. The best we can probably do is
+        * to restrict that call when a CM is not present because both
+        * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
+        */
+       if (!mips_cm_present())
+               smp_call_function_many(&cpu_foreign_map, func, info, 1);
        func(info);
        preempt_enable();
 }
index 185e682..a7f7d9f 100644 (file)
@@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
 
        return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
 
 static void __init init_rtc(void)
 {
-       /* stop the clock whilst setting it up */
-       CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+       unsigned char freq, ctrl;
 
-       /* 32KHz time base */
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+       /* Set 32KHz time base if not already set */
+       freq = CMOS_READ(RTC_FREQ_SELECT);
+       if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+               CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
 
-       /* start the clock */
-       CMOS_WRITE(RTC_24H, RTC_CONTROL);
+       /* Ensure SET bit is clear so RTC can run */
+       ctrl = CMOS_READ(RTC_CONTROL);
+       if (ctrl & RTC_SET)
+               CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }
 
 void __init plat_time_init(void)
index e1d6989..a120b7a 100644 (file)
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 67889fc..ab73f6f 100644 (file)
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
        return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 void __init plat_time_init(void)
 {
index 7cf91b9..199ace4 100644 (file)
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
        return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index e5a693b..443f44d 100644 (file)
@@ -17,6 +17,7 @@ config OPENRISC
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
        select GENERIC_CPU_DEVICES
+       select HAVE_UID16
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
        select GENERIC_STRNCPY_FROM_USER
@@ -31,9 +32,6 @@ config MMU
 config HAVE_DMA_ATTRS
        def_bool y
 
-config UID16
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index 3a08eae..3edbb9f 100644 (file)
@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-       if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+       if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd;
                 * cannot free it.
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
                 */
                mm_inc_nr_pmds(mm);
                return;
+       }
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
index 0a18375..f93c4a4 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
-extern spinlock_t pa_dbit_lock;
+extern spinlock_t pa_tlb_lock;
 
 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
  */
 #define kern_addr_valid(addr)  (1)
 
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+
+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+       mtsp(mm->context, 1);
+       pdtlb(addr);
+       if (unlikely(split_tlb))
+               pitlb(addr);
+}
+
 /* Certain architectures need to do special things when PTEs
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
                 *(pteptr) = (pteval);                           \
         } while(0)
 
-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+#define pte_inserted(x)                                                \
+       ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED))          \
+        == (_PAGE_PRESENT|_PAGE_ACCESSED))
 
-#define set_pte_at(mm, addr, ptep, pteval)                      \
-       do {                                                    \
+#define set_pte_at(mm, addr, ptep, pteval)                     \
+       do {                                                    \
+               pte_t old_pte;                                  \
                unsigned long flags;                            \
-               spin_lock_irqsave(&pa_dbit_lock, flags);        \
-               set_pte(ptep, pteval);                          \
-               purge_tlb_entries(mm, addr);                    \
-               spin_unlock_irqrestore(&pa_dbit_lock, flags);   \
+               spin_lock_irqsave(&pa_tlb_lock, flags);         \
+               old_pte = *ptep;                                \
+               set_pte(ptep, pteval);                          \
+               if (pte_inserted(old_pte))                      \
+                       purge_tlb_entries(mm, addr);            \
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);    \
        } while (0)
 
 #endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
 
 #define pte_none(x)     (pte_val(x) == 0)
 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp)  do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
 
 #define pmd_flag(x)    (pmd_val(x) & PxD_FLAG_MASK)
 #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
        if (!pte_young(*ptep))
                return 0;
 
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        pte = *ptep;
        if (!pte_young(pte)) {
-               spin_unlock_irqrestore(&pa_dbit_lock, flags);
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);
                return 0;
        }
        set_pte(ptep, pte_mkold(pte));
        purge_tlb_entries(vma->vm_mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
        return 1;
 }
 
@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        pte_t old_pte;
        unsigned long flags;
 
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        old_pte = *ptep;
-       pte_clear(mm,addr,ptep);
-       purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       set_pte(ptep, __pte(0));
+       if (pte_inserted(old_pte))
+               purge_tlb_entries(mm, addr);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
        return old_pte;
 }
@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        unsigned long flags;
-       spin_lock_irqsave(&pa_dbit_lock, flags);
+       spin_lock_irqsave(&pa_tlb_lock, flags);
        set_pte(ptep, pte_wrprotect(*ptep));
        purge_tlb_entries(mm, addr);
-       spin_unlock_irqrestore(&pa_dbit_lock, flags);
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 
 #define pte_same(A,B)  (pte_val(A) == pte_val(B))
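
The parisc page-table hunks serialise PTE updates and their TLB purges under pa_tlb_lock, and skip the expensive broadcast purge when the old PTE could never have been inserted into the TLB. A small model of the pte_inserted() test (bit positions are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define _PAGE_PRESENT   (1UL << 0)      /* illustrative bit layout */
#define _PAGE_ACCESSED  (1UL << 1)

/* A translation can only be in the TLB if the PTE was both present and
 * accessed, so clearing any other PTE does not need the (slow,
 * broadcast) TLB purge. */
static bool pte_inserted(unsigned long pte)
{
        return (pte & (_PAGE_PRESENT | _PAGE_ACCESSED)) ==
               (_PAGE_PRESENT | _PAGE_ACCESSED);
}

int main(void)
{
        printf("cleared pte      -> purge? %d\n", pte_inserted(0));
        printf("present only     -> purge? %d\n", pte_inserted(_PAGE_PRESENT));
        printf("present+accessed -> purge? %d\n",
               pte_inserted(_PAGE_PRESENT | _PAGE_ACCESSED));
        return 0;
}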
index 9d086a5..e84b964 100644 (file)
@@ -13,6 +13,9 @@
  * active at any one time on the Merced bus.  This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
  * it on all systems not just the N class.
+
+ * It is also used to ensure PTE updates are atomic and consistent
+ * with the TLB.
  */
 extern spinlock_t pa_tlb_lock;
 
@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
 
 #define smp_flush_tlb_all()    flush_tlb_all()
 
+int __flush_tlb_range(unsigned long sid,
+       unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma, start, end) \
+       __flush_tlb_range((vma)->vm_mm->context, start, end)
+
+#define flush_tlb_kernel_range(start, end) \
+       __flush_tlb_range(0, start, end)
+
 /*
  * flush_tlb_mm()
  *
- * XXX This code is NOT valid for HP-UX compatibility processes,
- * (although it will probably work 99% of the time). HP-UX
- * processes are free to play with the space id's and save them
- * over long periods of time, etc. so we have to preserve the
- * space and just flush the entire tlb. We need to check the
- * personality in order to do that, but the personality is not
- * currently being set correctly.
- *
- * Of course, Linux processes could do the same thing, but
- * we don't support that (and the compilers, dynamic linker,
- * etc. do not do that).
+ * The code to switch to a new context is NOT valid for processes
+ * which play with the space id's.  Thus, we have to preserve the
+ * space and just flush the entire tlb.  However, the compilers,
+ * dynamic linker, etc, do not manipulate space id's, so there
+ * could be a significant performance benefit in switching contexts
+ * and not flushing the whole tlb.
  */
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
        BUG_ON(mm == &init_mm); /* Should never happen */
 
 #if 1 || defined(CONFIG_SMP)
+       /* Except for very small threads, flushing the whole TLB is
+        * faster than using __flush_tlb_range.  The pdtlb and pitlb
+        * instructions are very slow because of the TLB broadcast.
+        * It might be faster to do local range flushes on all CPUs
+        * on PA 2.0 systems.
+        */
        flush_tlb_all();
 #else
        /* FIXME: currently broken, causing space id and protection ids
-        *  to go out of sync, resulting in faults on userspace accesses.
+        * to go out of sync, resulting in faults on userspace accesses.
+        * This approach needs further investigation since running many
+        * small applications (e.g., GCC testsuite) is faster on HP-UX.
         */
        if (mm) {
                if (mm->context != 0)
@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 {
        unsigned long flags, sid;
 
-       /* For one page, it's not worth testing the split_tlb variable */
-
-       mb();
        sid = vma->vm_mm->context;
        purge_tlb_start(flags);
        mtsp(sid, 1);
        pdtlb(addr);
-       pitlb(addr);
+       if (unlikely(split_tlb))
+               pitlb(addr);
        purge_tlb_end(flags);
 }
-
-void __flush_tlb_range(unsigned long sid,
-       unsigned long start, unsigned long end);
-
-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
-
-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
-
 #endif
index f6448c7..cda6dbb 100644 (file)
@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
 EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
 
 void __init parisc_setup_cache_timing(void)
 {
        unsigned long rangetime, alltime;
-       unsigned long size;
+       unsigned long size, start;
 
        alltime = mfctl(16);
        flush_data_cache();
@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;
 
-       parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 
+       parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;
 
        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;
 
-       printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+       printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+               parisc_cache_flush_threshold/1024);
+
+       /* calculate TLB flush threshold */
+
+       alltime = mfctl(16);
+       flush_tlb_all();
+       alltime = mfctl(16) - alltime;
+
+       size = PAGE_SIZE;
+       start = (unsigned long) _text;
+       rangetime = mfctl(16);
+       while (start < (unsigned long) _end) {
+               flush_tlb_kernel_range(start, start + PAGE_SIZE);
+               start += PAGE_SIZE;
+               size += PAGE_SIZE;
+       }
+       rangetime = mfctl(16) - rangetime;
+
+       printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+               alltime, size, rangetime);
+
+       parisc_tlb_flush_threshold = size * alltime / rangetime;
+       parisc_tlb_flush_threshold *= num_online_cpus();
+       parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
+       if (!parisc_tlb_flush_threshold)
+               parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
+       printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+               parisc_tlb_flush_threshold/1024);
 }
 
 extern void purge_kernel_dcache_page_asm(unsigned long);
@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 }
 EXPORT_SYMBOL(copy_user_page);
 
-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
-{
-       unsigned long flags;
-
-       /* Note: purge_tlb_entries can be called at startup with
-          no context.  */
-
-       purge_tlb_start(flags);
-       mtsp(mm->context, 1);
-       pdtlb(addr);
-       pitlb(addr);
-       purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(purge_tlb_entries);
-
-void __flush_tlb_range(unsigned long sid, unsigned long start,
-                      unsigned long end)
+/* __flush_tlb_range()
+ *
+ * returns 1 if all TLBs were flushed.
+ */
+int __flush_tlb_range(unsigned long sid, unsigned long start,
+                     unsigned long end)
 {
-       unsigned long npages;
+       unsigned long flags, size;
 
-       npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-       if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
+       size = (end - start);
+       if (size >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
-       else {
-               unsigned long flags;
+               return 1;
+       }
 
+       /* Purge TLB entries for small ranges using the pdtlb and
+          pitlb instructions.  These instructions execute locally
+          but cause a purge request to be broadcast to other TLBs.  */
+       if (likely(!split_tlb)) {
+               while (start < end) {
+                       purge_tlb_start(flags);
+                       mtsp(sid, 1);
+                       pdtlb(start);
+                       purge_tlb_end(flags);
+                       start += PAGE_SIZE;
+               }
+               return 0;
+       }
+
+       /* split TLB case */
+       while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, 1);
-               if (split_tlb) {
-                       while (npages--) {
-                               pdtlb(start);
-                               pitlb(start);
-                               start += PAGE_SIZE;
-                       }
-               } else {
-                       while (npages--) {
-                               pdtlb(start);
-                               start += PAGE_SIZE;
-                       }
-               }
+               pdtlb(start);
+               pitlb(start);
                purge_tlb_end(flags);
+               start += PAGE_SIZE;
        }
+       return 0;
 }
 
 static void cacheflush_h_tmp_function(void *dummy)
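
__flush_tlb_range() now falls back to a full TLB flush once the range exceeds parisc_tlb_flush_threshold, which parisc_setup_cache_timing() calibrates at boot by timing a whole-TLB flush against page-by-page purges. A condensed model of the size-based decision (the threshold constant mirrors the boot-time fallback value):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE               4096UL
#define FLUSH_TLB_THRESHOLD     (2 * 1024 * 1024UL)     /* initial fallback */

/* Returns true ("flushed everything") when the range is large enough
 * that a full TLB flush is cheaper than purging it page by page. */
static bool flush_tlb_range_model(unsigned long start, unsigned long end,
                                  unsigned long threshold)
{
        if (end - start >= threshold) {
                /* flush_tlb_all() */
                return true;
        }
        for (; start < end; start += PAGE_SIZE)
                ;       /* pdtlb()/pitlb() for this page would go here */
        return false;
}

int main(void)
{
        printf("64 KB range -> full flush? %d\n",
               flush_tlb_range_model(0, 64 * 1024, FLUSH_TLB_THRESHOLD));
        printf("8 MB range  -> full flush? %d\n",
               flush_tlb_range_model(0, 8 * 1024 * 1024, FLUSH_TLB_THRESHOLD));
        return 0;
}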
index 7581961..c5ef408 100644 (file)
@@ -45,7 +45,7 @@
        .level 2.0
 #endif
 
-       .import         pa_dbit_lock,data
+       .import         pa_tlb_lock,data
 
        /* space_to_prot macro creates a prot id from a space id */
 
        SHLREG          %r9,PxD_VALUE_SHIFT,\pmd
        extru           \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
-       shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
-       LDREG           %r0(\pmd),\pte          /* pmd is now pte */
+       shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
+       LDREG           %r0(\pmd),\pte
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
        .endm
 
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
 
-       /* Acquire pa_dbit_lock lock. */
-       .macro          dbit_lock       spc,tmp,tmp1
+       /* Acquire pa_tlb_lock lock and recheck page is still present. */
+       .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
        cmpib,COND(=),n 0,\spc,2f
-       load32          PA(pa_dbit_lock),\tmp
+       load32          PA(pa_tlb_lock),\tmp
 1:     LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
+       LDREG           0(\ptp),\pte
+       bb,<,n          \pte,_PAGE_PRESENT_BIT,2f
+       b               \fault
+       stw              \spc,0(\tmp)
 2:
 #endif
        .endm
 
-       /* Release pa_dbit_lock lock without reloading lock address. */
-       .macro          dbit_unlock0    spc,tmp
+       /* Release pa_tlb_lock lock without reloading lock address. */
+       .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
 #endif
        .endm
 
-       /* Release pa_dbit_lock lock. */
-       .macro          dbit_unlock1    spc,tmp
+       /* Release pa_tlb_lock lock. */
+       .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
-       load32          PA(pa_dbit_lock),\tmp
-       dbit_unlock0    \spc,\tmp
+       load32          PA(pa_tlb_lock),\tmp
+       tlb_unlock0     \spc,\tmp
 #endif
        .endm
 
        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
-       .macro          update_ptep     spc,ptep,pte,tmp,tmp1
-#ifdef CONFIG_SMP
-       or,COND(=)      %r0,\spc,%r0
-       LDREG           0(\ptep),\pte
-#endif
+       .macro          update_accessed ptp,pte,tmp,tmp1
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
-       STREG           \tmp,0(\ptep)
+       STREG           \tmp,0(\ptp)
        .endm
 
        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
-       .macro          update_dirty    spc,ptep,pte,tmp
-#ifdef CONFIG_SMP
-       or,COND(=)      %r0,\spc,%r0
-       LDREG           0(\ptep),\pte
-#endif
+       .macro          update_dirty    ptp,pte,tmp
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
-       STREG           \pte,0(\ptep)
+       STREG           \pte,0(\ptp)
        .endm
 
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1202,20 +1198,20 @@ dtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1269,16 +1264,16 @@ dtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
 
        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
        
-        idtlbt          pte,prot
-       dbit_unlock1    spc,t0
+       idtlbt          pte,prot
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1406,14 +1401,14 @@ itlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
        
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
 
        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1458,20 +1453,20 @@ itlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1482,20 +1477,20 @@ naitlb_miss_11:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb_11      spc,pte,prot
 
-       mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+       mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1
 
        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)
 
-       mtsp            t0, %sr1        /* Restore sr1 */
-       dbit_unlock1    spc,t0
+       mtsp            t1, %sr1        /* Restore sr1 */
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1516,16 +1511,16 @@ itlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,itlb_fault
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,itlb_fault
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0  
+       f_extend        pte,t1
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1536,16 +1531,16 @@ naitlb_miss_20:
 
        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20
 
-       dbit_lock       spc,t0,t1
-       update_ptep     spc,ptp,pte,t0,t1
+       tlb_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
+       update_accessed ptp,pte,t0,t1
 
        make_insert_tlb spc,pte,prot
 
-       f_extend        pte,t0
+       f_extend        pte,t1
 
        iitlbt          pte,prot
-       dbit_unlock1    spc,t0
 
+       tlb_unlock1     spc,t0
        rfir
        nop
 
@@ -1568,14 +1563,14 @@ dbit_trap_20w:
 
        L3_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
                
        idtlbt          pte,prot
-       dbit_unlock0    spc,t0
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 #else
@@ -1588,8 +1583,8 @@ dbit_trap_11:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb_11      spc,pte,prot
 
@@ -1600,8 +1595,8 @@ dbit_trap_11:
        idtlbp          prot,(%sr1,va)
 
        mtsp            t1, %sr1     /* Restore sr1 */
-       dbit_unlock0    spc,t0
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 
@@ -1612,16 +1607,16 @@ dbit_trap_20:
 
        L2_ptep         ptp,pte,t0,va,dbit_fault
 
-       dbit_lock       spc,t0,t1
-       update_dirty    spc,ptp,pte,t1
+       tlb_lock        spc,ptp,pte,t0,t1,dbit_fault
+       update_dirty    ptp,pte,t1
 
        make_insert_tlb spc,pte,prot
 
        f_extend        pte,t1
        
-        idtlbt          pte,prot
-       dbit_unlock0    spc,t0
+       idtlbt          pte,prot
 
+       tlb_unlock0     spc,t0
        rfir
        nop
 #endif
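
The tlb_lock macro introduced above takes pa_tlb_lock and then re-reads the PTE: if another CPU cleared it while this handler was spinning, the lock is released and control branches to the fault path instead of inserting a stale translation. A simplified user-space model of that acquire-and-recheck step (the real LDCW lock uses inverted polarity and per-arch barriers, so this is only a sketch of the logic):

#include <stdio.h>
#include <stdbool.h>

#define _PAGE_PRESENT   (1UL << 0)      /* illustrative */

static int pa_tlb_lock;                 /* 0 = free, 1 = held (simplified) */

static bool tlb_lock_and_recheck(unsigned long *ptep, unsigned long *pte)
{
        while (__atomic_exchange_n(&pa_tlb_lock, 1, __ATOMIC_ACQUIRE))
                ;                       /* spin, like the LDCW loop */

        *pte = *ptep;
        if (!(*pte & _PAGE_PRESENT)) {
                __atomic_store_n(&pa_tlb_lock, 0, __ATOMIC_RELEASE);
                return false;           /* caller branches to the fault handler */
        }
        return true;                    /* caller inserts the TLB entry, then unlocks */
}

int main(void)
{
        unsigned long pte = _PAGE_PRESENT, val;

        printf("present pte: proceed=%d\n", tlb_lock_and_recheck(&pte, &val));
        __atomic_store_n(&pa_tlb_lock, 0, __ATOMIC_RELEASE);

        pte = 0;
        printf("cleared pte: proceed=%d\n", tlb_lock_and_recheck(&pte, &val));
        return 0;
}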
index 6548fd1..b99b39f 100644 (file)
 
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-DEFINE_SPINLOCK(pa_dbit_lock);
-#endif
-
 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
        struct pt_regs *regs);
 
index ccde8f0..112ccf4 100644 (file)
 
        .text
 
+/*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+
+core_idle_lock_held:
+       HMT_LOW
+3:     lwz     r15,0(r14)
+       andi.   r15,r15,PNV_CORE_IDLE_LOCK_BIT
+       bne     3b
+       HMT_MEDIUM
+       lwarx   r15,0,r14
+       blr
+
 /*
  * Pass requested state in r3:
  *     r3 - PNV_THREAD_NAP/SLEEP/WINKLE
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
        ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
 lwarx_loop1:
        lwarx   r15,0,r14
+
+       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+       bnel    core_idle_lock_held
+
        andc    r15,r15,r7                      /* Clear thread bit */
 
        andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
         * workaround undo code or resyncing timebase or restoring context
         * In either case loop until the lock bit is cleared.
         */
-       bne     core_idle_lock_held
+       bnel    core_idle_lock_held
 
        cmpwi   cr2,r15,0
        lbz     r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
        isync
        b       common_exit
 
-core_idle_lock_held:
-       HMT_LOW
-core_idle_lock_loop:
-       lwz     r15,0(14)
-       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
-       bne     core_idle_lock_loop
-       HMT_MEDIUM
-       b       lwarx_loop2
-
 first_thread_in_subcore:
        /* First thread in subcore to wakeup */
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
index d3a831a..da50e0c 100644 (file)
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index cfad7fc..d7697ab 100644 (file)
@@ -57,7 +57,10 @@ union ctlreg0 {
                unsigned long lap  : 1; /* Low-address-protection control */
                unsigned long      : 4;
                unsigned long edat : 1; /* Enhanced-DAT-enablement control */
-               unsigned long      : 23;
+               unsigned long      : 4;
+               unsigned long afp  : 1; /* AFP-register control */
+               unsigned long vx   : 1; /* Vector enablement control */
+               unsigned long      : 17;
        };
 };
 
index bff5e3b..8ba3243 100644 (file)
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
        union cache_topology ct;
        enum cache_type ctype;
 
+       if (!test_facility(34))
+               return -EOPNOTSUPP;
        if (!this_cpu_ci)
                return -EINVAL;
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
index 505c17c..56b5508 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/nmi.h>
 #include <asm/crw.h>
 #include <asm/switch_to.h>
+#include <asm/ctl_reg.h>
 
 struct mcck_struct {
        int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
        } else
                asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
 
-       asm volatile(
-               "       ld      0,0(%0)\n"
-               "       ld      1,8(%0)\n"
-               "       ld      2,16(%0)\n"
-               "       ld      3,24(%0)\n"
-               "       ld      4,32(%0)\n"
-               "       ld      5,40(%0)\n"
-               "       ld      6,48(%0)\n"
-               "       ld      7,56(%0)\n"
-               "       ld      8,64(%0)\n"
-               "       ld      9,72(%0)\n"
-               "       ld      10,80(%0)\n"
-               "       ld      11,88(%0)\n"
-               "       ld      12,96(%0)\n"
-               "       ld      13,104(%0)\n"
-               "       ld      14,112(%0)\n"
-               "       ld      15,120(%0)\n"
-               : : "a" (fpt_save_area));
-       /* Revalidate vector registers */
-       if (MACHINE_HAS_VX && current->thread.vxrs) {
+       if (!MACHINE_HAS_VX) {
+               /* Revalidate floating point registers */
+               asm volatile(
+                       "       ld      0,0(%0)\n"
+                       "       ld      1,8(%0)\n"
+                       "       ld      2,16(%0)\n"
+                       "       ld      3,24(%0)\n"
+                       "       ld      4,32(%0)\n"
+                       "       ld      5,40(%0)\n"
+                       "       ld      6,48(%0)\n"
+                       "       ld      7,56(%0)\n"
+                       "       ld      8,64(%0)\n"
+                       "       ld      9,72(%0)\n"
+                       "       ld      10,80(%0)\n"
+                       "       ld      11,88(%0)\n"
+                       "       ld      12,96(%0)\n"
+                       "       ld      13,104(%0)\n"
+                       "       ld      14,112(%0)\n"
+                       "       ld      15,120(%0)\n"
+                       : : "a" (fpt_save_area));
+       } else {
+               /* Revalidate vector registers */
+               union ctlreg0 cr0;
+
                if (!mci->vr) {
                        /*
                         * Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                         */
                        kill_task = 1;
                }
+               cr0.val = S390_lowcore.cregs_save_area[0];
+               cr0.afp = cr0.vx = 1;
+               __ctl_load(cr0.val, 0, 0);
                restore_vx_regs((__vector128 *)
-                               S390_lowcore.vector_save_area_addr);
+                               &S390_lowcore.vector_save_area);
+               __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
        }
        /* Revalidate access registers */
        asm volatile(
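The union ctlreg0 hunk above names the previously reserved AFP and vector-enablement bits so that this machine-check path can simply write cr0.afp = cr0.vx = 1 before restoring the vector registers. A minimal userspace sketch of that union-of-bitfields idiom follows; the bit positions are made up, since real layouts are ABI-specific (s390 allocates bitfields from the most significant bit):

#include <stdio.h>

/* Toy control register: the raw word and a few named bits share storage.
 * The widths below are illustrative only, not the real s390 layout. */
union toy_creg {
        unsigned int val;                 /* whole-register view */
        struct {
                unsigned int      : 23;   /* unrelated/reserved bits */
                unsigned int afp  : 1;    /* stand-in for "AFP-register control" */
                unsigned int vx   : 1;    /* stand-in for "vector enablement" */
                unsigned int      : 7;
        };
};

int main(void)
{
        union toy_creg cr0 = { .val = 0 };

        cr0.afp = cr0.vx = 1;                     /* set bits by name ...        */
        printf("raw register word: %#x\n", cr0.val); /* ... read back the raw word */
        return 0;
}

The point of the idiom is that single-bit writes by name and whole-word loads/stores share the same storage, which is what the cr0.val / __ctl_load() sequence in the hunk relies on.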
index dc5edc2..8f587d8 100644 (file)
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 asmlinkage void execve_tail(void)
 {
        current->thread.fp_regs.fpc = 0;
-       asm volatile("sfpc %0,%0" : : "d" (0));
+       asm volatile("sfpc %0" : : "d" (0));
 }
 
 /*
index 43c3169..ada0c07 100644 (file)
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
        jno     .Lesa2
        ahi     %r15,-80
        stmh    %r6,%r15,96(%r15)               # store upper register halves
+       basr    %r13,0
+       lmh     %r0,%r15,.Lzeroes-.(%r13)       # clear upper register halves
 .Lesa2:
        lr      %r10,%r2                        # save string pointer
        lhi     %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
 .Lesa3:
        lm      %r6,%r15,120(%r15)              # restore registers
        br      %r14
+.Lzeroes:
+       .fill   64,4,0
 
 .LwritedataS4:
        .long   0x00760005                      # SCLP command for write data
index 9afb9d6..dc2d7aa 100644 (file)
@@ -415,13 +415,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
                              BPF_REG_1, offsetof(struct sk_buff, data));
        }
-       /* BPF compatibility: clear A (%b7) and X (%b8) registers */
-       if (REG_SEEN(BPF_REG_7))
-               /* lghi %b7,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
-       if (REG_SEEN(BPF_REG_8))
-               /* lghi %b8,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
+       /* BPF compatibility: clear A (%b0) and X (%b7) registers */
+       if (REG_SEEN(BPF_REG_A))
+               /* lghi %ba,0 */
+               EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+       if (REG_SEEN(BPF_REG_X))
+               /* lghi %bx,0 */
+               EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }
 
 /*
index 1f0aa20..6424249 100644 (file)
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf                                   \
-       rd              %fprs, %o5;                     \
-       andcc           %o5, FPRS_FEF, %g0;             \
-       be,pt           %icc, 297f;                     \
-        sethi          %hi(298f), %g7;                 \
-       sethi           %hi(VISenterhalf), %g1;         \
-       jmpl            %g1 + %lo(VISenterhalf), %g0;   \
-        or             %g7, %lo(298f), %g7;            \
-       clr             %o5;                            \
-297:   wr              %o5, FPRS_FEF, %fprs;           \
-298:
+       VISEntry
+
+#define VISExitHalf                                    \
+       VISExit
 
 #define VISEntryHalfFast(fail_label)                   \
        rd              %fprs, %o5;                     \
@@ -47,7 +41,7 @@
        ba,a,pt         %xcc, fail_label;               \
 297:   wr              %o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf                                    \
+#define VISExitHalfFast                                        \
        wr              %o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
index 140527a..83aeeb1 100644 (file)
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
        add             %o0, 0x40, %o0
        bne,pt          %icc, 1b
         LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+       VISExitHalfFast
+#else
        VISExitHalf
-
+#endif
        brz,pn          %o2, .Lexit
         cmp            %o2, 19
        ble,pn          %icc, .Lsmall_unaligned
index b320ae9..a063d84 100644 (file)
@@ -44,9 +44,8 @@ vis1: ldub            [%g6 + TI_FPSAVED], %g3
 
         stx            %g3, [%g6 + TI_GSR]
 2:     add             %g6, %g1, %g3
-       cmp             %o5, FPRS_DU
-       be,pn           %icc, 6f
-        sll            %g1, 3, %g1
+       mov             FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+       sll             %g1, 3, %g1
        stb             %o5, [%g3 + TI_FPSAVED]
        rd              %gsr, %g2
        add             %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:        ldub            [%g6 + TI_FPSAVED], %g3
        .align          32
 80:    jmpl            %g7 + %g0, %g0
         nop
-
-6:     ldub            [%g3 + TI_FPSAVED], %o5
-       or              %o5, FPRS_DU, %o5
-       add             %g6, TI_FPREGS+0x80, %g2
-       stb             %o5, [%g3 + TI_FPSAVED]
-
-       sll             %g1, 5, %g1
-       add             %g6, TI_FPREGS+0xc0, %g3
-       wr              %g0, FPRS_FEF, %fprs
-       membar          #Sync
-       stda            %f32, [%g2 + %g1] ASI_BLK_P
-       stda            %f48, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 80f
-        nop
-
-       .align          32
-80:    jmpl            %g7 + %g0, %g0
-        nop
-
-       .align          32
-VISenterhalf:
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       brnz,a,pn       %g1, 1f
-        cmp            %g1, 1
-       stb             %g0, [%g6 + TI_FPSAVED]
-       stx             %fsr, [%g6 + TI_XFSR]
-       clr             %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %g0, FPRS_FEF, %fprs
-
-1:     bne,pn          %icc, 2f
-        srl            %g1, 1, %g1
-       ba,pt           %xcc, vis1
-        sub            %g7, 8, %g7
-2:     addcc           %g6, %g1, %g3
-       sll             %g1, 3, %g1
-       andn            %o5, FPRS_DU, %g2
-       stb             %g2, [%g3 + TI_FPSAVED]
-
-       rd              %gsr, %g2
-       add             %g6, %g1, %g3
-       stx             %g2, [%g3 + TI_GSR]
-       add             %g6, %g1, %g2
-       stx             %fsr, [%g2 + TI_XFSR]
-       sll             %g1, 5, %g1
-3:     andcc           %o5, FPRS_DL, %g0
-       be,pn           %icc, 4f
-        add            %g6, TI_FPREGS, %g2
-
-       add             %g6, TI_FPREGS+0x40, %g3
-       membar          #Sync
-       stda            %f0, [%g2 + %g1] ASI_BLK_P
-       stda            %f16, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 4f
-        nop
-
-       .align          32
-4:     and             %o5, FPRS_DU, %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %o5, FPRS_FEF, %fprs
index 1d649a9..8069ce1 100644 (file)
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
                unsigned long *);
index e8c2c04..c667e10 100644 (file)
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;
 
-       memset(to, 0, sizeof(*to));
-
        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);
index d366675..396b5c9 100644 (file)
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-       free_bootmem(__pa(begin), end - begin);
+       free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
index 48304b8..0cdc154 100644 (file)
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
                unsigned int e820_type = 0;
                unsigned long m = efi->efi_memmap;
 
+#ifdef CONFIG_X86_64
+               m |= (u64)efi->efi_memmap_hi << 32;
+#endif
+
                d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
                switch (d->type) {
                case EFI_RESERVED_TYPE:
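The setup_e820 hunk above ORs efi_memmap_hi, shifted left by 32, into the memmap pointer so 64-bit firmware addresses are not truncated. The cast in the patch matters: shifting a 32-bit value by 32 bits is undefined, so the high half must be widened first. A small self-contained sketch of the same pattern (the struct and field names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical firmware table that splits a 64-bit address into halves. */
struct fw_info {
        uint32_t addr_lo;
        uint32_t addr_hi;
};

int main(void)
{
        struct fw_info fw = { .addr_lo = 0xdeadb000u, .addr_hi = 0x1u };

        /* Widen before shifting; (fw.addr_hi << 32) alone would be undefined. */
        uint64_t addr = (uint64_t)fw.addr_hi << 32 | fw.addr_lo;

        printf("%#llx\n", (unsigned long long)addr);    /* 0x1deadb000 */
        return 0;
}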
index 8b22422..74a2a8d 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
 #ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
 void __init kasan_init(void);
 #else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
 #endif
 
index 883f6b9..e997f70 100644 (file)
@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
 
 static inline void load_mm_cr4(struct mm_struct *mm)
 {
-       if (static_key_true(&rdpmc_always_available) ||
+       if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
index 6fe6b18..9dfce4e 100644 (file)
@@ -57,9 +57,9 @@ struct sigcontext {
        unsigned long ip;
        unsigned long flags;
        unsigned short cs;
-       unsigned short __pad2;  /* Was called gs, but was always zero. */
-       unsigned short __pad1;  /* Was called fs, but was always zero. */
-       unsigned short ss;
+       unsigned short gs;
+       unsigned short fs;
+       unsigned short __pad0;
        unsigned long err;
        unsigned long trapno;
        unsigned long oldmask;
index 16dc4e8..d8b9f90 100644 (file)
@@ -177,24 +177,9 @@ struct sigcontext {
        __u64 rip;
        __u64 eflags;           /* RFLAGS */
        __u16 cs;
-
-       /*
-        * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-        * Linux saved and restored fs and gs in these slots.  This
-        * was counterproductive, as fsbase and gsbase were never
-        * saved, so arch_prctl was presumably unreliable.
-        *
-        * If these slots are ever needed for any other purpose, there
-        * is some risk that very old 64-bit binaries could get
-        * confused.  I doubt that many such binaries still work,
-        * though, since the same patch in 2.5.64 also removed the
-        * 64-bit set_thread_area syscall, so it appears that there is
-        * no TLS API that works in both pre- and post-2.5.64 kernels.
-        */
-       __u16 __pad2;           /* Was gs. */
-       __u16 __pad1;           /* Was fs. */
-
-       __u16 ss;
+       __u16 gs;
+       __u16 fs;
+       __u16 __pad0;
        __u64 err;
        __u64 trapno;
        __u64 oldmask;
index dcb5285..cde732c 100644 (file)
@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
 {
        u64 msr;
 
-       if (cpu_has_apic)
+       if (!cpu_has_apic)
                return;
 
        rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
 
 static __init void x2apic_disable(void)
 {
-       u32 x2apic_id;
+       u32 x2apic_id, state = x2apic_state;
 
-       if (x2apic_state != X2APIC_ON)
-               goto out;
+       x2apic_mode = 0;
+       x2apic_state = X2APIC_DISABLED;
+
+       if (state != X2APIC_ON)
+               return;
 
        x2apic_id = read_apic_id();
        if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
 
        __x2apic_disable();
        register_lapic_address(mp_lapic_addr);
-out:
-       x2apic_state = X2APIC_DISABLED;
-       x2apic_mode = 0;
 }
 
 static __init void x2apic_enable(void)
index e4d1b8b..cb77b11 100644 (file)
@@ -933,6 +933,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
        if (!cqm_group_leader(event))
                return 0;
 
+       /*
+        * Getting up-to-date values requires an SMP IPI which is not
+        * possible if we're being called in interrupt context. Return
+        * the cached values instead.
+        */
+       if (unlikely(in_interrupt()))
+               goto out;
+
        /*
         * Notice that we don't perform the reading of an RMID
         * atomically, because we can't hold a spin lock across the
index 464ffd6..00db1aa 100644 (file)
@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
 {
-       const unsigned cpu = get_cpu();
+       const unsigned cpu = get_cpu_light();
        int graph = 0;
        u32 *prev_esp;
 
@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        break;
                touch_nmi_watchdog();
        }
-       put_cpu();
+       put_cpu_light();
 }
 EXPORT_SYMBOL(dump_trace);
 
index 5f1c626..c331e3f 100644 (file)
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
 {
-       const unsigned cpu = get_cpu();
+       const unsigned cpu = get_cpu_light();
        struct thread_info *tinfo;
        unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned long dummy;
@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * This handles the process stack:
         */
        bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
-       put_cpu();
+       put_cpu_light();
 }
 EXPORT_SYMBOL(dump_trace);
 
@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        int cpu;
        int i;
 
-       preempt_disable();
+       migrate_disable();
        cpu = smp_processor_id();
 
        irq_stack_end   = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        pr_cont(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
-       preempt_enable();
+       migrate_enable();
 
        pr_cont("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
index 9bf9070..8a3445b 100644 (file)
@@ -809,8 +809,6 @@ do_preempt_schedule_irq:
 restore_c_regs_and_iret:
        RESTORE_C_REGS
        REMOVE_PT_GPREGS_FROM_STACK 8
-
-irq_return:
        INTERRUPT_RETURN
 
 ENTRY(native_iret)
@@ -1431,11 +1429,12 @@ ENTRY(nmi)
         *  If the variable is not set and the stack is not the NMI
         *  stack then:
         *    o Set the special variable on the stack
-        *    o Copy the interrupt frame into a "saved" location on the stack
-        *    o Copy the interrupt frame into a "copy" location on the stack
+        *    o Copy the interrupt frame into an "outermost" location on the
+        *      stack
+        *    o Copy the interrupt frame into an "iret" location on the stack
         *    o Continue processing the NMI
         *  If the variable is set or the previous stack is the NMI stack:
-        *    o Modify the "copy" location to jump to the repeate_nmi
+        *    o Modify the "iret" location to jump to the repeat_nmi
         *    o return back to the first NMI
         *
         * Now on exit of the first NMI, we first clear the stack variable
@@ -1444,32 +1443,151 @@ ENTRY(nmi)
         * a nested NMI that updated the copy interrupt stack frame, a
         * jump will be made to the repeat_nmi code that will handle the second
         * NMI.
+        *
+        * However, espfix prevents us from directly returning to userspace
+        * with a single IRET instruction.  Similarly, IRET to user mode
+        * can fault.  We therefore handle NMIs from user space like
+        * other IST entries.
         */
 
        /* Use %rdx as our temp variable throughout */
        pushq_cfi %rdx
        CFI_REL_OFFSET rdx, 0
 
+       testb   $3, CS-RIP+8(%rsp)
+       jz      .Lnmi_from_kernel
+
        /*
-        * If %cs was not the kernel segment, then the NMI triggered in user
-        * space, which means it is definitely not nested.
+        * NMI from user mode.  We need to run on the thread stack, but we
+        * can't go through the normal entry paths: NMIs are masked, and
+        * we don't want to enable interrupts, because then we'll end
+        * up in an awkward situation in which IRQs are on but NMIs
+        * are off.
         */
-       cmpl $__KERNEL_CS, 16(%rsp)
-       jne first_nmi
+
+       SWAPGS
+       cld
+       movq    %rsp, %rdx
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+       pushq   5*8(%rdx)       /* pt_regs->ss */
+       pushq   4*8(%rdx)       /* pt_regs->rsp */
+       pushq   3*8(%rdx)       /* pt_regs->flags */
+       pushq   2*8(%rdx)       /* pt_regs->cs */
+       pushq   1*8(%rdx)       /* pt_regs->rip */
+       pushq   $-1             /* pt_regs->orig_ax */
+       pushq   %rdi            /* pt_regs->di */
+       pushq   %rsi            /* pt_regs->si */
+       pushq   (%rdx)          /* pt_regs->dx */
+       pushq   %rcx            /* pt_regs->cx */
+       pushq   %rax            /* pt_regs->ax */
+       pushq   %r8             /* pt_regs->r8 */
+       pushq   %r9             /* pt_regs->r9 */
+       pushq   %r10            /* pt_regs->r10 */
+       pushq   %r11            /* pt_regs->r11 */
+       pushq   %rbx            /* pt_regs->rbx */
+       pushq   %rbp            /* pt_regs->rbp */
+       pushq   %r12            /* pt_regs->r12 */
+       pushq   %r13            /* pt_regs->r13 */
+       pushq   %r14            /* pt_regs->r14 */
+       pushq   %r15            /* pt_regs->r15 */
 
        /*
-        * Check the special variable on the stack to see if NMIs are
-        * executing.
+        * At this point we no longer need to worry about stack damage
+        * due to nesting -- we're on the normal thread stack and we're
+        * done with the NMI stack.
+        */
+       movq    %rsp, %rdi
+       movq    $-1, %rsi
+       call    do_nmi
+
+       /*
+        * Return back to user mode.  We must *not* do the normal exit
+        * work, because we don't want to enable interrupts.  Fortunately,
+        * do_nmi doesn't modify pt_regs.
+        */
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+
+.Lnmi_from_kernel:
+       /*
+        * Here's what our stack frame will look like:
+        * +---------------------------------------------------------+
+        * | original SS                                             |
+        * | original Return RSP                                     |
+        * | original RFLAGS                                         |
+        * | original CS                                             |
+        * | original RIP                                            |
+        * +---------------------------------------------------------+
+        * | temp storage for rdx                                    |
+        * +---------------------------------------------------------+
+        * | "NMI executing" variable                                |
+        * +---------------------------------------------------------+
+        * | iret SS          } Copied from "outermost" frame        |
+        * | iret Return RSP  } on each loop iteration; overwritten  |
+        * | iret RFLAGS      } by a nested NMI to force another     |
+        * | iret CS          } iteration if needed.                 |
+        * | iret RIP         }                                      |
+        * +---------------------------------------------------------+
+        * | outermost SS          } initialized in first_nmi;       |
+        * | outermost Return RSP  } will not be changed before      |
+        * | outermost RFLAGS      } NMI processing is done.         |
+        * | outermost CS          } Copied to "iret" frame on each  |
+        * | outermost RIP         } iteration.                      |
+        * +---------------------------------------------------------+
+        * | pt_regs                                                 |
+        * +---------------------------------------------------------+
+        *
+        * The "original" frame is used by hardware.  Before re-enabling
+        * NMIs, we need to be done with it, and we need to leave enough
+        * space for the asm code here.
+        *
+        * We return by executing IRET while RSP points to the "iret" frame.
+        * That will either return for real or it will loop back into NMI
+        * processing.
+        *
+        * The "outermost" frame is copied to the "iret" frame on each
+        * iteration of the loop, so each iteration starts with the "iret"
+        * frame pointing to the final return target.
+        */
+
+       /*
+        * Determine whether we're a nested NMI.
+        *
+        * If we interrupted kernel code between repeat_nmi and
+        * end_repeat_nmi, then we are a nested NMI.  We must not
+        * modify the "iret" frame because it's being written by
+        * the outer NMI.  That's okay; the outer NMI handler is
+        * about to call do_nmi anyway, so we can just
+        * resume the outer NMI.
+        */
+
+       movq    $repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      1f
+       movq    $end_repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      nested_nmi_out
+1:
+
+       /*
+        * Now check "NMI executing".  If it's set, then we're nested.
+        * This will not detect if we interrupted an outer NMI just
+        * before IRET.
         */
        cmpl $1, -8(%rsp)
        je nested_nmi
 
        /*
-        * Now test if the previous stack was an NMI stack.
-        * We need the double check. We check the NMI stack to satisfy the
-        * race when the first NMI clears the variable before returning.
-        * We check the variable because the first NMI could be in a
-        * breakpoint routine using a breakpoint stack.
+        * Now test if the previous stack was an NMI stack.  This covers
+        * the case where we interrupt an outer NMI after it clears
+        * "NMI executing" but before IRET.  We need to be careful, though:
+        * there is one case in which RSP could point to the NMI stack
+        * despite there being no NMI active: naughty userspace controls
+        * RSP at the very beginning of the SYSCALL targets.  We can
+        * pull a fast one on naughty userspace, though: we program
+        * SYSCALL to mask DF, so userspace cannot cause DF to be set
+        * if it controls the kernel's RSP.  We set DF before we clear
+        * "NMI executing".
         */
        lea     6*8(%rsp), %rdx
        /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1480,25 +1598,21 @@ ENTRY(nmi)
        cmpq    %rdx, 4*8(%rsp)
        /* If it is below the NMI stack, it is a normal NMI */
        jb      first_nmi
-       /* Ah, it is within the NMI stack, treat it as nested */
+
+       /* Ah, it is within the NMI stack. */
+
+       testb   $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
+       jz      first_nmi       /* RSP was user controlled. */
+
+       /* This is a nested NMI. */
 
        CFI_REMEMBER_STATE
 
 nested_nmi:
        /*
-        * Do nothing if we interrupted the fixup in repeat_nmi.
-        * It's about to repeat the NMI handler, so we are fine
-        * with ignoring this one.
+        * Modify the "iret" frame to point to repeat_nmi, forcing another
+        * iteration of NMI handling.
         */
-       movq $repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja 1f
-       movq $end_repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja nested_nmi_out
-
-1:
-       /* Set up the interrupted NMIs stack to jump to repeat_nmi */
        leaq -1*8(%rsp), %rdx
        movq %rdx, %rsp
        CFI_ADJUST_CFA_OFFSET 1*8
@@ -1517,60 +1631,23 @@ nested_nmi_out:
        popq_cfi %rdx
        CFI_RESTORE rdx
 
-       /* No need to check faults here */
+       /* We are returning to kernel mode, so this cannot result in a fault. */
        INTERRUPT_RETURN
 
        CFI_RESTORE_STATE
 first_nmi:
-       /*
-        * Because nested NMIs will use the pushed location that we
-        * stored in rdx, we must keep that space available.
-        * Here's what our stack frame will look like:
-        * +-------------------------+
-        * | original SS             |
-        * | original Return RSP     |
-        * | original RFLAGS         |
-        * | original CS             |
-        * | original RIP            |
-        * +-------------------------+
-        * | temp storage for rdx    |
-        * +-------------------------+
-        * | NMI executing variable  |
-        * +-------------------------+
-        * | copied SS               |
-        * | copied Return RSP       |
-        * | copied RFLAGS           |
-        * | copied CS               |
-        * | copied RIP              |
-        * +-------------------------+
-        * | Saved SS                |
-        * | Saved Return RSP        |
-        * | Saved RFLAGS            |
-        * | Saved CS                |
-        * | Saved RIP               |
-        * +-------------------------+
-        * | pt_regs                 |
-        * +-------------------------+
-        *
-        * The saved stack frame is used to fix up the copied stack frame
-        * that a nested NMI may change to make the interrupted NMI iret jump
-        * to the repeat_nmi. The original stack frame and the temp storage
-        * is also used by nested NMIs and can not be trusted on exit.
-        */
-       /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+       /* Restore rdx. */
        movq (%rsp), %rdx
        CFI_RESTORE rdx
 
-       /* Set the NMI executing variable on the stack. */
+       /* Set "NMI executing" on the stack. */
        pushq_cfi $1
 
-       /*
-        * Leave room for the "copied" frame
-        */
+       /* Leave room for the "iret" frame */
        subq $(5*8), %rsp
        CFI_ADJUST_CFA_OFFSET 5*8
 
-       /* Copy the stack frame to the Saved frame */
+       /* Copy the "original" frame to the "outermost" frame */
        .rept 5
        pushq_cfi 11*8(%rsp)
        .endr
@@ -1578,6 +1655,7 @@ first_nmi:
 
        /* Everything up to here is safe from nested NMIs */
 
+repeat_nmi:
        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
@@ -1586,16 +1664,21 @@ first_nmi:
         * it will just return, as we are about to repeat an NMI anyway.
         * This makes it safe to copy to the stack frame that a nested
         * NMI will update.
-        */
-repeat_nmi:
-       /*
-        * Update the stack variable to say we are still in NMI (the update
-        * is benign for the non-repeat case, where 1 was pushed just above
-        * to this very stack slot).
+        *
+        * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
+        * we're repeating an NMI, gsbase has the same value that it had on
+        * the first iteration.  paranoid_entry will load the kernel
+        * gsbase if needed before we call do_nmi.
+        *
+        * Set "NMI executing" in case we came back here via IRET.
         */
        movq $1, 10*8(%rsp)
 
-       /* Make another copy, this one may be modified by nested NMIs */
+       /*
+        * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
+        * here must not modify the "iret" frame while we're writing to
+        * it or it will end up containing garbage.
+        */
        addq $(10*8), %rsp
        CFI_ADJUST_CFA_OFFSET -10*8
        .rept 5
@@ -1606,9 +1689,9 @@ repeat_nmi:
 end_repeat_nmi:
 
        /*
-        * Everything below this point can be preempted by a nested
-        * NMI if the first NMI took an exception and reset our iret stack
-        * so that we repeat another NMI.
+        * Everything below this point can be preempted by a nested NMI.
+        * If this happens, then the inner NMI will change the "iret"
+        * frame to point back to repeat_nmi.
         */
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        ALLOC_PT_GPREGS_ON_STACK
@@ -1623,29 +1706,11 @@ end_repeat_nmi:
        call paranoid_entry
        DEFAULT_FRAME 0
 
-       /*
-        * Save off the CR2 register. If we take a page fault in the NMI then
-        * it could corrupt the CR2 value. If the NMI preempts a page fault
-        * handler before it was able to read the CR2 register, and then the
-        * NMI itself takes a page fault, the page fault that was preempted
-        * will read the information from the NMI page fault and not the
-        * origin fault. Save it off and restore it if it changes.
-        * Use the r12 callee-saved register.
-        */
-       movq %cr2, %r12
-
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi
 
-       /* Did the NMI take a page fault? Restore cr2 if it did */
-       movq %cr2, %rcx
-       cmpq %rcx, %r12
-       je 1f
-       movq %r12, %cr2
-1:
-       
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz nmi_restore
 nmi_swapgs:
@@ -1653,12 +1718,27 @@ nmi_swapgs:
 nmi_restore:
        RESTORE_EXTRA_REGS
        RESTORE_C_REGS
-       /* Pop the extra iret frame at once */
+
+       /* Point RSP at the "iret" frame. */
        REMOVE_PT_GPREGS_FROM_STACK 6*8
 
-       /* Clear the NMI executing stack variable */
-       movq $0, 5*8(%rsp)
-       jmp irq_return
+       /*
+        * Clear "NMI executing".  Set DF first so that we can easily
+        * distinguish the remaining code between here and IRET from
+        * the SYSCALL entry and exit paths.  On a native kernel, we
+        * could just inspect RIP, but, on paravirt kernels,
+        * INTERRUPT_RETURN can translate into a jump into a
+        * hypercall page.
+        */
+       std
+       movq    $0, 5*8(%rsp)           /* clear "NMI executing" */
+
+       /*
+        * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
+        * stack in a single instruction.  We are returning to kernel
+        * mode, so this cannot result in a fault.
+        */
+       INTERRUPT_RETURN
        CFI_ENDPROC
 END(nmi)
 
index 5a46681..f129a9a 100644 (file)
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        /* Kill off the identity-map trampoline */
        reset_early_page_tables();
 
-       kasan_map_early_shadow(early_level4_pgt);
-
-       /* clear bss before set_intr_gate with early_idt_handler */
        clear_bss();
 
+       clear_page(init_level4_pgt);
+
+       kasan_early_init();
+
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
                set_intr_gate(i, early_idt_handler_array[i]);
        load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
         */
        load_ucode_bsp();
 
-       clear_page(init_level4_pgt);
        /* set init_level4_pgt kernel high mapping*/
        init_level4_pgt[511] = early_level4_pgt[511];
 
-       kasan_map_early_shadow(init_level4_pgt);
-
        x86_64_start_reservations(real_mode_data);
 }
 
index df7e780..7e5da2c 100644 (file)
@@ -516,38 +516,9 @@ ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
        .quad   0x0000000000000000
 
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT)                               \
-       .rept (COUNT) ;                                 \
-       .quad   (VAL) ;                                 \
-       .endr
-
-NEXT_PAGE(kasan_zero_pte)
-       FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
-       FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
-       FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
 #include "../../x86/xen/xen-head.S"
        
        __PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
        .skip PAGE_SIZE
 
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
-       .skip PAGE_SIZE
-#endif
index c3e985d..d05bd2e 100644 (file)
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
 NOKPROBE_SYMBOL(default_do_nmi);
 
 /*
- * NMIs can hit breakpoints which will cause it to lose its
- * NMI context with the CPU when the breakpoint does an iret.
- */
-#ifdef CONFIG_X86_32
-/*
- * For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C (preventing nested
- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
- * can be in:
+ * NMIs can page fault or hit breakpoints which will cause them to lose
+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+ *
+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
+ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
+ * if the outer NMI came from kernel mode, but we can still nest if the
+ * outer NMI came from user mode.
+ *
+ * To handle these nested NMIs, we have three states:
  *
  *  1) not running
  *  2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
  * (Note, the latch is binary, thus multiple NMIs triggering,
  *  when one is running, are ignored. Only one NMI is restarted.)
  *
- * If an NMI hits a breakpoint that executes an iret, another
- * NMI can preempt it. We do not want to allow this new NMI
- * to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the exit of the first NMI will
- * perform a dec_return, if the result is zero (NOT_RUNNING), then
- * it will simply exit the NMI handler. If not, the dec_return
- * would have set the state to NMI_EXECUTING (what we want it to
- * be when we are running). In this case, we simply jump back
- * to rerun the NMI handler again, and restart the 'latched' NMI.
+ * If an NMI executes an iret, another NMI can preempt it. We do not
+ * want to allow this new NMI to run, but we want to execute it when the
+ * first one finishes.  We set the state to "latched", and the exit of
+ * the first NMI will perform a dec_return, if the result is zero
+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
+ * dec_return would have set the state to NMI_EXECUTING (what we want it
+ * to be when we are running). In this case, we simply jump back to
+ * rerun the NMI handler again, and restart the 'latched' NMI.
  *
  * No trap (breakpoint or page fault) should be hit before nmi_restart,
  * thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
 
-#define nmi_nesting_preprocess(regs)                                   \
-       do {                                                            \
-               if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {      \
-                       this_cpu_write(nmi_state, NMI_LATCHED);         \
-                       return;                                         \
-               }                                                       \
-               this_cpu_write(nmi_state, NMI_EXECUTING);               \
-               this_cpu_write(nmi_cr2, read_cr2());                    \
-       } while (0);                                                    \
-       nmi_restart:
-
-#define nmi_nesting_postprocess()                                      \
-       do {                                                            \
-               if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))     \
-                       write_cr2(this_cpu_read(nmi_cr2));              \
-               if (this_cpu_dec_return(nmi_state))                     \
-                       goto nmi_restart;                               \
-       } while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
 /*
- * In x86_64 things are a bit more difficult. This has the same problem
- * where an NMI hitting a breakpoint that calls iret will remove the
- * NMI context, allowing a nested NMI to enter. What makes this more
- * difficult is that both NMIs and breakpoints have their own stack.
- * When a new NMI or breakpoint is executed, the stack is set to a fixed
- * point. If an NMI is nested, it will have its stack set at that same
- * fixed address that the first NMI had, and will start corrupting the
- * stack. This is handled in entry_64.S, but the same problem exists with
- * the breakpoint stack.
+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
+ * some care, the inner breakpoint will clobber the outer breakpoint's
+ * stack.
  *
- * If a breakpoint is being processed, and the debug stack is being used,
- * if an NMI comes in and also hits a breakpoint, the stack pointer
- * will be set to the same fixed address as the breakpoint that was
- * interrupted, causing that stack to be corrupted. To handle this case,
- * check if the stack that was interrupted is the debug stack, and if
- * so, change the IDT so that new breakpoints will use the current stack
- * and not switch to the fixed address. On return of the NMI, switch back
- * to the original IDT.
+ * If a breakpoint is being processed, and the debug stack is being
+ * used, if an NMI comes in and also hits a breakpoint, the stack
+ * pointer will be set to the same fixed address as the breakpoint that
+ * was interrupted, causing that stack to be corrupted. To handle this
+ * case, check if the stack that was interrupted is the debug stack, and
+ * if so, change the IDT so that new breakpoints will use the current
+ * stack and not switch to the fixed address. On return of the NMI,
+ * switch back to the original IDT.
  */
 static DEFINE_PER_CPU(int, update_debug_stack);
+#endif
 
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
 {
+       if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+               this_cpu_write(nmi_state, NMI_LATCHED);
+               return;
+       }
+       this_cpu_write(nmi_state, NMI_EXECUTING);
+       this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+
+#ifdef CONFIG_X86_64
        /*
         * If we interrupted a breakpoint, it is possible that
         * the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
                debug_stack_set_zero();
                this_cpu_write(update_debug_stack, 1);
        }
-}
-
-static inline void nmi_nesting_postprocess(void)
-{
-       if (unlikely(this_cpu_read(update_debug_stack))) {
-               debug_stack_reset();
-               this_cpu_write(update_debug_stack, 0);
-       }
-}
 #endif
 
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-       nmi_nesting_preprocess(regs);
-
        nmi_enter();
 
        inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
 
        nmi_exit();
 
-       /* On i386, may loop back to preprocess */
-       nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+       if (unlikely(this_cpu_read(update_debug_stack))) {
+               debug_stack_reset();
+               this_cpu_write(update_debug_stack, 0);
+       }
+#endif
+
+       if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+               write_cr2(this_cpu_read(nmi_cr2));
+       if (this_cpu_dec_return(nmi_state))
+               goto nmi_restart;
 }
 NOKPROBE_SYMBOL(do_nmi);
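The reworked do_nmi above replaces the old preprocess/postprocess macros with an explicit three-state latch (NOT_RUNNING, EXECUTING, LATCHED), where the final decrement either finishes or reruns the handler once. Below is a minimal single-threaded userspace sketch of just that latch logic; the real code keeps the state in per-CPU variables and also saves/restores CR2, which this toy ignores:

#include <stdio.h>

/* 0 = not running, 1 = executing, 2 = latched (at most one pending rerun). */
static int nmi_state;
static int injected;

static void handle_event(void);

static void do_work(void)
{
        printf("handling event (state=%d)\n", nmi_state);
        if (!injected) {
                injected = 1;
                handle_event();   /* a "nested" event only flips 1 -> 2 */
        }
}

static void handle_event(void)
{
        if (nmi_state) {          /* already executing: remember one more */
                nmi_state = 2;
                return;
        }
        nmi_state = 1;
restart:
        do_work();
        if (--nmi_state)          /* 2 -> 1: run the latched event again */
                goto restart;     /* 1 -> 0: done */
}

int main(void)
{
        handle_event();
        return 0;
}

Running it prints the handler body twice, once for the original event and once for the latched rerun, mirroring the goto nmi_restart path above.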
 
index 6e338e3..9717437 100644 (file)
@@ -453,6 +453,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
        if (!current_set_polling_and_test()) {
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        smp_mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
@@ -464,6 +465,7 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
index 74c44c4..12c28f7 100644 (file)
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                COPY(r15);
 #endif /* CONFIG_X86_64 */
 
+#ifdef CONFIG_X86_32
                COPY_SEG_CPL3(cs);
                COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
                get_user_ex(tmpflags, &sc->flags);
                regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
                put_user_ex(regs->flags, &sc->flags);
                put_user_ex(regs->cs, &sc->cs);
-               put_user_ex(0, &sc->__pad2);
-               put_user_ex(0, &sc->__pad1);
-               put_user_ex(regs->ss, &sc->ss);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
                put_user_ex(fpstate, &sc->fpstate);
@@ -450,19 +456,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
        regs->sp = (unsigned long)frame;
 
-       /*
-        * Set up the CS and SS registers to run signal handlers in
-        * 64-bit mode, even if the handler happens to be interrupting
-        * 32-bit or 16-bit code.
-        *
-        * SS is subtle.  In 64-bit mode, we don't need any particular
-        * SS descriptor, but we do need SS to be valid.  It's possible
-        * that the old SS is entirely bogus -- this can happen if the
-        * signal we're trying to deliver is #GP or #SS caused by a bad
-        * SS value.
-        */
+       /* Set up the CS register to run signal handlers in 64-bit mode,
+          even if the handler happens to be interrupting 32-bit code. */
        regs->cs = __USER_CS;
-       regs->ss = __USER_DS;
 
        return 0;
 }
index 9d28383..c4ea87e 100644 (file)
@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.apic->pending_events;
+       return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
 }
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
index 4860906..9a54dbe 100644 (file)
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages because stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed, but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 static int __init map_range(struct range *range)
 {
@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
                pgd_clear(pgd_offset_k(start));
 }
 
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
        int i;
        unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
        while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
                WARN_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PMD_SIZE;
                pmd = pmd_offset(pud, addr);
        }
@@ -99,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
        while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
                WARN_ON(!pud_none(*pud));
                set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PUD_SIZE;
                pud = pud_offset(pgd, addr);
        }
@@ -124,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
        while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
                WARN_ON(!pgd_none(*pgd));
                set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-                                       | __PAGE_KERNEL_RO));
+                                       | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
                pgd = pgd_offset_k(addr);
        }
@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
 };
 #endif
 
+void __init kasan_early_init(void)
+{
+       int i;
+       pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+       pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+       pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               kasan_zero_pte[i] = __pte(pte_val);
+
+       for (i = 0; i < PTRS_PER_PMD; i++)
+               kasan_zero_pmd[i] = __pmd(pmd_val);
+
+       for (i = 0; i < PTRS_PER_PUD; i++)
+               kasan_zero_pud[i] = __pud(pud_val);
+
+       kasan_map_early_shadow(early_level4_pgt);
+       kasan_map_early_shadow(init_level4_pgt);
+}
+
 void __init kasan_init(void)
 {
        int i;
@@ -176,6 +208,7 @@ void __init kasan_init(void)
 
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
+       __flush_tlb_all();
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -202,5 +235,6 @@ void __init kasan_init(void)
        memset(kasan_zero_page, 0, PAGE_SIZE);
 
        load_cr3(init_level4_pgt);
+       __flush_tlb_all();
        init_task.kasan_depth = 0;
 }
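kasan_early_init() above fills each statically allocated table with a single repeated entry so that the whole early shadow range resolves to one shared zero page. A userspace sketch of that aliasing idea, using an array of pointers as a stand-in for a page table:

#include <assert.h>
#include <stddef.h>

#define ENTRIES 512
#define PAGE    4096

static unsigned char zero_page[PAGE];   /* one shared backing page */
static unsigned char *table[ENTRIES];   /* stand-in for a page table */

int main(void)
{
        size_t i;

        for (i = 0; i < ENTRIES; i++)   /* every entry aliases the same page */
                table[i] = zero_page;

        assert(table[0] == table[ENTRIES - 1]);
        assert(table[7][123] == 0);     /* all "mappings" read back as zero */
        return 0;
}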
index 9d518d6..844b06d 100644 (file)
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_MPX)
+               return "[mpx]";
+       return NULL;
+}
index c439ec4..4d1c11c 100644 (file)
 #include <asm/processor.h>
 #include <asm/fpu-internal.h>
 
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
-       return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
-       .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
-       return (vma->vm_ops == &mpx_vma_ops);
-}
-
 /*
  * This is really a simplified "vm_mmap". it only handles MPX
  * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
  */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -83,7 +66,6 @@ static unsigned long mpx_mmap(unsigned long len)
                ret = -ENOMEM;
                goto out;
        }
-       vma->vm_ops = &mpx_vma_ops;
 
        if (vm_flags & VM_LOCKED) {
                up_write(&mm->mmap_sem);
@@ -661,7 +643,7 @@ static int zap_bt_entries(struct mm_struct *mm,
                 * so stop immediately and return an error.  This
                 * probably results in a SIGSEGV.
                 */
-               if (!is_mpx_vma(vma))
+               if (!(vma->vm_flags & VM_MPX))
                        return -EINVAL;
 
                len = min(vma->vm_end, end) - addr;
index 3250f23..90b924a 100644 (file)
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
-                               f->flush_end - f->flush_start / PAGE_SIZE;
+                               (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
index 02744df..841ea05 100644 (file)
@@ -946,6 +946,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 
 static int __init arch_parse_efi_cmdline(char *str)
 {
+       if (!str) {
+               pr_warn("need at least one option\n");
+               return -EINVAL;
+       }
+
        if (parse_option_str(str, "old_map"))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);
        if (parse_option_str(str, "debug"))
index e88fda8..4841453 100644 (file)
@@ -8,7 +8,7 @@ config XEN
        select PARAVIRT_CLOCK
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
-       depends on X86_TSC
+       depends on X86_LOCAL_APIC && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
        def_bool y
        depends on XEN && PCI_XEN && SWIOTLB_XEN
-       depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+       depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
        def_bool y
index 7322755..4b6e29a 100644 (file)
@@ -13,13 +13,13 @@ CFLAGS_mmu.o                        := $(nostackp)
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
                        time.o xen-asm.o xen-asm_$(BITS).o \
                        grant-table.o suspend.o platform-pci-unplug.o \
-                       p2m.o
+                       p2m.o apic.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
-obj-$(CONFIG_XEN_DOM0)         += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)         += vga.o
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)          += efi.o
index 46957ea..a671e83 100644 (file)
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
        pte_t pte;
        unsigned long pfn;
        struct page *page;
+       unsigned char dummy;
 
        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
        pte = pfn_pte(pfn, prot);
 
+       /*
+        * Careful: update_va_mapping() will fail if the virtual address
+        * we're poking isn't populated in the page tables.  We don't
+        * need to worry about the direct map (that's always in the page
+        * tables), but we need to be careful about vmap space.  In
+        * particular, the top level page table can lazily propagate
+        * entries between processes, so if we've switched mms since we
+        * vmapped the target in the first place, we might not have the
+        * top-level page table entry populated.
+        *
+        * We disable preemption because we want the same mm active when
+        * we probe the target and when we issue the hypercall.  We'll
+        * have the same nominal mm, but if we're a kernel thread, lazy
+        * mm dropping could change our pgd.
+        *
+        * Out of an abundance of caution, this uses __get_user() to fault
+        * in the target address just in case there's some obscure case
+        * in which the target address isn't readable.
+        */
+
+       preempt_disable();
+
+       pagefault_disable();    /* Avoid warnings due to being atomic. */
+       __get_user(dummy, (unsigned char __user __force *)v);
+       pagefault_enable();
+
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
 
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
                                BUG();
        } else
                kmap_flush_unused();
+
+       preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;
 
+       /*
+        * We need to mark all the aliases of the LDT pages RO.  We
+        * don't need to call vm_flush_aliases(), though, since that's
+        * only responsible for flushing aliases out the TLBs, not the
+        * page tables, and Xen will flush the TLB for us if needed.
+        *
+        * To avoid confusing future readers: none of this is necessary
+        * to load the LDT.  The hypervisor only checks this when the
+        * LDT is faulted in due to subsequent descriptor access.
+        */
+
        for(i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
index 9e195c6..bef30cb 100644 (file)
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
 
 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
                                       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif
 
+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
index 5cbd5d9..39ce74d 100644 (file)
@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
        unsigned long idx = BIO_POOL_NONE;
        unsigned inline_vecs;
 
-       if (!bs) {
+       if (!bs || !bs->bio_integrity_pool) {
                bip = kmalloc(sizeof(struct bio_integrity_payload) +
                              sizeof(struct bio_vec) * nr_vecs, gfp_mask);
                inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
                kfree(page_address(bip->bip_vec->bv_page) +
                      bip->bip_vec->bv_offset);
 
-       if (bs) {
+       if (bs && bs->bio_integrity_pool) {
                if (bip->bip_slab != BIO_POOL_NONE)
                        bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
                                  bip->bip_slab);
index f66a4ea..4441522 100644 (file)
@@ -1814,8 +1814,9 @@ EXPORT_SYMBOL(bio_endio_nodec);
  * Allocates and returns a new bio which represents @sectors from the start of
  * @bio, and updates @bio to represent the remaining sectors.
  *
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
  */
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
@@ -1825,7 +1826,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
 
-       split = bio_clone_fast(bio, gfp, bs);
+       /*
+        * Discards need a mutable bio_vec to accommodate the payload
+        * required by the DSM TRIM and UNMAP commands.
+        */
+       if (bio->bi_rw & REQ_DISCARD)
+               split = bio_clone_bioset(bio, gfp, bs);
+       else
+               split = bio_clone_fast(bio, gfp, bs);
+
        if (!split)
                return NULL;
 
index 0ac817b..6817e28 100644 (file)
@@ -716,8 +716,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                return -EINVAL;
 
        disk = get_gendisk(MKDEV(major, minor), &part);
-       if (!disk || part)
+       if (!disk)
                return -EINVAL;
+       if (part) {
+               put_disk(disk);
+               return -EINVAL;
+       }
 
        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);
index 1f469b3..c473bd1 100644 (file)
@@ -1988,7 +1988,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                goto err_hctxs;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-       blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
+       blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
        q->nr_hw_queues = set->nr_hw_queues;
index 12600bf..e0057d0 100644 (file)
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
index f973308..3f5b537 100644 (file)
@@ -11,6 +11,9 @@
 
 extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
 
+extern int __asymmetric_key_hex_to_key_id(const char *id,
+                                         struct asymmetric_key_id *match_id,
+                                         size_t hexlen);
 static inline
 const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
 {
index bcbbbd7..b0e4ed2 100644 (file)
@@ -104,6 +104,15 @@ static bool asymmetric_match_key_ids(
        return false;
 }
 
+/* helper function can be called directly with pre-allocated memory */
+inline int __asymmetric_key_hex_to_key_id(const char *id,
+                                  struct asymmetric_key_id *match_id,
+                                  size_t hexlen)
+{
+       match_id->len = hexlen;
+       return hex2bin(match_id->data, id, hexlen);
+}
+
 /**
  * asymmetric_key_hex_to_key_id - Convert a hex string into a key ID.
  * @id: The ID as a hex string.
@@ -111,21 +120,20 @@ static bool asymmetric_match_key_ids(
 struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id)
 {
        struct asymmetric_key_id *match_id;
-       size_t hexlen;
+       size_t asciihexlen;
        int ret;
 
        if (!*id)
                return ERR_PTR(-EINVAL);
-       hexlen = strlen(id);
-       if (hexlen & 1)
+       asciihexlen = strlen(id);
+       if (asciihexlen & 1)
                return ERR_PTR(-EINVAL);
 
-       match_id = kmalloc(sizeof(struct asymmetric_key_id) + hexlen / 2,
+       match_id = kmalloc(sizeof(struct asymmetric_key_id) + asciihexlen / 2,
                           GFP_KERNEL);
        if (!match_id)
                return ERR_PTR(-ENOMEM);
-       match_id->len = hexlen / 2;
-       ret = hex2bin(match_id->data, id, hexlen / 2);
+       ret = __asymmetric_key_hex_to_key_id(id, match_id, asciihexlen / 2);
        if (ret < 0) {
                kfree(match_id);
                return ERR_PTR(-EINVAL);
index a6c4203..24f17e6 100644 (file)
@@ -28,17 +28,30 @@ static bool use_builtin_keys;
 static struct asymmetric_key_id *ca_keyid;
 
 #ifndef MODULE
+static struct {
+       struct asymmetric_key_id id;
+       unsigned char data[10];
+} cakey;
+
 static int __init ca_keys_setup(char *str)
 {
        if (!str)               /* default system keyring */
                return 1;
 
        if (strncmp(str, "id:", 3) == 0) {
-               struct asymmetric_key_id *p;
-               p = asymmetric_key_hex_to_key_id(str + 3);
-               if (p == ERR_PTR(-EINVAL))
-                       pr_err("Unparsable hex string in ca_keys\n");
-               else if (!IS_ERR(p))
+               struct asymmetric_key_id *p = &cakey.id;
+               size_t hexlen = (strlen(str) - 3) / 2;
+               int ret;
+
+               if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
+                       pr_err("Missing or invalid ca_keys id\n");
+                       return 1;
+               }
+
+               ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
+               if (ret < 0)
+                       pr_err("Unparsable ca_keys id hex string\n");
+               else
                        ca_keyid = p;   /* owner key 'id:xxxxxx' */
        } else if (strcmp(str, "builtin") == 0) {
                use_builtin_keys = true;
index 37fb190..73f056a 100644 (file)
@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
                                pdata->mmio_size = resource_size(rentry->res);
                        pdata->mmio_base = ioremap(rentry->res->start,
                                                   pdata->mmio_size);
-                       if (!pdata->mmio_base)
-                               goto err_out;
                        break;
                }
 
        acpi_dev_free_resource_list(&resource_list);
 
+       if (!pdata->mmio_base) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
        pdata->dev_desc = dev_desc;
 
        if (dev_desc->setup)
index 87b2752..7f50dd9 100644 (file)
@@ -213,6 +213,7 @@ struct acpi_table_list {
 
 #define ACPI_TABLE_INDEX_DSDT           (0)
 #define ACPI_TABLE_INDEX_FACS           (1)
+#define ACPI_TABLE_INDEX_X_FACS         (2)
 
 struct acpi_find_context {
        char *search_for;
index 7d24860..05be59c 100644 (file)
@@ -350,9 +350,18 @@ void acpi_tb_parse_fadt(u32 table_index)
        /* If Hardware Reduced flag is set, there is no FACS */
 
        if (!acpi_gbl_reduced_hardware) {
-               acpi_tb_install_fixed_table((acpi_physical_address)
-                                           acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS,
-                                           ACPI_TABLE_INDEX_FACS);
+               if (acpi_gbl_FADT.facs) {
+                       acpi_tb_install_fixed_table((acpi_physical_address)
+                                                   acpi_gbl_FADT.facs,
+                                                   ACPI_SIG_FACS,
+                                                   ACPI_TABLE_INDEX_FACS);
+               }
+               if (acpi_gbl_FADT.Xfacs) {
+                       acpi_tb_install_fixed_table((acpi_physical_address)
+                                                   acpi_gbl_FADT.Xfacs,
+                                                   ACPI_SIG_FACS,
+                                                   ACPI_TABLE_INDEX_X_FACS);
+               }
        }
 }
 
@@ -491,13 +500,9 @@ static void acpi_tb_convert_fadt(void)
        acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
 
        /*
-        * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+        * Expand the 32-bit DSDT addresses to 64-bit as necessary.
         * Later ACPICA code will always use the X 64-bit field.
         */
-       acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
-                                                    acpi_gbl_FADT.facs,
-                                                    acpi_gbl_FADT.Xfacs);
-
        acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
                                                     acpi_gbl_FADT.dsdt,
                                                     acpi_gbl_FADT.Xdsdt);
index 6559a58..2fb1afa 100644 (file)
@@ -68,7 +68,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
 
 acpi_status acpi_tb_initialize_facs(void)
 {
-       acpi_status status;
+       struct acpi_table_facs *facs32;
+       struct acpi_table_facs *facs64;
 
        /* If Hardware Reduced flag is set, there is no FACS */
 
@@ -77,11 +78,22 @@ acpi_status acpi_tb_initialize_facs(void)
                return (AE_OK);
        }
 
-       status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                        ACPI_CAST_INDIRECT_PTR(struct
-                                                               acpi_table_header,
-                                                               &acpi_gbl_FACS));
-       return (status);
+       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+                                     ACPI_CAST_INDIRECT_PTR(struct
+                                                            acpi_table_header,
+                                                            &facs32));
+       (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
+                                     ACPI_CAST_INDIRECT_PTR(struct
+                                                            acpi_table_header,
+                                                            &facs64));
+
+       if (acpi_gbl_use32_bit_facs_addresses) {
+               acpi_gbl_FACS = facs32 ? facs32 : facs64;
+       } else {
+               acpi_gbl_FACS = facs64 ? facs64 : facs32;
+       }
+
+       return (AE_OK);
 }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
@@ -101,7 +113,7 @@ acpi_status acpi_tb_initialize_facs(void)
 u8 acpi_tb_tables_loaded(void)
 {
 
-       if (acpi_gbl_root_table_list.current_table_count >= 3) {
+       if (acpi_gbl_root_table_list.current_table_count >= 4) {
                return (TRUE);
        }
 
@@ -357,11 +369,11 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
        table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
 
        /*
-        * First two entries in the table array are reserved for the DSDT
-        * and FACS, which are not actually present in the RSDT/XSDT - they
-        * come from the FADT
+        * First three entries in the table array are reserved for the DSDT
+        * and 32bit/64bit FACS, which are not actually present in the
+        * RSDT/XSDT - they come from the FADT
         */
-       acpi_gbl_root_table_list.current_table_count = 2;
+       acpi_gbl_root_table_list.current_table_count = 3;
 
        /* Initialize the root table array from the RSDT/XSDT */
 
index aadb300..b63e35d 100644 (file)
@@ -166,7 +166,8 @@ static acpi_status acpi_tb_load_namespace(void)
 
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
        for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
-               if ((!ACPI_COMPARE_NAME
+               if (!acpi_gbl_root_table_list.tables[i].address ||
+                   (!ACPI_COMPARE_NAME
                     (&(acpi_gbl_root_table_list.tables[i].signature),
                      ACPI_SIG_SSDT)
                     &&
index 083a768..42a32a6 100644 (file)
@@ -179,10 +179,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
         * Obtain a permanent mapping for the FACS. This is required for the
         * Global Lock and the Firmware Waking Vector
         */
-       status = acpi_tb_initialize_facs();
-       if (ACPI_FAILURE(status)) {
-               ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
-               return_ACPI_STATUS(status);
+       if (!(flags & ACPI_NO_FACS_INIT)) {
+               status = acpi_tb_initialize_facs();
+               if (ACPI_FAILURE(status)) {
+                       ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+                       return_ACPI_STATUS(status);
+               }
        }
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
index 5226a8b..98f5316 100644 (file)
@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
        if (!addr || !length)
                return;
 
-       acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+       /* Resources are never freed */
+       if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+               request_region(addr, length, desc);
+       else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               request_mem_region(addr, length, desc);
 }
 
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
 {
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+       return 0;
 }
+fs_initcall_sync(acpi_reserve_resources);
 
 void acpi_os_printf(const char *fmt, ...)
 {
@@ -1838,7 +1845,6 @@ acpi_status __init acpi_os_initialize(void)
 
 acpi_status __init acpi_os_initialize1(void)
 {
-       acpi_reserve_resources();
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
index fcb7807..f1c966e 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/ioport.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 
 #ifdef CONFIG_X86
@@ -194,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
        u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
        bool wp = addr->info.mem.write_protect;
        u64 len = attr->address_length;
+       u64 start, end, offset = 0;
        struct resource *res = &win->res;
 
        /*
@@ -205,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
                pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
                         addr->min_address_fixed, addr->max_address_fixed, len);
 
-       res->start = attr->minimum;
-       res->end = attr->maximum;
-
        /*
         * For bridges that translate addresses across the bridge,
         * translation_offset is the offset that must be added to the
@@ -215,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
         * primary side. Non-bridge devices must list 0 for all Address
         * Translation offset bits.
         */
-       if (addr->producer_consumer == ACPI_PRODUCER) {
-               res->start += attr->translation_offset;
-               res->end += attr->translation_offset;
-       } else if (attr->translation_offset) {
+       if (addr->producer_consumer == ACPI_PRODUCER)
+               offset = attr->translation_offset;
+       else if (attr->translation_offset)
                pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
                         attr->translation_offset);
+       start = attr->minimum + offset;
+       end = attr->maximum + offset;
+
+       win->offset = offset;
+       res->start = start;
+       res->end = end;
+       if (sizeof(resource_size_t) < sizeof(u64) &&
+           (offset != win->offset || start != res->start || end != res->end)) {
+               pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
+                       attr->minimum, attr->maximum);
+               return false;
        }
 
        switch (addr->resource_type) {
@@ -237,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
                return false;
        }
 
-       win->offset = attr->translation_offset;
-
        if (addr->producer_consumer == ACPI_PRODUCER)
                res->flags |= IORESOURCE_WINDOW;
 
@@ -622,162 +627,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
        return (type & types) ? 0 : 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
-       struct list_head node;
-       u64 start;
-       u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
-                        char *desc)
-{
-       unsigned int length = end - start + 1;
-       struct resource *res;
-
-       res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
-               request_region(start, length, desc) :
-               request_mem_region(start, length, desc);
-       if (!res)
-               return -EIO;
-
-       res->flags &= ~flags;
-       return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
-                            unsigned long flags, char *desc,
-                            struct list_head *head)
-{
-       struct reserved_region *reg;
-       int error;
-
-       reg = kmalloc(sizeof(*reg), GFP_KERNEL);
-       if (!reg)
-               return -ENOMEM;
-
-       error = request_range(start, end, space_id, flags, desc);
-       if (error)
-               return error;
-
-       reg->start = start;
-       reg->end = end;
-       list_add_tail(&reg->node, head);
-       return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it.  If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem.  It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-                       unsigned long flags, char *desc)
-{
-       struct list_head *regions;
-       struct reserved_region *reg;
-       u64 end = start + length - 1;
-       int ret = 0, error = 0;
-
-       if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-               regions = &reserved_io_regions;
-       else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               regions = &reserved_mem_regions;
-       else
-               return -EINVAL;
-
-       if (list_empty(regions))
-               return add_region_before(start, end, space_id, flags, desc, regions);
-
-       list_for_each_entry(reg, regions, node)
-               if (reg->start == end + 1) {
-                       /* The new region can be prepended to this one. */
-                       ret = request_range(start, end, space_id, flags, desc);
-                       if (!ret)
-                               reg->start = start;
-
-                       return ret;
-               } else if (reg->start > end) {
-                       /* No overlap.  Add the new region here and get out. */
-                       return add_region_before(start, end, space_id, flags,
-                                                desc, &reg->node);
-               } else if (reg->end == start - 1) {
-                       goto combine;
-               } else if (reg->end >= start) {
-                       goto overlap;
-               }
-
-       /* The new region goes after the last existing one. */
-       return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
-       /*
-        * The new region overlaps an existing one.
-        *
-        * The head part of the new region immediately preceding the existing
-        * overlapping one can be combined with it right away.
-        */
-       if (reg->start > start) {
-               error = request_range(start, reg->start - 1, space_id, flags, desc);
-               if (error)
-                       ret = error;
-               else
-                       reg->start = start;
-       }
-
- combine:
-       /*
-        * The new region is adjacent to an existing one.  If it extends beyond
-        * that region all the way to the next one, it is possible to combine
-        * all three of them.
-        */
-       while (reg->end < end) {
-               struct reserved_region *next = NULL;
-               u64 a = reg->end + 1, b = end;
-
-               if (!list_is_last(&reg->node, regions)) {
-                       next = list_next_entry(reg, node);
-                       if (next->start <= end)
-                               b = next->start - 1;
-               }
-               error = request_range(a, b, space_id, flags, desc);
-               if (!error) {
-                       if (next && next->start == b + 1) {
-                               reg->end = next->end;
-                               list_del(&next->node);
-                               kfree(next);
-                       } else {
-                               reg->end = end;
-                               break;
-                       }
-               } else if (next) {
-                       if (!ret)
-                               ret = error;
-
-                       reg = next;
-               } else {
-                       break;
-               }
-       }
-
-       return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
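
For illustration only (not part of the patch): the reworked acpi_decode_space() above stores the 64-bit window into the resource and then compares the stored values back, so a window that does not fit a 32-bit resource_size_t is rejected instead of being silently truncated. A user-space sketch of that store-and-compare idiom, with uint32_t standing in for a narrow resource_size_t:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    struct narrow_res {            /* stand-in for a 32-bit resource_size_t */
            uint32_t start, end;
    };

    /* Returns 0 if the window was stored intact, -1 if it does not fit. */
    static int set_window(struct narrow_res *res, uint64_t start, uint64_t end)
    {
            res->start = start;
            res->end = end;
            if (res->start != start || res->end != end) {
                    fprintf(stderr,
                            "window [%#" PRIx64 "-%#" PRIx64 "] ignored, not addressable\n",
                            start, end);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct narrow_res r;

            set_window(&r, 0x1000, 0x1fff);                  /* fits */
            set_window(&r, 0x100000000ULL, 0x1ffffffffULL);  /* truncated: rejected */
            return 0;
    }
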
index 577849c..e0064d1 100644 (file)
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *     RETURNS:
  *     Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
        u64 block = 0;
 
-       if (!dev || tf->flags & ATA_TFLAG_LBA) {
+       if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
        return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-       unsigned int err_mask;
-
-       if (!ata_id_has_sense_reporting(dev->id))
-               return;
-
-       if (ata_id_sense_reporting_enabled(dev->id))
-               return;
-
-       err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to enable Sense Data Reporting, Emask 0x%x\n",
-                           err_mask);
-       }
-}
-
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->devslp_timing[i] = sata_setting[j];
                                }
                }
-               ata_dev_config_sense_reporting(dev);
+
                dev->cdb_len = 16;
        }
 
@@ -2478,6 +2460,10 @@ int ata_dev_configure(struct ata_device *dev)
                dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
                                         dev->max_sectors);
 
+       if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+               dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+                                        dev->max_sectors);
+
        if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;
 
@@ -4146,6 +4132,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Slimtype DVD A  DS8A8SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
        { "Slimtype DVD A  DS8A9SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
 
+       /*
+        * Causes silent data corruption with higher max sects.
+        * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+        */
+       { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
@@ -4174,9 +4166,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
-       /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
        { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
        { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       { "VB0250EAVER",        "HPG7",         ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
@@ -4225,11 +4218,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M500_*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Micron_M5[15]0*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M5[15]0_*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M550*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4237,6 +4230,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "FCCT*M500*",                 NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+       /* devices that don't properly handle TRIM commands */
+       { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
 
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4501,7 +4499,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
        else /* In the ancient relic department - skip all of this */
                return 0;
 
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+       /* On some disks, this command causes spin-up, so we need a longer timeout */

+       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
 
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
index cf0022e..cb0508a 100644 (file)
@@ -1507,16 +1507,21 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 {
        struct ata_taskfile tf;
        unsigned int err_mask;
+       bool dma = false;
 
        DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
 
+retry:
        ata_tf_init(dev, &tf);
-       if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
+       if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
+           !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
                tf.command = ATA_CMD_READ_LOG_DMA_EXT;
                tf.protocol = ATA_PROT_DMA;
+               dma = true;
        } else {
                tf.command = ATA_CMD_READ_LOG_EXT;
                tf.protocol = ATA_PROT_PIO;
+               dma = false;
        }
        tf.lbal = log;
        tf.lbam = page;
@@ -1527,6 +1532,12 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);
 
+       if (err_mask && dma) {
+               dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
+               ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+               goto retry;
+       }
+
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
 }
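
For illustration only (not part of the patch): ata_read_log_page() above now tries the DMA variant once, and on failure records the ATA_HORKAGE_NO_NCQ_LOG quirk and retries via PIO, so every later call skips DMA as well. A user-space sketch of that try-once-then-remember fallback; read_dma() and read_pio() are stubs invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    #define QUIRK_NO_DMA_LOG 0x1

    struct fake_dev { unsigned int quirks; };

    /* Assumed stubs: return 0 on success, nonzero on error. */
    static int read_dma(struct fake_dev *d) { (void)d; return -1; /* pretend DMA fails */ }
    static int read_pio(struct fake_dev *d) { (void)d; return 0; }

    static int read_log(struct fake_dev *dev)
    {
            bool dma;
            int err;

    retry:
            if (!(dev->quirks & QUIRK_NO_DMA_LOG)) {
                    dma = true;
                    err = read_dma(dev);
            } else {
                    dma = false;
                    err = read_pio(dev);
            }

            if (err && dma) {
                    /* remember the failure so later calls skip DMA entirely */
                    dev->quirks |= QUIRK_NO_DMA_LOG;
                    fprintf(stderr, "DMA read failed, retrying unqueued\n");
                    goto retry;
            }
            return err;
    }

    int main(void)
    {
            struct fake_dev dev = { 0 };
            return read_log(&dev);
    }
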
@@ -1581,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
 }
@@ -1618,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
        return err_mask;
 }
 
-/**
- *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *     @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *     @dfl_sense_key: default sense key to use
- *
- *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *     SENSE.  This function is EH helper.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     encoded sense data on success, 0 on failure or if sense data
- *     is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-                               struct scsi_cmnd *cmd)
-{
-       struct ata_device *dev = qc->dev;
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       if (!cmd)
-               return 0;
-
-       DPRINTK("ATA request sense\n");
-       ata_dev_warn(dev, "request sense\n");
-       if (!ata_id_sense_reporting_enabled(dev->id)) {
-               ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-               return 0;
-       }
-       ata_tf_init(dev, &tf);
-
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       tf.command = ATA_CMD_REQ_SENSE_DATA;
-       tf.protocol = ATA_PROT_NODATA;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       /*
-        * ACS-4 states:
-        * The device may set the SENSE DATA AVAILABLE bit to one in the
-        * STATUS field and clear the ERROR bit to zero in the STATUS field
-        * to indicate that the command returned completion without an error
-        * and the sense data described in table 306 is available.
-        *
-        * IOW the 'ATA_SENSE' bit might not be set even though valid
-        * sense data is available.
-        * So check for both.
-        */
-       if ((tf.command & ATA_SENSE) ||
-               tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-               ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-               ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-                            tf.lbah, tf.lbam, tf.lbal);
-       } else {
-               ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-                            tf.command, err_mask);
-       }
-       return err_mask;
-}
-
 /**
  *     atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *     @dev: device to perform REQUEST_SENSE to
@@ -1844,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (qc->result_tf.auxiliary) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-                           sense_key, asc, ascq);
-               ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
        ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1886,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                return ATA_EH_RESET;
        }
 
-       /*
-        * Sense data reporting does not work if the
-        * device fault bit is set.
-        */
-       if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-           !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-               if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-                       tmp = ata_eh_request_sense(qc, qc->scsicmd);
-                       if (tmp)
-                               qc->err_mask |= tmp;
-                       else
-                               ata_scsi_set_sense_information(qc->scsicmd, tf);
-               } else {
-                       ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-               }
-       }
-
-       /* Set by NCQ autosense or request sense above */
-       if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-               return 0;
-
        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
@@ -2650,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-                                   ATA_SENSE | ATA_ERR)) {
+                                   ATA_ERR)) {
                        if (res->command & ATA_BUSY)
                                ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
-                                 res->command & ATA_SENSE ? "SENSE " : "",
                                  res->command & ATA_ERR ? "ERR " : "");
                }
 
index 7ccc084..85aa761 100644 (file)
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
                                       ATA_LFLAG_NO_SRST |
                                       ATA_LFLAG_ASSUME_ATA;
                }
+       } else if (vendor == 0x11ab && devid == 0x4140) {
+               /* Marvell 4140 quirks */
+               ata_for_each_link(link, ap, EDGE) {
+                       /* port 4 is for SEMB device and it doesn't like SRST */
+                       if (link->pmp == 4)
+                               link->flags |= ATA_LFLAG_DISABLED;
+               }
        }
 }
 
index 3131adc..0d7f0da 100644 (file)
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-       if (!cmd)
-               return;
-
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
        scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                   const struct ata_taskfile *tf)
-{
-       u64 information;
-
-       if (!cmd)
-               return;
-
-       information = ata_tf_read_block(tf, NULL);
-       scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
            ((cdb[2] & 0x20) || need_sense)) {
                ata_gen_passthru_sense(qc);
        } else {
-               if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-                       cmd->result = SAM_STAT_CHECK_CONDITION;
-               } else if (!need_sense) {
+               if (!need_sense) {
                        cmd->result = SAM_STAT_GOOD;
                } else {
                        /* TODO: decide which descriptor format to use
@@ -2568,7 +2551,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
                rbuf[14] = (lowest_aligned >> 8) & 0x3f;
                rbuf[15] = lowest_aligned;
 
-               if (ata_id_has_trim(args->id)) {
+               if (ata_id_has_trim(args->id) &&
+                   !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
                        rbuf[14] |= 0x80; /* LBPME */
 
                        if (ata_id_has_zero_after_trim(args->id) &&
index 3227b7c..e2d9497 100644 (file)
@@ -560,6 +560,29 @@ show_ata_dev_gscr(struct device *dev,
 
 static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
 
+static ssize_t
+show_ata_dev_trim(struct device *dev,
+                 struct device_attribute *attr, char *buf)
+{
+       struct ata_device *ata_dev = transport_class_to_dev(dev);
+       unsigned char *mode;
+
+       if (!ata_id_has_trim(ata_dev->id))
+               mode = "unsupported";
+       else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+               mode = "forced_unsupported";
+       else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
+                       mode = "forced_unqueued";
+       else if (ata_fpdma_dsm_supported(ata_dev))
+               mode = "queued";
+       else
+               mode = "unqueued";
+
+       return snprintf(buf, 20, "%s\n", mode);
+}
+
+static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
+
 static DECLARE_TRANSPORT_CLASS(ata_dev_class,
                               "ata_device", NULL, NULL, NULL);
 
@@ -733,6 +756,7 @@ struct scsi_transport_template *ata_attach_transport(void)
        SETUP_DEV_ATTRIBUTE(ering);
        SETUP_DEV_ATTRIBUTE(id);
        SETUP_DEV_ATTRIBUTE(gscr);
+       SETUP_DEV_ATTRIBUTE(trim);
        BUG_ON(count > ATA_DEV_ATTRS);
        i->dev_attrs[count] = NULL;
 
index a998a17..f840ca1 100644 (file)
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                           u64 block, u32 n_block, unsigned int tf_flags,
                           unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-                            struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
                                  struct ata_taskfile *tf, const u8 *cdb,
                                  int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                          const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
index 171841a..4d1d9de 100644 (file)
@@ -544,10 +544,8 @@ static void fw_dev_release(struct device *dev)
        kfree(fw_priv);
 }
 
-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
 {
-       struct firmware_priv *fw_priv = to_firmware_priv(dev);
-
        if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
                return -ENOMEM;
        if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -558,6 +556,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct firmware_priv *fw_priv = to_firmware_priv(dev);
+       int err = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_priv->buf)
+               err = do_firmware_uevent(fw_priv, env);
+       mutex_unlock(&fw_lock);
+       return err;
+}
+
 static struct class firmware_class = {
        .name           = "firmware",
        .class_attrs    = firmware_class_attrs,
index 7fdd017..c7b0fce 100644 (file)
@@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
                        return -ENOMEM;
                }
        } else {
-               if (IS_ERR(ce->clk) || !__clk_get(clk)) {
+               if (IS_ERR(clk) || !__clk_get(clk)) {
                        kfree(ce);
                        return -ENOENT;
                }
index 81751a4..56486d9 100644 (file)
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (!blk)
                return -ENOMEM;
 
-       present = krealloc(rbnode->cache_present,
-                   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-       if (!present) {
-               kfree(blk);
-               return -ENOMEM;
+       if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+               present = krealloc(rbnode->cache_present,
+                                  BITS_TO_LONGS(blklen) * sizeof(*present),
+                                  GFP_KERNEL);
+               if (!present) {
+                       kfree(blk);
+                       return -ENOMEM;
+               }
+
+               memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+                      (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+                      * sizeof(*present));
+       } else {
+               present = rbnode->cache_present;
        }
 
        /* insert the register value in the correct place in the rbnode block */
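
For illustration only (not part of the patch): the regcache fix above only reallocates the presence bitmap when the block actually grows, and then zeroes just the newly added longs, since krealloc() -- like realloc() -- leaves the grown tail uninitialized. A user-space sketch of that pattern; bitmap_grow() and the local BITS_TO_LONGS() are illustrative, not the regmap API.

    #include <stdlib.h>
    #include <string.h>

    #define BITS_TO_LONGS(n) (((n) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))

    /* Grow a bitmap from old_bits to new_bits. Returns the (possibly moved)
     * bitmap, or NULL on allocation failure. */
    static unsigned long *bitmap_grow(unsigned long *map,
                                      size_t old_bits, size_t new_bits)
    {
            size_t old_longs = BITS_TO_LONGS(old_bits);
            size_t new_longs = BITS_TO_LONGS(new_bits);
            unsigned long *p;

            if (new_longs <= old_longs)
                    return map;          /* nothing to do, keep the existing bits */

            p = realloc(map, new_longs * sizeof(*p));
            if (!p)
                    return NULL;

            /* realloc() does not zero the tail; clear only the added longs */
            memset(p + old_longs, 0, (new_longs - old_longs) * sizeof(*p));
            return p;
    }

    int main(void)
    {
            unsigned long *map = calloc(BITS_TO_LONGS(16), sizeof(*map));

            map = bitmap_grow(map, 16, 200);   /* grows to 4 longs, zeroes the new 3 */
            free(map);
            return 0;
    }
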
index d7173cb..cef6fa8 100644 (file)
@@ -86,8 +86,6 @@ static DEFINE_MUTEX(loop_index_mutex);
 static int max_part;
 static int part_shift;
 
-static struct workqueue_struct *loop_wq;
-
 static int transfer_xor(struct loop_device *lo, int cmd,
                        struct page *raw_page, unsigned raw_off,
                        struct page *loop_page, unsigned loop_off,
@@ -725,6 +723,12 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        size = get_loop_size(lo, file);
        if ((loff_t)(sector_t)size != size)
                goto out_putf;
+       error = -ENOMEM;
+       lo->wq = alloc_workqueue("kloopd%d",
+                       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16,
+                       lo->lo_number);
+       if (!lo->wq)
+               goto out_putf;
 
        error = 0;
 
@@ -872,6 +876,8 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+       destroy_workqueue(lo->wq);
+       lo->wq = NULL;
        mutex_unlock(&lo->lo_ctl_mutex);
        /*
         * Need not hold lo_ctl_mutex to fput backing file.
@@ -1425,9 +1431,13 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+       struct loop_device *lo = cmd->rq->q->queuedata;
 
        blk_mq_start_request(bd->rq);
 
+       if (lo->lo_state != Lo_bound)
+               return -EIO;
+
        if (cmd->rq->cmd_flags & REQ_WRITE) {
                struct loop_device *lo = cmd->rq->q->queuedata;
                bool need_sched = true;
@@ -1441,9 +1451,9 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                spin_unlock_irq(&lo->lo_lock);
 
                if (need_sched)
-                       queue_work(loop_wq, &lo->write_work);
+                       queue_work(lo->wq, &lo->write_work);
        } else {
-               queue_work(loop_wq, &cmd->read_work);
+               queue_work(lo->wq, &cmd->read_work);
        }
 
        return BLK_MQ_RQ_QUEUE_OK;
@@ -1455,9 +1465,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
        struct loop_device *lo = cmd->rq->q->queuedata;
        int ret = -EIO;
 
-       if (lo->lo_state != Lo_bound)
-               goto failed;
-
        if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
                goto failed;
 
@@ -1806,13 +1813,6 @@ static int __init loop_init(void)
                goto misc_out;
        }
 
-       loop_wq = alloc_workqueue("kloopd",
-                       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
-       if (!loop_wq) {
-               err = -ENOMEM;
-               goto misc_out;
-       }
-
        blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
                                  THIS_MODULE, loop_probe, NULL, NULL);
 
@@ -1850,8 +1850,6 @@ static void __exit loop_exit(void)
        blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
        unregister_blkdev(LOOP_MAJOR, "loop");
 
-       destroy_workqueue(loop_wq);
-
        misc_deregister(&loop_misc);
 }
 
index 301c27f..49564ed 100644 (file)
@@ -54,6 +54,7 @@ struct loop_device {
        gfp_t           old_gfp_mask;
 
        spinlock_t              lo_lock;
+       struct workqueue_struct *wq;
        struct list_head        write_cmd_head;
        struct work_struct      write_work;
        bool                    write_started;
index ec6c5c6..010ce0b 100644 (file)
@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)     ((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
        obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+
+       if (obj_request_img_data_test(obj_request))
+               rbd_osd_copyup_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
 {
@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_discard_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
+               rbd_osd_call_callback(obj_request);
+               break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
@@ -2001,11 +2014,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
        rbd_assert(obj_request_type_valid(type));
 
        size = strlen(object_name) + 1;
-       name = kmalloc(size, GFP_KERNEL);
+       name = kmalloc(size, GFP_NOIO);
        if (!name)
                return NULL;
 
-       obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
+       obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
        if (!obj_request) {
                kfree(name);
                return NULL;
@@ -2509,13 +2522,15 @@ out_unwind:
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;
 
+       dout("%s: obj %p\n", __func__, obj_request);
+
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
                obj_request->type == OBJ_REQUEST_NODATA);
        rbd_assert(obj_request_img_data_test(obj_request));
@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;
 
-       /* Finish up with the normal image object callback */
-
-       rbd_img_obj_callback(obj_request);
+       obj_request_done_set(obj_request);
 }
 
 static void
@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        /* All set, send it off. */
 
-       orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
index 713fc9f..3e9ec95 100644 (file)
@@ -362,8 +362,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
                return;
        }
 
-       if (work_pending(&blkif->persistent_purge_work)) {
-               pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+       if (work_busy(&blkif->persistent_purge_work)) {
+               pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                return;
        }
 
index 2c61cf8..89c7371 100644 (file)
@@ -1118,8 +1118,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
                                 */
-                               indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-                               list_add(&indirect_page->lru, &info->indirect_pages);
+                               if (!info->feature_persistent) {
+                                       indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+                                       list_add(&indirect_page->lru, &info->indirect_pages);
+                               }
                                s->indirect_grants[i]->gref = GRANT_INVALID_REF;
                                list_add_tail(&s->indirect_grants[i]->node, &info->grants);
                        }
index 4bba866..3f146c9 100644 (file)
@@ -378,12 +378,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
        /* Read Verbose Config Version Info */
        skb = btbcm_read_verbose_config(hdev);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-               get_unaligned_le16(skb->data + 5));
-       kfree_skb(skb);
+       if (!IS_ERR(skb)) {
+               BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+                       get_unaligned_le16(skb->data + 5));
+               kfree_skb(skb);
+       }
 
        set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
index 420cc9f..c655015 100644 (file)
@@ -268,7 +268,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
 
        /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
-       { USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
+       { USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
 
        /* Digianswer devices */
        { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
@@ -1993,6 +1993,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        }
        fw_ptr = fw->data;
 
+       kfree_skb(skb);
+
        /* This Intel specific command enables the manufacturer mode of the
         * controller.
         *
@@ -2334,6 +2336,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        struct intel_boot_params *params;
        const struct firmware *fw;
        const u8 *fw_ptr;
+       u32 frag_len;
        char fwname[64];
        ktime_t calltime, delta, rettime;
        unsigned long long duration;
@@ -2540,24 +2543,33 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        }
 
        fw_ptr = fw->data + 644;
+       frag_len = 0;
 
        while (fw_ptr - fw->data < fw->size) {
-               struct hci_command_hdr *cmd = (void *)fw_ptr;
-               u8 cmd_len;
+               struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
 
-               cmd_len = sizeof(*cmd) + cmd->plen;
+               frag_len += sizeof(*cmd) + cmd->plen;
 
-               /* Send each command from the firmware data buffer as
-                * a single Data fragment.
+                * The parameter length of the secure send command requires
+                * a 4 byte alignment. It so happens that the firmware file
+                * contains proper Intel_NOP commands to align the fragments
+                * as needed.
+                *
+                * Send each set of commands with 4 byte alignment from the
+                * firmware data buffer as a single Data fragment.
                 */
-               err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
-               if (err < 0) {
-                       BT_ERR("%s: Failed to send firmware data (%d)",
-                              hdev->name, err);
-                       goto done;
-               }
+               if (!(frag_len % 4)) {
+                       err = btusb_intel_secure_send(hdev, 0x01, frag_len,
+                                                     fw_ptr);
+                       if (err < 0) {
+                               BT_ERR("%s: Failed to send firmware data (%d)",
+                                      hdev->name, err);
+                               goto done;
+                       }
 
-               fw_ptr += cmd_len;
+                       fw_ptr += frag_len;
+                       frag_len = 0;
+               }
        }
 
        set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
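
For illustration only (not part of the patch): the rewritten download loop above walks HCI commands (a 3-byte header of 2-byte opcode plus 1-byte parameter length, then the payload), accumulating them into one fragment and flushing only when the running length is 4-byte aligned. A user-space sketch of that coalescing loop; send_fragment() stands in for the driver's secure-send call and is an assumption of the example.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Assumed transport: in the driver this would be the USB secure-send call. */
    static int send_fragment(const uint8_t *frag, size_t len)
    {
            printf("sending %zu-byte fragment\n", len);
            return 0;
    }

    static int send_commands(const uint8_t *data, size_t size)
    {
            const uint8_t *ptr = data;
            size_t frag_len = 0;

            while ((size_t)(ptr - data) < size) {
                    const uint8_t *cmd = ptr + frag_len;
                    uint8_t plen;

                    if ((size_t)(cmd - data) + 3 > size)
                            return -1;           /* truncated header */
                    plen = cmd[2];
                    frag_len += 3 + plen;
                    if ((size_t)(ptr - data) + frag_len > size)
                            return -1;           /* truncated payload */

                    if (!(frag_len % 4)) {       /* aligned: flush what we have */
                            if (send_fragment(ptr, frag_len))
                                    return -1;
                            ptr += frag_len;
                            frag_len = 0;
                    }
            }
            return 0;
    }

    int main(void)
    {
            /* one command: opcode 0xfc01, plen 1, one parameter byte -> 4 bytes total */
            const uint8_t fw[] = { 0x01, 0xfc, 0x01, 0xaa };

            return send_commands(fw, sizeof(fw));
    }
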
index aaa0f2a..60397ec 100644 (file)
@@ -212,7 +212,7 @@ static int arm_ccn_node_to_xp_port(int node)
 
 static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
 {
-       *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
+       *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
        *config |= (node_xp << 0) | (type << 8) | (port << 24);
 }
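
For illustration only (not part of the patch): the one-line fix above narrows the clear mask to the 2-bit width of the port field at bit 24, so the config bits just above it are no longer wiped before the OR. A generic clear-then-set helper (user-space sketch, not the driver's code) makes the link between field width and mask explicit:

    #include <stdint.h>
    #include <assert.h>

    /* Replace a 'width'-bit field at 'shift' in *config with 'val'. */
    static void set_field(uint64_t *config, unsigned int shift,
                          unsigned int width, uint64_t val)
    {
            uint64_t mask = ((UINT64_C(1) << width) - 1) << shift;

            assert(((val << shift) & ~mask) == 0);   /* value must fit the field */
            *config = (*config & ~mask) | (val << shift);
    }

    int main(void)
    {
            uint64_t cfg = 0;

            set_field(&cfg, 0, 8, 0x12);     /* node/XP id */
            set_field(&cfg, 8, 8, 0x34);     /* event type */
            set_field(&cfg, 24, 2, 0x3);     /* port: 2 bits, hence the 0x3 mask */
            return cfg == 0x03003412 ? 0 : 1;
    }
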
 
index 0b4188b..c6dea3f 100644 (file)
@@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void)
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+       if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
             gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
             intel_iommu_gfx_mapped)
                return 1;
index da8faf7..5643b65 100644 (file)
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-       if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+       if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed");
                hwrng_fill = NULL;
        }
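
kthread_run() reports failure through an ERR_PTR-encoded pointer that is not necessarily -ENOMEM, which is why the patch switches to IS_ERR(). A rough userspace imitation of that convention (the real macros live in include/linux/err.h; these are simplified stand-ins):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified imitations of the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	void *thread = ERR_PTR(-EAGAIN);	/* a failure other than -ENOMEM */

	/* The old check only caught one specific error value... */
	if (thread == ERR_PTR(-ENOMEM))
		printf("old check: caught\n");
	else
		printf("old check: missed\n");

	/* ...while IS_ERR() catches any ERR_PTR-encoded errno. */
	if (IS_ERR(thread))
		printf("IS_ERR: caught\n");
	return 0;
}
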
index a43048b..3c1a123 100644 (file)
@@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8100
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8100",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+               },
+       },
+       { }
+};
+
 /*
  * Probe for the presence of a supported laptop.
  */
@@ -911,7 +926,8 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table) ||
+           dmi_check_system(i8k_blacklist_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
index 283f00a..1082d4b 100644 (file)
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
 
        device_initialize(&chip->dev);
 
-       chip->cdev.owner = chip->pdev->driver->owner;
        cdev_init(&chip->cdev, &tpm_fops);
+       chip->cdev.owner = chip->pdev->driver->owner;
+       chip->cdev.kobj.parent = &chip->dev.kobj;
 
        return chip;
 }
index b26ceee..1267322 100644 (file)
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
                return -ENODEV;
        }
 
+       /* At least some versions of AMI BIOS have a bug where the TPM2 table
+        * has a zero address for the control area, and therefore we must fail.
+        */
+       if (!buf->control_area_pa) {
+               dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
+               return -EINVAL;
+       }
+
        if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
                dev_err(dev, "TPM2 ACPI table has wrong size");
                return -EINVAL;
@@ -267,7 +275,7 @@ static int crb_acpi_add(struct acpi_device *device)
 
        memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
        pa = le64_to_cpu(pa);
-       priv->cmd = devm_ioremap_nocache(dev, le64_to_cpu(pa),
+       priv->cmd = devm_ioremap_nocache(dev, pa,
                                         ioread32(&priv->cca->cmd_size));
        if (!priv->cmd) {
                dev_err(dev, "ioremap of the command buffer failed\n");
@@ -276,7 +284,7 @@ static int crb_acpi_add(struct acpi_device *device)
 
        memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
        pa = le64_to_cpu(pa);
-       priv->rsp = devm_ioremap_nocache(dev, le64_to_cpu(pa),
+       priv->rsp = devm_ioremap_nocache(dev, pa,
                                         ioread32(&priv->cca->rsp_size));
        if (!priv->rsp) {
                dev_err(dev, "ioremap of the response buffer failed\n");
index 42ffa5e..27ebf95 100644 (file)
@@ -578,6 +578,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto cleanup;
        }
 
+       ibmvtpm->dev = dev;
+       ibmvtpm->vdev = vio_dev;
+
        crq_q = &ibmvtpm->crq_queue;
        crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
        if (!crq_q->crq_addr) {
@@ -622,8 +625,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 
        crq_q->index = 0;
 
-       ibmvtpm->dev = dev;
-       ibmvtpm->vdev = vio_dev;
        TPM_VPRIV(chip) = (void *)ibmvtpm;
 
        spin_lock_init(&ibmvtpm->rtce_lock);
index 5b0f418..9f9cadd 100644 (file)
@@ -230,11 +230,12 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
        if (!c)
                return;
 
+       /* This should be JSON format, i.e. elements separated with a comma */
        seq_printf(s, "\"%s\": { ", c->name);
        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-       seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
-       seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
+       seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
+       seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
        seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
 }
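
The comma fix above is only about making the debugfs clk dump parse as JSON. A trivial illustration of the difference (values invented):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 24000000, accuracy = 0;
	int phase = 0;

	/* Broken: "rate" and "accuracy" run together, not valid JSON. */
	printf("{ \"rate\": %lu\"accuracy\": %lu }\n", rate, accuracy);

	/* Fixed: every element except the last is followed by a comma. */
	printf("{ \"rate\": %lu, \"accuracy\": %lu, \"phase\": %d }\n",
	       rate, accuracy, phase);
	return 0;
}
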
 
index 0dd8a4b..4a375ea 100644 (file)
@@ -37,7 +37,8 @@
  *     Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
  *     or PA PLL available on keystone2. These PLLs are controlled by
  *     this register. Main PLL is controlled by a PLL controller.
- * @pllm: PLL register map address
+ * @pllm: PLL register map address for multiplier bits
+ * @pllod: PLL register map address for post divider bits
  * @pll_ctl0: PLL controller map address
  * @pllm_lower_mask: multiplier lower mask
  * @pllm_upper_mask: multiplier upper mask
@@ -53,6 +54,7 @@ struct clk_pll_data {
        u32 phy_pllm;
        u32 phy_pll_ctl0;
        void __iomem *pllm;
+       void __iomem *pllod;
        void __iomem *pll_ctl0;
        u32 pllm_lower_mask;
        u32 pllm_upper_mask;
@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
                /* read post divider from od bits*/
                postdiv = ((val & pll_data->clkod_mask) >>
                                 pll_data->clkod_shift) + 1;
-       else
+       else if (pll_data->pllod) {
+               postdiv = readl(pll_data->pllod);
+               postdiv = ((postdiv & pll_data->clkod_mask) >>
+                               pll_data->clkod_shift) + 1;
+       } else
                postdiv = pll_data->postdiv;
 
        rate /= (prediv + 1);
@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                /* assume the PLL has output divider register bits */
                pll_data->clkod_mask = CLKOD_MASK;
                pll_data->clkod_shift = CLKOD_SHIFT;
+
+               /*
+                * Check if there is a post-divider register. If not,
+                * assume the od bits are part of the control register.
+                */
+               i = of_property_match_string(node, "reg-names",
+                                            "post-divider");
+               pll_data->pllod = of_iomap(node, i);
        }
 
        i = of_property_match_string(node, "reg-names", "control");
        pll_data->pll_ctl0 = of_iomap(node, i);
        if (!pll_data->pll_ctl0) {
                pr_err("%s: ioremap failed\n", __func__);
+               iounmap(pll_data->pllod);
                goto out;
        }
 
@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                pll_data->pllm = of_iomap(node, i);
                if (!pll_data->pllm) {
                        iounmap(pll_data->pll_ctl0);
+                       iounmap(pll_data->pllod);
                        goto out;
                }
        }
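
With the optional "post-divider" register mapped, the recalc path reads the OD field from it using the same mask/shift/+1 encoding as the inline od bits. A small sketch of that bitfield extraction; the mask, shift and register value below are assumed for illustration, not taken from the real driver:

#include <stdio.h>
#include <stdint.h>

/* Field position assumed for the sketch; the real CLKOD_MASK/CLKOD_SHIFT
 * come from the keystone clk driver. */
#define CLKOD_MASK	0x00780000
#define CLKOD_SHIFT	19

int main(void)
{
	uint32_t pllod_reg = 0x3u << CLKOD_SHIFT;	/* pretend readl(pllod) */
	unsigned long rate = 1228800000;		/* made-up VCO rate */
	unsigned int postdiv;

	/* Same "+ 1" encoding as the hunk: field value N means divide by N+1. */
	postdiv = ((pllod_reg & CLKOD_MASK) >> CLKOD_SHIFT) + 1;
	rate /= postdiv;

	printf("postdiv=%u rate=%lu\n", postdiv, rate);
	return 0;
}
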
index 4b93a1e..ac03ba4 100644 (file)
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
index b95d17f..92936f0 100644 (file)
@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        struct freq_tbl f = *rcg->freq_tbl;
        const struct frac_entry *frac = frac_table_pixel;
-       unsigned long request, src_rate;
+       unsigned long request;
        int delta = 100000;
        u32 mask = BIT(rcg->hid_width) - 1;
        u32 hid_div;
-       int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
-       struct clk *parent = clk_get_parent_by_index(hw->clk, index);
 
        for (; frac->num; frac++) {
                request = (rate * frac->den) / frac->num;
 
-               src_rate = __clk_round_rate(parent, request);
-               if ((src_rate < (request - delta)) ||
-                       (src_rate > (request + delta)))
+               if ((parent_rate < (request - delta)) ||
+                       (parent_rate > (request + delta)))
                        continue;
 
                regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
index bf12a25..0f8db28 100644 (file)
@@ -303,6 +303,8 @@ void __init st_of_flexgen_setup(struct device_node *np)
        if (!rlock)
                goto err;
 
+       spin_lock_init(rlock);
+
        for (i = 0; i < clk_data->clk_num; i++) {
                struct clk *clk;
                const char *clk_name;
index a917c4c..6ae068a 100644 (file)
@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
                    CLKGEN_FIELD(0x30c, 0xf, 20),
                    CLKGEN_FIELD(0x310, 0xf, 20) },
        .lockstatus_present = true,
-       .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+       .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
        .powerup_polarity = 1,
        .standby_polarity = 1,
        .pll_ops        = &st_quadfs_pll_c32_ops,
index fdcff10..ef65146 100644 (file)
@@ -582,7 +582,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
 };
 static struct clkgen_mux_data stih407_a9_mux_data = {
        .offset = 0x1a4,
-       .shift = 1,
+       .shift = 0,
        .width = 2,
 };
 
index d86bc46..0a1df82 100644 (file)
@@ -252,6 +252,11 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
                }
 
                clk = of_clk_get_from_provider(&clkspec);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to get atl clock %d from provider\n",
+                              __func__, i);
+                       return PTR_ERR(clk);
+               }
 
                cdesc = to_atl_desc(__clk_get_hw(clk));
                cdesc->cinfo = cinfo;
index 83564c9..c844616 100644 (file)
@@ -466,15 +466,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
        if (mct_int_type == MCT_INT_SPI) {
-               evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
-               if (request_irq(evt->irq, exynos4_mct_tick_isr,
-                               IRQF_TIMER | IRQF_NOBALANCING,
-                               evt->name, mevt)) {
-                       pr_err("exynos-mct: cannot register IRQ %d\n",
-                               evt->irq);
+
+               if (evt->irq == -1)
                        return -EIO;
-               }
-               irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
+
+               irq_force_affinity(evt->irq, cpumask_of(cpu));
+               enable_irq(evt->irq);
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
@@ -487,10 +484,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 static void exynos4_local_timer_stop(struct clock_event_device *evt)
 {
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
-       if (mct_int_type == MCT_INT_SPI)
-               free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
-       else
+       if (mct_int_type == MCT_INT_SPI) {
+               if (evt->irq != -1)
+                       disable_irq_nosync(evt->irq);
+       } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+       }
 }
 
 static int exynos4_mct_cpu_notify(struct notifier_block *self,
@@ -522,7 +521,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
 
 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
-       int err;
+       int err, cpu;
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
        struct clk *mct_clk, *tick_clk;
 
@@ -549,7 +548,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     mct_irqs[MCT_L0_IRQ], err);
        } else {
-               irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
+               for_each_possible_cpu(cpu) {
+                       int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+                       struct mct_clock_event_device *pcpu_mevt =
+                               per_cpu_ptr(&percpu_mct_tick, cpu);
+
+                       pcpu_mevt->evt.irq = -1;
+
+                       irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
+                       if (request_irq(mct_irq,
+                                       exynos4_mct_tick_isr,
+                                       IRQF_TIMER | IRQF_NOBALANCING,
+                                       pcpu_mevt->name, pcpu_mevt)) {
+                               pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
+                                                                       cpu);
+
+                               continue;
+                       }
+                       pcpu_mevt->evt.irq = mct_irq;
+               }
        }
 
        err = register_cpu_notifier(&exynos4_mct_cpu_nb);
index c45d274..6f9d27f 100644 (file)
@@ -678,6 +678,7 @@ static struct cpu_defaults knl_params = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
+               .get_scaling = core_get_scaling,
                .set = core_set_pstate,
        },
 };
index 332c8ef..0436997 100644 (file)
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
                          state->buflen_1;
        u32 *sh_desc = ctx->sh_desc_fin, *desc;
        dma_addr_t ptr = ctx->sh_desc_fin_dma;
-       int sec4_sg_bytes;
+       int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = 0;
        int sh_len;
 
-       sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+       sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+       sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
-       (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
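
The caam fix indexes the last scatter/gather entry by entry count rather than byte count, since pointer arithmetic on a struct array already scales by sizeof(). A trimmed-down illustration with a stand-in entry type and an assumed flag value:

#include <stdio.h>
#include <stdint.h>

#define SEC4_SG_LEN_FIN	0x40000000	/* flag value assumed for the sketch */

struct sg_entry {
	uint64_t addr;
	uint32_t len;
};

int main(void)
{
	struct sg_entry tbl[2] = { { 0x1000, 32 }, { 0x2000, 16 } };
	unsigned int nentries = 2;
	unsigned int nbytes = nentries * sizeof(struct sg_entry);

	/* Correct: index by entry count; the compiler scales by sizeof(). */
	(tbl + nentries - 1)->len |= SEC4_SG_LEN_FIN;

	/* Wrong (what the old code did): tbl + nbytes - 1 would point
	 * sizeof(struct sg_entry) * (nbytes - 1) bytes past tbl, far
	 * outside the two-entry table. */
	printf("last entry len = 0x%x (table is %u bytes)\n",
	       (unsigned)tbl[1].len, nbytes);
	return 0;
}
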
index 48f4535..ede9e9e 100644 (file)
@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
-               BUG_ON(req->dst->length < nbytes);
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                        flags, DMA_FROM_DEVICE))
index 67f8081..e4311ce 100644 (file)
@@ -494,8 +494,9 @@ out:
 static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
index 2617cd4..dd7e9f3 100644 (file)
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
 
-       memcpy(nx_ctx->priv.ctr.iv,
+       memcpy(nx_ctx->priv.ctr.nonce,
               in_key + key_len - CTR_RFC3686_NONCE_SIZE,
               CTR_RFC3686_NONCE_SIZE);
 
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
                                unsigned int           nbytes)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
-       u8 *iv = nx_ctx->priv.ctr.iv;
+       u8 iv[16];
 
+       memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE,
               desc->info, CTR_RFC3686_IV_SIZE);
        iv[12] = iv[13] = iv[14] = 0;
        iv[15] = 1;
 
-       desc->info = nx_ctx->priv.ctr.iv;
+       desc->info = iv;
 
        return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
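
The ctr3686 rework assembles the RFC 3686 counter block on the stack instead of in the shared context: the 4-byte nonce saved at setkey time, the 8-byte per-request IV, and a big-endian 32-bit counter starting at 1. A minimal construction of that 16-byte block (sample nonce and IV invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CTR_RFC3686_NONCE_SIZE	4
#define CTR_RFC3686_IV_SIZE	8

int main(void)
{
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE] = { 0xaa, 0xbb, 0xcc, 0xdd };
	uint8_t req_iv[CTR_RFC3686_IV_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[16];
	int i;

	/* nonce | per-request IV | 32-bit big-endian counter = 1 */
	memcpy(iv, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req_iv, CTR_RFC3686_IV_SIZE);
	iv[12] = iv[13] = iv[14] = 0;
	iv[15] = 1;

	for (i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}
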
index 88c5624..c6ebeb6 100644 (file)
@@ -330,6 +330,7 @@ out:
 static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-       desc.info = nx_ctx->priv.gcm.iv;
+       desc.info = rctx->iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
@@ -434,8 +435,8 @@ out:
 
 static int gcm_aes_nx_encrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
index 8c2faff..c2f7d4b 100644 (file)
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                           unsigned int         key_len)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+       struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 
        switch (key_len) {
        case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                return -EINVAL;
        }
 
-       memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+       memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
 
        return 0;
 }
@@ -148,32 +149,29 @@ out:
        return rc;
 }
 
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
 {
-       struct xcbc_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
-       struct nx_sg *out_sg;
-       int len;
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_AES);
+       err = nx_crypto_ctx_aes_xcbc_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_AES);
 
        NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
 
-       memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
-       memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
-       len = AES_BLOCK_SIZE;
-       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-                                 &len, nx_ctx->ap->sglen);
+       return 0;
+}
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+       struct xcbc_state *sctx = shash_desc_ctx(desc);
 
-       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+       memset(sctx, 0, sizeof *sctx);
 
        return 0;
 }
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u32 to_process = 0, leftover, total;
        unsigned int max_sg_len;
        unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
        max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
+       data_len = AES_BLOCK_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &len, nx_ctx->ap->sglen);
+
+       if (data_len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
        do {
                to_process = total - to_process;
                to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                                (u8 *) sctx->buffer,
                                                &data_len,
                                                max_sg_len);
-                       if (data_len != sctx->count)
-                               return -EINVAL;
+                       if (data_len != sctx->count) {
+                               rc = -EINVAL;
+                               goto out;
+                       }
                }
 
                data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                        &data_len,
                                        max_sg_len);
 
-               if (data_len != to_process - sctx->count)
-                       return -EINVAL;
+               if (data_len != to_process - sctx->count) {
+                       rc = -EINVAL;
+                       goto out;
+               }
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
                                 &len, nx_ctx->ap->sglen);
 
-       if (len != sctx->count)
-               return -EINVAL;
+       if (len != sctx->count) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+       if (len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_aes_xcbc_init,
+               .cra_init        = nx_crypto_ctx_aes_xcbc_init2,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
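
Across the reworked NX update paths, the chunk handed to the coprocessor is rounded down to a whole number of blocks with to_process & ~(BLOCK_SIZE - 1), and the remainder is carried over to the next call. The mask trick works because the block size is a power of two; a short demonstration:

#include <stdio.h>

#define AES_BLOCK_SIZE	16	/* power of two, so the mask trick applies */

int main(void)
{
	unsigned long total = 1000;	/* bytes queued for this update */
	unsigned long to_process, leftover;

	/* Round down to a multiple of the block size. */
	to_process = total & ~(unsigned long)(AES_BLOCK_SIZE - 1);
	leftover = total - to_process;

	printf("process %lu bytes now, carry %lu bytes over\n",
	       to_process, leftover);	/* prints 992 and 8 */
	return 0;
}
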
index 23621da..becb738 100644 (file)
 #include "nx.h"
 
 
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
 {
-       struct sha256_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       int len;
-       int rc;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
 
-       len = SHA256_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 (u8 *) sctx->state,
-                                 NX_DS_SHA256);
+       return 0;
+}
 
-       if (rc)
-               goto out;
+static int nx_sha256_init(struct shash_desc *desc) {
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be32(SHA256_H0);
        sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
        sctx->state[7] = __cpu_to_be32(SHA256_H7);
        sctx->count = 0;
 
-out:
        return 0;
 }
 
@@ -74,10 +71,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
+       u32 max_sg_len;
        u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -97,38 +96,57 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+       data_len = SHA256_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA256_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
-               /*
-                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - to_process;
-               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                                 &nx_ctx->op.inlen,
-                                                 &data_len,
-                                                 (u8 *) sctx->buf,
-                                                 NX_DS_SHA256);
+                       in_sg = nx_build_sg_list(in_sg,
+                                                (u8 *) sctx->buf,
+                                                &data_len,
+                                                max_sg_len);
 
-                       if (rc || data_len != buf_len)
+                       if (data_len != buf_len) {
+                               rc = -EINVAL;
                                goto out;
+                       }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by the sg list limits and the number of sgs we already
+                * used for leftover data (see above).
+                * In the ideal case we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
-               rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                         &nx_ctx->op.inlen,
-                                         &data_len,
-                                         (u8 *) data,
-                                         NX_DS_SHA256);
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+                                        &data_len, max_sg_len);
 
-               if (rc)
-                       goto out;
+               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
@@ -173,12 +191,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
-       int rc;
+       u32 max_sg_len;
+       int rc = 0;
        int len;
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        if (sctx->count >= SHA256_BLOCK_SIZE) {
@@ -195,25 +220,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
        len = sctx->count & (SHA256_BLOCK_SIZE - 1);
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                 &nx_ctx->op.inlen,
-                                 &len,
-                                 (u8 *) sctx->buf,
-                                 NX_DS_SHA256);
+       in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+                                &len, max_sg_len);
 
-       if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+       if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+               rc = -EINVAL;
                goto out;
+       }
 
        len = SHA256_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 out,
-                                 NX_DS_SHA256);
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
 
-       if (rc || len != SHA256_DIGEST_SIZE)
+       if (len != SHA256_DIGEST_SIZE) {
+               rc = -EINVAL;
                goto out;
+       }
 
+       nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
@@ -268,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha256_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
index b3adf10..b6e183d 100644 (file)
 #include "nx.h"
 
 
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
 {
-       struct sha512_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       int len;
-       int rc;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
 
-       len = SHA512_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 (u8 *)sctx->state,
-                                 NX_DS_SHA512);
+       return 0;
+}
 
-       if (rc || len != SHA512_DIGEST_SIZE)
-               goto out;
+static int nx_sha512_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
        sctx->state[7] = __cpu_to_be64(SHA512_H7);
        sctx->count[0] = 0;
 
-out:
        return 0;
 }
 
@@ -73,10 +71,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
+       u32 max_sg_len;
        u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -96,39 +96,61 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+       data_len = SHA512_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA512_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
-               /*
-                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - leftover;
-               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-               leftover = total - to_process;
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                                 &nx_ctx->op.inlen,
-                                                 &data_len,
-                                                 (u8 *) sctx->buf,
-                                                 NX_DS_SHA512);
+                       in_sg = nx_build_sg_list(in_sg,
+                                                (u8 *) sctx->buf,
+                                                &data_len, max_sg_len);
 
-                       if (rc || data_len != buf_len)
+                       if (data_len != buf_len) {
+                               rc = -EINVAL;
                                goto out;
+                       }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by the sg list limits and the number of sgs we already
+                * used for leftover data (see above).
+                * In the ideal case we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
-               rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                         &nx_ctx->op.inlen,
-                                         &data_len,
-                                         (u8 *) data,
-                                         NX_DS_SHA512);
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+                                        &data_len, max_sg_len);
 
-               if (rc || data_len != (to_process - buf_len))
+               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+               if (data_len != (to_process - buf_len)) {
+                       rc = -EINVAL;
                        goto out;
+               }
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
@@ -172,13 +194,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg, *out_sg;
+       u32 max_sg_len;
        u64 count0;
        unsigned long irq_flags;
-       int rc;
+       int rc = 0;
        int len;
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
@@ -200,24 +229,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
        csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
        len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                 &nx_ctx->op.inlen,
-                                 &len,
-                                 (u8 *)sctx->buf,
-                                 NX_DS_SHA512);
+       in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+                                max_sg_len);
 
-       if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+       if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+               rc = -EINVAL;
                goto out;
+       }
 
        len = SHA512_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 out,
-                                 NX_DS_SHA512);
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+                                max_sg_len);
 
-       if (rc)
-               goto out;
+       nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
@@ -273,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
index 1da6dc5..737d33d 100644 (file)
@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
  * @delta:  is the amount we need to crop in order to bound the list.
  *
  */
-static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+static long int trim_sg_list(struct nx_sg *sg,
+                            struct nx_sg *end,
+                            unsigned int delta,
+                            unsigned int *nbytes)
 {
+       long int oplen;
+       long int data_back;
+       unsigned int is_delta = delta;
+
        while (delta && end > sg) {
                struct nx_sg *last = end - 1;
 
@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
                        delta -= last->len;
                }
        }
-       return (sg - end) * sizeof(struct nx_sg);
-}
-
-/**
- * nx_sha_build_sg_list - walk and build sg list to sha modes
- *                       using right bounds and limits.
- * @nx_ctx: NX crypto context for the lists we're building
- * @nx_sg: current sg list in or out list
- * @op_len: current op_len to be used in order to build a sg list
- * @nbytes:  number or bytes to be processed
- * @offset: buf offset
- * @mode: SHA256 or SHA512
- */
-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
-                         struct nx_sg        *nx_in_outsg,
-                         s64                 *op_len,
-                         unsigned int        *nbytes,
-                         u8                  *offset,
-                         u32                 mode)
-{
-       unsigned int delta = 0;
-       unsigned int total = *nbytes;
-       struct nx_sg *nx_insg = nx_in_outsg;
-       unsigned int max_sg_len;
 
-       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-       max_sg_len = min_t(u64, max_sg_len,
-                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-
-       *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
-       nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
-
-       switch (mode) {
-       case NX_DS_SHA256:
-               if (*nbytes < total)
-                       delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
-               break;
-       case NX_DS_SHA512:
-               if (*nbytes < total)
-                       delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
-               break;
-       default:
-               return -EINVAL;
+       /* There are cases where we need to crop the list in order to make it
+        * a block size multiple, but we also need to align data. In order to
+        * do that we need to calculate how much we need to put back to be
+        * processed.
+        */
+       oplen = (sg - end) * sizeof(struct nx_sg);
+       if (is_delta) {
+               data_back = (abs(oplen) / AES_BLOCK_SIZE) *  sg->len;
+               data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
+               *nbytes -= data_back;
        }
-       *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
 
-       return 0;
+       return oplen;
 }
 
 /**
@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
-       nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
-       nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
+       nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
+       nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
 
        return 0;
 }
@@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 /* entry points from the crypto tfm initializers */
 int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
 {
+       tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
 }
 
 int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
 {
+       tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
 }
index 6c9ecaa..c3ed837 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __NX_H__
 #define __NX_H__
 
+#include <crypto/ctr.h>
+
 #define NX_NAME                "nx-crypto"
 #define NX_STRING      "IBM Power7+ Nest Accelerator Crypto Driver"
 #define NX_VERSION     "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
 
 #define NX_GCM4106_NONCE_LEN           (4)
 #define NX_GCM_CTR_OFFSET              (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
        u8 iv[16];
+};
+
+struct nx_gcm_priv {
        u8 iauth_tag[16];
        u8 nonce[NX_GCM4106_NONCE_LEN];
 };
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
 #define NX_CCM_AES_KEY_LEN             (16)
 #define NX_CCM4309_AES_KEY_LEN         (19)
 #define NX_CCM4309_NONCE_LEN           (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
        u8 iv[16];
+};
+
+struct nx_ccm_priv {
        u8 b0[16];
        u8 iauth_tag[16];
        u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
 };
 
 struct nx_ctr_priv {
-       u8 iv[16];
+       u8 nonce[CTR_RFC3686_NONCE_SIZE];
 };
 
 struct nx_crypto_ctx {
@@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
                  u32 may_sleep);
-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
-                        s64 *, unsigned int *, u8 *, u32);
 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
                      struct scatterlist *, struct scatterlist *, unsigned int *,
index 4630709..0a70e46 100644 (file)
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);
 
-       dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-       dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
        return err;
 }
 
index 1dc5b0a..34139a8 100644 (file)
@@ -73,7 +73,8 @@
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
 
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
 
 struct qat_alg_buf {
        uint32_t len;
@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       if (atomic_add_return(1, &active_dev) == 1) {
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
                int i;
 
                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
-       return 0;
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_unregister(void)
 {
-       if (atomic_sub_return(1, &active_dev) == 0)
-               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       return 0;
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_init(void)
 {
-       atomic_set(&active_dev, 0);
        crypto_get_default_rng();
        return 0;
 }
index 7992164..c89a7ab 100644 (file)
@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
-               desc->lld.mbr_cfg = atchan->cfg;
-               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+               dwidth = at_xdmac_get_dwidth(atchan->cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
-                              ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+                              ? dwidth
                               : AT_XDMAC_CC_DWIDTH_BYTE;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
                        | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
                        | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
                        | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)          /* descriptor fetch */
                        | (len >> fixed_dwidth);                                /* microblock length */
+               desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+                                   AT_XDMAC_CC_DWIDTH(fixed_dwidth);
                dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
index 1c56001..50f1b42 100644 (file)
@@ -273,7 +273,8 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
        dma_cookie_t cookie = 0;
        int busy = mv_chan_is_busy(mv_chan);
        u32 current_desc = mv_chan_get_current_desc(mv_chan);
-       int seen_current = 0;
+       int current_cleaned = 0;
+       struct mv_xor_desc *hw_desc;
 
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -285,38 +286,57 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                        chain_node) {
-               prefetch(_iter);
-               prefetch(&_iter->async_tx);
 
-               /* do not advance past the current descriptor loaded into the
-                * hardware channel, subsequent descriptors are either in
-                * process or have not been submitted
-                */
-               if (seen_current)
-                       break;
+               /* clean finished descriptors */
+               hw_desc = iter->hw_desc;
+               if (hw_desc->status & XOR_DESC_SUCCESS) {
+                       cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
+                                                               cookie);
 
-               /* stop the search if we reach the current descriptor and the
-                * channel is busy
-                */
-               if (iter->async_tx.phys == current_desc) {
-                       seen_current = 1;
-                       if (busy)
+                       /* done processing desc, clean slot */
+                       mv_xor_clean_slot(iter, mv_chan);
+
+                       /* break if we just cleaned the current descriptor */
+                       if (iter->async_tx.phys == current_desc) {
+                               current_cleaned = 1;
+                               break;
+                       }
+               } else {
+                       if (iter->async_tx.phys == current_desc) {
+                               current_cleaned = 0;
                                break;
+                       }
                }
-
-               cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
-               if (mv_xor_clean_slot(iter, mv_chan))
-                       break;
        }
 
        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
-               struct mv_xor_desc_slot *chain_head;
-               chain_head = list_entry(mv_chan->chain.next,
-                                       struct mv_xor_desc_slot,
-                                       chain_node);
-
-               mv_xor_start_new_chain(mv_chan, chain_head);
+               if (current_cleaned) {
+                       /*
+                        * current descriptor cleaned and removed, run
+                        * from list head
+                        */
+                       iter = list_entry(mv_chan->chain.next,
+                                         struct mv_xor_desc_slot,
+                                         chain_node);
+                       mv_xor_start_new_chain(mv_chan, iter);
+               } else {
+                       if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
+                               /*
+                                * descriptors are still waiting after
+                                * current, trigger them
+                                */
+                               iter = list_entry(iter->chain_node.next,
+                                                 struct mv_xor_desc_slot,
+                                                 chain_node);
+                               mv_xor_start_new_chain(mv_chan, iter);
+                       } else {
+                               /*
+                                * some descriptors are still waiting
+                                * to be cleaned
+                                */
+                               tasklet_schedule(&mv_chan->irq_tasklet);
+                       }
+               }
        }
 
        if (cookie > 0)
index 91958db..0e302b3 100644 (file)
@@ -31,6 +31,7 @@
 #define XOR_OPERATION_MODE_XOR         0
 #define XOR_OPERATION_MODE_MEMCPY      2
 #define XOR_DESCRIPTOR_SWAP            BIT(14)
+#define XOR_DESC_SUCCESS               0x40000000
 
 #define XOR_DESC_DMA_OWNED             BIT(31)
 #define XOR_DESC_EOD_INT_EN            BIT(31)
index 340f9e6..3dabc52 100644 (file)
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
                        desc->txd.callback = last->txd.callback;
                        desc->txd.callback_param = last->txd.callback_param;
                }
-               last->last = false;
+               desc->last = false;
 
                dma_cookie_assign(&desc->txd);
 
@@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                desc->rqcfg.brst_len = 1;
 
        desc->rqcfg.brst_len = get_burst_len(desc, len);
+       desc->bytes_requested = len;
 
        desc->txd.flags = flags;
 
index 7e98084..afea7fc 100644 (file)
@@ -151,7 +151,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
        l2c->ctl_name = "octeon_l2c_err";
 
 
-       if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+       if (OCTEON_IS_OCTEON1PLUS()) {
                union cvmx_l2t_err l2t_err;
                union cvmx_l2d_err l2d_err;
 
index bb19e07..cda6dab 100644 (file)
@@ -234,7 +234,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
        layers[0].size = 1;
        layers[0].is_virt_csrow = false;
 
-       if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
+       if (OCTEON_IS_OCTEON1PLUS()) {
                union cvmx_lmcx_mem_cfg0 cfg0;
 
                cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
index 0f83c33..2ab6cf2 100644 (file)
@@ -73,7 +73,7 @@ static int  co_cache_error_event(struct notifier_block *this,
                        edac_device_handle_ce(p->ed, cpu, 0, "dcache");
 
                /* Clear the error indication */
-               if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+               if (OCTEON_IS_OCTEON2())
                        write_octeon_c0_dcacheerr(1);
                else
                        write_octeon_c0_dcacheerr(0);
index 3515b38..711d8ad 100644 (file)
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
         */
 
        for (row = 0; row < mci->nr_csrows; row++) {
-               struct csrow_info *csi = &mci->csrows[row];
+               struct csrow_info *csi = mci->csrows[row];
 
                /*
                 * Get the configuration settings for this
index 97b1616..bba843c 100644 (file)
@@ -89,9 +89,9 @@ static void dmi_table(u8 *buf,
 
        /*
         * Stop when we have seen all the items the table claimed to have
-        * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
-        * off the end of the table (should never happen but sometimes does
-        * on bogus implementations.)
+        * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
+        * >= 3.0 only) OR we run off the end of the table (should never
+        * happen but sometimes does on bogus implementations.)
         */
        while ((!dmi_num || i < dmi_num) &&
               (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
@@ -110,8 +110,13 @@ static void dmi_table(u8 *buf,
 
                /*
                 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+                * For tables behind a 64-bit entry point, we have no item
+                * count and no exact table length, so stop on end-of-table
+                * marker. For tables behind a 32-bit entry point, we have
+                * seen OEM structures behind the end-of-table marker on
+                * some systems, so don't trust it.
                 */
-               if (dm->type == DMI_ENTRY_END_OF_TABLE)
+               if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
                        break;
 
                data += 2;
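
The comment above encodes two different termination rules for the table walk. A minimal sketch of the stop condition, with dmi_num, items_seen and dm_type as illustrative parameters (dmi_num == 0 standing for a 64-bit entry point with no item count):

#include <stdbool.h>

#define DMI_ENTRY_END_OF_TABLE 127

static bool dmi_stop_here(int dmi_num, int items_seen, int dm_type)
{
        if (dmi_num && items_seen >= dmi_num)
                return true;    /* 32-bit entry point: trust the item count */
        if (!dmi_num && dm_type == DMI_ENTRY_END_OF_TABLE)
                return true;    /* 64-bit entry point: trust the Type 127 marker */
        return false;           /* otherwise keep scanning until the length runs out */
}
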
index 4fd9961..d425374 100644 (file)
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
        return ret;
 }
 
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+       int len)
 {
        struct cper_mem_err_compact cmem;
 
+       /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+       if (len == sizeof(struct cper_sec_mem_err_old) &&
+           (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+               pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+               return;
+       }
        if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
                printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
        if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
                printk("%s""section_type: memory error\n", newpfx);
-               if (gdata->error_data_length >= sizeof(*mem_err))
-                       cper_print_mem(newpfx, mem_err);
+               if (gdata->error_data_length >=
+                   sizeof(struct cper_sec_mem_err_old))
+                       cper_print_mem(newpfx, mem_err,
+                                      gdata->error_data_length);
                else
                        goto err_section_too_small;
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
index e14363d..63226e9 100644 (file)
@@ -57,6 +57,11 @@ bool efi_runtime_disabled(void)
 
 static int __init parse_efi_cmdline(char *str)
 {
+       if (!str) {
+               pr_warn("need at least one option\n");
+               return -EINVAL;
+       }
+
        if (parse_option_str(str, "noruntime"))
                disable_runtime = true;
 
index 60b0c13..aebc459 100644 (file)
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
index 9d2f053..63a09e4 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/delay.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of.h>
index 3007b44..800a025 100644 (file)
@@ -2749,8 +2749,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       /* For some reason crtc x/y offsets are signed internally. */
-       if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+       /*
+        * Universal plane src offsets are only 16.16, prevent havoc for
+        * drivers using universal plane code internally.
+        */
+       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
                return -ERANGE;
 
        drm_modeset_lock_all(dev);
@@ -5048,12 +5051,9 @@ void drm_mode_config_reset(struct drm_device *dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               connector->status = connector_status_unknown;
-
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
-       }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
index 132581c..b0487c9 100644 (file)
@@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct kref *kref)
                port->vcpi.num_slots = 0;
 
                kfree(port->cached_edid);
-               if (port->connector)
-                       (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+
+               /* we can't destroy the connector here, as
+                  we might be holding the mode_config.mutex
+                  from an EDID retrieval */
+               if (port->connector) {
+                       mutex_lock(&mgr->destroy_connector_lock);
+                       list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+                       mutex_unlock(&mgr->destroy_connector_lock);
+                       schedule_work(&mgr->destroy_connector_work);
+               }
                drm_dp_port_teardown_pdt(port, port->pdt);
 
                if (!port->input && port->vcpi.vcpi > 0)
@@ -1163,6 +1171,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
        struct drm_dp_mst_port *port;
        int i;
        /* find the port by iterating down */
+
+       mutex_lock(&mgr->lock);
        mstb = mgr->mst_primary;
 
        for (i = 0; i < lct - 1; i++) {
@@ -1182,6 +1192,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
                }
        }
        kref_get(&mstb->kref);
+       mutex_unlock(&mgr->lock);
        return mstb;
 }
 
@@ -1189,7 +1200,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                                               struct drm_dp_mst_branch *mstb)
 {
        struct drm_dp_mst_port *port;
-
+       struct drm_dp_mst_branch *mstb_child;
        if (!mstb->link_address_sent) {
                drm_dp_send_link_address(mgr, mstb);
                mstb->link_address_sent = true;
@@ -1204,17 +1215,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                if (!port->available_pbn)
                        drm_dp_send_enum_path_resources(mgr, mstb, port);
 
-               if (port->mstb)
-                       drm_dp_check_and_send_link_address(mgr, port->mstb);
+               if (port->mstb) {
+                       mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+                       if (mstb_child) {
+                               drm_dp_check_and_send_link_address(mgr, mstb_child);
+                               drm_dp_put_mst_branch_device(mstb_child);
+                       }
+               }
        }
 }
 
 static void drm_dp_mst_link_probe_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+       struct drm_dp_mst_branch *mstb;
 
-       drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
-
+       mutex_lock(&mgr->lock);
+       mstb = mgr->mst_primary;
+       if (mstb) {
+               kref_get(&mstb->kref);
+       }
+       mutex_unlock(&mgr->lock);
+       if (mstb) {
+               drm_dp_check_and_send_link_address(mgr, mstb);
+               drm_dp_put_mst_branch_device(mstb);
+       }
 }
 
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1269,7 +1294,6 @@ retry:
                                goto retry;
                        }
                        DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
-                       WARN(1, "fail\n");
 
                        return -EIO;
                }
@@ -2632,6 +2656,30 @@ static void drm_dp_tx_work(struct work_struct *work)
        mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_destroy_connector_work(struct work_struct *work)
+{
+       struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+       struct drm_connector *connector;
+
+       /*
+        * Not a regular list traverse as we have to drop the destroy
+        * connector lock before destroying the connector, to avoid AB->BA
+        * ordering between this lock and the config mutex.
+        */
+       for (;;) {
+               mutex_lock(&mgr->destroy_connector_lock);
+               connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
+               if (!connector) {
+                       mutex_unlock(&mgr->destroy_connector_lock);
+                       break;
+               }
+               list_del(&connector->destroy_list);
+               mutex_unlock(&mgr->destroy_connector_lock);
+
+               mgr->cbs->destroy_connector(mgr, connector);
+       }
+}
+
 /**
  * drm_dp_mst_topology_mgr_init - initialise a topology manager
  * @mgr: manager struct to initialise
@@ -2651,10 +2699,13 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        mutex_init(&mgr->lock);
        mutex_init(&mgr->qlock);
        mutex_init(&mgr->payload_lock);
+       mutex_init(&mgr->destroy_connector_lock);
        INIT_LIST_HEAD(&mgr->tx_msg_upq);
        INIT_LIST_HEAD(&mgr->tx_msg_downq);
+       INIT_LIST_HEAD(&mgr->destroy_connector_list);
        INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
        INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+       INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
        init_waitqueue_head(&mgr->tx_waitq);
        mgr->dev = dev;
        mgr->aux = aux;
@@ -2679,6 +2730,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+       flush_work(&mgr->destroy_connector_work);
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
        mgr->payloads = NULL;
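
The drain loop in drm_dp_destroy_connector_work() above follows a general pattern for breaking AB-BA lock ordering: defer the teardown to a worker and pop one entry at a time, dropping the list lock before calling into code that may take other locks. A stand-alone sketch of the same loop, assuming a hypothetical node type and destroy() callback in place of the real drm_connector:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *destroy_list;                 /* protected by destroy_lock */

static void destroy(struct node *n) { free(n); }  /* placeholder teardown that takes other locks */

static void drain_destroy_list(void)
{
        for (;;) {
                struct node *n;

                pthread_mutex_lock(&destroy_lock);
                n = destroy_list;                 /* pop the first pending entry */
                if (!n) {
                        pthread_mutex_unlock(&destroy_lock);
                        break;
                }
                destroy_list = n->next;
                pthread_mutex_unlock(&destroy_lock);

                destroy(n);                       /* run teardown with the list lock dropped */
        }
}
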
index aa8bbb4..9cfcd0a 100644 (file)
@@ -70,6 +70,8 @@
 
 #define DRM_IOCTL_WAIT_VBLANK32                DRM_IOWR(0x3a, drm_wait_vblank32_t)
 
+#define DRM_IOCTL_MODE_ADDFB232                DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
 typedef struct drm_version_32 {
        int version_major;        /**< Major version */
        int version_minor;        /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
+typedef struct drm_mode_fb_cmd232 {
+       u32 fb_id;
+       u32 width;
+       u32 height;
+       u32 pixel_format;
+       u32 flags;
+       u32 handles[4];
+       u32 pitches[4];
+       u32 offsets[4];
+       u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+       struct drm_mode_fb_cmd232 req32;
+       struct drm_mode_fb_cmd2 __user *req64;
+       int i;
+       int err;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       req64 = compat_alloc_user_space(sizeof(*req64));
+
+       if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+           || __put_user(req32.width, &req64->width)
+           || __put_user(req32.height, &req64->height)
+           || __put_user(req32.pixel_format, &req64->pixel_format)
+           || __put_user(req32.flags, &req64->flags))
+               return -EFAULT;
+
+       for (i = 0; i < 4; i++) {
+               if (__put_user(req32.handles[i], &req64->handles[i]))
+                       return -EFAULT;
+               if (__put_user(req32.pitches[i], &req64->pitches[i]))
+                       return -EFAULT;
+               if (__put_user(req32.offsets[i], &req64->offsets[i]))
+                       return -EFAULT;
+               if (__put_user(req32.modifier[i], &req64->modifier[i]))
+                       return -EFAULT;
+       }
+
+       err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+       if (err)
+               return err;
+
+       if (__get_user(req32.fb_id, &req64->fb_id))
+               return -EFAULT;
+
+       if (copy_to_user(argp, &req32, sizeof(req32)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+       [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
 };
 
 /**
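
The compat wrapper added above exists because the addfb2 argument ends in u64 modifier[4]: on common 32-bit ABIs such as i386 a u64 struct member is only 4-byte aligned, while a 64-bit kernel aligns it to 8 bytes, so the struct size and the offset of modifier differ and the ioctl argument has to be repacked field by field. A small sketch (struct names shortened, not the uapi definitions) that shows the mismatch:

#include <stdint.h>
#include <stdio.h>

/* Natural 64-bit layout: padding before the u64 array, 104 bytes total. */
struct fb_cmd2 {
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
};

/* 32-bit user-space layout (hence __packed in the wrapper): no padding, 100 bytes. */
struct fb_cmd2_32 {
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
} __attribute__((packed));

int main(void)
{
        printf("native: %zu bytes, compat32: %zu bytes\n",
               sizeof(struct fb_cmd2), sizeof(struct fb_cmd2_32));
        return 0;
}
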
index 8ae6f7f..683a9b0 100644 (file)
@@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
-               u32 upper = I915_READ(upper_reg);                       \
-               u32 lower = I915_READ(lower_reg);                       \
-               u32 tmp = I915_READ(upper_reg);                         \
-               if (upper != tmp) {                                     \
-                       upper = tmp;                                    \
-                       lower = I915_READ(lower_reg);                   \
-                       WARN_ON(I915_READ(upper_reg) != upper);         \
-               }                                                       \
-               (u64)upper << 32 | lower; })
+       u32 upper, lower, tmp;                                          \
+       tmp = I915_READ(upper_reg);                                     \
+       do {                                                            \
+               upper = tmp;                                            \
+               lower = I915_READ(lower_reg);                           \
+               tmp = I915_READ(upper_reg);                             \
+       } while (upper != tmp);                                         \
+       (u64)upper << 32 | lower; })
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
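
The rewritten I915_READ64_2x32 above is the standard way to read a 64-bit counter exposed as two 32-bit registers: sample high, then low, then high again, and retry until the two high samples agree, so the value cannot be torn by a carry between the reads. A stand-alone sketch of the same loop, with reg_hi/reg_lo standing in for the hardware registers:

#include <stdint.h>

/* Stand-ins for the two 32-bit halves of a hardware counter. */
static volatile uint32_t reg_hi, reg_lo;

static uint32_t read_hi(void) { return reg_hi; }
static uint32_t read_lo(void) { return reg_lo; }

static uint64_t read64_2x32(void)
{
        uint32_t upper, lower, tmp;

        tmp = read_hi();
        do {
                upper = tmp;            /* candidate high word */
                lower = read_lo();
                tmp = read_hi();        /* did the high word change meanwhile? */
        } while (upper != tmp);

        return (uint64_t)upper << 32 | lower;
}
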
index 2d0995e..596bce5 100644 (file)
@@ -2401,6 +2401,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        }
 
        request->emitted_jiffies = jiffies;
+       ring->last_submitted_seqno = request->seqno;
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;
 
index 0239fbf..ad90fa3 100644 (file)
@@ -502,17 +502,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                struct page *page_table;
 
                if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
-                       continue;
+                       break;
 
                pd = ppgtt->pdp.page_directory[pdpe];
 
                if (WARN_ON(!pd->page_table[pde]))
-                       continue;
+                       break;
 
                pt = pd->page_table[pde];
 
                if (WARN_ON(!pt->page))
-                       continue;
+                       break;
 
                page_table = pt->page;
 
index 6377b22..7ee23d1 100644 (file)
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        }
 
        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-       args->phys_swizzle_mode = args->swizzle_mode;
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+       else
+               args->phys_swizzle_mode = args->swizzle_mode;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
index 176de63..23aa04c 100644 (file)
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        drm_ioctl_compat_t *fn = NULL;
        int ret;
 
-       if (nr < DRM_COMMAND_BASE)
+       if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
                return drm_compat_ioctl(filp, cmd, arg);
 
        if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
index 6d49443..b0df8d1 100644 (file)
@@ -2650,18 +2650,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
-       return list_entry(ring->request_list.prev,
-                         struct drm_i915_gem_request, list);
-}
-
 static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
        return (list_empty(&ring->request_list) ||
-               i915_gem_request_completed(ring_last_request(ring), false));
+               i915_seqno_passed(seqno, ring->last_submitted_seqno));
 }
 
 static bool
@@ -2883,7 +2876,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                acthd = intel_ring_get_active_head(ring);
 
                if (ring->hangcheck.seqno == seqno) {
-                       if (ring_idle(ring)) {
+                       if (ring_idle(ring, seqno)) {
                                ring->hangcheck.action = HANGCHECK_IDLE;
 
                                if (waitqueue_active(&ring->irq_queue)) {
index 773d1d2..a30db4b 100644 (file)
@@ -3209,6 +3209,7 @@ enum skl_disp_power_wells {
 #define   BLM_POLARITY_PNV                     (1 << 0) /* pnv only */
 
 #define BLC_HIST_CTL   (dev_priv->info.display_mmio_offset + 0x61260)
+#define  BLM_HISTOGRAM_ENABLE                  (1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout machtes with gen4 BLC_PWM_CTL[12]. */
index 3002759..4021633 100644 (file)
@@ -12499,6 +12499,16 @@ intel_check_primary_plane(struct drm_plane *plane,
                                intel_crtc->atomic.wait_vblank = true;
                }
 
+               /*
+                * FIXME: If any other plane is still enabled on the pipe we
+                * could leave IPS enabled, but for now assume that when we
+                * make the primary plane invisible by setting DSPCNTR to 0
+                * in update_primary_plane(), IPS needs to be disabled as
+                * well.
+                */
+               if (!state->visible || !fb)
+                       intel_crtc->atomic.disable_ips = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -12590,6 +12600,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
        if (intel_crtc->atomic.disable_fbc)
                intel_fbc_disable(dev);
 
+       if (intel_crtc->atomic.disable_ips)
+               hsw_disable_ips(intel_crtc);
+
        if (intel_crtc->atomic.pre_disable_primary)
                intel_pre_disable_primary(crtc);
 
index d714a4b..b1fe32b 100644 (file)
@@ -1150,6 +1150,19 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+       /* WaDisableHBR2:skl */
+       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+               return false;
+
+       if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+           (INTEL_INFO(dev)->gen >= 9))
+               return true;
+       else
+               return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
@@ -1163,11 +1176,8 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 
        *source_rates = default_rates;
 
-       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-               /* WaDisableHBR2:skl */
-               return (DP_LINK_BW_2_7 >> 3) + 1;
-       else if (INTEL_INFO(dev)->gen >= 8 ||
-           (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+       /* This depends on the fact that 5.4 is last value in the array */
+       if (intel_dp_source_supports_hbr2(dev))
                return (DP_LINK_BW_5_4 >> 3) + 1;
        else
                return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3783,10 +3793,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                }
        }
 
-       /* Training Pattern 3 support, both source and sink */
+       /* Training Pattern 3 support: only Intel platforms that support HBR2
+        * also support TP3, so that check is used together with the DPCD check
+        * to ensure TP3 can be enabled.
+        * SKL < B0 is the only exception: due to WaDisableHBR2 it supports TP3
+        * but does not enable it.
+        */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-           (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+           intel_dp_source_supports_hbr2(dev)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
index 897f17d..68d1f74 100644 (file)
@@ -424,6 +424,7 @@ struct intel_crtc_atomic_commit {
        /* Sleepable operations to perform before commit */
        bool wait_for_flips;
        bool disable_fbc;
+       bool disable_ips;
        bool pre_disable_primary;
        bool update_wm;
        unsigned disabled_planes;
index 424e621..9ab7c1c 100644 (file)
@@ -848,6 +848,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
                ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
                if (ret)
                        goto unpin_ctx_obj;
+
+               ctx_obj->dirty = true;
        }
 
        return ret;
index 08532d4..2bf92cb 100644 (file)
@@ -879,6 +879,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 
        /* XXX: combine this into above write? */
        intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+       /*
+        * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+        * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+        * that has backlight.
+        */
+       if (IS_GEN2(dev))
+               I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }
 
 static void i965_enable_backlight(struct intel_connector *connector)
index c761fe0..94514d3 100644 (file)
@@ -266,6 +266,13 @@ struct  intel_engine_cs {
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *outstanding_lazy_request;
+       /**
+        * Seqno of request most recently submitted to request_list.
+        * Used exclusively by hang checker to avoid grabbing lock while
+        * inspecting request list.
+        */
+       u32 last_submitted_seqno;
+
        bool gpu_caches_dirty;
 
        wait_queue_head_t irq_queue;
index ff2a746..a18807e 100644 (file)
@@ -1220,10 +1220,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
+       unsigned size;
+       u64 offset;
        int i, ret = 0;
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-               if (entry->offset == reg->offset &&
+               if (entry->offset == (reg->offset & -entry->size) &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }
@@ -1231,23 +1233,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
 
+       /* We use the low bits to encode extra flags as the register should
+        * be naturally aligned (and those that are not so aligned merely
+        * limit the available flags for that register).
+        */
+       offset = entry->offset;
+       size = entry->size;
+       size |= reg->offset ^ offset;
+
        intel_runtime_pm_get(dev_priv);
 
-       switch (entry->size) {
+       switch (size) {
+       case 8 | 1:
+               reg->val = I915_READ64_2x32(offset, offset+4);
+               break;
        case 8:
-               reg->val = I915_READ64(reg->offset);
+               reg->val = I915_READ64(offset);
                break;
        case 4:
-               reg->val = I915_READ(reg->offset);
+               reg->val = I915_READ(offset);
                break;
        case 2:
-               reg->val = I915_READ16(reg->offset);
+               reg->val = I915_READ16(offset);
                break;
        case 1:
-               reg->val = I915_READ8(reg->offset);
+               reg->val = I915_READ8(offset);
                break;
        default:
-               MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }
index 8904933..cd6dae0 100644 (file)
@@ -863,8 +863,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
        pm_runtime_get_sync(dev->dev);
 
+       mutex_lock(&cli->mutex);
        if (cli->abi16)
                nouveau_abi16_fini(cli->abi16);
+       mutex_unlock(&cli->mutex);
 
        mutex_lock(&drm->client.mutex);
        list_del(&cli->head);
index 4ef602c..495c576 100644 (file)
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       if (RING_SPACE(chan, 49)) {
+       if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
                nouveau_fbcon_gpu_lockup(info);
                return 0;
        }
index 7da7958..981342d 100644 (file)
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
 
-       if (show && nv_crtc->cursor.nvbo)
+       if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
                nv50_crtc_cursor_show(nv_crtc);
        else
                nv50_crtc_cursor_hide(nv_crtc);
index 80614f1..282143f 100644 (file)
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
        struct nv04_instobj_priv *node = (void *)object;
+       struct nvkm_subdev *subdev = (void *)priv;
+
+       mutex_lock(&subdev->mutex);
        nvkm_mm_free(&priv->heap, &node->mem);
+       mutex_unlock(&subdev->mutex);
+
        nvkm_instobj_destroy(&node->base);
 }
 
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct nv04_instobj_priv *node;
        struct nvkm_instobj_args *args = data;
+       struct nvkm_subdev *subdev = (void *)priv;
        int ret;
 
        if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
+       mutex_lock(&subdev->mutex);
        ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
                           args->align, &node->mem);
+       mutex_unlock(&subdev->mutex);
        if (ret)
                return ret;
 
index 9782364..f33251d 100644 (file)
@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
+       cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
        cmd->u.surface_create.format = surf->surf.format;
        cmd->u.surface_create.width = surf->surf.width;
        cmd->u.surface_create.height = surf->surf.height;
index b110883..7354a4c 100644 (file)
@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
        qobj = gem_to_qxl_bo(gobj);
 
        ret = qxl_release_list_add(release, qobj);
-       if (ret)
+       if (ret) {
+               drm_gem_object_unreference_unlocked(gobj);
                return NULL;
+       }
 
        return qobj;
 }
index 8730562..4a09947 100644 (file)
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
                        tmp |= DPM_ENABLED;
                        break;
                default:
-                       DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+                       DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
                        break;
                }
                WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
index ba50f3c..8456653 100644 (file)
@@ -4579,6 +4579,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
        WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
+static void cik_compute_stop(struct radeon_device *rdev,
+                            struct radeon_ring *ring)
+{
+       u32 j, tmp;
+
+       cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
+       /* Disable wptr polling. */
+       tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
+       tmp &= ~WPTR_POLL_EN;
+       WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
+       /* Disable HQD. */
+       if (RREG32(CP_HQD_ACTIVE) & 1) {
+               WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
+               for (j = 0; j < rdev->usec_timeout; j++) {
+                       if (!(RREG32(CP_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+               WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
+               WREG32(CP_HQD_PQ_RPTR, 0);
+               WREG32(CP_HQD_PQ_WPTR, 0);
+       }
+       cik_srbm_select(rdev, 0, 0, 0, 0);
+}
+
 /**
  * cik_cp_compute_enable - enable/disable the compute CP MEs
  *
@@ -4592,6 +4617,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
        if (enable)
                WREG32(CP_MEC_CNTL, 0);
        else {
+               /*
+                * To make hibernation reliable we need to clear compute ring
+                * configuration before halting the compute ring.
+                */
+               mutex_lock(&rdev->srbm_mutex);
+               cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+               cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+               mutex_unlock(&rdev->srbm_mutex);
+
                WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
                rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
                rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
@@ -7905,23 +7939,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7931,23 +7969,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7957,23 +7999,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7983,23 +8029,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8009,23 +8059,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8035,23 +8089,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8071,88 +8129,112 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
+
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
+
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
+
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
+
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
+
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
+
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
+
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
+
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
+
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
index f86eb54..d16f2ee 100644 (file)
@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
        }
        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+       /* FIXME: use something other than this big hammer, but after a few
+        * days no better combination could be found, so reset the SDMA
+        * blocks since it seems we do not shut them down properly. This
+        * fixes hibernation and does not affect suspend to RAM.
+        */
+       WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+       (void)RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+       (void)RREG32(SRBM_SOFT_RESET);
 }
 
 /**
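
The cik_sdma_gfx_stop() hunk above adds a soft reset of both SDMA engines as a stop-gap for hibernation: assert the SDMA reset bits in SRBM_SOFT_RESET, read the register back to post the write, wait about 50 microseconds, then clear the reset and post that write too. A standalone sketch of the sequence, with stubbed register accessors, a fake register file and placeholder register/bit values in place of the driver's WREG32/RREG32/udelay and real definitions:

#include <stdint.h>
#include <stdio.h>

#define SRBM_SOFT_RESET   0x0E60u      /* placeholder offset */
#define SOFT_RESET_SDMA   (1u << 20)   /* placeholder bit    */
#define SOFT_RESET_SDMA1  (1u << 6)    /* placeholder bit    */

static uint32_t mmio[0x4000];          /* fake register file */

static void     WREG32(uint32_t reg, uint32_t v) { mmio[reg / 4] = v; }
static uint32_t RREG32(uint32_t reg)             { return mmio[reg / 4]; }
static void     udelay(unsigned int us)          { (void)us; /* no-op stub */ }

static void sdma_soft_reset(void)
{
        /* Assert reset on both SDMA blocks and post the write. */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
        (void)RREG32(SRBM_SOFT_RESET);
        udelay(50);
        /* Release reset and post that write as well. */
        WREG32(SRBM_SOFT_RESET, 0);
        (void)RREG32(SRBM_SOFT_RESET);
}

int main(void)
{
        sdma_soft_reset();
        printf("SRBM_SOFT_RESET = 0x%08x\n", RREG32(SRBM_SOFT_RESET));
        return 0;
}

The read-back after each write mirrors the hunk above, which flushes the posted MMIO write before the delay rather than relying on the write landing immediately.
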
index 68fd9fc..44480c1 100644 (file)
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->offset;
-
-       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+       WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+              AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-               struct drm_connector *connector, struct drm_display_mode *mode)
+                                   struct drm_connector *connector,
+                                   struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 tmp = 0, offset;
+       u32 tmp = 0;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (connector->latency_present[1])
                        tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
                else
                        tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
        }
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                            u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                          u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set DP mode */
        tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
-       struct cea_sad *sads, int sad_count)
+                             struct cea_sad *sads, int sad_count)
 {
-       u32 offset;
        int i;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
                { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
                value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-               WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+               WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
        }
 }
 
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                            struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto0 for HDMI */
        u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                          struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto1 for DP */
        u32 value = 0;
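
The dce6_afmt hunks above replace dig->afmt->pin with dig->pin and drop the local offset temporaries, so each register access reads dig->afmt->offset or dig->pin->offset directly at the call site. A tiny sketch of that lookup chain with simplified stand-in structs (not the driver's real radeon_encoder_atom_dig or audio pin layout):

#include <stdio.h>

/* Simplified stand-ins for the driver structures referenced above. */
struct r600_audio_pin { int id; unsigned int offset; };
struct afmt_block     { unsigned int offset; };
struct dig_encoder    { struct afmt_block *afmt; struct r600_audio_pin *pin; };

static void select_pin(const struct dig_encoder *dig)
{
        /* Mirror the new guard: bail out unless the afmt block and pin exist. */
        if (!dig || !dig->afmt || !dig->pin)
                return;

        /* Register writes now index by dig->afmt->offset / dig->pin->offset. */
        printf("AFMT block at 0x%x selects audio pin %d (endpoint 0x%x)\n",
               dig->afmt->offset, dig->pin->id, dig->pin->offset);
}

int main(void)
{
        struct r600_audio_pin pin  = { .id = 1, .offset = 0x5e00 };
        struct afmt_block     afmt = { .offset = 0x7800 };
        struct dig_encoder    dig  = { .afmt = &afmt, .pin = &pin };

        select_pin(&dig);
        return 0;
}
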
index f848acf..feef136 100644 (file)
@@ -4855,7 +4855,7 @@ restart_ih:
                return IRQ_NONE;
 
        rptr = rdev->ih.rptr;
-       DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+       DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
        /* Order reading of wptr vs. reading of IH ring data */
        rmb();
@@ -4873,23 +4873,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4899,23 +4903,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4925,23 +4933,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4951,23 +4963,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4977,23 +4993,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5003,23 +5023,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5039,88 +5063,100 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5130,46 +5166,52 @@ restart_ih:
                case 44: /* hdmi */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI0\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI0\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI1\n");
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI2\n");
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI3\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI4\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI5\n");
                                break;
                        default:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
index 8f6d862..21e479f 100644 (file)
@@ -4039,23 +4039,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4065,23 +4069,27 @@ restart_ih:
                case 5: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4101,46 +4109,53 @@ restart_ih:
                case 19: /* HPD/DAC hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
                                break;
                        case 12:
-                               if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4150,18 +4165,22 @@ restart_ih:
                case 21: /* hdmi */
                        switch (src_data) {
                        case 4:
-                               if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI0\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI0\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-                                       rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-                                       queue_hdmi = true;
-                                       DRM_DEBUG("IH: HDMI1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+                                       DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+                               queue_hdmi = true;
+                               DRM_DEBUG("IH: HDMI1\n");
+
                                break;
                        default:
                                DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
index 25191f1..59b3d32 100644 (file)
@@ -242,6 +242,35 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
        .dpms = evergreen_dp_enable,
 };
 
+static void radeon_audio_enable(struct radeon_device *rdev,
+                               struct r600_audio_pin *pin, u8 enable_mask)
+{
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+       struct radeon_encoder_atom_dig *dig;
+       int pin_count = 0;
+
+       if (!pin)
+               return;
+
+       if (rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+                       if (radeon_encoder_is_digital(encoder)) {
+                               radeon_encoder = to_radeon_encoder(encoder);
+                               dig = radeon_encoder->enc_priv;
+                               if (dig->pin == pin)
+                                       pin_count++;
+                       }
+               }
+
+               if ((pin_count > 1) && (enable_mask == 0))
+                       return;
+       }
+
+       if (rdev->audio.funcs->enable)
+               rdev->audio.funcs->enable(rdev, pin, enable_mask);
+}
+
 static void radeon_audio_interface_init(struct radeon_device *rdev)
 {
        if (ASIC_IS_DCE6(rdev)) {
@@ -307,7 +336,7 @@ int radeon_audio_init(struct radeon_device *rdev)
 
        /* disable audio.  it will be set up later */
        for (i = 0; i < rdev->audio.num_pins; i++)
-               radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
+               radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
 
        return 0;
 }
@@ -329,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
 
 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct cea_sad *sads;
        int sad_count;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
        sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
        if (sad_count <= 0) {
@@ -355,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
        }
        BUG_ON(!sads);
 
-       radeon_encoder = to_radeon_encoder(encoder);
-
        if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
                radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
 
@@ -365,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 
 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
        u8 *sadb = NULL;
        int sad_count;
 
-       list_for_each_entry(connector,
-                           &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
-       sad_count = drm_edid_to_speaker_allocation(
-               radeon_connector_edid(connector), &sadb);
+       sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+                                                  &sadb);
        if (sad_count < 0) {
                DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
                          sad_count);
@@ -399,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 }
 
 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                             struct drm_display_mode *mode)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = 0;
-
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
-
-       radeon_encoder = to_radeon_encoder(encoder);
 
        if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
                radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -443,54 +435,47 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
                radeon_encoder->audio->select_pin(encoder);
 }
 
-void radeon_audio_enable(struct radeon_device *rdev,
-       struct r600_audio_pin *pin, u8 enable_mask)
-{
-       if (rdev->audio.funcs->enable)
-               rdev->audio.funcs->enable(rdev, pin, enable_mask);
-}
-
 void radeon_audio_detect(struct drm_connector *connector,
+                        struct drm_encoder *encoder,
                         enum drm_connector_status status)
 {
-       struct radeon_device *rdev;
-       struct radeon_encoder *radeon_encoder;
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
 
-       if (!connector || !connector->encoder)
+       if (!radeon_audio_chipset_supported(rdev))
                return;
 
-       rdev = connector->encoder->dev->dev_private;
-
-       if (!radeon_audio_chipset_supported(rdev))
+       if (!radeon_encoder_is_digital(encoder))
                return;
 
-       radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
        if (status == connector_status_connected) {
-               struct radeon_connector *radeon_connector;
-               int sink_type;
-
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_encoder->audio = NULL;
-                       return;
-               }
-
-               radeon_connector = to_radeon_connector(connector);
-               sink_type = radeon_dp_getsinktype(radeon_connector);
-
-               if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-                       sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
-                       radeon_encoder->audio = rdev->audio.dp_funcs;
-               else
+               if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+                       if (radeon_dp_getsinktype(radeon_connector) ==
+                           CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                               radeon_encoder->audio = rdev->audio.dp_funcs;
+                       else
+                               radeon_encoder->audio = rdev->audio.hdmi_funcs;
+               } else {
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
+               }
 
-               dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       if (!dig->pin)
+                               dig->pin = radeon_audio_get_pin(encoder);
+                       radeon_audio_enable(rdev, dig->pin, 0xf);
+               } else {
+                       radeon_audio_enable(rdev, dig->pin, 0);
+                       dig->pin = NULL;
+               }
        } else {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
+               radeon_audio_enable(rdev, dig->pin, 0);
+               dig->pin = NULL;
        }
 }
 
@@ -502,7 +487,7 @@ void radeon_audio_fini(struct radeon_device *rdev)
                return;
 
        for (i = 0; i < rdev->audio.num_pins; i++)
-               radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
+               radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
 
        rdev->audio.enabled = false;
 }
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                      struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        struct hdmi_avi_infoframe frame;
        int err;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
-               return -ENOENT;
-       }
+       if (!connector)
+               return -EINVAL;
 
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
                return err;
        }
 
-       if (dig && dig->afmt &&
-               radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+       if (dig && dig->afmt && radeon_encoder->audio &&
+           radeon_encoder->audio->set_avi_packet)
                radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
                        buffer, sizeof(buffer));
 
@@ -745,7 +719,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                    struct drm_display_mode *mode)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -756,6 +730,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_connector_atom_dig *dig_connector =
                radeon_connector->con_priv;
 
+       if (!connector)
+               return;
+
        if (!dig || !dig->afmt)
                return;
 
@@ -774,7 +751,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                          struct drm_display_mode *mode)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
index c92d059..059cc30 100644 (file)
@@ -68,14 +68,13 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-       enum drm_connector_status status);
+                        struct drm_encoder *encoder,
+                        enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
        u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
        u32 offset,     u32 reg, u32 v);
 struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
-void radeon_audio_enable(struct radeon_device *rdev,
-       struct r600_audio_pin *pin, u8 enable_mask);
 void radeon_audio_fini(struct radeon_device *rdev);
 void radeon_audio_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
index 3e5f6b7..c097d3a 100644 (file)
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
                        if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
                            (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+                               u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+                               if (hss > lvds->native_mode.hdisplay)
+                                       hss = (10 - 1) * 8;
+
                                lvds->native_mode.htotal = lvds->native_mode.hdisplay +
                                        (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
                                lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-                                       (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+                                       hss;
                                lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
                                        (RBIOS8(tmp + 23) * 8);
 
index cebb65e..94b21ae 100644 (file)
@@ -1379,8 +1379,16 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && radeon_connector->use_digital) {
+               const struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+
+               encoder = connector_funcs->best_encoder(connector);
+               if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+                       radeon_connector_get_edid(connector);
+                       radeon_audio_detect(connector, encoder, ret);
+               }
+       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && encoder) {
+               radeon_connector_get_edid(connector);
+               radeon_audio_detect(connector, encoder, ret);
+       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index 45e5406..fa66174 100644 (file)
@@ -205,8 +205,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
                        | (x << 16)
                        | y));
                /* offset is from DISP(2)_BASE_ADDRESS */
-               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
-                                                                     (yorigin * 256)));
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+                      radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+                      yorigin * 256);
        }
 
        radeon_crtc->cursor_x = x;
@@ -227,51 +228,32 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
        return ret;
 }
 
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
+static void radeon_set_cursor(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct radeon_device *rdev = crtc->dev->dev_private;
-       struct radeon_bo *robj = gem_to_radeon_bo(obj);
-       uint64_t gpu_addr;
-       int ret;
-
-       ret = radeon_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       /* Only 27 bit offset for legacy cursor */
-       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-                                      &gpu_addr);
-       radeon_bo_unreserve(robj);
-       if (ret)
-               goto fail;
 
        if (ASIC_IS_DCE4(rdev)) {
                WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-                      upper_32_bits(gpu_addr));
+                      upper_32_bits(radeon_crtc->cursor_addr));
                WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
+                      lower_32_bits(radeon_crtc->cursor_addr));
        } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->family >= CHIP_RV770) {
                        if (radeon_crtc->crtc_id)
-                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+                               WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(radeon_crtc->cursor_addr));
                        else
-                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+                               WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(radeon_crtc->cursor_addr));
                }
                WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-                      gpu_addr & 0xffffffff);
+                      lower_32_bits(radeon_crtc->cursor_addr));
        } else {
-               radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
                /* offset is from DISP(2)_BASE_ADDRESS */
-               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+               WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+                      radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
        }
-
-       return 0;
-
-fail:
-       drm_gem_object_unreference_unlocked(obj);
-
-       return ret;
 }
 
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -283,7 +265,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                            int32_t hot_y)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_device *rdev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
+       struct radeon_bo *robj;
        int ret;
 
        if (!handle) {
@@ -305,6 +289,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
+       robj = gem_to_radeon_bo(obj);
+       ret = radeon_bo_reserve(robj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+       /* Only 27 bit offset for legacy cursor */
+       ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+                                      ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+                                      &radeon_crtc->cursor_addr);
+       radeon_bo_unreserve(robj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
        radeon_crtc->cursor_width = width;
        radeon_crtc->cursor_height = height;
 
@@ -323,13 +324,8 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
                radeon_crtc->cursor_hot_y = hot_y;
        }
 
-       ret = radeon_set_cursor(crtc, obj);
-
-       if (ret)
-               DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
-                         ret);
-       else
-               radeon_show_cursor(crtc);
+       radeon_set_cursor(crtc);
+       radeon_show_cursor(crtc);
 
        radeon_lock_cursor(crtc, false);
 
@@ -341,8 +337,7 @@ unpin:
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
-               if (radeon_crtc->cursor_bo != obj)
-                       drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+               drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
        }
 
        radeon_crtc->cursor_bo = obj;
@@ -360,7 +355,6 @@ unpin:
 void radeon_cursor_reset(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       int ret;
 
        if (radeon_crtc->cursor_bo) {
                radeon_lock_cursor(crtc, true);
@@ -368,12 +362,8 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
                radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
                                          radeon_crtc->cursor_y);
 
-               ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
-               if (ret)
-                       DRM_ERROR("radeon_set_cursor returned %d, not showing "
-                                 "cursor\n", ret);
-               else
-                       radeon_show_cursor(crtc);
+               radeon_set_cursor(crtc);
+               radeon_show_cursor(crtc);
 
                radeon_lock_cursor(crtc, false);
        }
index a7fdfa4..604c44d 100644 (file)
@@ -1572,11 +1572,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
        }
 
-       /* unpin the front buffers */
+       /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
                struct radeon_bo *robj;
 
+               if (radeon_crtc->cursor_bo) {
+                       struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+                       r = radeon_bo_reserve(robj, false);
+                       if (r == 0) {
+                               radeon_bo_unpin(robj);
+                               radeon_bo_unreserve(robj);
+                       }
+               }
+
                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
@@ -1639,6 +1649,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_crtc *crtc;
        int r;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1689,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
        radeon_restore_bios_scratch_regs(rdev);
 
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+               if (radeon_crtc->cursor_bo) {
+                       struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+                       r = radeon_bo_reserve(robj, false);
+                       if (r == 0) {
+                               /* Only 27 bit offset for legacy cursor */
+                               r = radeon_bo_pin_restricted(robj,
+                                                            RADEON_GEM_DOMAIN_VRAM,
+                                                            ASIC_IS_AVIVO(rdev) ?
+                                                            0 : 1 << 27,
+                                                            &radeon_crtc->cursor_addr);
+                               if (r != 0)
+                                       DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                               radeon_bo_unreserve(robj);
+                       }
+               }
+       }
+
        /* init dig PHYs, disp eng pll */
        if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
index aeb6767..634793e 100644 (file)
@@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
        }
 
        info->par = rfbdev;
-       info->skip_vt_switch = true;
 
        ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
        if (ret) {
index 5450fa9..c4777c8 100644 (file)
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        }
                }
        }
-       mb();
-       radeon_gart_tlb_flush(rdev);
+       if (rdev->gart.ptr) {
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
 }
 
 /**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                        page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
-       mb();
-       radeon_gart_tlb_flush(rdev);
+       if (rdev->gart.ptr) {
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
        return 0;
 }
 
index ac3c131..186d0b7 100644 (file)
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+               radeon_mn_unregister(robj);
                radeon_bo_unref(&robj);
        }
 }
@@ -471,6 +472,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                r = ret;
 
        /* Flush HDP cache via MMIO if necessary */
+       cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
index 7162c93..f682e53 100644 (file)
@@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
 
+       mutex_lock(&mode_config->mutex);
        if (mode_config->num_connector) {
                list_for_each_entry(connector, &mode_config->connector_list, head)
                        radeon_connector_hotplug(connector);
        }
+       mutex_unlock(&mode_config->mutex);
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
 }
index fa91a17..9af2d83 100644 (file)
@@ -237,7 +237,6 @@ struct radeon_afmt {
        int offset;
        bool last_buffer_filled_status;
        int id;
-       struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -343,7 +342,6 @@ struct radeon_crtc {
        int max_cursor_width;
        int max_cursor_height;
        uint32_t legacy_display_base_addr;
-       uint32_t legacy_cursor_offset;
        enum radeon_rmx_type rmx_type;
        u8 h_border;
        u8 v_border;
@@ -440,6 +438,7 @@ struct radeon_encoder_atom_dig {
        uint8_t backlight_level;
        int panel_mode;
        struct radeon_afmt *afmt;
+       struct r600_audio_pin *pin;
        int active_mst_links;
 };
 
index 318165d..6763627 100644 (file)
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        bo = container_of(tbo, struct radeon_bo, tbo);
 
        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-       radeon_mn_unregister(bo);
 
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
index 4c679b8..e15185b 100644 (file)
@@ -6466,23 +6466,27 @@ restart_ih:
                case 1: /* D1 vblank/vline */
                        switch (src_data) {
                        case 0: /* D1 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[0]) {
-                                               drm_handle_vblank(rdev->ddev, 0);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[0]))
-                                               radeon_crtc_handle_vblank(rdev, 0);
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[0]) {
+                                       drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[0]))
+                                       radeon_crtc_handle_vblank(rdev, 0);
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vblank\n");
+
                                break;
                        case 1: /* D1 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D1 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D1 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
                case 2: /* D2 vblank/vline */
                        switch (src_data) {
                        case 0: /* D2 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[1]) {
-                                               drm_handle_vblank(rdev->ddev, 1);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[1]))
-                                               radeon_crtc_handle_vblank(rdev, 1);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[1]) {
+                                       drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[1]))
+                                       radeon_crtc_handle_vblank(rdev, 1);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vblank\n");
+
                                break;
                        case 1: /* D2 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D2 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D2 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
                case 3: /* D3 vblank/vline */
                        switch (src_data) {
                        case 0: /* D3 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[2]) {
-                                               drm_handle_vblank(rdev->ddev, 2);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[2]))
-                                               radeon_crtc_handle_vblank(rdev, 2);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[2]) {
+                                       drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[2]))
+                                       radeon_crtc_handle_vblank(rdev, 2);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vblank\n");
+
                                break;
                        case 1: /* D3 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D3 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D3 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
                case 4: /* D4 vblank/vline */
                        switch (src_data) {
                        case 0: /* D4 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[3]) {
-                                               drm_handle_vblank(rdev->ddev, 3);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[3]))
-                                               radeon_crtc_handle_vblank(rdev, 3);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[3]) {
+                                       drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[3]))
+                                       radeon_crtc_handle_vblank(rdev, 3);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vblank\n");
+
                                break;
                        case 1: /* D4 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D4 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D4 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
                case 5: /* D5 vblank/vline */
                        switch (src_data) {
                        case 0: /* D5 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[4]) {
-                                               drm_handle_vblank(rdev->ddev, 4);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[4]))
-                                               radeon_crtc_handle_vblank(rdev, 4);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[4]) {
+                                       drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[4]))
+                                       radeon_crtc_handle_vblank(rdev, 4);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vblank\n");
+
                                break;
                        case 1: /* D5 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D5 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D5 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
                case 6: /* D6 vblank/vline */
                        switch (src_data) {
                        case 0: /* D6 vblank */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                                       if (rdev->irq.crtc_vblank_int[5]) {
-                                               drm_handle_vblank(rdev->ddev, 5);
-                                               rdev->pm.vblank_sync = true;
-                                               wake_up(&rdev->irq.vblank_queue);
-                                       }
-                                       if (atomic_read(&rdev->irq.pflip[5]))
-                                               radeon_crtc_handle_vblank(rdev, 5);
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vblank\n");
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               if (rdev->irq.crtc_vblank_int[5]) {
+                                       drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
+                                       wake_up(&rdev->irq.vblank_queue);
                                }
+                               if (atomic_read(&rdev->irq.pflip[5]))
+                                       radeon_crtc_handle_vblank(rdev, 5);
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vblank\n");
+
                                break;
                        case 1: /* D6 vline */
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                                       DRM_DEBUG("IH: D6 vline\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+                               DRM_DEBUG("IH: D6 vline\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
                case 42: /* HPD hotplug */
                        switch (src_data) {
                        case 0:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD1\n");
+
                                break;
                        case 1:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD2\n");
+
                                break;
                        case 2:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD3\n");
+
                                break;
                        case 3:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD4\n");
+
                                break;
                        case 4:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD5\n");
+
                                break;
                        case 5:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                                       queue_hotplug = true;
-                                       DRM_DEBUG("IH: HPD6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+                               queue_hotplug = true;
+                               DRM_DEBUG("IH: HPD6\n");
+
                                break;
                        case 6:
-                               if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 1\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 1\n");
+
                                break;
                        case 7:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 2\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 2\n");
+
                                break;
                        case 8:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 3\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 3\n");
+
                                break;
                        case 9:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 4\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 4\n");
+
                                break;
                        case 10:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 5\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 5\n");
+
                                break;
                        case 11:
-                               if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                                       rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                                       queue_dp = true;
-                                       DRM_DEBUG("IH: HPD_RX 6\n");
-                               }
+                               if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                                       DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+                               rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+                               queue_dp = true;
+                               DRM_DEBUG("IH: HPD_RX 6\n");
+
                                break;
                        default:
                                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
index ff8b83f..9dfcede 100644 (file)
@@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
        { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { 0, 0, 0, 0 },
 };
 
index eb2282c..eba5f8a 100644 (file)
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
                       &rk_obj->dma_attrs);
 }
 
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
-                         struct vm_area_struct *vma)
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+                                       struct vm_area_struct *vma)
+
 {
+       int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;
-       unsigned long vm_size;
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-       vm_size = vma->vm_end - vma->vm_start;
-
-       if (vm_size > obj->size)
-               return -EINVAL;
+       /*
+        * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
+        * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+        */
+       vma->vm_flags &= ~VM_PFNMAP;
 
-       return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+       ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                             obj->size, &rk_obj->dma_attrs);
+       if (ret)
+               drm_gem_vm_close(vma);
+
+       return ret;
 }
 
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+                         struct vm_area_struct *vma)
 {
-       struct drm_file *priv = filp->private_data;
-       struct drm_device *dev = priv->minor->dev;
-       struct drm_gem_object *obj;
-       struct drm_vma_offset_node *node;
+       struct drm_device *drm = obj->dev;
        int ret;
 
-       if (drm_device_is_unplugged(dev))
-               return -ENODEV;
+       mutex_lock(&drm->struct_mutex);
+       ret = drm_gem_mmap_obj(obj, obj->size, vma);
+       mutex_unlock(&drm->struct_mutex);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
+       return rockchip_drm_gem_object_mmap(obj, vma);
+}
 
-       node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-                                          vma->vm_pgoff,
-                                          vma_pages(vma));
-       if (!node) {
-               mutex_unlock(&dev->struct_mutex);
-               DRM_ERROR("failed to find vma node.\n");
-               return -EINVAL;
-       } else if (!drm_vma_node_is_allowed(node, filp)) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EACCES;
-       }
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj;
+       int ret;
 
-       obj = container_of(node, struct drm_gem_object, vma_node);
-       ret = rockchip_gem_mmap_buf(obj, vma);
+       ret = drm_gem_mmap(filp, vma);
+       if (ret)
+               return ret;
 
-       mutex_unlock(&dev->struct_mutex);
+       obj = vma->vm_private_data;
 
-       return ret;
+       return rockchip_drm_gem_object_mmap(obj, vma);
 }
 
 struct rockchip_gem_object *
index d6b55e3..a43a836 100644 (file)
@@ -72,34 +72,32 @@ static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
 static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
                                   size_t size)
 {
-       unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
        size_t i, j;
 
-       for (i = 0; i < size; i += 4) {
-               size_t num = min_t(size_t, size - i, 4);
+       for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+               size_t num = min_t(size_t, size - i * 4, 4);
                unsigned long value = 0;
 
                for (j = 0; j < num; j++)
-                       value |= buffer[i + j] << (j * 8);
+                       value |= buffer[i * 4 + j] << (j * 8);
 
-               tegra_dpaux_writel(dpaux, value, offset++);
+               tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
        }
 }
 
 static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
                                  size_t size)
 {
-       unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
        size_t i, j;
 
-       for (i = 0; i < size; i += 4) {
-               size_t num = min_t(size_t, size - i, 4);
+       for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+               size_t num = min_t(size_t, size - i * 4, 4);
                unsigned long value;
 
-               value = tegra_dpaux_readl(dpaux, offset++);
+               value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
 
                for (j = 0; j < num; j++)
-                       buffer[i + j] = value >> (j * 8);
+                       buffer[i * 4 + j] = value >> (j * 8);
        }
 }
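
The hunk above switches the dpaux FIFO accessors from a post-incremented register offset to indexing DPAUX_DP_AUXDATA_WRITE(i)/READ(i) per 32-bit word, while keeping the little-endian packing of up to four bytes per word. A minimal user-space sketch of that packing and unpacking (not the driver itself; write_word()/read_word() stand in for the MMIO accessors):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t fifo[16];                 /* stands in for the AUX data registers */

static void write_word(size_t i, uint32_t v) { fifo[i] = v; }
static uint32_t read_word(size_t i)          { return fifo[i]; }

/* Pack "size" bytes, four per 32-bit word, least significant byte first. */
static void write_fifo(const uint8_t *buf, size_t size)
{
        for (size_t i = 0; i < (size + 3) / 4; i++) {   /* DIV_ROUND_UP(size, 4) */
                size_t num = size - i * 4 < 4 ? size - i * 4 : 4;
                uint32_t value = 0;

                for (size_t j = 0; j < num; j++)
                        value |= (uint32_t)buf[i * 4 + j] << (j * 8);

                write_word(i, value);
        }
}

static void read_fifo(uint8_t *buf, size_t size)
{
        for (size_t i = 0; i < (size + 3) / 4; i++) {
                uint32_t value = read_word(i);
                size_t num = size - i * 4 < 4 ? size - i * 4 : 4;

                for (size_t j = 0; j < num; j++)
                        buf[i * 4 + j] = value >> (j * 8);
        }
}

int main(void)
{
        uint8_t in[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }, out[6];

        write_fifo(in, sizeof(in));
        read_fifo(out, sizeof(out));
        printf("%s\n", memcmp(in, out, sizeof(in)) ? "mismatch" : "round-trip ok");
        return 0;
}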
 
index 7a207ca..6394547 100644 (file)
@@ -328,6 +328,8 @@ static int __init vgem_init(void)
                goto out;
        }
 
+       drm_dev_set_unique(vgem_device, "vgem");
+
        ret  = drm_dev_register(vgem_device, 0);
 
        if (ret)
index 654c8da..97ad3bc 100644 (file)
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
index 3318de6..a2dbbbe 100644 (file)
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
        struct cp2112_force_read_report report;
        int ret;
 
+       if (size > sizeof(dev->read_data))
+               size = sizeof(dev->read_data);
        report.report = CP2112_DATA_READ_FORCE_SEND;
        report.length = cpu_to_be16(size);
 
index 008e89b..32d52d2 100644 (file)
@@ -462,12 +462,15 @@ out:
 
 static void hidinput_cleanup_battery(struct hid_device *dev)
 {
+       const struct power_supply_desc *psy_desc;
+
        if (!dev->battery)
                return;
 
+       psy_desc = dev->battery->desc;
        power_supply_unregister(dev->battery);
-       kfree(dev->battery->desc->name);
-       kfree(dev->battery->desc);
+       kfree(psy_desc->name);
+       kfree(psy_desc);
        dev->battery = NULL;
 }
 #else  /* !CONFIG_HID_BATTERY_STRENGTH */
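
The fix above saves the power-supply descriptor pointer before power_supply_unregister() releases dev->battery, so the following kfree() calls no longer read through freed memory. A minimal sketch of the same "grab what you still need, tear down, then free" ordering, using simplified stand-in types rather than the HID structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { char *name; };
struct battery { struct desc *desc; };

/* Stand-in for power_supply_unregister(): it frees the battery object itself. */
static void unregister_battery(struct battery *b) { free(b); }

static void cleanup_battery(struct battery **pb)
{
        struct battery *b = *pb;
        struct desc *d;

        if (!b)
                return;

        d = b->desc;              /* take the pointer while *b is still valid */
        unregister_battery(b);    /* after this, b must not be touched */
        free(d->name);
        free(d);
        *pb = NULL;
}

int main(void)
{
        struct battery *b = malloc(sizeof(*b));

        b->desc = malloc(sizeof(*b->desc));
        b->desc->name = strdup("hid-battery");
        cleanup_battery(&b);
        printf("battery = %p\n", (void *)b);
        return 0;
}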
index 9416731..b905d50 100644 (file)
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
        for (p = drvdata->rdesc;
             p <= drvdata->rdesc + drvdata->rsize - 4;) {
                if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                   p[3] < sizeof(params)) {
+                   p[3] < ARRAY_SIZE(params)) {
                        v = params[p[3]];
                        put_unaligned(cpu_to_le32(v), (s32 *)p);
                        p += 4;
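
The one-character change above matters because params is an array of multi-byte values: sizeof(params) is the byte count while ARRAY_SIZE(params) is the element count, and the index taken from p[3] must be bounded by the latter. A tiny sketch of the difference, with an illustrative array rather than the driver's table:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        static const int32_t params[] = { 0, 1, 2, 3, 4 };

        /* 20 bytes vs. 5 elements: bounding an index by sizeof() would
         * accept indices 5..19 and read past the end of the array. */
        printf("sizeof(params)     = %zu\n", sizeof(params));
        printf("ARRAY_SIZE(params) = %zu\n", ARRAY_SIZE(params));
        return 0;
}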
index d219c06..972444a 100644 (file)
 /* output format */
 #define MCP3021_SAR_SHIFT      2
 #define MCP3021_SAR_MASK       0x3ff
-
 #define MCP3021_OUTPUT_RES     10      /* 10-bit resolution */
-#define MCP3021_OUTPUT_SCALE   4
 
 #define MCP3221_SAR_SHIFT      0
 #define MCP3221_SAR_MASK       0xfff
 #define MCP3221_OUTPUT_RES     12      /* 12-bit resolution */
-#define MCP3221_OUTPUT_SCALE   1
 
 enum chips {
        mcp3021,
@@ -54,7 +51,6 @@ struct mcp3021_data {
        u16 sar_shift;
        u16 sar_mask;
        u8 output_res;
-       u8 output_scale;
 };
 
 static int mcp3021_read16(struct i2c_client *client)
@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
 
 static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
 {
-       if (val == 0)
-               return 0;
-
-       val = val * data->output_scale - data->output_scale / 2;
-
-       return val * DIV_ROUND_CLOSEST(data->vdd,
-                       (1 << data->output_res) * data->output_scale);
+       return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
 }
 
 static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
                data->sar_shift = MCP3021_SAR_SHIFT;
                data->sar_mask = MCP3021_SAR_MASK;
                data->output_res = MCP3021_OUTPUT_RES;
-               data->output_scale = MCP3021_OUTPUT_SCALE;
                break;
 
        case mcp3221:
                data->sar_shift = MCP3221_SAR_SHIFT;
                data->sar_mask = MCP3221_SAR_MASK;
                data->output_res = MCP3221_OUTPUT_RES;
-               data->output_scale = MCP3221_OUTPUT_SCALE;
                break;
        }
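
With the output_scale factor gone, the reported voltage reduces to a single rounded division, vdd * val / 2^resolution. A small sketch of that conversion with example numbers (DIV_ROUND_CLOSEST reimplemented locally; vdd is in millivolts as in the driver):

#include <stdio.h>

/* Round-to-nearest integer division, as the kernel macro does for
 * positive operands. */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
        return (n + d / 2) / d;
}

static unsigned int volts_from_reg(unsigned int vdd_mv, unsigned int val,
                                   unsigned int output_res)
{
        return div_round_closest(vdd_mv * val, 1u << output_res);
}

int main(void)
{
        /* MCP3021: 10-bit ADC, 3300 mV supply, mid-scale code 512 -> 1650 mV */
        printf("%u mV\n", volts_from_reg(3300, 512, 10));
        /* MCP3221: 12-bit ADC, full-scale code 4095 -> 3299 mV */
        printf("%u mV\n", volts_from_reg(3300, 4095, 12));
        return 0;
}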
 
index 5576579..fbfc02b 100644 (file)
@@ -195,7 +195,7 @@ abort:
 }
 
 static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
-                                unsigned int voltage)
+                                unsigned long voltage)
 {
        int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
        int err;
@@ -547,7 +547,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
        if (index >= 9 && index < 18 &&
            (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08)       /* RD2 */
                return 0;
-       if (index >= 18 && index < 27 && (reg & 0x30) != 0x10)  /* RD3 */
+       if (index >= 18 && index < 27 && (reg & 0x30) != 0x20)  /* RD3 */
                return 0;
        if (index >= 27 && index < 35)                          /* local */
                return attr->mode;
index b77b82f..08ff89d 100644 (file)
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
        return sprintf(buf, "%d\n", val);
 }
 
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
-                         const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+                           struct device_attribute *devattr,
+                           const char *buf, size_t count)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
 
        if (kstrtoul(buf, 10, &val) < 0)
                return -EINVAL;
-       if (val > 1 || (val && !data->fan_mode[index]))
+       if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
                return -EINVAL;
 
        ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
-                               val ? data->fan_mode[index] : 0);
+                               val == 2 ? data->fan_mode[index] : 0);
 
        return ret ? ret : count;
 }
 
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
-                        struct device_attribute *devattr, char *buf)
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
        if (val < 0)
                return val;
 
-       return sprintf(buf, "%d\n", val ? 1 : 0);
+       return sprintf(buf, "%d\n", val ? 2 : 1);
 }
 
 /* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 0);
-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 1);
-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 2);
-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 3);
 
 static struct attribute *nct7904_fanctl_attrs[] = {
-       &sensor_dev_attr_fan1_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan1_mode.dev_attr.attr,
-       &sensor_dev_attr_fan2_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan2_mode.dev_attr.attr,
-       &sensor_dev_attr_fan3_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan3_mode.dev_attr.attr,
-       &sensor_dev_attr_fan4_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan4_mode.dev_attr.attr,
+       &sensor_dev_attr_pwm1.dev_attr.attr,
+       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm2.dev_attr.attr,
+       &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm3.dev_attr.attr,
+       &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm4.dev_attr.attr,
+       &sensor_dev_attr_pwm4_enable.dev_attr.attr,
        NULL
 };
 
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
        {"nct7904", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
 
 static struct i2c_driver nct7904_driver = {
        .class = I2C_CLASS_HWMON,
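
Besides renaming the attributes, the hunk above moves pwmN_enable to the standard hwmon convention: 1 selects manual PWM and 2 selects the chip's SmartFan mode, instead of the previous 0/1. A minimal user-space sketch of the new validation and register mapping, with write_reg() and fan_mode[] as simplified stand-ins for the driver state:

#include <stdio.h>
#include <errno.h>

/* Stand-in for the per-channel SmartFan configuration saved at probe time. */
static unsigned char fan_mode[4] = { 0x10, 0x10, 0x00, 0x10 };

static int write_reg(int channel, unsigned char val)
{
        printf("FANCTL%d_FMR <- 0x%02x\n", channel + 1, val);
        return 0;
}

/* pwmN_enable: 1 = manual, 2 = SmartFan (only if a SmartFan mode exists). */
static int store_enable(int channel, unsigned long val)
{
        if (val < 1 || val > 2 || (val == 2 && !fan_mode[channel]))
                return -EINVAL;

        return write_reg(channel, val == 2 ? fan_mode[channel] : 0);
}

int main(void)
{
        printf("pwm1_enable=2 -> %d\n", store_enable(0, 2));
        printf("pwm3_enable=2 -> %d (no SmartFan mode stored)\n",
               store_enable(2, 2));
        printf("pwm1_enable=0 -> %d (0 is no longer accepted)\n",
               store_enable(0, 0));
        return 0;
}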
index ff23d1b..9bd10a9 100644 (file)
@@ -65,6 +65,9 @@
 #define        AT91_TWI_UNRE           0x0080  /* Underrun Error */
 #define        AT91_TWI_NACK           0x0100  /* Not Acknowledged */
 
+#define        AT91_TWI_INT_MASK \
+       (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
+
 #define        AT91_TWI_IER            0x0024  /* Interrupt Enable Register */
 #define        AT91_TWI_IDR            0x0028  /* Interrupt Disable Register */
 #define        AT91_TWI_IMR            0x002c  /* Interrupt Mask Register */
@@ -119,13 +122,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
 
 static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
 {
-       at91_twi_write(dev, AT91_TWI_IDR,
-                      AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
+       at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
 }
 
 static void at91_twi_irq_save(struct at91_twi_dev *dev)
 {
-       dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
+       dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
        at91_disable_twi_interrupts(dev);
 }
 
@@ -215,6 +217,14 @@ static void at91_twi_write_data_dma_callback(void *data)
        dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
                         dev->buf_len, DMA_TO_DEVICE);
 
+       /*
+        * When this callback is called, THR/TX FIFO is likely not to be empty
+        * yet. So we have to wait for TXCOMP or NACK bits to be set into the
+        * Status Register to be sure that the STOP bit has been sent and the
+        * transfer is completed. The NACK interrupt has already been enabled,
+        * we just have to enable TXCOMP one.
+        */
+       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
 }
 
@@ -309,7 +319,7 @@ static void at91_twi_read_data_dma_callback(void *data)
        /* The last two bytes have to be read without using dma */
        dev->buf += dev->buf_len - 2;
        dev->buf_len = 2;
-       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
+       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
 }
 
 static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
@@ -370,7 +380,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
        /* catch error flags */
        dev->transfer_status |= status;
 
-       if (irqstatus & AT91_TWI_TXCOMP) {
+       if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
                at91_disable_twi_interrupts(dev);
                complete(&dev->cmd_complete);
        }
@@ -384,6 +394,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
        unsigned long time_left;
        bool has_unre_flag = dev->pdata->has_unre_flag;
 
+       /*
+        * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
+        * read flag but shows the state of the transmission at the time the
+        * Status Register is read. According to the programmer datasheet,
+        * TXCOMP is set when both holding register and internal shifter are
+        * empty and STOP condition has been sent.
+        * Consequently, we should enable NACK interrupt rather than TXCOMP to
+        * detect transmission failure.
+        *
+        * Besides, the TXCOMP bit is already set before the i2c transaction
+        * has been started. For read transactions, this bit is cleared when
+        * writing the START bit into the Control Register. So the
+        * corresponding interrupt can safely be enabled just after.
+        * However for write transactions managed by the CPU, we first write
+        * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
+        * interrupt. If TXCOMP interrupt were enabled before writing into THR,
+        * the interrupt handler would be called immediately and the i2c command
+        * would be reported as completed.
+        * Also when a write transaction is managed by the DMA controller,
+        * enabling the TXCOMP interrupt in this function may lead to a race
+        * condition since we don't know whether the TXCOMP interrupt is enabled
+        * before or after the DMA has started to write into THR. So the TXCOMP
+        * interrupt is enabled later by at91_twi_write_data_dma_callback().
+        * Immediately after in that DMA callback, we still need to send the
+        * STOP condition manually writing the corresponding bit into the
+        * Control Register.
+        */
+
        dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
                (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
 
@@ -414,26 +452,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                 * seems to be the best solution.
                 */
                if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+                       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
                        at91_twi_read_data_dma(dev);
-                       /*
-                        * It is important to enable TXCOMP irq here because
-                        * doing it only when transferring the last two bytes
-                        * will mask NACK errors since TXCOMP is set when a
-                        * NACK occurs.
-                        */
-                       at91_twi_write(dev, AT91_TWI_IER,
-                              AT91_TWI_TXCOMP);
-               } else
+               } else {
                        at91_twi_write(dev, AT91_TWI_IER,
-                              AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
+                                      AT91_TWI_TXCOMP |
+                                      AT91_TWI_NACK |
+                                      AT91_TWI_RXRDY);
+               }
        } else {
                if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
+                       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
                        at91_twi_write_data_dma(dev);
-                       at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
                } else {
                        at91_twi_write_next_byte(dev);
                        at91_twi_write(dev, AT91_TWI_IER,
-                               AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
+                                      AT91_TWI_TXCOMP |
+                                      AT91_TWI_NACK |
+                                      AT91_TWI_TXRDY);
                }
        }
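
The net effect of this hunk is that NACK is always armed before a transfer starts, while TXCOMP is armed either immediately (PIO) or only from the DMA write callback, for the reasons spelled out in the long comment above. A compact sketch of just that decision, with symbolic bits and a log line in place of register writes (not the driver itself):

#include <stdio.h>
#include <stdbool.h>

#define TWI_TXCOMP 0x0001
#define TWI_RXRDY  0x0002
#define TWI_TXRDY  0x0004
#define TWI_NACK   0x0100

static void enable_irqs(unsigned int bits)
{
        printf("IER <- 0x%04x\n", bits);
}

/* Which interrupts to arm before starting a transfer. TXCOMP for DMA
 * transfers is armed later, from the DMA completion callback. */
static void arm_transfer_irqs(bool read, bool use_dma)
{
        if (use_dma)
                enable_irqs(TWI_NACK);
        else if (read)
                enable_irqs(TWI_TXCOMP | TWI_NACK | TWI_RXRDY);
        else
                enable_irqs(TWI_TXCOMP | TWI_NACK | TWI_TXRDY);
}

static void dma_write_done(void)
{
        /* STOP has not gone out yet; wait for TXCOMP (or NACK) to be sure. */
        enable_irqs(TWI_TXCOMP);
}

int main(void)
{
        arm_transfer_irqs(true,  false);   /* PIO read  */
        arm_transfer_irqs(false, false);   /* PIO write */
        arm_transfer_irqs(false, true);    /* DMA write */
        dma_write_done();
        return 0;
}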
 
index 06cc1ff..2ba7c0f 100644 (file)
@@ -51,7 +51,7 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
 
        ret = priv->select(parent, priv->mux_priv, priv->chan_id);
        if (ret >= 0)
-               ret = parent->algo->master_xfer(parent, msgs, num);
+               ret = __i2c_transfer(parent, msgs, num);
        if (priv->deselect)
                priv->deselect(parent, priv->mux_priv, priv->chan_id);
 
@@ -144,6 +144,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
        priv->adap.dev.parent = &parent->dev;
        priv->adap.retries = parent->retries;
        priv->adap.timeout = parent->timeout;
+       priv->adap.quirks = parent->quirks;
 
        /* Sanity check on class */
        if (i2c_mux_parent_classes(parent) & class)
index cb77277..0c8d4d2 100644 (file)
@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
                buf[0] = command;
                buf[1] = val;
                msg.buf = buf;
-               ret = adap->algo->master_xfer(adap, &msg, 1);
+               ret = __i2c_transfer(adap, &msg, 1);
        } else {
                union i2c_smbus_data data;
 
@@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command)
                                .buf = &val
                        }
                };
-               ret = adap->algo->master_xfer(adap, msg, 2);
+               ret = __i2c_transfer(adap, msg, 2);
                if (ret == 2)
                        ret = val;
                else if (ret >= 0)
index bea0d2d..ea4aa9d 100644 (file)
@@ -134,7 +134,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
                msg.len = 1;
                buf[0] = val;
                msg.buf = buf;
-               ret = adap->algo->master_xfer(adap, &msg, 1);
+               ret = __i2c_transfer(adap, &msg, 1);
        } else {
                union i2c_smbus_data data;
                ret = adap->algo->smbus_xfer(adap, client->addr,
index 73e8773..bf827d0 100644 (file)
@@ -1465,7 +1465,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
 {
        int i;
 
-       for (i = from; i >= 0; i++) {
+       for (i = from; i >= 0; i--) {
                if (data->triggers[i].indio_trig) {
                        iio_trigger_unregister(data->triggers[i].indio_trig);
                        data->triggers[i].indio_trig = NULL;
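
The loop-direction fix matters because the function is called with "from" set to the last successfully registered trigger: walking downward releases the triggers in reverse registration order and stops at index 0 instead of running past the end of the array. A tiny sketch of the corrected unwind pattern, with illustrative resources rather than the IIO objects:

#include <stdio.h>

static void unregister_one(int i)
{
        printf("unregister trigger %d\n", i);
}

/* Walk from the last successfully registered index down to 0. */
static void unregister_triggers(int from)
{
        for (int i = from; i >= 0; i--)
                unregister_one(i);
}

int main(void)
{
        /* e.g. registering trigger 2 failed, so undo triggers 1 and 0 */
        unregister_triggers(1);
        return 0;
}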
index e36a73e..1bcb65b 100644 (file)
@@ -146,8 +146,7 @@ config DA9150_GPADC
 
 config CC10001_ADC
        tristate "Cosmic Circuits 10001 ADC driver"
-       depends on HAVE_CLK || REGULATOR
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAVE_CLK && REGULATOR
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index 8a0eb4a..7b40925 100644 (file)
@@ -182,7 +182,7 @@ struct at91_adc_caps {
        u8      ts_pen_detect_sensitivity;
 
        /* startup time calculate function */
-       u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+       u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
 
        u8      num_channels;
        struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
        u8                      num_channels;
        void __iomem            *reg_base;
        struct at91_adc_reg_desc *registers;
-       u8                      startup_time;
+       u32                     startup_time;
        u8                      sample_hold_time;
        bool                    sleep_mode;
        struct iio_trigger      **trig;
@@ -779,7 +779,7 @@ ret:
        return ret;
 }
 
-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
 {
        /*
         * Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
        return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
 }
 
-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
 {
        /*
         * For sama5d3x and at91sam9x5, the formula changes to:
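
Widening startup_time to u32 presumably keeps startup times above 255 µs from being truncated before they reach these helpers, where the value is multiplied by the ADC clock in kHz. A worked sketch of the 9260 formula shown above, with round_up reimplemented locally and illustrative numbers:

#include <stdio.h>
#include <stdint.h>

/* Round v up to the next multiple of m (m is a power of two here). */
static uint32_t round_up_to(uint32_t v, uint32_t m)
{
        return (v + m - 1) / m * m;
}

/* Solve startup_time <= (ticks + 1) * 8 / adc_clk for the register field. */
static uint32_t calc_startup_ticks_9260(uint32_t startup_us, uint32_t adc_clk_khz)
{
        return round_up_to(startup_us * adc_clk_khz / 1000 - 1, 8) / 8;
}

int main(void)
{
        /* e.g. a 40 us startup time at a 5 MHz ADC clock -> field value 25 */
        printf("STARTUP field = %u\n", calc_startup_ticks_9260(40, 5000));
        return 0;
}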
index 8d4e019..9c311c1 100644 (file)
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
 };
 
 module_platform_driver(rockchip_saradc_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Rockchip SARADC driver");
+MODULE_LICENSE("GPL v2");
index 94c5f05..4caecbe 100644 (file)
@@ -835,7 +835,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                   twl4030_madc_threaded_irq_handler,
-                                  IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "twl4030_madc", madc);
        if (ret) {
                dev_err(&pdev->dev, "could not request irq\n");
                goto err_i2c;
index 610fc98..5955110 100644 (file)
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
        s32 poll_value = 0;
 
        if (state) {
+               if (!atomic_read(&st->user_requested_state))
+                       return 0;
                if (sensor_hub_device_open(st->hsdev))
                        return -EIO;
 
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 
                poll_value = hid_sensor_read_poll_value(st);
        } else {
-               if (!atomic_dec_and_test(&st->data_ready))
+               int val;
+
+               val = atomic_dec_if_positive(&st->data_ready);
+               if (val < 0)
                        return 0;
+
                sensor_hub_device_close(st->hsdev);
                state_val = hid_sensor_get_usage_index(st->hsdev,
                        st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
 
 int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 {
+
 #ifdef CONFIG_PM
        int ret;
 
+       atomic_set(&st->user_requested_state, state);
        if (state)
                ret = pm_runtime_get_sync(&st->pdev->dev);
        else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 
        return 0;
 #else
+       atomic_set(&st->user_requested_state, state);
        return _hid_sensor_power_state(st, state);
 #endif
 }
index 61bb9d4..e98428d 100644 (file)
@@ -22,7 +22,7 @@
 #include "ad5624r.h"
 
 static int ad5624r_spi_write(struct spi_device *spi,
-                            u8 cmd, u8 addr, u16 val, u8 len)
+                            u8 cmd, u8 addr, u16 val, u8 shift)
 {
        u32 data;
        u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
         * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
         * for the AD5664R, AD5644R, and AD5624R, respectively.
         */
-       data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+       data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
        msg[0] = data >> 16;
        msg[1] = data >> 8;
        msg[2] = data;
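
Passing the shift directly makes the 24-bit frame construction explicit: command in bits 21:19, address in 18:16, and the input code followed by 0, 2 or 4 don't-care bits as described in the comment above. A small sketch that builds and splits such a frame, using illustrative values rather than the chip's actual command set:

#include <stdio.h>
#include <stdint.h>

/* Build the 24-bit frame: command in bits 21:19, address in 18:16, and the
 * input code left-shifted past its don't-care bits. */
static void build_frame(uint8_t msg[3], uint8_t cmd, uint8_t addr,
                        uint16_t val, uint8_t shift)
{
        uint32_t data = ((uint32_t)cmd << 19) | ((uint32_t)addr << 16) |
                        ((uint32_t)val << shift);

        msg[0] = data >> 16;
        msg[1] = data >> 8;
        msg[2] = data;
}

int main(void)
{
        uint8_t msg[3];

        /* Illustrative: command 3, address 0, 12-bit code 0xABC, 4 pad bits */
        build_frame(msg, 3, 0, 0xABC, 4);
        printf("%02x %02x %02x\n", msg[0], msg[1], msg[2]);
        return 0;
}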
index 17d4bb1..65ce868 100644 (file)
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
        return -EINVAL;
 }
 
+static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+                                struct iio_chan_spec const *chan, long mask)
+{
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_ANGL_VEL:
+                       return IIO_VAL_INT_PLUS_NANO;
+               default:
+                       return IIO_VAL_INT_PLUS_MICRO;
+               }
+       default:
+               return IIO_VAL_INT_PLUS_MICRO;
+       }
+
+       return -EINVAL;
+}
 static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
 {
        int result, i;
@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
        .driver_module = THIS_MODULE,
        .read_raw = &inv_mpu6050_read_raw,
        .write_raw = &inv_mpu6050_write_raw,
+       .write_raw_get_fmt = &inv_write_raw_get_fmt,
        .attrs = &inv_attribute_group,
        .validate_trigger = inv_mpu6050_validate_trigger,
 };
index 869033e..a1d4905 100644 (file)
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
        for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
                if (val == cm3323_int_time[i].val &&
                    val2 == cm3323_int_time[i].val2) {
-                       reg_conf = data->reg_conf;
+                       reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
                        reg_conf |= i << CM3323_CONF_IT_SHIFT;
 
                        ret = i2c_smbus_write_word_data(data->client,
index 71c2bde..f8b1df0 100644 (file)
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
                if (val != 0)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
-                       if (val == tcs3414_times[i] * 1000) {
+                       if (val2 == tcs3414_times[i] * 1000) {
                                data->timing &= ~TCS3414_INTEG_MASK;
                                data->timing |= i;
                                return i2c_smbus_write_byte_data(
index fa40f6d..bd26a48 100644 (file)
@@ -206,7 +206,7 @@ static int sx9500_read_proximity(struct sx9500_data *data,
        if (ret < 0)
                return ret;
 
-       *val = 32767 - (s16)be16_to_cpu(regval);
+       *val = be16_to_cpu(regval);
 
        return IIO_VAL_INT;
 }
index 84a0789..7a80509 100644 (file)
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
        struct tmp006_data *data = iio_priv(indio_dev);
        int i;
 
+       if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
                if ((val == tmp006_freqs[i][0]) &&
                    (val2 == tmp006_freqs[i][1])) {
index 9dcb660..219f212 100644 (file)
@@ -679,7 +679,6 @@ err:
                ocrdma_release_ucontext_pd(uctx);
        } else {
                status = _ocrdma_dealloc_pd(dev, pd);
-               kfree(pd);
        }
 exit:
        return ERR_PTR(status);
index e5cc430..2d13fd0 100644 (file)
@@ -176,7 +176,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                else
                        size += ipoib_recvq_size * ipoib_max_conn_qp;
        } else
-               goto out_free_wq;
+               if (ret != -ENOSYS)
+                       goto out_free_wq;
 
        priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
        if (IS_ERR(priv->recv_cq)) {
index 097d721..c6dc644 100644 (file)
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
                 * convert it to descriptor.
                 */
                if (!button->gpiod && gpio_is_valid(button->gpio)) {
-                       unsigned flags = 0;
+                       unsigned flags = GPIOF_IN;
 
                        if (button->active_low)
                                flags |= GPIOF_ACTIVE_LOW;
index a353b7d..bc7eed6 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <linux/dmi.h>
 
 #include "psmouse.h"
 #include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
+#define ALPS_DELL              0x100   /* device is a Dell laptop */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                return;
        }
 
-       /* Non interleaved V2 dualpoint has separate stick button bits */
+       /* Dell non interleaved V2 dualpoint has separate stick button bits */
        if (priv->proto_version == ALPS_PROTO_V2 &&
-           priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+           priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
                left |= packet[0] & 1;
                right |= packet[0] & 2;
                middle |= packet[0] & 4;
@@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
        priv->byte0 = protocol->byte0;
        priv->mask0 = protocol->mask0;
        priv->flags = protocol->flags;
+       if (dmi_name_in_vendors("Dell"))
+               priv->flags |= ALPS_DELL;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
index 35c8d0c..3a32caf 100644 (file)
@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
                                        ABS_MT_POSITION_Y);
                /* Image sensors can report per-contact pressure */
                input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
-               input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
+               input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
 
                /* Image sensors can signal 4 and 5 finger clicks */
                __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
index f2c6c35..2c41107 100644 (file)
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
                goto err_out;
        }
 
+       /* TSC-25 data sheet specifies a delay after the RESET command */
+       msleep(150);
+
        /* set coordinate output rate */
        buf[0] = buf[1] = 0xFF;
        ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
index 5ecfaf2..c87c4b1 100644 (file)
@@ -1756,8 +1756,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        struct page *freelist = NULL;
-       int i;
 
        /* Domain 0 is reserved, so dont process it */
        if (!domain)
@@ -1777,8 +1778,10 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
-               iommu_detach_domain(domain, g_iommus[i]);
+       for_each_active_iommu(iommu, drhd)
+               if (domain_type_is_vm(domain) ||
+                   test_bit(iommu->seq_id, domain->iommu_bmp))
+                       iommu_detach_domain(domain, iommu);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
index 692fe2b..c12bb93 100644 (file)
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .irq_set_wake           = irq_chip_set_wake_parent,
+       .irq_set_type           = irq_chip_set_type_parent,
+       .flags                  = IRQCHIP_MASK_ON_SUSPEND |
+                                 IRQCHIP_SKIP_SET_WAKE,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
index 1b7e155..c00e2db 100644 (file)
@@ -75,6 +75,13 @@ struct its_node {
 
 #define ITS_ITT_ALIGN          SZ_256
 
+struct event_lpi_map {
+       unsigned long           *lpi_map;
+       u16                     *col_map;
+       irq_hw_number_t         lpi_base;
+       int                     nr_lpis;
+};
+
 /*
  * The ITS view of a device - belongs to an ITS, a collection, owns an
  * interrupt translation table, and a list of interrupts.
@@ -82,11 +89,8 @@ struct its_node {
 struct its_device {
        struct list_head        entry;
        struct its_node         *its;
-       struct its_collection   *collection;
+       struct event_lpi_map    event_map;
        void                    *itt;
-       unsigned long           *lpi_map;
-       irq_hw_number_t         lpi_base;
-       int                     nr_lpis;
        u32                     nr_ites;
        u32                     device_id;
 };
@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
 #define gic_data_rdist()               (raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+                                              u32 event)
+{
+       struct its_node *its = its_dev->its;
+
+       return its->collections + its_dev->event_map.col_map[event];
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -134,7 +146,7 @@ struct its_cmd_desc {
                struct {
                        struct its_device *dev;
                        struct its_collection *col;
-                       u32 id;
+                       u32 event_id;
                } its_movi_cmd;
 
                struct {
@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_mapd_cmd.dev->collection;
+       return NULL;
 }
 
 static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
 static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_mapvi_cmd.dev,
+                              desc->its_mapvi_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_MAPVI);
        its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
        its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
-       its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+       its_encode_collection(cmd, col->col_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_mapvi_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_movi_cmd.dev,
+                              desc->its_movi_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_MOVI);
        its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
-       its_encode_event_id(cmd, desc->its_movi_cmd.id);
+       its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
        its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_movi_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_discard_cmd.dev,
+                              desc->its_discard_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_DISCARD);
        its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_discard_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_inv_cmd.dev,
+                              desc->its_inv_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_INV);
        its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_inv_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
 
        desc.its_movi_cmd.dev = dev;
        desc.its_movi_cmd.col = col;
-       desc.its_movi_cmd.id = id;
+       desc.its_movi_cmd.event_id = id;
 
        its_send_single_command(dev->its, its_build_movi_cmd, &desc);
 }
@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
 static inline u32 its_get_event_id(struct irq_data *d)
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-       return d->hwirq - its_dev->lpi_base;
+       return d->hwirq - its_dev->event_map.lpi_base;
 }
 
 static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
        target_col = &its_dev->its->collections[cpu];
        its_send_movi(its_dev, target_col, id);
-       its_dev->collection = target_col;
+       its_dev->event_map.col_map[id] = cpu;
 
        return IRQ_SET_MASK_OK_DONE;
 }
@@ -713,8 +745,10 @@ out:
        return bitmap;
 }
 
-static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(struct event_lpi_map *map)
 {
+       int base = map->lpi_base;
+       int nr_ids = map->nr_lpis;
        int lpi;
 
        spin_lock(&lpi_lock);
@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
 
        spin_unlock(&lpi_lock);
 
-       kfree(bitmap);
+       kfree(map->lpi_map);
+       kfree(map->col_map);
 }
 
 /*
@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        struct its_device *dev;
        unsigned long *lpi_map;
        unsigned long flags;
+       u16 *col_map = NULL;
        void *itt;
        int lpi_base;
        int nr_lpis;
        int nr_ites;
-       int cpu;
        int sz;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+       if (lpi_map)
+               col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
 
-       if (!dev || !itt || !lpi_map) {
+       if (!dev || !itt || !lpi_map || !col_map) {
                kfree(dev);
                kfree(itt);
                kfree(lpi_map);
+               kfree(col_map);
                return NULL;
        }
 
        dev->its = its;
        dev->itt = itt;
        dev->nr_ites = nr_ites;
-       dev->lpi_map = lpi_map;
-       dev->lpi_base = lpi_base;
-       dev->nr_lpis = nr_lpis;
+       dev->event_map.lpi_map = lpi_map;
+       dev->event_map.col_map = col_map;
+       dev->event_map.lpi_base = lpi_base;
+       dev->event_map.nr_lpis = nr_lpis;
        dev->device_id = dev_id;
        INIT_LIST_HEAD(&dev->entry);
 
@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        list_add(&dev->entry, &its->its_device_list);
        raw_spin_unlock_irqrestore(&its->lock, flags);
 
-       /* Bind the device to the first possible CPU */
-       cpu = cpumask_first(cpu_online_mask);
-       dev->collection = &its->collections[cpu];
-
        /* Map device to its ITT */
        its_send_mapd(dev, 1);
 
@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
 {
        int idx;
 
-       idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
-       if (idx == dev->nr_lpis)
+       idx = find_first_zero_bit(dev->event_map.lpi_map,
+                                 dev->event_map.nr_lpis);
+       if (idx == dev->event_map.nr_lpis)
                return -ENOSPC;
 
-       *hwirq = dev->lpi_base + idx;
-       set_bit(idx, dev->lpi_map);
+       *hwirq = dev->event_map.lpi_base + idx;
+       set_bit(idx, dev->event_map.lpi_map);
 
        return 0;
 }
@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                irq_domain_set_hwirq_and_chip(domain, virq + i,
                                              hwirq, &its_irq_chip, its_dev);
                dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
-                       (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+                       (int)(hwirq - its_dev->event_map.lpi_base),
+                       (int)hwirq, virq + i);
        }
 
        return 0;
@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
 
+       /* Bind the LPI to the first possible CPU */
+       its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+
        /* Map the GIC IRQ and event to the device */
        its_send_mapvi(its_dev, d->hwirq, event);
 }
@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                u32 event = its_get_event_id(data);
 
                /* Mark interrupt index as unused */
-               clear_bit(event, its_dev->lpi_map);
+               clear_bit(event, its_dev->event_map.lpi_map);
 
                /* Nuke the entry in the domain */
                irq_domain_reset_irq_data(data);
        }
 
        /* If all interrupts have been freed, start mopping the floor */
-       if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
-               its_lpi_free(its_dev->lpi_map,
-                            its_dev->lpi_base,
-                            its_dev->nr_lpis);
+       if (bitmap_empty(its_dev->event_map.lpi_map,
+                        its_dev->event_map.nr_lpis)) {
+               its_lpi_free(&its_dev->event_map);
 
                /* Unmap device/itt */
                its_send_mapd(its_dev, 0);
index 135a090..c90118e 100644 (file)
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        bitmap_super_t *sb;
        unsigned long chunksize, daemon_sleep, write_behind;
 
-       bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+       bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (bitmap->storage.sb_page == NULL)
                return -ENOMEM;
        bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        sb->state = cpu_to_le32(bitmap->flags);
        bitmap->events_cleared = bitmap->mddev->events;
        sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+       bitmap->mddev->bitmap_info.nodes = 0;
 
        kunmap_atomic(sb);
 
@@ -611,8 +612,16 @@ re_read:
        daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
        write_behind = le32_to_cpu(sb->write_behind);
        sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-       nodes = le32_to_cpu(sb->nodes);
-       strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+       /* XXX: This is a hack to ensure that we don't use clustering
+        *  in case:
+        *      - dm-raid is in use and
+        *      - the nodes written in bitmap_sb is erroneous.
+        */
+       if (!bitmap->mddev->sync_super) {
+               nodes = le32_to_cpu(sb->nodes);
+               strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+                               sb->cluster_name, 64);
+       }
 
        /* verify that the bitmap-specific fields are valid */
        if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
index b04d1f9..004e463 100644 (file)
@@ -171,7 +171,8 @@ static void remove_cache_hash_entry(struct wb_cache_entry *e)
 /* Public interface (see dm-cache-policy.h */
 static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
-                 struct bio *bio, struct policy_result *result)
+                 struct bio *bio, struct policy_locker *locker,
+                 struct policy_result *result)
 {
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
index 2256a1f..c198e6d 100644 (file)
  */
 static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                             bool can_block, bool can_migrate, bool discarded_oblock,
-                            struct bio *bio, struct policy_result *result)
+                            struct bio *bio, struct policy_locker *locker,
+                            struct policy_result *result)
 {
-       return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+       return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result);
 }
 
 static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
index 3ddd116..515d44b 100644 (file)
@@ -693,9 +693,10 @@ static void requeue(struct mq_policy *mq, struct entry *e)
  * - set the hit count to a hard coded value other than 1, eg, is it better
  *   if it goes in at level 2?
  */
-static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq,
+                        struct policy_locker *locker, dm_oblock_t *oblock)
 {
-       struct entry *demoted = pop(mq, &mq->cache_clean);
+       struct entry *demoted = peek(&mq->cache_clean);
 
        if (!demoted)
                /*
@@ -707,6 +708,13 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
                 */
                return -ENOSPC;
 
+       if (locker->fn(locker, demoted->oblock))
+               /*
+                * We couldn't lock the demoted block.
+                */
+               return -EBUSY;
+
+       del(mq, demoted);
        *oblock = demoted->oblock;
        free_entry(&mq->cache_pool, demoted);
 
@@ -795,6 +803,7 @@ static int cache_entry_found(struct mq_policy *mq,
  * finding which cache block to use.
  */
 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+                             struct policy_locker *locker,
                              struct policy_result *result)
 {
        int r;
@@ -803,11 +812,12 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
        /* Ensure there's a free cblock in the cache */
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
-               r = demote_cblock(mq, &result->old_oblock);
+               r = demote_cblock(mq, locker, &result->old_oblock);
                if (r) {
                        result->op = POLICY_MISS;
                        return 0;
                }
+
        } else
                result->op = POLICY_NEW;
 
@@ -829,7 +839,8 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
 
 static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
-                                int data_dir, struct policy_result *result)
+                                int data_dir, struct policy_locker *locker,
+                                struct policy_result *result)
 {
        int r = 0;
 
@@ -842,7 +853,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
 
        else {
                requeue(mq, e);
-               r = pre_cache_to_cache(mq, e, result);
+               r = pre_cache_to_cache(mq, e, locker, result);
        }
 
        return r;
@@ -872,6 +883,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
 }
 
 static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+                           struct policy_locker *locker,
                            struct policy_result *result)
 {
        int r;
@@ -879,7 +891,7 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
 
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
-               r = demote_cblock(mq, &result->old_oblock);
+               r = demote_cblock(mq, locker, &result->old_oblock);
                if (unlikely(r)) {
                        result->op = POLICY_MISS;
                        insert_in_pre_cache(mq, oblock);
@@ -907,11 +919,12 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
 
 static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
-                         int data_dir, struct policy_result *result)
+                         int data_dir, struct policy_locker *locker,
+                         struct policy_result *result)
 {
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
                if (can_migrate)
-                       insert_in_cache(mq, oblock, result);
+                       insert_in_cache(mq, oblock, locker, result);
                else
                        return -EWOULDBLOCK;
        } else {
@@ -928,7 +941,8 @@ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
  */
 static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
-              int data_dir, struct policy_result *result)
+              int data_dir, struct policy_locker *locker,
+              struct policy_result *result)
 {
        int r = 0;
        struct entry *e = hash_lookup(mq, oblock);
@@ -942,11 +956,11 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
 
        else if (e)
                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
-                                         data_dir, result);
+                                         data_dir, locker, result);
 
        else
                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
-                                  data_dir, result);
+                                  data_dir, locker, result);
 
        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;
@@ -1012,7 +1026,8 @@ static void copy_tick(struct mq_policy *mq)
 
 static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
-                 struct bio *bio, struct policy_result *result)
+                 struct bio *bio, struct policy_locker *locker,
+                 struct policy_result *result)
 {
        int r;
        struct mq_policy *mq = to_mq_policy(p);
@@ -1028,7 +1043,7 @@ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 
        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
-               bio_data_dir(bio), result);
+               bio_data_dir(bio), locker, result);
 
        mutex_unlock(&mq->lock);
 
index f50fe36..5524e21 100644 (file)
@@ -69,6 +69,18 @@ enum policy_operation {
        POLICY_REPLACE
 };
 
+/*
+ * When issuing a POLICY_REPLACE the policy needs to make a callback to
+ * lock the block being demoted.  This doesn't need to occur during a
+ * writeback operation since the block remains in the cache.
+ */
+struct policy_locker;
+typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
+
+struct policy_locker {
+       policy_lock_fn fn;
+};
+
 /*
  * This is the instruction passed back to the core target.
  */
@@ -122,7 +134,8 @@ struct dm_cache_policy {
         */
        int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
                   bool can_block, bool can_migrate, bool discarded_oblock,
-                  struct bio *bio, struct policy_result *result);
+                  struct bio *bio, struct policy_locker *locker,
+                  struct policy_result *result);
 
        /*
         * Sometimes we want to see if a block is in the cache, without
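
The policy_locker comment above describes a callback the cache target hands to the policy so that, before a POLICY_REPLACE result is returned, the block about to be demoted can be locked, or the demotion abandoned if it cannot be. A minimal sketch of that callback-embedded-in-a-struct pattern, using container_of to recover the caller's context (stand-in types, not the dm-cache ones):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

typedef unsigned long oblock_t;

struct policy_locker;
typedef int (*policy_lock_fn)(struct policy_locker *l, oblock_t oblock);

struct policy_locker {
        policy_lock_fn fn;
};

/* Caller-side wrapper: the locker plus whatever state the callback needs. */
struct my_lock_ctx {
        struct policy_locker locker;
        int locked_count;
};

static int my_lock(struct policy_locker *l, oblock_t oblock)
{
        struct my_lock_ctx *ctx = container_of(l, struct my_lock_ctx, locker);

        ctx->locked_count++;
        printf("locking origin block %lu\n", oblock);
        return 0;                       /* 0 = lock acquired */
}

/* Policy side: ask the caller to lock the demotion candidate first. */
static int demote_block(struct policy_locker *locker, oblock_t candidate)
{
        if (locker->fn(locker, candidate))
                return -1;              /* couldn't lock, skip this demotion */
        return 0;
}

int main(void)
{
        struct my_lock_ctx ctx = { .locker = { .fn = my_lock } };

        demote_block(&ctx.locker, 42);
        printf("locked %d block(s)\n", ctx.locked_count);
        return 0;
}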
index 7755af3..e049bec 100644 (file)
@@ -1445,16 +1445,43 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
                   &cache->stats.read_miss : &cache->stats.write_miss);
 }
 
+/*----------------------------------------------------------------*/
+
+struct old_oblock_lock {
+       struct policy_locker locker;
+       struct cache *cache;
+       struct prealloc *structs;
+       struct dm_bio_prison_cell *cell;
+};
+
+static int null_locker(struct policy_locker *locker, dm_oblock_t b)
+{
+       /* This should never be called */
+       BUG();
+       return 0;
+}
+
+static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
+{
+       struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
+       struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
+
+       return bio_detain(l->cache, b, NULL, cell_prealloc,
+                         (cell_free_fn) prealloc_put_cell,
+                         l->structs, &l->cell);
+}
+
 static void process_bio(struct cache *cache, struct prealloc *structs,
                        struct bio *bio)
 {
        int r;
        bool release_cell = true;
        dm_oblock_t block = get_bio_block(cache, bio);
-       struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+       struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
        struct policy_result lookup_result;
        bool passthrough = passthrough_mode(&cache->features);
        bool discarded_block, can_migrate;
+       struct old_oblock_lock ool;
 
        /*
         * Check to see if that block is currently migrating.
@@ -1469,8 +1496,12 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
        discarded_block = is_discarded_oblock(cache, block);
        can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
 
+       ool.locker.fn = cell_locker;
+       ool.cache = cache;
+       ool.structs = structs;
+       ool.cell = NULL;
        r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
-                      bio, &lookup_result);
+                      bio, &ool.locker, &lookup_result);
 
        if (r == -EWOULDBLOCK)
                /* migration has been denied */
@@ -1527,27 +1558,11 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
                break;
 
        case POLICY_REPLACE:
-               cell_prealloc = prealloc_get_cell(structs);
-               r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
-                              (cell_free_fn) prealloc_put_cell,
-                              structs, &old_ocell);
-               if (r > 0) {
-                       /*
-                        * We have to be careful to avoid lock inversion of
-                        * the cells.  So we back off, and wait for the
-                        * old_ocell to become free.
-                        */
-                       policy_force_mapping(cache->policy, block,
-                                            lookup_result.old_oblock);
-                       atomic_inc(&cache->stats.cache_cell_clash);
-                       break;
-               }
                atomic_inc(&cache->stats.demotion);
                atomic_inc(&cache->stats.promotion);
-
                demote_then_promote(cache, structs, lookup_result.old_oblock,
                                    block, lookup_result.cblock,
-                                   old_ocell, new_ocell);
+                                   ool.cell, new_ocell);
                release_cell = false;
                break;
 
@@ -2595,6 +2610,9 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
        bool discarded_block;
        struct policy_result lookup_result;
        struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+       struct old_oblock_lock ool;
+
+       ool.locker.fn = null_locker;
 
        if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
                /*
@@ -2633,7 +2651,7 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
        discarded_block = is_discarded_oblock(cache, block);
 
        r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
-                      bio, &lookup_result);
+                      bio, &ool.locker, &lookup_result);
        if (r == -EWOULDBLOCK) {
                cell_defer(cache, *cell, true);
                return DM_MAPIO_SUBMITTED;
index f478a4c..419bdd4 100644
@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
                return -EINVAL;
 
        if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+               if (!divisor)
+                       return -EINVAL;
                step = end - start;
                if (do_div(step, divisor))
                        step++;
index 79f6941..cde1d67 100644
@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
                return r;
 
        disk_super = dm_block_data(copy);
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+       dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
        dm_sm_dec_block(pmd->metadata_sm, held_root);
 
        return dm_tm_unlock(pmd->tm, copy);
index 921aafd..e22e6c8 100644
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/sort.h>
 #include <linux/rbtree.h>
 
@@ -260,7 +261,7 @@ struct pool {
        process_mapping_fn process_prepared_mapping;
        process_mapping_fn process_prepared_discard;
 
-       struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
+       struct dm_bio_prison_cell **cell_sort_array;
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
@@ -2499,6 +2500,7 @@ static void __pool_destroy(struct pool *pool)
 {
        __pool_table_remove(pool);
 
+       vfree(pool->cell_sort_array);
        if (dm_pool_metadata_close(pool->pmd) < 0)
                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
@@ -2611,6 +2613,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                goto bad_mapping_pool;
        }
 
+       pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+       if (!pool->cell_sort_array) {
+               *error = "Error allocating cell sort array";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_sort_array;
+       }
+
        pool->ref_count = 1;
        pool->last_commit_jiffies = jiffies;
        pool->pool_md = pool_md;
@@ -2619,6 +2628,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        return pool;
 
+bad_sort_array:
+       mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
        dm_deferred_set_destroy(pool->all_io_ds);
 bad_all_io_ds:
index 9275239..98347e2 100644
@@ -1053,13 +1053,10 @@ static struct dm_rq_target_io *tio_from_request(struct request *rq)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
-       int nr_requests_pending;
-
        atomic_dec(&md->pending[rw]);
 
        /* nudge anyone waiting on suspend queue */
-       nr_requests_pending = md_in_flight(md);
-       if (!nr_requests_pending)
+       if (!md_in_flight(md))
                wake_up(&md->wait);
 
        /*
@@ -1071,8 +1068,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        if (run_queue) {
                if (md->queue->mq_ops)
                        blk_mq_run_hw_queues(md->queue, true);
-               else if (!nr_requests_pending ||
-                        (nr_requests_pending >= md->queue->nr_congestion_on))
+               else
                        blk_run_queue_async(md->queue);
        }
 
@@ -1723,7 +1719,8 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors, max_size = 0;
+       sector_t max_sectors;
+       int max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1736,18 +1733,10 @@ static int dm_merge_bvec(struct request_queue *q,
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
-                         (sector_t) queue_max_sectors(q));
+                         (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-       /*
-        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-        * to the targets' merge function since it holds sectors not bytes).
-        * Just doing this as an interim fix for stable@ because the more
-        * comprehensive cleanup of switching to sector_t will impact every
-        * DM target that implements a ->merge hook.
-        */
-       if (max_size > INT_MAX)
-               max_size = INT_MAX;
+       if (max_size < 0)
+               max_size = 0;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1755,13 +1744,13 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
-        * provided their merge_bvec method (we know this by looking for the
-        * max_hw_sectors that dm_set_device_limits may set), then we can't
-        * allow bios with multiple vector entries.  So always set max_size
-        * to 0, and the code below allows just one page.
+        * provided their merge_bvec method (we know this by looking at
+        * queue_max_hw_sectors), then we can't allow bios with multiple vector
+        * entries.  So always set max_size to 0, and the code below allows
+        * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
                max_size = 0;
index 4dbed4a..e462151 100644
@@ -4005,8 +4005,10 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
        else
                rdev = md_import_device(dev, -1, -1);
 
-       if (IS_ERR(rdev))
+       if (IS_ERR(rdev)) {
+               mddev_unlock(mddev);
                return PTR_ERR(rdev);
+       }
        err = bind_rdev_to_array(rdev, mddev);
  out:
        if (err)
@@ -5159,6 +5161,7 @@ int md_run(struct mddev *mddev)
                mddev_detach(mddev);
                if (mddev->private)
                        pers->free(mddev, mddev->private);
+               mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
                return err;
@@ -5294,6 +5297,7 @@ static void md_clean(struct mddev *mddev)
        mddev->changed = 0;
        mddev->degraded = 0;
        mddev->safemode = 0;
+       mddev->private = NULL;
        mddev->merge_check_needed = 0;
        mddev->bitmap_info.offset = 0;
        mddev->bitmap_info.default_offset = 0;
@@ -5366,6 +5370,7 @@ static void __md_stop(struct mddev *mddev)
        mddev->pers = NULL;
        spin_unlock(&mddev->lock);
        pers->free(mddev, mddev->private);
+       mddev->private = NULL;
        if (pers->sync_request && mddev->to_remove == NULL)
                mddev->to_remove = &md_redundancy_group;
        module_put(pers->owner);
@@ -5735,7 +5740,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
        char *ptr;
        int err;
 
-       file = kmalloc(sizeof(*file), GFP_NOIO);
+       file = kzalloc(sizeof(*file), GFP_NOIO);
        if (!file)
                return -ENOMEM;
 
@@ -6375,7 +6380,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
            mddev->ctime         != info->ctime         ||
            mddev->level         != info->level         ||
 /*         mddev->layout        != info->layout        || */
-           !mddev->persistent   != info->not_persistent||
+           mddev->persistent    != !info->not_persistent ||
            mddev->chunk_sectors != info->chunk_size >> 9 ||
            /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
            ((state^info->state) & 0xfffffe00)
index b88757c..a03178e 100644
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
 
                if (s < 0 && nr_center < -s) {
                        /* not enough in central node */
-                       shift(left, center, nr_center);
-                       s = nr_center - target;
+                       shift(left, center, -nr_center);
+                       s += nr_center;
                        shift(left, right, s);
                        nr_right += s;
                } else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                if (s > 0 && nr_center < s) {
                        /* not enough in central node */
                        shift(center, right, nr_center);
-                       s = target - nr_center;
+                       s -= nr_center;
                        shift(left, right, s);
                        nr_left -= s;
                } else
index 200ac12..fdd3793 100644
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
        int r;
        struct del_stack *s;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kmalloc(sizeof(*s), GFP_NOIO);
        if (!s)
                return -ENOMEM;
        s->info = info;
index e8a9042..5309129 100644
@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
        smm->recursion_count++;
 }
 
+static int apply_bops(struct sm_metadata *smm)
+{
+       int r = 0;
+
+       while (!brb_empty(&smm->uncommitted)) {
+               struct block_op bop;
+
+               r = brb_pop(&smm->uncommitted, &bop);
+               if (r) {
+                       DMERR("bug in bop ring buffer");
+                       break;
+               }
+
+               r = commit_bop(smm, &bop);
+               if (r)
+                       break;
+       }
+
+       return r;
+}
+
 static int out(struct sm_metadata *smm)
 {
        int r = 0;
@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
                return -ENOMEM;
        }
 
-       if (smm->recursion_count == 1) {
-               while (!brb_empty(&smm->uncommitted)) {
-                       struct block_op bop;
-
-                       r = brb_pop(&smm->uncommitted, &bop);
-                       if (r) {
-                               DMERR("bug in bop ring buffer");
-                               break;
-                       }
-
-                       r = commit_bop(smm, &bop);
-                       if (r)
-                               break;
-               }
-       }
+       if (smm->recursion_count == 1)
+               apply_bops(smm);
 
        smm->recursion_count--;
 
@@ -704,6 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
                }
                old_len = smm->begin;
 
+               r = apply_bops(smm);
+               if (r) {
+                       DMERR("%s: apply_bops failed", __func__);
+                       goto out;
+               }
+
                r = sm_ll_commit(&smm->ll);
                if (r)
                        goto out;
@@ -773,6 +787,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
        if (r)
                return r;
 
+       r = apply_bops(smm);
+       if (r) {
+               DMERR("%s: apply_bops failed", __func__);
+               return r;
+       }
+
        return sm_metadata_commit(sm);
 }
 
index 9157a29..5ce3cd5 100644
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-                    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+                    test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
@@ -1475,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
+       unsigned long flags;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1495,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                return;
        }
        set_bit(Blocked, &rdev->flags);
+       spin_lock_irqsave(&conf->device_lock, flags);
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
-               unsigned long flags;
-               spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                set_bit(Faulty, &rdev->flags);
-               spin_unlock_irqrestore(&conf->device_lock, flags);
        } else
                set_bit(Faulty, &rdev->flags);
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        /*
         * if recovery is running, make sure it aborts.
         */
@@ -1567,7 +1567,10 @@ static int raid1_spare_active(struct mddev *mddev)
         * Find all failed disks within the RAID1 configuration
         * and mark them readable.
         * Called under mddev lock, so rcu protection not needed.
+        * device_lock used to avoid races with raid1_end_read_request
+        * which expects 'In_sync' flags and ->degraded to be consistent.
         */
+       spin_lock_irqsave(&conf->device_lock, flags);
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1601,6 @@ static int raid1_spare_active(struct mddev *mddev)
                        sysfs_notify_dirent_safe(rdev->sysfs_state);
                }
        }
-       spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded -= count;
        spin_unlock_irqrestore(&conf->device_lock, flags);
 
index 8001690..ba6c8f6 100644
@@ -605,6 +605,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
                        }
                }
 
+               /* Return an error if can't find bandwidth or the right clock */
+               if (i == ARRAY_SIZE(coeff_lut))
+                       return -EINVAL;
+
                ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
                        sizeof(coeff_lut[i].val));
        }
index 2916d7c..7bc68b3 100644
@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
        struct cx24116_state *state = fe->demodulator_priv;
        int i, ret;
 
+       /* Validate length */
+       if (d->msg_len > sizeof(d->msg))
+                return -EINVAL;
+
        /* Dump DiSEqC message */
        if (debug) {
                printk(KERN_INFO "cx24116: %s(", __func__);
@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
                printk(") toneburst=%d\n", toneburst);
        }
 
-       /* Validate length */
-       if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
-               return -EINVAL;
-
        /* DiSEqC message */
        for (i = 0; i < d->msg_len; i++)
                state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
index acb965c..af63635 100644
@@ -1043,7 +1043,7 @@ static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
        dev_dbg(&state->priv->i2c->dev, ")\n");
 
        /* Validate length */
-       if (d->msg_len > 15)
+       if (d->msg_len > sizeof(d->msg))
                return -EINVAL;
 
        /* DiSEqC message */
index 93eeaf7..0b4f8fe 100644
@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
        int result = 0;
 
        dprintk("enter %s\n", __func__);
-       if (cmd->msg_len > 8)
+       if (cmd->msg_len > sizeof(cmd->msg))
                return -EINVAL;
 
        /* setup for DISEQC */
index c82d25d..c986084 100644
@@ -90,6 +90,7 @@ static struct {
                "encoder PCM audio",
                VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
                PCI_DMA_FROMDEVICE,
+               V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
        },
        {       /* CX18_ENC_STREAM_TYPE_IDX */
                "encoder IDX",
index 9266965..7a0a651 100644
@@ -721,13 +721,14 @@ static int vidioc_querycap(struct file *file, void  *priv,
                sizeof(cap->card));
        sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
 
-       cap->capabilities =
+       cap->device_caps =
                V4L2_CAP_VIDEO_CAPTURE |
-               V4L2_CAP_READWRITE     |
-               0;
+               V4L2_CAP_READWRITE |
+               V4L2_CAP_TUNER;
 
-       cap->capabilities |= V4L2_CAP_TUNER;
-       cap->version = 0;
+       cap->capabilities = cap->device_caps |
+               V4L2_CAP_VBI_CAPTURE |
+               V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 6e025fe..06117e6 100644
@@ -660,13 +660,14 @@ static int vidioc_querycap(struct file *file, void  *priv,
                sizeof(cap->card));
        sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
 
-       cap->capabilities =
+       cap->device_caps =
                V4L2_CAP_VBI_CAPTURE |
-               V4L2_CAP_READWRITE     |
-               0;
+               V4L2_CAP_READWRITE |
+               V4L2_CAP_TUNER;
 
-       cap->capabilities |= V4L2_CAP_TUNER;
-       cap->version = 0;
+       cap->capabilities = cap->device_caps |
+               V4L2_CAP_VIDEO_CAPTURE |
+               V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 2b40393..0d248ce 100644
@@ -655,10 +655,20 @@ out:
 struct dib0700_rc_response {
        u8 report_id;
        u8 data_state;
-       u8 system;
-       u8 not_system;
-       u8 data;
-       u8 not_data;
+       union {
+               struct {
+                       u8 system;
+                       u8 not_system;
+                       u8 data;
+                       u8 not_data;
+               } nec;
+               struct {
+                       u8 not_used;
+                       u8 system;
+                       u8 data;
+                       u8 not_data;
+               } rc5;
+       };
 };
 #define RC_MSG_SIZE_V1_20 6
 
@@ -694,8 +704,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
 
        deb_data("IR ID = %02X state = %02X System = %02X %02X Cmd = %02X %02X (len %d)\n",
                 poll_reply->report_id, poll_reply->data_state,
-                poll_reply->system, poll_reply->not_system,
-                poll_reply->data, poll_reply->not_data,
+                poll_reply->nec.system, poll_reply->nec.not_system,
+                poll_reply->nec.data, poll_reply->nec.not_data,
                 purb->actual_length);
 
        switch (d->props.rc.core.protocol) {
@@ -704,30 +714,30 @@ static void dib0700_rc_urb_completion(struct urb *purb)
                toggle = 0;
 
                /* NEC protocol sends repeat code as 0 0 0 FF */
-               if (poll_reply->system     == 0x00 &&
-                   poll_reply->not_system == 0x00 &&
-                   poll_reply->data       == 0x00 &&
-                   poll_reply->not_data   == 0xff) {
+               if (poll_reply->nec.system     == 0x00 &&
+                   poll_reply->nec.not_system == 0x00 &&
+                   poll_reply->nec.data       == 0x00 &&
+                   poll_reply->nec.not_data   == 0xff) {
                        poll_reply->data_state = 2;
                        break;
                }
 
-               if ((poll_reply->data ^ poll_reply->not_data) != 0xff) {
+               if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
                        deb_data("NEC32 protocol\n");
-                       keycode = RC_SCANCODE_NEC32(poll_reply->system     << 24 |
-                                                    poll_reply->not_system << 16 |
-                                                    poll_reply->data       << 8  |
-                                                    poll_reply->not_data);
-               } else if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
+                       keycode = RC_SCANCODE_NEC32(poll_reply->nec.system     << 24 |
+                                                    poll_reply->nec.not_system << 16 |
+                                                    poll_reply->nec.data       << 8  |
+                                                    poll_reply->nec.not_data);
+               } else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) {
                        deb_data("NEC extended protocol\n");
-                       keycode = RC_SCANCODE_NECX(poll_reply->system << 8 |
-                                                   poll_reply->not_system,
-                                                   poll_reply->data);
+                       keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 |
+                                                   poll_reply->nec.not_system,
+                                                   poll_reply->nec.data);
 
                } else {
                        deb_data("NEC normal protocol\n");
-                       keycode = RC_SCANCODE_NEC(poll_reply->system,
-                                                  poll_reply->data);
+                       keycode = RC_SCANCODE_NEC(poll_reply->nec.system,
+                                                  poll_reply->nec.data);
                }
 
                break;
@@ -735,19 +745,19 @@ static void dib0700_rc_urb_completion(struct urb *purb)
                deb_data("RC5 protocol\n");
                protocol = RC_TYPE_RC5;
                toggle = poll_reply->report_id;
-               keycode = RC_SCANCODE_RC5(poll_reply->system, poll_reply->data);
+               keycode = RC_SCANCODE_RC5(poll_reply->rc5.system, poll_reply->rc5.data);
+
+               if ((poll_reply->rc5.data ^ poll_reply->rc5.not_data) != 0xff) {
+                       /* Key failed integrity check */
+                       err("key failed integrity check: %02x %02x %02x %02x",
+                           poll_reply->rc5.not_used, poll_reply->rc5.system,
+                           poll_reply->rc5.data, poll_reply->rc5.not_data);
+                       goto resubmit;
+               }
 
                break;
        }
 
-       if ((poll_reply->data + poll_reply->not_data) != 0xff) {
-               /* Key failed integrity check */
-               err("key failed integrity check: %02x %02x %02x %02x",
-                   poll_reply->system,  poll_reply->not_system,
-                   poll_reply->data, poll_reply->not_data);
-               goto resubmit;
-       }
-
        rc_keydown(d->rc_dev, protocol, keycode, toggle);
 
 resubmit:
index d7d55a2..c170523 100644
@@ -3944,6 +3944,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
 
                                DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
                        }},
+                               .size_of_priv = sizeof(struct
+                                               dib0700_adapter_state),
                        }, {
                        .num_frontends = 1,
                        .fe = {{
@@ -3956,6 +3958,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
 
                                DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
                        }},
+                               .size_of_priv = sizeof(struct
+                                               dib0700_adapter_state),
                        }
                },
 
@@ -4009,6 +4013,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
 
                                DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
                        }},
+                               .size_of_priv = sizeof(struct
+                                               dib0700_adapter_state),
                        },
                },
 
index 66ada01..cf9d644 100644
@@ -1237,6 +1237,23 @@ void vb2_discard_done(struct vb2_queue *q)
 }
 EXPORT_SYMBOL_GPL(vb2_discard_done);
 
+static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
+{
+       static bool __check_once __read_mostly;
+
+       if (__check_once)
+               return;
+
+       __check_once = true;
+       __WARN();
+
+       pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+       if (vb->vb2_queue->allow_zero_bytesused)
+               pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+       else
+               pr_warn_once("use the actual size instead.\n");
+}
+
 /**
  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
  * v4l2_buffer by the userspace. The caller has already verified that struct
@@ -1247,16 +1264,6 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
 {
        unsigned int plane;
 
-       if (V4L2_TYPE_IS_OUTPUT(b->type)) {
-               if (WARN_ON_ONCE(b->bytesused == 0)) {
-                       pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
-                       if (vb->vb2_queue->allow_zero_bytesused)
-                               pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
-                       else
-                               pr_warn_once("use the actual size instead.\n");
-               }
-       }
-
        if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
                if (b->memory == V4L2_MEMORY_USERPTR) {
                        for (plane = 0; plane < vb->num_planes; ++plane) {
@@ -1297,6 +1304,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
                                struct v4l2_plane *pdst = &v4l2_planes[plane];
                                struct v4l2_plane *psrc = &b->m.planes[plane];
 
+                               if (psrc->bytesused == 0)
+                                       vb2_warn_zero_bytesused(vb);
+
                                if (vb->vb2_queue->allow_zero_bytesused)
                                        pdst->bytesused = psrc->bytesused;
                                else
@@ -1331,6 +1341,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
                }
 
                if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+                       if (b->bytesused == 0)
+                               vb2_warn_zero_bytesused(vb);
+
                        if (vb->vb2_queue->allow_zero_bytesused)
                                v4l2_planes[0].bytesused = b->bytesused;
                        else
index 6ca6dfa..6523903 100644
@@ -912,10 +912,6 @@ int arizona_dev_init(struct arizona *arizona)
                             arizona->pdata.gpio_defaults[i]);
        }
 
-       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
-       pm_runtime_use_autosuspend(arizona->dev);
-       pm_runtime_enable(arizona->dev);
-
        /* Chip default */
        if (!arizona->pdata.clk32k_src)
                arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1012,11 +1008,17 @@ int arizona_dev_init(struct arizona *arizona)
                                           arizona->pdata.spk_fmt[i]);
        }
 
+       pm_runtime_set_active(arizona->dev);
+       pm_runtime_enable(arizona->dev);
+
        /* Set up for interrupts */
        ret = arizona_irq_init(arizona);
        if (ret != 0)
                goto err_reset;
 
+       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+       pm_runtime_use_autosuspend(arizona->dev);
+
        arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
                            arizona_clkgen_err, arizona);
        arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1045,10 +1047,6 @@ int arizona_dev_init(struct arizona *arizona)
                goto err_irq;
        }
 
-#ifdef CONFIG_PM
-       regulator_disable(arizona->dcvdd);
-#endif
-
        return 0;
 
 err_irq:
index d1b55fe..e4dc8cd 100644
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                area = ctx->afu->psn_phys;
-               if (offset > ctx->afu->adapter->ps_size)
+               if (offset >= ctx->afu->adapter->ps_size)
                        return VM_FAULT_SIGBUS;
        } else {
                area = ctx->psn_phys;
-               if (offset > ctx->psn_size)
+               if (offset >= ctx->psn_size)
                        return VM_FAULT_SIGBUS;
        }
 
index 8ccddce..de350dd 100644
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
                spin_lock(&adapter->afu_list_lock);
                for (slice = 0; slice < adapter->slices; slice++) {
                        afu = adapter->afu[slice];
-                       if (!afu->enabled)
+                       if (!afu || !afu->enabled)
                                continue;
                        rcu_read_lock();
                        idr_for_each_entry(&afu->contexts_idr, ctx, id)
index 3e29681..e40bcd0 100644
@@ -685,7 +685,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
        /* Fill in the data structures */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
-       dev->cdev.owner = mei_fops.owner;
+       dev->cdev.owner = parent->driver->owner;
 
        /* Add the device */
        ret = cdev_add(&dev->cdev, devno, 1);
index 60f7141..31d2627 100644
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
 
        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 
+       mmc_blk_put(md);
+
        return ret;
 }
 
@@ -1910,9 +1912,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                        break;
                case MMC_BLK_CMD_ERR:
                        ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-                       if (!mmc_blk_reset(md, card->host, type))
-                               break;
-                       goto cmd_abort;
+                       if (mmc_blk_reset(md, card->host, type))
+                               goto cmd_abort;
+                       if (!ret)
+                               goto start_new_req;
+                       break;
                case MMC_BLK_RETRY:
                        if (retry++ < 5)
                                break;
index 9df2b68..d0abdff 100644
@@ -1062,6 +1062,10 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
 
                if (status & (CTO_EN | CCRC_EN))
                        end_cmd = 1;
+               if (host->data || host->response_busy) {
+                       end_trans = !end_cmd;
+                       host->response_busy = 0;
+               }
                if (status & (CTO_EN | DTO_EN))
                        hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
                else if (status & (CCRC_EN | DCRC_EN))
@@ -1081,10 +1085,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
                        }
                        dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
                }
-               if (host->data || host->response_busy) {
-                       end_trans = !end_cmd;
-                       host->response_busy = 0;
-               }
        }
 
        OMAP_HSMMC_WRITE(host->base, STAT, status);
index 3497cfa..a870c42 100644
@@ -45,6 +45,6 @@
 #define ESDHC_DMA_SYSCTL       0x40c
 #define ESDHC_DMA_SNOOP                0x00000040
 
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
 
 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
index b5103a2..065dc70 100644
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
                        goto err_of_parse;
                sdhci_get_of_property(pdev);
                pdata = pxav3_get_mmc_pdata(dev);
+               pdev->dev.platform_data = pdata;
        } else if (pdata) {
                /* on-chip device */
                if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
index f09bc10..bec8a30 100644
@@ -2691,31 +2691,6 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
        return isr ? IRQ_HANDLED : IRQ_NONE;
 }
 
-#ifdef CONFIG_PREEMPT_RT_BASE
-static irqreturn_t sdhci_rt_irq(int irq, void *dev_id)
-{
-       irqreturn_t ret;
-
-       local_bh_disable();
-       ret = sdhci_irq(irq, dev_id);
-       local_bh_enable();
-       if (ret == IRQ_WAKE_THREAD)
-               ret = sdhci_thread_irq(irq, dev_id);
-       return ret;
-}
-#endif
-
-static int sdhci_req_irq(struct sdhci_host *host)
-{
-#ifdef CONFIG_PREEMPT_RT_BASE
-       return request_threaded_irq(host->irq, NULL, sdhci_rt_irq,
-                                   IRQF_SHARED, mmc_hostname(host->mmc), host);
-#else
-       return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
-                                   IRQF_SHARED, mmc_hostname(host->mmc), host);
-#endif
-}
-
 /*****************************************************************************\
  *                                                                           *
  * Suspend/resume                                                            *
@@ -2783,7 +2758,9 @@ int sdhci_resume_host(struct sdhci_host *host)
        }
 
        if (!device_may_wakeup(mmc_dev(host->mmc))) {
-               ret = sdhci_req_irq(host);
+               ret = request_threaded_irq(host->irq, sdhci_irq,
+                                          sdhci_thread_irq, IRQF_SHARED,
+                                          mmc_hostname(host->mmc), host);
                if (ret)
                        return ret;
        } else {
@@ -3060,8 +3037,11 @@ int sdhci_add_host(struct sdhci_host *host)
                                                      GFP_KERNEL);
                host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
                if (!host->adma_table || !host->align_buffer) {
-                       dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
-                                         host->adma_table, host->adma_addr);
+                       if (host->adma_table)
+                               dma_free_coherent(mmc_dev(mmc),
+                                                 host->adma_table_sz,
+                                                 host->adma_table,
+                                                 host->adma_addr);
                        kfree(host->align_buffer);
                        pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                                mmc_hostname(mmc));
@@ -3338,13 +3318,14 @@ int sdhci_add_host(struct sdhci_host *host)
                                   SDHCI_MAX_CURRENT_MULTIPLIER;
        }
 
-       /* If OCR set by external regulators, use it instead */
+       /* If OCR set by host, use it instead. */
+       if (host->ocr_mask)
+               ocr_avail = host->ocr_mask;
+
+       /* If OCR set by external regulators, give it highest prio. */
        if (mmc->ocr_avail)
                ocr_avail = mmc->ocr_avail;
 
-       if (host->ocr_mask)
-               ocr_avail &= host->ocr_mask;
-
        mmc->ocr_avail = ocr_avail;
        mmc->ocr_avail_sdio = ocr_avail;
        if (host->ocr_avail_sdio)
@@ -3440,7 +3421,8 @@ int sdhci_add_host(struct sdhci_host *host)
 
        sdhci_init(host, 0);
 
-       ret = sdhci_req_irq(host);
+       ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+                                  IRQF_SHARED, mmc_hostname(mmc), host);
        if (ret) {
                pr_err("%s: Failed to request IRQ %d: %d\n",
                       mmc_hostname(mmc), host->irq, ret);
index 041525d..5d214d1 100644
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
        int err;
+       struct pinctrl *p;
 
        /* basic c_can configuration */
        err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-       /* activate pins */
-       pinctrl_pm_select_default_state(dev->dev.parent);
+       /* Attempt to use "active" if available else use "default" */
+       p = pinctrl_get_select(priv->device, "active");
+       if (!IS_ERR(p))
+               pinctrl_put(p);
+       else
+               pinctrl_pm_select_default_state(priv->device);
+
        return 0;
 }
 
index e9b1810..aede704 100644
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
                struct can_frame *cf = (struct can_frame *)skb->data;
                u8 dlc = cf->can_dlc;
 
-               if (!(skb->tstamp.tv64))
-                       __net_timestamp(skb);
-
                netif_rx(priv->echo_skb[idx]);
                priv->echo_skb[idx] = NULL;
 
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
        if (unlikely(!skb))
                return NULL;
 
-       __net_timestamp(skb);
        skb->protocol = htons(ETH_P_CAN);
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
        memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
        if (unlikely(!skb))
                return NULL;
 
-       __net_timestamp(skb);
        skb->protocol = htons(ETH_P_CANFD);
        skb->pkt_type = PACKET_BROADCAST;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
        memset(*cfd, 0, sizeof(struct canfd_frame));
index 7deb80d..2f9ebad 100644
@@ -526,7 +526,7 @@ static int rcar_can_open(struct net_device *ndev)
        napi_enable(&priv->napi);
        err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
        if (err) {
-               netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+               netdev_err(ndev, "error requesting interrupt %d\n", ndev->irq);
                goto out_close;
        }
        can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +758,9 @@ static int rcar_can_probe(struct platform_device *pdev)
        }
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq) {
+       if (irq < 0) {
                dev_err(&pdev->dev, "No IRQ resource\n");
+               err = irq;
                goto fail;
        }
 
@@ -823,7 +824,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
        devm_can_led_init(ndev);
 
-       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+       dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
                 priv->regs, ndev->irq);
 
        return 0;
index f64f529..a23a7af 100644
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
        if (!skb)
                return;
 
-       __net_timestamp(skb);
        skb->dev = sl->dev;
        skb->protocol = htons(ETH_P_CAN);
        skb->pkt_type = PACKET_BROADCAST;
@@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = sl->dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        memcpy(skb_put(skb, sizeof(struct can_frame)),
               &cf, sizeof(struct can_frame));
index bf63fee..34c625e 100644
@@ -1221,17 +1221,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
        struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
-       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+       if (priv->after_suspend & AFTER_SUSPEND_POWER)
                mcp251x_power_enable(priv->power, 1);
+
+       if (priv->after_suspend & AFTER_SUSPEND_UP) {
+               mcp251x_power_enable(priv->transceiver, 1);
                queue_work(priv->wq, &priv->restart_work);
        } else {
-               if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       mcp251x_power_enable(priv->transceiver, 1);
-                       queue_work(priv->wq, &priv->restart_work);
-               } else {
-                       priv->after_suspend = 0;
-               }
+               priv->after_suspend = 0;
        }
+
        priv->force_quit = 0;
        enable_irq(spi->irq);
        return 0;
index 72427f2..edfec54 100644
@@ -855,6 +855,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB adapter
  */
+static const struct can_bittiming_const pcan_usb_const = {
+       .name = "pcan_usb",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb = {
        .name = "PCAN-USB",
        .device_id = PCAN_USB_PRODUCT_ID,
@@ -863,17 +875,7 @@ const struct peak_usb_adapter pcan_usb = {
        .clock = {
                .freq = PCAN_USB_CRYSTAL_HZ / 2 ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 64,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb),
index 7921cff..5a2e341 100644
@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
 
        dev->can.clock = peak_usb_adapter->clock;
-       dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+       dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
        dev->can.do_set_bittiming = peak_usb_set_bittiming;
-       dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+       dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
        dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
        dev->can.do_set_mode = peak_usb_set_mode;
        dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
index 9e624f0..506fe50 100644
@@ -48,8 +48,8 @@ struct peak_usb_adapter {
        u32 device_id;
        u32 ctrlmode_supported;
        struct can_clock clock;
-       const struct can_bittiming_const bittiming_const;
-       const struct can_bittiming_const data_bittiming_const;
+       const struct can_bittiming_const * const bittiming_const;
+       const struct can_bittiming_const * const data_bittiming_const;
        unsigned int ctrl_count;
 
        int (*intf_probe)(struct usb_interface *intf);
index 09d14e7..ce44a03 100644
@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
 }
 
 /* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+       .name = "pcan_usb_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 64,
+       .tseg2_min = 1,
+       .tseg2_max = 16,
+       .sjw_max = 16,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+       .name = "pcan_usb_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_fd = {
        .name = "PCAN-USB FD",
        .device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
        .clock = {
                .freq = PCAN_UFD_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 64,
-               .tseg2_min = 1,
-               .tseg2_max = 16,
-               .sjw_max = 16,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
-       .data_bittiming_const = {
-               .name = "pcan_usb_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_fd_const,
+       .data_bittiming_const = &pcan_usb_fd_data_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
 };
 
 /* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+       .name = "pcan_usb_pro_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 64,
+       .tseg2_min = 1,
+       .tseg2_max = 16,
+       .sjw_max = 16,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+       .name = "pcan_usb_pro_fd",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro_fd = {
        .name = "PCAN-USB Pro FD",
        .device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
        .clock = {
                .freq = PCAN_UFD_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_pro_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 64,
-               .tseg2_min = 1,
-               .tseg2_max = 16,
-               .sjw_max = 16,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
-       .data_bittiming_const = {
-               .name = "pcan_usb_pro_fd",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_pro_fd_const,
+       .data_bittiming_const = &pcan_usb_pro_fd_data_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
index dec5171..a5ad2e6 100644
@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
 /*
  * describe the PCAN-USB Pro adapter
  */
+static const struct can_bittiming_const pcan_usb_pro_const = {
+       .name = "pcan_usb_pro",
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro = {
        .name = "PCAN-USB Pro",
        .device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
        .clock = {
                .freq = PCAN_USBPRO_CRYSTAL_HZ,
        },
-       .bittiming_const = {
-               .name = "pcan_usb_pro",
-               .tseg1_min = 1,
-               .tseg1_max = 16,
-               .tseg2_min = 1,
-               .tseg2_max = 8,
-               .sjw_max = 4,
-               .brp_min = 1,
-               .brp_max = 1024,
-               .brp_inc = 1,
-       },
+       .bittiming_const = &pcan_usb_pro_const,
 
        /* size of device private data */
        .sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
index 0ce868d..674f367 100644
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
        skb->dev       = dev;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       if (!(skb->tstamp.tv64))
-               __net_timestamp(skb);
-
        netif_rx_ni(skb);
 }
 
index dc79ed8..32e7775 100644
@@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
        .flags2                 = FLAG2_DISABLE_ASPM_L1
                                  | FLAG2_DISABLE_ASPM_L0S,
        .pba                    = 20,
-       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
+       .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
        .phy_ops                = &e82_phy_ops_m88,
index 9d81c03..e2498db 100644
@@ -1563,7 +1563,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
            ((adapter->hw.mac.type >= e1000_pch2lan) &&
             (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
-               adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+               adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
                hw->mac.ops.blink_led = NULL;
        }
@@ -5681,7 +5681,7 @@ const struct e1000_info e1000_ich8_info = {
                                  | FLAG_HAS_FLASH
                                  | FLAG_APME_IN_WUC,
        .pba                    = 8,
-       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
+       .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5754,7 +5754,7 @@ const struct e1000_info e1000_pch2_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5774,7 +5774,7 @@ const struct e1000_info e1000_pch_lpt_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5794,7 +5794,7 @@ const struct e1000_info e1000_pch_spt_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
index c509a5c..68913d1 100644
@@ -3807,7 +3807,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
        /* reset Packet Buffer Allocation to default */
        ew32(PBA, pba);
 
-       if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+       if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
@@ -4196,9 +4196,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
-       adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+       adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
        adapter->rx_ps_bsize0 = 128;
-       adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
        adapter->tx_ring_count = E1000_DEFAULT_TXD;
        adapter->rx_ring_count = E1000_DEFAULT_RXD;
@@ -5781,17 +5781,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+       int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+       if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
            !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
                e_err("Jumbo Frames not supported.\n");
                return -EINVAL;
        }
 
        /* Supported frame sizes */
-       if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+       if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
            (max_frame > adapter->max_hw_frame_size)) {
                e_err("Unsupported MTU setting\n");
                return -EINVAL;
@@ -5831,10 +5831,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = 4096;
 
        /* adjust allocation if LPE protects us, and we aren't using SBP */
-       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-           (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
-               adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-                   + ETH_FCS_LEN;
+       if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
+               adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
        if (netif_running(netdev))
                e1000e_up(adapter);
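The e1000e hunks above replace open-coded ETH_FRAME_LEN + VLAN_HLEN sums with the VLAN_ETH_* macros and grow the jumbo limits by 4 bytes so an 802.1Q tag always fits. A minimal userspace sketch of the arithmetic, with the macro values mirrored locally for illustration (they follow the uapi if_ether.h / if_vlan.h definitions, but treat the literals here as assumptions rather than the driver's headers):

    #include <stdio.h>

    /* Mirrored locally; the kernel defines these in if_ether.h / if_vlan.h. */
    #define ETH_HLEN            14      /* dst MAC + src MAC + ethertype */
    #define ETH_FRAME_LEN       1514    /* ETH_HLEN + 1500-byte payload */
    #define ETH_FCS_LEN         4       /* trailing CRC */
    #define VLAN_HLEN           4       /* 802.1Q tag */
    #define VLAN_ETH_HLEN       (ETH_HLEN + VLAN_HLEN)
    #define VLAN_ETH_FRAME_LEN  (ETH_FRAME_LEN + VLAN_HLEN)

    int main(void)
    {
            /* old vs. new non-jumbo hardware limit */
            printf("ETH_FRAME_LEN + FCS      = %d\n", ETH_FRAME_LEN + ETH_FCS_LEN);      /* 1518 */
            printf("VLAN_ETH_FRAME_LEN + FCS = %d\n", VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); /* 1522 */
            /* jumbo limit for a 9000-byte MTU: 9018 becomes 9022 with the tag */
            printf("9000 + VLAN_ETH_HLEN + FCS = %d\n", 9000 + VLAN_ETH_HLEN + ETH_FCS_LEN);
            return 0;
    }

The same +4 explains the literal 9018 to 9022 change: 9018 is 9000 + ETH_HLEN + ETH_FCS_LEN, and 9022 adds VLAN_HLEN on top.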
index 7681237..ead5432 100644 (file)
@@ -1524,12 +1524,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
                case QCA6174_HW_1_0_CHIP_ID_REV:
                case QCA6174_HW_1_1_CHIP_ID_REV:
+               case QCA6174_HW_2_1_CHIP_ID_REV:
+               case QCA6174_HW_2_2_CHIP_ID_REV:
                        return 3;
                case QCA6174_HW_1_3_CHIP_ID_REV:
                        return 2;
-               case QCA6174_HW_2_1_CHIP_ID_REV:
-               case QCA6174_HW_2_2_CHIP_ID_REV:
-                       return 6;
                case QCA6174_HW_3_0_CHIP_ID_REV:
                case QCA6174_HW_3_1_CHIP_ID_REV:
                case QCA6174_HW_3_2_CHIP_ID_REV:
index e82a0d4..5dbc617 100644 (file)
@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 }
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
-#define OP_BT_PRIORITY_DETECTED    BIT(3)
-#define OP_BT_SCAN                 BIT(4)
-#define OP_TSF_RESET               BIT(6)
+#define OP_BT_PRIORITY_DETECTED    3
+#define OP_BT_SCAN                 4
+#define OP_TSF_RESET               6
 
 enum htc_op_flags {
        HTC_FWFLAG_NO_RMW,
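The OP_* values above change from BIT(n) masks to plain bit numbers. The likely intent (an assumption on my part, the hunk does not show the call sites) is that these flags are consumed by set_bit()/test_bit()-style helpers, which take a bit index, so passing BIT(3) would actually address bit 8. A small userspace sketch of the difference:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* test_bit()-style helper: takes a bit *number*, not a mask */
    static int test_bit_nr(unsigned long word, unsigned int nr)
    {
            return (word >> nr) & 1UL;
    }

    int main(void)
    {
            unsigned long flags = 0;

            flags |= BIT(3);                        /* set bit 3 via a mask */

            printf("test_bit_nr(flags, 3)      = %d\n", test_bit_nr(flags, 3));      /* 1 */
            printf("test_bit_nr(flags, BIT(3)) = %d\n", test_bit_nr(flags, BIT(3))); /* checks bit 8: 0 */
            return 0;
    }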
index b0badef..d5f2fbf 100644 (file)
@@ -216,11 +216,13 @@ static bool ath_prepare_reset(struct ath_softc *sc)
        ath_stop_ani(sc);
        ath9k_hw_disable_interrupts(ah);
 
-       if (!ath_drain_all_txq(sc))
-               ret = false;
-
-       if (!ath_stoprecv(sc))
-               ret = false;
+       if (AR_SREV_9300_20_OR_LATER(ah)) {
+               ret &= ath_stoprecv(sc);
+               ret &= ath_drain_all_txq(sc);
+       } else {
+               ret &= ath_drain_all_txq(sc);
+               ret &= ath_stoprecv(sc);
+       }
 
        return ret;
 }
index 8e604a3..ef20be0 100644 (file)
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
-               /* The byte order is little endian 16 bit, meaning 214365 */
-               data->hw_addr[0] = hw_addr[1];
-               data->hw_addr[1] = hw_addr[0];
-               data->hw_addr[2] = hw_addr[3];
-               data->hw_addr[3] = hw_addr[2];
-               data->hw_addr[4] = hw_addr[5];
-               data->hw_addr[5] = hw_addr[4];
+               /*
+                * Store the MAC address from MAO section.
+                * No byte swapping is required in MAO section
+                */
+               memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
index 9ac04c1..8c17b94 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1356,6 +1356,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
        PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
        PRINT_MVM_REF(IWL_MVM_REF_SCAN);
        PRINT_MVM_REF(IWL_MVM_REF_ROC);
+       PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
        PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
        PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
        PRINT_MVM_REF(IWL_MVM_REF_USER);
index dda9f7b..60c138a 100644 (file)
@@ -1404,7 +1404,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
         * The work item could be running or queued if the
         * ROC time event stops just as we get here.
         */
-       cancel_work_sync(&mvm->roc_done_wk);
+       flush_work(&mvm->roc_done_wk);
 
        iwl_trans_stop_device(mvm->trans);
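Replacing cancel_work_sync() with flush_work() changes the semantics: a pending work item is allowed to run to completion rather than being dropped, which matters when the handler performs cleanup that must not be skipped. A minimal module-style sketch of the pattern (illustrative only; the foo_* names are made up):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void foo_done_wk(struct work_struct *work)
    {
            pr_info("foo: deferred cleanup ran\n");
    }

    static DECLARE_WORK(foo_work, foo_done_wk);

    static int __init foo_init(void)
    {
            schedule_work(&foo_work);
            /* Wait for the handler to finish instead of cancelling it,
             * so its cleanup is guaranteed to have run before we go on. */
            flush_work(&foo_work);
            return 0;
    }

    static void __exit foo_exit(void)
    {
            /* On teardown a plain cancel is fine if the work is now a no-op. */
            cancel_work_sync(&foo_work);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");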
 
index cf70f68..6af21da 100644 (file)
@@ -275,6 +275,7 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_UCODE_DOWN,
        IWL_MVM_REF_SCAN,
        IWL_MVM_REF_ROC,
+       IWL_MVM_REF_ROC_AUX,
        IWL_MVM_REF_P2P_CLIENT,
        IWL_MVM_REF_AP_IBSS,
        IWL_MVM_REF_USER,
index fd7b0d3..a7448cf 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
                queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+       }
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
                queues |= BIT(mvm->aux_queue);
-
-       iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+       }
 
        synchronize_net();
 
@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
+               iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        } else {
                IWL_DEBUG_TE(mvm,
index ef32e17..281451c 100644 (file)
@@ -225,7 +225,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 
        if (info->band == IEEE80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-               rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
index dc17909..699a480 100644 (file)
@@ -457,10 +457,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
-               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+                       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+                       mdelay(1);
+                       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               }
                mdelay(5);
        }
 
@@ -555,6 +561,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
+       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
+       msleep(1);
+
        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -562,8 +572,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
                do {
                        ret = iwl_pcie_set_hw_ready(trans);
-                       if (ret >= 0)
-                               return 0;
+                       if (ret >= 0) {
+                               ret = 0;
+                               goto out;
+                       }
 
                        usleep_range(200, 1000);
                        t += 200;
@@ -573,6 +585,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
        IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
        return ret;
 }
 
@@ -2515,6 +2531,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               ret = iwl_pcie_prepare_card_hw(trans);
+               if (ret) {
+                       IWL_WARN(trans, "Exit HW not ready\n");
+                       goto out_pci_disable_msi;
+               }
+
                /*
                 * in-order to recognize C step driver should read chip version
                 * id located at the AUX bus MISC address space.
index b6cc9ff..1c6788a 100644 (file)
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                (struct rsi_91x_sdiodev *)adapter->rsi_dev;
        u32 len;
        u32 num_blocks;
+       const u8 *fw;
        const struct firmware *fw_entry = NULL;
        u32 block_size = dev->tx_blk_size;
        int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-       status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
index 1106ce7..30c2cf7 100644 (file)
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
        fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
        status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
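Both rsi hunks above duplicate the firmware image with kmemdup() before handing it to the block-copy routine, presumably because the read-only buffer returned by request_firmware() is not suitable to be written or used directly for the transfer. A hedged sketch of the general pattern (device and firmware names are placeholders):

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/slab.h>

    static int load_fw_copy(struct device *dev)
    {
            const struct firmware *fw_entry;
            u8 *buf;
            int ret;

            ret = request_firmware(&fw_entry, "example_fw.bin", dev);
            if (ret)
                    return ret;

            /* Duplicate into kmalloc'd memory the transfer code may own/modify. */
            buf = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
            if (!buf) {
                    release_firmware(fw_entry);
                    return -ENOMEM;
            }

            /* ... hand buf / fw_entry->size to the DMA-capable copy routine ... */

            kfree(buf);
            release_firmware(fw_entry);
            return 0;
    }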
index 3b3a88b..585d088 100644 (file)
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+       struct rtl_tcb_desc tcb_desc;
 
-       if (skb)
-               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+       if (skb) {
+               memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+       }
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
index 86ce5b1..e5d8108 100644 (file)
@@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl88ee_clear_interrupt(hw);/*clear it here first*/
        rtl_write_dword(rtlpriv, REG_HIMR,
                        rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE,
index da0a612..cbf2ca7 100644 (file)
@@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl92ee_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
index 67bb47d..a4b7eac 100644 (file)
@@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-}
-
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       rtl8723e_clear_interrupt(hw);/*clear it here first*/
        rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
        rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
        rtlpci->irq_enabled = false;
index b681af3..b941726 100644 (file)
@@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl8723be_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
index 1017f02..7bf88d9 100644 (file)
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
                   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
index 8704eee..57966e3 100644 (file)
@@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       /*printk("clear interrupt first:\n");
-       printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl8821ae_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
index 76a4cad..c44f8cf 100644 (file)
@@ -87,11 +87,6 @@ static void st21nfcb_nci_i2c_disable(void *phy_id)
        gpio_set_value(phy->gpio_reset, 1);
 }
 
-static void st21nfcb_nci_remove_header(struct sk_buff *skb)
-{
-       skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
-}
-
 /*
  * Writing a frame must not return the number of written bytes.
  * It must return either zero for success, or <0 for error.
@@ -121,8 +116,6 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
                        r = 0;
        }
 
-       st21nfcb_nci_remove_header(skb);
-
        return r;
 }
 
@@ -366,9 +359,6 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
 
        ndlc_remove(phy->ndlc);
 
-       if (phy->powered)
-               st21nfcb_nci_i2c_disable(phy);
-
        return 0;
 }
 
index ca9871a..c7dc282 100644 (file)
@@ -131,11 +131,8 @@ EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
 
 void st21nfcb_nci_remove(struct nci_dev *ndev)
 {
-       struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
-
        nci_unregister_device(ndev);
        nci_free_device(ndev);
-       kfree(info);
 }
 EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
 
index 6906a3f..8bfda6a 100644 (file)
@@ -712,7 +712,7 @@ int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
        }
 
        /* add the range to the list */
-       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       range = kzalloc(sizeof(*range), GFP_ATOMIC);
        if (!range) {
                err = -ENOMEM;
                goto end_register;
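Switching this kzalloc() from GFP_KERNEL to GFP_ATOMIC makes the allocation legal in a context that may not sleep, for example with a spinlock held, at the cost of a higher failure likelihood. Whether that is the exact caller constraint here is an assumption; the general rule is sketched below:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(list_lock);

    static int add_entry(size_t size)
    {
            void *entry;

            spin_lock(&list_lock);
            /* GFP_KERNEL may sleep, and sleeping with a spinlock held is a bug;
             * GFP_ATOMIC never sleeps but can fail more easily. */
            entry = kzalloc(size, GFP_ATOMIC);
            if (!entry) {
                    spin_unlock(&list_lock);
                    return -ENOMEM;
            }
            /* ... link entry into the locked list ... */
            spin_unlock(&list_lock);
            return 0;
    }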
index f065026..5ed9724 100644 (file)
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(of_n_size_cells);
 #ifdef CONFIG_NUMA
 int __weak of_node_to_nid(struct device_node *np)
 {
-       return numa_node_id();
+       return NUMA_NO_NODE;
 }
 #endif
 
index 73de4ef..944f500 100644 (file)
@@ -2,7 +2,7 @@
 # PCI configuration
 #
 config PCI_BUS_ADDR_T_64BIT
-       def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+       def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
        depends on PCI
 
 config PCI_MSI
index c6fc95b..ab54f28 100644 (file)
 static const u32 phy_berlin_pll_dividers[] = {
        /* Berlin 2 */
        CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
-       /* Berlin 2CD */
-       CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+       /* Berlin 2CD/Q */
+       CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
 };
 
 struct phy_berlin_usb_priv {
index bc42d6a..6285f46 100644 (file)
 #define PMBR1                          0x0D
 #define GPIO_USB_4PIN_ULPI_2430C       (3 << 0)
 
+/*
+ * If VBUS is valid or ID is ground, then we know a
+ * cable is present and we need to be runtime-enabled
+ */
+static inline bool cable_present(enum omap_musb_vbus_id_status stat)
+{
+       return stat == OMAP_MUSB_VBUS_VALID ||
+               stat == OMAP_MUSB_ID_GROUND;
+}
+
 struct twl4030_usb {
        struct usb_phy          phy;
        struct device           *dev;
@@ -536,8 +546,10 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 
        mutex_lock(&twl->lock);
        if (status >= 0 && status != twl->linkstat) {
+               status_changed =
+                       cable_present(twl->linkstat) !=
+                       cable_present(status);
                twl->linkstat = status;
-               status_changed = true;
        }
        mutex_unlock(&twl->lock);
 
@@ -553,15 +565,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
-               if ((status == OMAP_MUSB_VBUS_VALID) ||
-                   (status == OMAP_MUSB_ID_GROUND)) {
-                       if (pm_runtime_suspended(twl->dev))
-                               pm_runtime_get_sync(twl->dev);
+               if (cable_present(status)) {
+                       pm_runtime_get_sync(twl->dev);
                } else {
-                       if (pm_runtime_active(twl->dev)) {
-                               pm_runtime_mark_last_busy(twl->dev);
-                               pm_runtime_put_autosuspend(twl->dev);
-                       }
+                       pm_runtime_mark_last_busy(twl->dev);
+                       pm_runtime_put_autosuspend(twl->dev);
                }
                omap_musb_mailbox(status);
        }
@@ -711,7 +719,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_sync(&pdev->dev);
 
        /* Our job is to use irqs and status from the power module
         * to keep the transceiver disabled when nothing's connected.
@@ -767,6 +774,9 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        /* disable complete OTG block */
        twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+
+       if (cable_present(twl->linkstat))
+               pm_runtime_put_noidle(twl->dev);
        pm_runtime_mark_last_busy(twl->dev);
        pm_runtime_put(twl->dev);
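The twl4030 hunks key the runtime-PM get/put on cable_present() transitions, so exactly one reference is held for as long as a cable is attached. A small sketch of that edge-triggered pairing (names are illustrative, not the driver's):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Take/drop exactly one runtime-PM reference on state *transitions*. */
    static void update_cable_state(struct device *dev, bool was_present,
                                   bool now_present)
    {
            if (was_present == now_present)
                    return;

            if (now_present) {
                    pm_runtime_get_sync(dev);        /* keep the device active */
            } else {
                    pm_runtime_mark_last_busy(dev);
                    pm_runtime_put_autosuspend(dev); /* allow autosuspend again */
            }
    }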
 
index 5ac59fb..d3a3be7 100644 (file)
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
                             unsigned num_configs)
 {
        struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
-       const struct imx1_pinctrl_soc_info *info = ipctl->info;
        int i;
 
        for (i = 0; i != num_configs; ++i) {
                imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
 
                dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
-                       info->pins[pin_id].name);
+                       pin_desc_get(pctldev, pin_id)->name);
        }
 
        return 0;
index 03aa58c..1eb084c 100644 (file)
@@ -370,11 +370,11 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
        MPP_MODE(64,
           MPP_FUNCTION(0x0, "gpio", NULL),
           MPP_FUNCTION(0x1, "spi0", "miso"),
-          MPP_FUNCTION(0x2, "spi0-1", "cs1")),
+          MPP_FUNCTION(0x2, "spi0", "cs1")),
        MPP_MODE(65,
           MPP_FUNCTION(0x0, "gpio", NULL),
           MPP_FUNCTION(0x1, "spi0", "mosi"),
-          MPP_FUNCTION(0x2, "spi0-1", "cs2")),
+          MPP_FUNCTION(0x2, "spi0", "cs2")),
 };
 
 static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
index ca1e757..203291b 100644 (file)
@@ -92,19 +92,17 @@ static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
                 MPP_FUNCTION(0x5, "nand", "io1")),
        MPP_MODE(8,
                 MPP_FUNCTION(0x0, "gpio", NULL),
-                MPP_FUNCTION(0x1, "dev ", "bootcs"),
+                MPP_FUNCTION(0x1, "dev", "bootcs"),
                 MPP_FUNCTION(0x2, "spi0", "cs0"),
                 MPP_FUNCTION(0x3, "spi1", "cs0"),
                 MPP_FUNCTION(0x5, "nand", "ce")),
        MPP_MODE(9,
                 MPP_FUNCTION(0x0, "gpio", NULL),
-                MPP_FUNCTION(0x1, "nf", "wen"),
                 MPP_FUNCTION(0x2, "spi0", "sck"),
                 MPP_FUNCTION(0x3, "spi1", "sck"),
                 MPP_FUNCTION(0x5, "nand", "we")),
        MPP_MODE(10,
                 MPP_FUNCTION(0x0, "gpio", NULL),
-                MPP_FUNCTION(0x1, "nf", "ren"),
                 MPP_FUNCTION(0x2, "dram", "vttctrl"),
                 MPP_FUNCTION(0x3, "led", "c1"),
                 MPP_FUNCTION(0x5, "nand", "re"),
index 83bbcc7..ff411a5 100644 (file)
@@ -94,37 +94,39 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxd0",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(4, "spi0",  "cs1",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie3", "clkreq",     V_88F6810_PLUS)),
        MPP_MODE(13,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxd1",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "pcie0", "clkreq",     V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(4, "spi0",  "cs2",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie2", "clkreq",     V_88F6810_PLUS)),
        MPP_MODE(14,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxd2",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "m",     "vtt_ctrl",   V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "spi0",  "cs3",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie3", "clkreq",     V_88F6810_PLUS)),
        MPP_MODE(15,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxd3",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ge",    "mdc slave",  V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+                MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS)),
        MPP_MODE(16,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxctl",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ge",    "mdio slave", V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "spi0",  "miso",       V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie1", "clkreq",     V_88F6820_PLUS)),
        MPP_MODE(17,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "rxclk",      V_88F6810_PLUS),
@@ -137,13 +139,12 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(1, "ge0",   "rxerr",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ptp",   "trig_gen",   V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+                MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS)),
        MPP_MODE(19,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "col",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ptp",   "event_req",  V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie0", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ge0",   "txerr",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "sata1", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "ua0",   "cts",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(6, "ua1",   "rxd",        V_88F6810_PLUS)),
@@ -151,7 +152,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ge0",   "txclk",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(4, "sata0", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "ua0",   "rts",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(6, "ua1",   "txd",        V_88F6810_PLUS)),
@@ -277,35 +277,27 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(1, "pcie0", "clkreq",     V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(4, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "dev",   "clkout",     V_88F6810_PLUS)),
        MPP_MODE(44,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
-                MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828),
-                MPP_VAR_FUNCTION(5, "pcie0", "rstout",     V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828)),
        MPP_MODE(45,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ref",   "clk_out0",   V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
-                MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS)),
        MPP_MODE(46,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
-                MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS)),
        MPP_MODE(47,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
-                MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "sata3", "prsnt",      V_88F6828)),
        MPP_MODE(48,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
@@ -313,18 +305,19 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "tdm2c", "pclk",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "audio", "mclk",       V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie0", "clkreq",     V_88F6810_PLUS)),
        MPP_MODE(49,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "sata2", "prsnt",      V_88F6828),
                 MPP_VAR_FUNCTION(2, "sata3", "prsnt",      V_88F6828),
                 MPP_VAR_FUNCTION(3, "tdm2c", "fsync",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "audio", "lrclk",      V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS)),
+                MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(6, "pcie1", "clkreq",     V_88F6820_PLUS)),
        MPP_MODE(50,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(3, "tdm2c", "drx",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "audio", "extclk",     V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "sd0",   "cmd",        V_88F6810_PLUS)),
@@ -336,7 +329,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
        MPP_MODE(52,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(3, "tdm2c", "intn",       V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(4, "audio", "sdi",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "sd0",   "d6",         V_88F6810_PLUS)),
@@ -352,7 +344,7 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(4, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "ge0",   "txerr",      V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "sd0",   "d3",         V_88F6810_PLUS)),
        MPP_MODE(55,
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
@@ -382,7 +374,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(2, "i2c1",  "sda",        V_88F6810_PLUS),
-                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
                 MPP_VAR_FUNCTION(4, "spi1",  "cs0",        V_88F6810_PLUS),
                 MPP_VAR_FUNCTION(5, "sd0",   "d2",         V_88F6810_PLUS)),
 };
@@ -411,7 +402,7 @@ static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
 
 static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
        MPP_GPIO_RANGE(0,   0,  0, 32),
-       MPP_GPIO_RANGE(1,  32, 32, 27),
+       MPP_GPIO_RANGE(1,  32, 32, 28),
 };
 
 static int armada_38x_pinctrl_probe(struct platform_device *pdev)
index 4249162..2dcf9b4 100644 (file)
@@ -380,7 +380,7 @@ static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
 
 static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
        MPP_GPIO_RANGE(0,   0,  0, 32),
-       MPP_GPIO_RANGE(1,  32, 32, 27),
+       MPP_GPIO_RANGE(1,  32, 32, 28),
 };
 
 static int armada_39x_pinctrl_probe(struct platform_device *pdev)
index 578db9f..d7cdb14 100644 (file)
  * available: mv78230, mv78260 and mv78460. From a pin muxing
  * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
  * both have 67 MPP pins (more GPIOs and address lines for the memory
- * bus mainly). The only difference between the mv78260 and the
- * mv78460 in terms of pin muxing is the addition of two functions on
- * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
- * cores, mv78460 has four cores).
+ * bus mainly).
  */
 
 #include <linux/err.h>
@@ -172,20 +169,17 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
        MPP_MODE(24,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sata1", "prsnt",    V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re",   V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "tdm", "rst",        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x4, "lcd", "hsync",      V_MV78230_PLUS)),
        MPP_MODE(25,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sata0", "prsnt",    V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we",   V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "tdm", "pclk",       V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x4, "lcd", "vsync",      V_MV78230_PLUS)),
        MPP_MODE(26,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "tdm", "fsync",      V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
+                MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS)),
        MPP_MODE(27,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "ptp", "trig",       V_MV78230_PLUS),
@@ -200,8 +194,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "ptp", "clk",        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "tdm", "int0",       V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+                MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS)),
        MPP_MODE(30,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sd0", "clk",        V_MV78230_PLUS),
@@ -209,13 +202,11 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
        MPP_MODE(31,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sd0", "cmd",        V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+                MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS)),
        MPP_MODE(32,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sd0", "d0",         V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
+                MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS)),
        MPP_MODE(33,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "sd0", "d1",         V_MV78230_PLUS),
@@ -247,7 +238,6 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "spi", "cs1",        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x2, "uart2", "cts",      V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd",    V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync",  V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0",   V_MV78230_PLUS)),
        MPP_MODE(41,
@@ -262,15 +252,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x1, "uart2", "rxd",      V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x2, "uart0", "cts",      V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "tdm", "int7",       V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
+                MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS)),
        MPP_MODE(43,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "uart2", "txd",      V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x2, "uart0", "rts",      V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x3, "spi", "cs3",        V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd",  V_MV78460)),
+                MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS)),
        MPP_MODE(44,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x1, "uart2", "cts",      V_MV78230_PLUS),
@@ -299,7 +287,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3",   V_MV78230_PLUS)),
        MPP_MODE(48,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
-                MPP_VAR_FUNCTION(0x1, "tclk", NULL,        V_MV78230_PLUS),
+                MPP_VAR_FUNCTION(0x1, "dev", "clkout",     V_MV78230_PLUS),
                 MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
        MPP_MODE(49,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
@@ -321,16 +309,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
                 MPP_VAR_FUNCTION(0x1, "dev", "ad19",       V_MV78260_PLUS)),
        MPP_MODE(55,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd",    V_MV78260_PLUS)),
+                MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS)),
        MPP_MODE(56,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd",    V_MV78260_PLUS)),
+                MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS)),
        MPP_MODE(57,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS),
-                MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd",  V_MV78460)),
+                MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS)),
        MPP_MODE(58,
                 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
                 MPP_VAR_FUNCTION(0x1, "dev", "ad23",       V_MV78260_PLUS)),
index 22280bd..8c51a3c 100644 (file)
@@ -714,12 +714,13 @@ static const char * const gpio0_groups[] = {"gpio0_0_grp",
                .mux_val = mval,                        \
        }
 
-#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, mux, mask, shift) \
+#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, offset, mask, shift)\
        [ZYNQ_PMUX_##fname] = {                         \
                .name = #fname,                         \
                .groups = fname##_groups,               \
                .ngroups = ARRAY_SIZE(fname##_groups),  \
                .mux_val = mval,                        \
+               .mux = offset,                          \
                .mux_mask = mask,                       \
                .mux_shift = shift,                     \
        }
@@ -744,15 +745,15 @@ static const struct zynq_pinmux_function zynq_pmux_functions[] = {
        DEFINE_ZYNQ_PINMUX_FUNCTION(spi1, 0x50),
        DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0, 0x40),
        DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0_pc, 0xc),
-       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 130, ZYNQ_SDIO_WP_MASK,
+       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 0x130, ZYNQ_SDIO_WP_MASK,
                                        ZYNQ_SDIO_WP_SHIFT),
-       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 130, ZYNQ_SDIO_CD_MASK,
+       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 0x130, ZYNQ_SDIO_CD_MASK,
                                        ZYNQ_SDIO_CD_SHIFT),
        DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1, 0x40),
        DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1_pc, 0xc),
-       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 134, ZYNQ_SDIO_WP_MASK,
+       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 0x134, ZYNQ_SDIO_WP_MASK,
                                        ZYNQ_SDIO_WP_SHIFT),
-       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 134, ZYNQ_SDIO_CD_MASK,
+       DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 0x134, ZYNQ_SDIO_CD_MASK,
                                        ZYNQ_SDIO_CD_SHIFT),
        DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor, 4),
        DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_cs1, 8),
index d688d80..2c1d5f5 100644 (file)
@@ -305,7 +305,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
 };
 
 static struct calling_interface_buffer *buffer;
-static struct page *bufferpage;
 static DEFINE_MUTEX(buffer_mutex);
 
 static int hwswitch_state;
@@ -1896,12 +1895,11 @@ static int __init dell_init(void)
         * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
         * is passed to SMI handler.
         */
-       bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
-       if (!bufferpage) {
+       buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+       if (!buffer) {
                ret = -ENOMEM;
                goto fail_buffer;
        }
-       buffer = page_address(bufferpage);
 
        ret = dell_setup_rfkill();
 
@@ -1965,7 +1963,7 @@ fail_backlight:
        cancel_delayed_work_sync(&dell_rfkill_work);
        dell_cleanup_rfkill();
 fail_rfkill:
-       free_page((unsigned long)bufferpage);
+       free_page((unsigned long)buffer);
 fail_buffer:
        platform_device_del(platform_device);
 fail_platform_device2:
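The dell-laptop change folds alloc_page() plus page_address() into a single __get_free_page() call, which returns the page's kernel virtual address directly and is released with free_page() on that address. A short sketch of the two equivalent shapes for a below-4G buffer:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void *get_dma32_page(void)
    {
            /* One call: returns the page's kernel virtual address (0 on failure),
             * freed later with free_page(addr).  The two-step equivalent is
             * alloc_page(GFP_KERNEL | GFP_DMA32) + page_address(page),
             * freed with __free_page(page) instead. */
            unsigned long addr = __get_free_page(GFP_KERNEL | GFP_DMA32);

            return (void *)addr;
    }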
index b496db8..cb7cd8d 100644 (file)
@@ -464,8 +464,9 @@ static const struct ideapad_rfk_data ideapad_rfk_data[] = {
 static int ideapad_rfk_set(void *data, bool blocked)
 {
        struct ideapad_rfk_priv *priv = data;
+       int opcode = ideapad_rfk_data[priv->dev].opcode;
 
-       return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
+       return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -836,6 +837,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
                },
        },
+       {
+               .ident = "Lenovo G50-30",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
+               },
+       },
        {
                .ident = "Lenovo Yoga 2 11 / 13 / Pro",
                .matches = {
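The ideapad hunk adds one more entry to a dmi_system_id quirk table. For context, such tables are matched against the running machine's DMI strings with dmi_check_system(); a minimal sketch (the strings below are placeholders, not an additional quirk):

    #include <linux/dmi.h>

    static const struct dmi_system_id example_quirks[] = {
            {
                    .ident = "Example Vendor Model X",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
                            DMI_MATCH(DMI_PRODUCT_VERSION, "Model X"),
                    },
            },
            { }     /* terminating entry */
    };

    static int example_needs_quirk(void)
    {
            /* Nonzero if any table entry matches this machine. */
            return dmi_check_system(example_quirks);
    }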
index 515f338..49c1720 100644 (file)
@@ -7,7 +7,6 @@
  *     Bjorn Helgaas <bjorn.helgaas@hp.com>
  */
 
-#include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
        {"", 0}
 };
 
-#ifdef CONFIG_ACPI
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
-       u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
-       return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
-}
-#else
-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
-{
-       struct resource *res;
-
-       res = io ? request_region(start, length, desc) :
-               request_mem_region(start, length, desc);
-       if (res) {
-               res->flags &= ~IORESOURCE_BUSY;
-               return true;
-       }
-       return false;
-}
-#endif
-
 static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
 {
        char *regionid;
        const char *pnpid = dev_name(&dev->dev);
        resource_size_t start = r->start, end = r->end;
-       bool reserved;
+       struct resource *res;
 
        regionid = kmalloc(16, GFP_KERNEL);
        if (!regionid)
                return;
 
        snprintf(regionid, 16, "pnp %s", pnpid);
-       reserved = __reserve_range(start, end - start + 1, !!port, regionid);
-       if (!reserved)
+       if (port)
+               res = request_region(start, end - start + 1, regionid);
+       else
+               res = request_mem_region(start, end - start + 1, regionid);
+       if (res)
+               res->flags &= ~IORESOURCE_BUSY;
+       else
                kfree(regionid);
 
        /*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
         * have double reservations.
         */
        dev_info(&dev->dev, "%pR %s reserved\n", r,
-                reserved ? "has been" : "could not be");
+                res ? "has been" : "could not be");
 }
 
 static void reserve_resources_of_dev(struct pnp_dev *dev)
index ff82811..8de1351 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/mfd/samsung/s2mps14.h>
 #include <linux/mfd/samsung/s2mpu02.h>
 
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX            S2MPS13_REGULATOR_MAX
 struct s2mps11_info {
        unsigned int rdev_num;
        int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
         * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
         * the suspend mode was enabled.
         */
-       unsigned long long s2mps14_suspend_state:50;
+       DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
 
        /* Array of size rdev_num with GPIO-s for external sleep control */
        int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
        switch (s2mps11->dev_type) {
        case S2MPS13X:
        case S2MPS14X:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPS14_ENABLE_SUSPEND;
                else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
                        val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
                        val = rdev->desc->enable_mask;
                break;
        case S2MPU02:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPU02_ENABLE_SUSPEND;
                else
                        val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
        if (ret < 0)
                return ret;
 
-       s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+       set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
        /*
         * Don't enable suspend mode if regulator is already disabled because
         * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
        case S2MPS11X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
                regulators = s2mps11_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS13X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
                regulators = s2mps13_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS14X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
                regulators = s2mps14_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPU02:
                s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
                regulators = s2mpu02_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        default:
                dev_err(&pdev->dev, "Invalid device type: %u\n",
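The s2mps11 hunks replace a hand-sized 50-bit bitfield with a kernel bitmap sized by S2MPS_REGULATOR_MAX, manipulated through set_bit()/test_bit() and guarded by BUILD_BUG_ON() so the bitmap can never be smaller than the regulator count. A stripped-down sketch of that idiom, with arbitrary example sizes:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/bug.h>

    #define EXAMPLE_NUM_REGULATORS  50      /* arbitrary example count */
    #define EXAMPLE_REGULATOR_MAX   64      /* bitmap capacity */

    struct example_info {
            unsigned int rdev_num;
            DECLARE_BITMAP(suspend_state, EXAMPLE_REGULATOR_MAX);
    };

    static void mark_suspended(struct example_info *info, unsigned int id)
    {
            /* Capacity check resolves at compile time. */
            BUILD_BUG_ON(EXAMPLE_REGULATOR_MAX < EXAMPLE_NUM_REGULATORS);
            set_bit(id, info->suspend_state);
    }

    static bool is_suspended(struct example_info *info, unsigned int id)
    {
            return test_bit(id, info->suspend_state);
    }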
index 0479e80..d87a85c 100644 (file)
@@ -322,6 +322,13 @@ static int snvs_rtc_suspend(struct device *dev)
        if (device_may_wakeup(dev))
                enable_irq_wake(data->irq);
 
+       return 0;
+}
+
+static int snvs_rtc_suspend_noirq(struct device *dev)
+{
+       struct snvs_rtc_data *data = dev_get_drvdata(dev);
+
        if (data->clk)
                clk_disable_unprepare(data->clk);
 
@@ -331,23 +338,28 @@ static int snvs_rtc_suspend(struct device *dev)
 static int snvs_rtc_resume(struct device *dev)
 {
        struct snvs_rtc_data *data = dev_get_drvdata(dev);
-       int ret;
 
        if (device_may_wakeup(dev))
-               disable_irq_wake(data->irq);
+               return disable_irq_wake(data->irq);
 
-       if (data->clk) {
-               ret = clk_prepare_enable(data->clk);
-               if (ret)
-                       return ret;
-       }
+       return 0;
+}
+
+static int snvs_rtc_resume_noirq(struct device *dev)
+{
+       struct snvs_rtc_data *data = dev_get_drvdata(dev);
+
+       if (data->clk)
+               return clk_prepare_enable(data->clk);
 
        return 0;
 }
 
 static const struct dev_pm_ops snvs_rtc_pm_ops = {
-       .suspend_noirq = snvs_rtc_suspend,
-       .resume_noirq = snvs_rtc_resume,
+       .suspend = snvs_rtc_suspend,
+       .suspend_noirq = snvs_rtc_suspend_noirq,
+       .resume = snvs_rtc_resume,
+       .resume_noirq = snvs_rtc_resume_noirq,
 };
 
 #define SNVS_RTC_PM_OPS        (&snvs_rtc_pm_ops)
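The snvs-rtc change splits the clock handling out of the wakeup-IRQ setup so the clock is only gated in the noirq phase, after interrupt handlers can no longer run. A hedged sketch of that dev_pm_ops shape, with placeholder driver names rather than the real ones:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/pm.h>

    struct example_rtc_data {
            struct clk *clk;
            int irq;
    };

    static int example_suspend(struct device *dev)
    {
            struct example_rtc_data *data = dev_get_drvdata(dev);

            /* normal suspend phase: wakeup bookkeeping only */
            if (device_may_wakeup(dev))
                    enable_irq_wake(data->irq);
            return 0;
    }

    static int example_suspend_noirq(struct device *dev)
    {
            struct example_rtc_data *data = dev_get_drvdata(dev);

            /* noirq phase: safe to gate the clock, IRQs are quiesced */
            if (data->clk)
                    clk_disable_unprepare(data->clk);
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            .suspend        = example_suspend,
            .suspend_noirq  = example_suspend_noirq,
    };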
index 26270c3..ce129e5 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.17"
+#define DRV_VERSION            "1.6.0.17a"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 155b286..25436cd 100644 (file)
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        unsigned long ptr;
        struct fc_rport_priv *rdata;
        spinlock_t *io_lock = NULL;
+       int io_lock_acquired = 0;
 
        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        spin_lock_irqsave(io_lock, flags);
 
        /* initialize rest of io_req */
+       io_lock_acquired = 1;
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
                  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 
        /* if only we issued IO, will we have the io lock */
-       if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+       if (io_lock_acquired)
                spin_unlock_irqrestore(io_lock, flags);
 
        atomic_dec(&fnic->in_flight);
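The fnic fix stops inferring lock ownership from command flags and instead records it in a local io_lock_acquired variable before jumping to the shared exit path. Reduced to a generic sketch (hypothetical names, not driver code):

    #include <linux/spinlock.h>

    static int example_queuecommand(spinlock_t *io_lock, bool need_lock)
    {
            unsigned long flags;
            int io_lock_acquired = 0;       /* set only once the lock is held */
            int ret = 0;

            if (!need_lock)
                    goto out;               /* early exit, lock never taken */

            spin_lock_irqsave(io_lock, flags);
            io_lock_acquired = 1;
            /* ... work done under the lock ... */
    out:
            /* unlock only on the paths that actually acquired it */
            if (io_lock_acquired)
                    spin_unlock_irqrestore(io_lock, flags);
            return ret;
    }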
index 8827448..a9aa389 100644 (file)
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 {
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       unsigned int trace_index;
 
-       trace_entry = &ioa_cfg->trace[atomic_add_return
-                       (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+       trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+       trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+       unsigned int hrrq;
+
        if (ioa_cfg->hrrq_num == 1)
-               return 0;
-       else
-               return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+               hrrq = 0;
+       else {
+               hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+               hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+       }
+       return hrrq;
 }
 
 /**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       unsigned long hrrq_flags;
+       unsigned long lock_flags;
 
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
 
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
                scsi_cmd->scsi_done(scsi_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        } else {
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               spin_lock(&ipr_cmd->hrrq->_lock);
                ipr_erp_start(ioa_cfg, ipr_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock(&ipr_cmd->hrrq->_lock);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        }
 }
 
index 73790a1..6b97ee4 100644 (file)
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
 
 #define IPR_NUM_TRACE_INDEX_BITS       8
 #define IPR_NUM_TRACE_ENTRIES          (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK           (IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
        char trace_start[8];
 #define IPR_TRACE_START_LABEL                  "trace"
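IPR_TRACE_INDEX_MASK relies on IPR_NUM_TRACE_ENTRIES being a power of two: masking the atomic counter always lands in [0, entries), whereas the old modulo goes negative once the counter wraps, indexing before the array. A standalone userspace illustration of the difference (not driver code):

    #include <stdio.h>

    #define NUM_ENTRIES_BITS 8
    #define NUM_ENTRIES      (1 << NUM_ENTRIES_BITS)
    #define INDEX_MASK       (NUM_ENTRIES - 1)

    int main(void)
    {
            /* a wrapped counter: '%' keeps the sign, '&' keeps only low bits */
            int wrapped = -5;

            printf("modulo: %d  mask: %d\n",
                   wrapped % NUM_ENTRIES, wrapped & INDEX_MASK);
            return 0;
    }

On two's-complement systems this prints "modulo: -5  mask: 251", which is why the mask is the safe form for a ring index.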
index 50af66a..6c686bc 100644 (file)
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
index c679594..2d5909c 100644 (file)
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and then reacquiring
+                        * again around fc_fcp_cleanup_cmd() is required,
+                        * since fc_fcp_cleanup_cmd() calls into
+                        * fc_seq_set_resp() and that func preempts cpu using
+                        * schedule. Maybe the schedule and related code should
+                        * be removed instead of unlocking here, to avoid a
+                        * scheduling-while-atomic bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
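The libfc cleanup path now drops scsi_pkt_lock around fc_fcp_cleanup_cmd() because that call can end up scheduling, and sleeping while holding a spinlock is a bug (the TODO in the hunk above says as much). The shape of the workaround, as a hedged sketch with placeholder names:

    #include <linux/spinlock.h>

    /* stands in for a callee that may schedule (e.g. takes a mutex) */
    static void may_sleep(void *pkt)
    {
            (void)pkt;
    }

    static void example_complete(spinlock_t *pkt_lock, void *pkt,
                                 unsigned int *state, unsigned int done_flag)
    {
            spin_lock_bh(pkt_lock);
            if (!(*state & done_flag)) {
                    *state |= done_flag;

                    /* drop the lock across the potentially sleeping call */
                    spin_unlock_bh(pkt_lock);
                    may_sleep(pkt);
                    spin_lock_bh(pkt_lock);

                    /* ... finish completion back under the lock ... */
            }
            spin_unlock_bh(pkt_lock);
    }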
index 8053f24..98d9bb6 100644 (file)
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
index 0e6ee3c..e9ae6b9 100644 (file)
@@ -68,7 +68,7 @@
  * |                              |                    | 0xd101-0xd1fe |
  * |                              |                    | 0xd214-0xd2fe |
  * | Target Mode                 |       0xe079       |                |
- * | Target Mode Management      |       0xf072       | 0xf002         |
+ * | Target Mode Management      |       0xf080       | 0xf002         |
  * |                              |                    | 0xf046-0xf049  |
  * | Target Mode Task Management  |      0x1000b      |                |
  * ----------------------------------------------------------------------
index 285cb20..998498e 100644 (file)
@@ -2924,6 +2924,7 @@ qla2x00_rport_del(void *data)
        struct fc_rport *rport;
        scsi_qla_host_t *vha = fcport->vha;
        unsigned long flags;
+       unsigned long vha_flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
@@ -2935,7 +2936,9 @@ qla2x00_rport_del(void *data)
                 * Release the target mode FC NEXUS in qla_target.c code
                 * if target mod is enabled.
                 */
+               spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
                qlt_fc_port_deleted(vha, fcport);
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
        }
 }
 
@@ -3303,6 +3306,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
         * Create target mode FC NEXUS in qla_target.c if target mode is
         * enabled..
         */
+
        qlt_fc_port_added(vha, fcport);
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3460,20 +3464,43 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
-                               qla2x00_mark_device_lost(vha, fcport,
-                                   ql2xplogiabsentdevice, 0);
-                               if (fcport->loop_id != FC_NO_LOOP_ID &&
-                                   (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
-                                   fcport->port_type != FCT_INITIATOR &&
-                                   fcport->port_type != FCT_BROADCAST) {
-                                       ha->isp_ops->fabric_logout(vha,
-                                           fcport->loop_id,
-                                           fcport->d_id.b.domain,
-                                           fcport->d_id.b.area,
-                                           fcport->d_id.b.al_pa);
-                                       qla2x00_clear_loop_id(fcport);
+                       if (fcport->scan_state == QLA_FCPORT_SCAN) {
+                               if (qla_ini_mode_enabled(base_vha) &&
+                                   atomic_read(&fcport->state) == FCS_ONLINE) {
+                                       qla2x00_mark_device_lost(vha, fcport,
+                                           ql2xplogiabsentdevice, 0);
+                                       if (fcport->loop_id != FC_NO_LOOP_ID &&
+                                           (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+                                           fcport->port_type != FCT_INITIATOR &&
+                                           fcport->port_type != FCT_BROADCAST) {
+                                               ha->isp_ops->fabric_logout(vha,
+                                                   fcport->loop_id,
+                                                   fcport->d_id.b.domain,
+                                                   fcport->d_id.b.area,
+                                                   fcport->d_id.b.al_pa);
+                                               qla2x00_clear_loop_id(fcport);
+                                       }
+                               } else if (!qla_ini_mode_enabled(base_vha)) {
+                                       /*
+                                        * In target mode, explicitly kill
+                                        * sessions and log out of devices
+                                        * that are gone, so that we don't
+                                        * end up with an initiator using the
+                                        * wrong ACL (if the fabric recycles
+                                        * an FC address and we have a stale
+                                        * session around) and so that we don't
+                                        * report initiators that are no longer
+                                        * on the fabric.
+                                        */
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
+                                           "port gone, logging out/killing session: "
+                                           "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d\n",
+                                           fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                                       qlt_fc_port_deleted(vha, fcport);
                                }
                        }
                }
@@ -3494,6 +3521,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            (fcport->flags & FCF_LOGIN_NEEDED) == 0)
                                continue;
 
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               if (fcport->scan_state == QLA_FCPORT_FOUND) {
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
+                                           "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d (initiator mode disabled; skipping "
+                                           "login)\n", fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                               }
+                               continue;
+                       }
+
                        if (fcport->loop_id == FC_NO_LOOP_ID) {
                                fcport->loop_id = next_loopid;
                                rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3569,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
 
-                       /* Find a new loop ID to use. */
-                       fcport->loop_id = next_loopid;
-                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
-                       if (rval != QLA_SUCCESS) {
-                               /* Ran out of IDs to use */
-                               break;
-                       }
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (qla_ini_mode_enabled(base_vha)) {
+                               /* Find a new loop ID to use. */
+                               fcport->loop_id = next_loopid;
+                               rval = qla2x00_find_new_loop_id(base_vha,
+                                   fcport);
+                               if (rval != QLA_SUCCESS) {
+                                       /* Ran out of IDs to use */
+                                       break;
+                               }
 
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+                               /* Login and update database */
+                               qla2x00_fabric_dev_login(vha, fcport,
+                                   &next_loopid);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
+                                       "new port %8phC state 0x%x flags 0x%x fc4_type "
+                                       "0x%x scan_state %d (initiator mode disabled; "
+                                       "skipping login)\n",
+                                       fcport->port_name,
+                                       atomic_read(&fcport->state),
+                                       fcport->flags, fcport->fc4_type,
+                                       fcport->scan_state);
+                       }
 
                        list_move_tail(&fcport->list, &vha->vp_fcports);
                }
@@ -3725,11 +3796,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        fcport->fp_speed = new_fcport->fp_speed;
 
                        /*
-                        * If address the same and state FCS_ONLINE, nothing
-                        * changed.
+                        * If address the same and state FCS_ONLINE
+                        * (or in target mode), nothing changed.
                         */
                        if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
+                           (atomic_read(&fcport->state) == FCS_ONLINE ||
+                            !qla_ini_mode_enabled(base_vha))) {
                                break;
                        }
 
@@ -3749,6 +3821,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                         * Log it out if still logged in and mark it for
                         * relogin later.
                         */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+                                        "port changed FC ID, %8phC"
+                                        " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+                                        fcport->port_name,
+                                        fcport->d_id.b.domain,
+                                        fcport->d_id.b.area,
+                                        fcport->d_id.b.al_pa,
+                                        fcport->loop_id,
+                                        new_fcport->d_id.b.domain,
+                                        new_fcport->d_id.b.area,
+                                        new_fcport->d_id.b.al_pa);
+                               fcport->d_id.b24 = new_fcport->d_id.b24;
+                               break;
+                       }
+
                        fcport->d_id.b24 = new_fcport->d_id.b24;
                        fcport->flags |= FCF_LOGIN_NEEDED;
                        if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3856,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                if (found)
                        continue;
                /* If device was not in our fcports list, then add it. */
+               new_fcport->scan_state = QLA_FCPORT_FOUND;
                list_add_tail(&new_fcport->list, new_fcports);
 
                /* Allocate a new replacement fcport. */
index fe8a8d1..496a733 100644 (file)
@@ -113,6 +113,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status, int qfull);
 static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
 /*
  * Global Variables
  */
@@ -431,10 +432,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
                /* Global event */
-               atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-               qlt_clear_tgt_db(ha->tgt.qla_tgt);
+               atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+               qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
@@ -782,25 +783,20 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
-       struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
-       unsigned long flags;
 
        if (!vha->hw->tgt.tgt_ops)
                return;
 
-       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+       if (!tgt)
                return;
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
 
@@ -808,7 +804,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -2347,9 +2342,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                res = qlt_build_ctio_crc2_pkt(&prm, vha);
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += full_req_cnt;
                goto out_unmap_unlock;
-
+       }
 
        pkt = (struct ctio7_to_24xx *)prm.pkt;
 
@@ -2487,8 +2483,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
 
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += prm.req_cnt;
                goto out_unlock_free_unmap;
+       }
+
        pkt = (struct ctio7_to_24xx *)prm.pkt;
        pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
            CTIO7_FLAGS_STATUS_MODE_0);
@@ -2717,7 +2716,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        int rc;
 
        if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2733,17 +2732,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        if (rc == -ENOMEM)
                qlt_alloc_qfull_cmd(vha, atio, 0, 0);
-       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 done:
        if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
            !cmd->cmd_sent_to_fw)) {
-               if (!ha_locked && !in_interrupt())
-                       msleep(250); /* just in case */
-
-               qlt_unmap_sg(vha, cmd);
+               if (cmd->sg_mapped)
+                       qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
+
+       if (!ha_locked)
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
        return;
 }
 
@@ -3347,6 +3347,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
        cmd->loop_id = sess->loop_id;
        cmd->conf_compl_supported = sess->conf_compl_supported;
 
+       cmd->cmd_flags = 0;
+       cmd->jiffies_at_alloc = get_jiffies_64();
+
+       cmd->reset_count = vha->hw->chip_reset;
+
        return cmd;
 }
 
@@ -3453,11 +3458,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
                return -ENOMEM;
        }
 
-       cmd->cmd_flags = 0;
-       cmd->jiffies_at_alloc = get_jiffies_64();
-
-       cmd->reset_count = vha->hw->chip_reset;
-
        cmd->cmd_in_wq = 1;
        cmd->cmd_flags |= BIT_0;
        INIT_WORK(&cmd->work, qlt_do_work);
index c95a4e9..ce6c770 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -944,7 +943,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
                            scmd->sdb.length);
                scmd->sdb.table.sgl = &ses->sense_sgl;
                scmd->sc_data_direction = DMA_FROM_DEVICE;
-               scmd->sdb.table.nents = 1;
+               scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
                scmd->cmnd[0] = REQUEST_SENSE;
                scmd->cmnd[4] = scmd->sdb.length;
                scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -2587,33 +2586,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
        }
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *             formatted sense data buffer
- * @buf:       Where to build sense data
- * @info:      64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-       if ((buf[0] & 0x7f) == 0x72) {
-               u8 *ucp, len;
-
-               len = buf[7];
-               ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-               if (!ucp) {
-                       buf[7] = len + 0xa;
-                       ucp = buf + 8 + len;
-               }
-               ucp[0] = 0;
-               ucp[1] = 0xa;
-               ucp[2] = 0x80; /* Valid bit */
-               ucp[3] = 0;
-               put_unaligned_be64(info, &ucp[4]);
-       } else if ((buf[0] & 0x7f) == 0x70) {
-               buf[0] |= 0x80;
-               put_unaligned_be64(info, &buf[3]);
-       }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
index b1a2631..448ebda 100644 (file)
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 
 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 {
-       if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+       if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
                return;
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
 }
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 
        if (mq) {
                if (nents <= SCSI_MAX_SG_SEGMENTS) {
-                       sdb->table.nents = nents;
-                       sg_init_table(sdb->table.sgl, sdb->table.nents);
+                       sdb->table.nents = sdb->table.orig_nents = nents;
+                       sg_init_table(sdb->table.sgl, nents);
                        return 0;
                }
                first_chunk = sdb->table.sgl;
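__sg_free_table() frees based on orig_nents, so the scsi_lib and scsi_error fixes keep orig_nents in step with nents whenever a table is pointed at a preallocated scatterlist by hand; otherwise a later free walks the wrong count. A minimal hypothetical helper capturing that rule:

    #include <linux/scatterlist.h>

    /* sketch only: both counts must be set when initialising by hand,
     * because the free path trusts orig_nents rather than nents */
    static void example_sg_table_init(struct sg_table *table,
                                      struct scatterlist *sgl,
                                      unsigned int nents)
    {
            table->sgl = sgl;
            table->nents = table->orig_nents = nents;
            sg_init_table(sgl, nents);
    }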
index 9e43ae1..e4b7998 100644 (file)
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
-       int err;
+       int err = 0;
 
-       err = blk_pre_runtime_suspend(sdev->request_queue);
-       if (err)
-               return err;
-       if (pm && pm->runtime_suspend)
+       if (pm && pm->runtime_suspend) {
+               err = blk_pre_runtime_suspend(sdev->request_queue);
+               if (err)
+                       return err;
                err = pm->runtime_suspend(dev);
-       blk_post_runtime_suspend(sdev->request_queue, err);
-
+               blk_post_runtime_suspend(sdev->request_queue, err);
+       }
        return err;
 }
 
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;
 
-       blk_pre_runtime_resume(sdev->request_queue);
-       if (pm && pm->runtime_resume)
+       if (pm && pm->runtime_resume) {
+               blk_pre_runtime_resume(sdev->request_queue);
                err = pm->runtime_resume(dev);
-       blk_post_runtime_resume(sdev->request_queue, err);
-
+               blk_post_runtime_resume(sdev->request_queue, err);
+       }
        return err;
 }
 
index 1ac38e7..9ad4116 100644 (file)
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
 
        depth = simple_strtoul(buf, NULL, 0);
 
-       if (depth < 1 || depth > sht->can_queue)
+       if (depth < 1 || depth > sdev->host->can_queue)
                return -EINVAL;
 
        retval = sht->change_queue_depth(sdev, depth);
index 7f9d65f..11ea52b 100644 (file)
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
index 9a1c342..525ab4c 100644 (file)
@@ -1274,9 +1274,9 @@ static int st_open(struct inode *inode, struct file *filp)
        spin_lock(&st_use_lock);
        STp->in_use = 0;
        spin_unlock(&st_use_lock);
-       scsi_tape_put(STp);
        if (resumed)
                scsi_autopm_put_device(STp->device);
+       scsi_tape_put(STp);
        return retval;
 
 }
index 788e2b1..acce90a 100644 (file)
@@ -40,6 +40,7 @@
 #define SPFI_CONTROL_SOFT_RESET                        BIT(11)
 #define SPFI_CONTROL_SEND_DMA                  BIT(10)
 #define SPFI_CONTROL_GET_DMA                   BIT(9)
+#define SPFI_CONTROL_SE                        BIT(8)
 #define SPFI_CONTROL_TMODE_SHIFT               5
 #define SPFI_CONTROL_TMODE_MASK                        0x7
 #define SPFI_CONTROL_TMODE_SINGLE              0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
        else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
                 xfer->rx_nbits == SPI_NBITS_QUAD)
                val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+       val |= SPFI_CONTROL_SE;
        spfi_writel(spfi, val, SPFI_CONTROL);
 }
 
index f08e812..412b9c8 100644 (file)
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
-           && (transfer->len > spi_imx->tx_wml))
+       if (spi_imx->dma_is_inited
+           && transfer->len > spi_imx->rx_wml * sizeof(u32)
+           && transfer->len > spi_imx->tx_wml * sizeof(u32))
                return true;
        return false;
 }
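The spi-imx DMA check compares a transfer length counted in bytes against watermark levels that are programmed in 32-bit FIFO words, hence the sizeof(u32) scaling. A quick standalone check of the unit conversion, with illustrative values only:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int rx_wml = 16;       /* watermark in 32-bit words */
            unsigned int len = 40;          /* transfer length in bytes  */

            /* comparing bytes against words under-counts by a factor of 4 */
            printf("bytes vs words: %d\n", len > rx_wml);
            printf("bytes vs bytes: %d\n",
                   len > rx_wml * (unsigned int)sizeof(uint32_t));
            return 0;
    }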
index c458e50..4ebf5aa 100644 (file)
@@ -243,7 +243,7 @@ static int cb_pcimdas_ao_insn_write(struct comedi_device *dev,
        return insn->n;
 }
 
-static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
+static int cb_pcimdas_di_insn_bits(struct comedi_device *dev,
                                   struct comedi_subdevice *s,
                                   struct comedi_insn *insn,
                                   unsigned int *data)
@@ -258,7 +258,7 @@ static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
        return insn->n;
 }
 
-static int cb_pcimdas_do_insn_write(struct comedi_device *dev,
+static int cb_pcimdas_do_insn_bits(struct comedi_device *dev,
                                    struct comedi_subdevice *s,
                                    struct comedi_insn *insn,
                                    unsigned int *data)
@@ -424,7 +424,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
        s->n_chan       = 4;
        s->maxdata      = 1;
        s->range_table  = &range_digital;
-       s->insn_read    = cb_pcimdas_di_insn_read;
+       s->insn_bits    = cb_pcimdas_di_insn_bits;
 
        /* Digital Output subdevice (main connector) */
        s = &dev->subdevices[4];
@@ -433,7 +433,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
        s->n_chan       = 4;
        s->maxdata      = 1;
        s->range_table  = &range_digital;
-       s->insn_write   = cb_pcimdas_do_insn_write;
+       s->insn_bits    = cb_pcimdas_do_insn_bits;
 
        /* Counter subdevice (8254) */
        s = &dev->subdevices[5];
index 9c934e6..c61add4 100644 (file)
@@ -40,7 +40,7 @@
 
 #define DEBUG_SUBSYSTEM D_OTHER
 
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 
 #include "../include/obd_support.h"
 #include "../include/lustre_debug.h"
index 50227b5..fcb8c61 100644 (file)
@@ -1056,7 +1056,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
                /* for first fragment packet, driver need allocate 1536 +
                 * drvinfo_sz + RXDESC_SIZE to defrag packet. */
                if ((mf == 1) && (frag == 0))
-                       alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
+                       /*1658+6=1664, 1664 is 128 alignment.*/
+                       alloc_sz = max_t(u16, tmp_len, 1658);
                else
                        alloc_sz = tmp_len;
                /* 2 is for IP header 4 bytes alignment in QoS packet case.
index 0343ae3..376e4a0 100644 (file)
@@ -807,6 +807,10 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
             pRD = pRD->next) {
                if (works++ > 15)
                        break;
+
+               if (!pRD->pRDInfo->skb)
+                       break;
+
                if (vnt_receive_frame(pDevice, pRD)) {
                        if (!device_alloc_rx_buf(pDevice, pRD)) {
                                dev_err(&pDevice->pcid->dev,
@@ -1417,7 +1421,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
        priv->current_aid = conf->aid;
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if (changed & BSS_CHANGED_BSSID && conf->bssid) {
                unsigned long flags;
 
                spin_lock_irqsave(&priv->lock, flags);
@@ -1482,8 +1486,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
-               if (conf->assoc) {
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+           priv->op_mode != NL80211_IFTYPE_AP) {
+               if (conf->assoc && conf->beacon_rate) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);
 
index ab3ab84..766fdce 100644 (file)
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
        priv->current_aid = conf->aid;
 
-       if (changed & BSS_CHANGED_BSSID)
+       if (changed & BSS_CHANGED_BSSID && conf->bssid)
                vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
 
 
index 74e6114..0ab6e2e 100644 (file)
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-       if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+       if (hdr->flags & ISCSI_FLAG_CMD_READ)
                cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-       } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+       else
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
@@ -4001,7 +4001,13 @@ get_immediate:
        }
 
 transport_err:
-       iscsit_take_action_for_connection_exit(conn);
+       /*
+        * Avoid the normal connection failure code-path if this connection
+        * is still within LOGIN mode, and iscsi_np process context is
+        * responsible for cleaning up the early connection failure.
+        */
+       if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+               iscsit_take_action_for_connection_exit(conn);
 out:
        return 0;
 }
@@ -4093,7 +4099,7 @@ reject:
 
 int iscsi_target_rx_thread(void *arg)
 {
-       int ret;
+       int ret, rc;
        u8 buffer[ISCSI_HDR_LEN], opcode;
        u32 checksum = 0, digest = 0;
        struct iscsi_conn *conn = arg;
@@ -4103,10 +4109,16 @@ int iscsi_target_rx_thread(void *arg)
         * connection recovery / failure event can be triggered externally.
         */
        allow_signal(SIGINT);
+       /*
+        * Wait for iscsi_post_login_handler() to complete before allowing
+        * incoming iscsi/tcp socket I/O, and/or failing the connection.
+        */
+       rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+       if (rc < 0)
+               return 0;
 
        if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
                struct completion comp;
-               int rc;
 
                init_completion(&comp);
                rc = wait_for_completion_interruptible(&comp);
@@ -4543,7 +4555,18 @@ static void iscsit_logout_post_handler_closesession(
        struct iscsi_conn *conn)
 {
        struct iscsi_session *sess = conn->sess;
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+       /*
+        * Traditional iscsi/tcp will invoke this logic from TX thread
+        * context during session logout, so clear tx_thread_active and
+        * sleep if iscsit_close_connection() has not already occurred.
+        *
+        * Since iser-target invokes this logic from its own workqueue,
+        * always sleep waiting for RX/TX thread shutdown to complete
+        * within iscsit_close_connection().
+        */
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4557,7 +4580,10 @@ static void iscsit_logout_post_handler_closesession(
 static void iscsit_logout_post_handler_samecid(
        struct iscsi_conn *conn)
 {
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4776,6 +4802,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
        struct iscsi_session *sess;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
+       LIST_HEAD(free_list);
        int session_count = 0;
 
        spin_lock_bh(&se_tpg->session_lock);
@@ -4797,14 +4824,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
                }
                atomic_set(&sess->session_reinstatement, 1);
                spin_unlock(&sess->conn_lock);
-               spin_unlock_bh(&se_tpg->session_lock);
 
-               iscsit_free_session(sess);
-               spin_lock_bh(&se_tpg->session_lock);
+               list_move_tail(&se_sess->sess_list, &free_list);
+       }
+       spin_unlock_bh(&se_tpg->session_lock);
+
+       list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
 
+               iscsit_free_session(sess);
                session_count++;
        }
-       spin_unlock_bh(&se_tpg->session_lock);
 
        pr_debug("Released %d iSCSI Session(s) from Target Portal"
                        " Group: %hu\n", session_count, tpg->tpgt);
index 70d799d..c3bccad 100644 (file)
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
        init_completion(&conn->conn_logout_comp);
        init_completion(&conn->rx_half_close_comp);
        init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
        spin_lock_init(&conn->cmd_lock);
        spin_lock_init(&conn->conn_usage_lock);
        spin_lock_init(&conn->immed_queue_lock);
@@ -699,7 +700,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
                iscsit_start_nopin_timer(conn);
 }
 
-static int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsi_conn *conn)
 {
        int ret = 0;
 
@@ -734,6 +735,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
 
        return 0;
 out_tx:
+       send_sig(SIGINT, conn->tx_thread, 1);
        kthread_stop(conn->tx_thread);
        conn->tx_thread_active = false;
 out_bitmap:
@@ -744,7 +746,7 @@ out_bitmap:
        return ret;
 }
 
-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
        struct iscsi_np *np,
        struct iscsi_conn *conn,
        u8 zero_tsih)
@@ -754,7 +756,6 @@ int iscsi_post_login_handler(
        struct se_session *se_sess = sess->se_sess;
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-       int rc;
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -795,10 +796,6 @@ int iscsi_post_login_handler(
                        sess->sess_ops->InitiatorName);
                spin_unlock_bh(&sess->conn_lock);
 
-               rc = iscsit_start_kthreads(conn);
-               if (rc)
-                       return rc;
-
                iscsi_post_login_start_timers(conn);
                /*
                 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -807,15 +804,20 @@ int iscsi_post_login_handler(
                iscsit_thread_get_cpumask(conn);
                conn->conn_rx_reset_cpumask = 1;
                conn->conn_tx_reset_cpumask = 1;
-
+               /*
+                * Wakeup the sleeping iscsi_target_rx_thread() now that
+                * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+                */
+               complete(&conn->rx_login_comp);
                iscsit_dec_conn_usage_count(conn);
+
                if (stop_timer) {
                        spin_lock_bh(&se_tpg->session_lock);
                        iscsit_stop_time2retain_timer(sess);
                        spin_unlock_bh(&se_tpg->session_lock);
                }
                iscsit_dec_session_usage_count(sess);
-               return 0;
+               return;
        }
 
        iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -856,10 +858,6 @@ int iscsi_post_login_handler(
                " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
        spin_unlock_bh(&se_tpg->session_lock);
 
-       rc = iscsit_start_kthreads(conn);
-       if (rc)
-               return rc;
-
        iscsi_post_login_start_timers(conn);
        /*
         * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -868,10 +866,12 @@ int iscsi_post_login_handler(
        iscsit_thread_get_cpumask(conn);
        conn->conn_rx_reset_cpumask = 1;
        conn->conn_tx_reset_cpumask = 1;
-
+       /*
+        * Wakeup the sleeping iscsi_target_rx_thread() now that
+        * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+        */
+       complete(&conn->rx_login_comp);
        iscsit_dec_conn_usage_count(conn);
-
-       return 0;
 }
 
 static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1436,23 +1436,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (ret < 0)
                goto new_sess_out;
 
-       if (!conn->sess) {
-               pr_err("struct iscsi_conn session pointer is NULL!\n");
-               goto new_sess_out;
-       }
-
        iscsi_stop_login_thread_timer(np);
 
-       if (signal_pending(current))
-               goto new_sess_out;
-
        if (ret == 1) {
                tpg_np = conn->tpg_np;
 
-               ret = iscsi_post_login_handler(np, conn, zero_tsih);
-               if (ret < 0)
-                       goto new_sess_out;
-
+               iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
 
index 29d0983..55cbf45 100644 (file)
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
                                bool, bool);
 extern int iscsi_target_login_thread(void *);
index 8c02fa3..f9cde91 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <linux/ctype.h>
+#include <linux/kthread.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
                ntohl(login_rsp->statsn), login->rsp_length);
 
        padding = ((-login->rsp_length) & 3);
+       /*
+        * Before sending the last login response containing the transition
+        * bit for full-feature-phase, go ahead and start up TX/RX threads
+        * now to avoid potential resource allocation failures after the
+        * final login response has been sent.
+        */
+       if (login->login_complete) {
+               int rc = iscsit_start_kthreads(conn);
+               if (rc) {
+                       iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                           ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                       return -1;
+               }
+       }
 
        if (conn->conn_transport->iscsit_put_login_tx(conn, login,
                                        login->rsp_length + padding) < 0)
-               return -1;
+               goto err;
 
        login->rsp_length               = 0;
        mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
        mutex_unlock(&sess->cmdsn_mutex);
 
        return 0;
+
+err:
+       if (login->login_complete) {
+               if (conn->rx_thread && conn->rx_thread_active) {
+                       send_sig(SIGINT, conn->rx_thread, 1);
+                       kthread_stop(conn->rx_thread);
+               }
+               if (conn->tx_thread && conn->tx_thread_active) {
+                       send_sig(SIGINT, conn->tx_thread, 1);
+                       kthread_stop(conn->tx_thread);
+               }
+               spin_lock(&iscsit_global->ts_bitmap_lock);
+               bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+                                     get_order(1));
+               spin_unlock(&iscsit_global->ts_bitmap_lock);
+       }
+       return -1;
 }
 
 static void iscsi_target_sk_data_ready(struct sock *sk)
index 1d30b09..67098a8 100644 (file)
@@ -1209,6 +1209,8 @@ err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
 err_sensor:
+       if (!IS_ERR_OR_NULL(data->regulator))
+               regulator_disable(data->regulator);
        thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
        return ret;
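The exynos_tmu fix adds the missing regulator_disable() to the err_sensor unwind path so the error ladder mirrors the successful setup in reverse. The usual shape of such a ladder, sketched with hypothetical steps:

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* sketch: undo in reverse order; each label releases everything
     * acquired before the point of failure */
    static int example_probe_body(struct regulator *reg,
                                  int (*setup_sensor)(void))
    {
            int ret;

            ret = regulator_enable(reg);
            if (ret)
                    return ret;

            ret = setup_sensor();
            if (ret)
                    goto err_sensor;

            return 0;

    err_sensor:
            if (!IS_ERR_OR_NULL(reg))
                    regulator_disable(reg);
            return ret;
    }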
index 396344c..16ed0b6 100644 (file)
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
  *     Locking: ctrl_lock
  */
 
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
 {
-       struct n_tty_data *ldata = tty->disc_data;
        struct pid *tty_pgrp = tty_get_pgrp(tty);
        if (tty_pgrp) {
                kill_pgrp(tty_pgrp, sig, 1);
                put_pid(tty_pgrp);
        }
+}
 
-       if (!L_NOFLSH(tty)) {
+static void isig(int sig, struct tty_struct *tty)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       if (L_NOFLSH(tty)) {
+               /* signal only */
+               __isig(sig, tty);
+
+       } else { /* signal and flush */
                up_read(&tty->termios_rwsem);
                down_write(&tty->termios_rwsem);
 
+               __isig(sig, tty);
+
                /* clear echo buffer */
                mutex_lock(&ldata->output_lock);
                ldata->echo_head = ldata->echo_tail = 0;
index f8120c1..8cd3534 100644 (file)
@@ -241,7 +241,6 @@ config SERIAL_SAMSUNG
        tristate "Samsung SoC serial support"
        depends on PLAT_SAMSUNG || ARCH_EXYNOS
        select SERIAL_CORE
-       select SERIAL_EARLYCON
        help
          Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
          providing /dev/ttySAC0, 1 and 2 (note, some machines may not
@@ -277,6 +276,7 @@ config SERIAL_SAMSUNG_CONSOLE
        bool "Support for console on Samsung SoC serial port"
        depends on SERIAL_SAMSUNG=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
        help
          Allow selection of the S3C24XX on-board serial ports for use as
          an virtual console.
index 27dade2..5ca1dfb 100644 (file)
@@ -315,8 +315,7 @@ static int atmel_config_rs485(struct uart_port *port,
        if (rs485conf->flags & SER_RS485_ENABLED) {
                dev_dbg(port->dev, "Setting UART to RS485\n");
                atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
-               if ((rs485conf->delay_rts_after_send) > 0)
-                       UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+               UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
                mode |= ATMEL_US_USMODE_RS485;
        } else {
                dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -354,8 +353,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
 
        /* override mode to RS485 if needed, otherwise keep the current mode */
        if (port->rs485.flags & SER_RS485_ENABLED) {
-               if ((port->rs485.delay_rts_after_send) > 0)
-                       UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+               UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
                mode &= ~ATMEL_US_USMODE;
                mode |= ATMEL_US_USMODE_RS485;
        }
@@ -2061,8 +2059,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
 
        /* mode */
        if (port->rs485.flags & SER_RS485_ENABLED) {
-               if ((port->rs485.delay_rts_after_send) > 0)
-                       UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+               UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
                mode |= ATMEL_US_USMODE_RS485;
        } else if (termios->c_cflag & CRTSCTS) {
                /* RS232 with hardware handshake (RTS/CTS) */
index 8825039..01aa52f 100644 (file)
@@ -1132,11 +1132,6 @@ static int imx_startup(struct uart_port *port)
        while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
                udelay(1);
 
-       /* Can we enable the DMA support? */
-       if (is_imx6q_uart(sport) && !uart_console(port) &&
-           !sport->dma_is_inited)
-               imx_uart_dma_init(sport);
-
        spin_lock_irqsave(&sport->port.lock, flags);
 
        /*
@@ -1145,9 +1140,6 @@ static int imx_startup(struct uart_port *port)
        writel(USR1_RTSD, sport->port.membase + USR1);
        writel(USR2_ORE, sport->port.membase + USR2);
 
-       if (sport->dma_is_inited && !sport->dma_is_enabled)
-               imx_enable_dma(sport);
-
        temp = readl(sport->port.membase + UCR1);
        temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
 
@@ -1318,6 +1310,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
                        } else {
                                ucr2 |= UCR2_CTSC;
                        }
+
+                       /* Can we enable the DMA support? */
+                       if (is_imx6q_uart(sport) && !uart_console(port)
+                               && !sport->dma_is_inited)
+                               imx_uart_dma_init(sport);
                } else {
                        termios->c_cflag &= ~CRTSCTS;
                }
@@ -1434,6 +1431,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
        if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
                imx_enable_ms(&sport->port);
 
+       if (sport->dma_is_inited && !sport->dma_is_enabled)
+               imx_enable_dma(sport);
        spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
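
The imx serial hunks above defer DMA setup from port startup into the termios path, bringing DMA up lazily the first time hardware flow control is configured. A minimal userspace sketch of the same flag-guarded lazy-init pattern; the struct, init_dma() and enable_dma() helpers are invented for illustration and are not the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        bool dma_inited;   /* mirrors sport->dma_is_inited */
        bool dma_enabled;  /* mirrors sport->dma_is_enabled */
    };

    static void init_dma(struct port *p)   { p->dma_inited = true; }
    static void enable_dma(struct port *p) { p->dma_enabled = true; }

    /* called whenever line settings change, like imx_set_termios() */
    static void set_termios(struct port *p, bool hw_flow_ctrl)
    {
        if (hw_flow_ctrl && !p->dma_inited)
            init_dma(p);                    /* one-time, lazy */

        if (p->dma_inited && !p->dma_enabled)
            enable_dma(p);
    }

    int main(void)
    {
        struct port p = { 0 };
        set_termios(&p, false);             /* no flow control: DMA stays off */
        set_termios(&p, true);              /* first CRTSCTS config: DMA comes up */
        printf("inited=%d enabled=%d\n", p.dma_inited, p.dma_enabled);
        return 0;
    }
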
index 0b7bb12..ec54044 100644 (file)
@@ -1409,7 +1409,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_lock(&port->mutex);
        uart_shutdown(tty, state);
        tty_port_tty_set(port, NULL);
-       tty->closing = 0;
+
        spin_lock_irqsave(&port->lock, flags);
 
        if (port->blocked_open) {
@@ -1435,6 +1435,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_unlock(&port->mutex);
 
        tty_ldisc_flush(tty);
+       tty->closing = 0;
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
index 843f2cd..9ffdfcf 100644 (file)
@@ -55,9 +55,6 @@
 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
 
-unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
-int sysrq_reset_downtime_ms __weak;
-
 static bool sysrq_on(void)
 {
        return sysrq_enabled || sysrq_always_enabled;
@@ -569,6 +566,7 @@ void handle_sysrq(int key)
 EXPORT_SYMBOL(handle_sysrq);
 
 #ifdef CONFIG_INPUT
+static int sysrq_reset_downtime_ms;
 
 /* Simple translation table for the SysRq keys */
 static const unsigned char sysrq_xlate[KEY_CNT] =
@@ -949,23 +947,8 @@ static bool sysrq_handler_registered;
 
 static inline void sysrq_register_handler(void)
 {
-       unsigned short key;
        int error;
-       int i;
-
-       /* First check if a __weak interface was instantiated. */
-       for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
-               key = platform_sysrq_reset_seq[i];
-               if (key == KEY_RESERVED || key > KEY_MAX)
-                       break;
-
-               sysrq_reset_seq[sysrq_reset_seq_len++] = key;
-       }
 
-       /*
-        * DT configuration takes precedence over anything that would
-        * have been defined via the __weak interface.
-        */
        sysrq_of_get_keyreset_config();
 
        error = input_register_handler(&sysrq_handler);
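
The sysrq hunk drops the __weak platform override for the reset key sequence, leaving the devicetree lookup as the only configuration path. For reference, the weak-symbol mechanism being removed lets a default definition be overridden by a strong one at link time; a tiny standalone illustration (not kernel code, names invented):

    #include <stdio.h>

    /* Default definition; a platform object file could provide a strong
     * definition with the same name and the linker would prefer it. */
    __attribute__((weak)) unsigned short platform_reset_seq[] = { 0 };

    int main(void)
    {
        /* With only the weak default linked in, the table is empty
         * (0 standing in for the KEY_RESERVED terminator). */
        printf("first key: %u\n", (unsigned)platform_reset_seq[0]);
        return 0;
    }
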
index 74fea4f..3ad48e1 100644 (file)
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
        },
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+       ci_hdrc_host_driver_init();
+       return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+       platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
index 21fe1a3..2f8af40 100644 (file)
@@ -237,9 +237,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
        rdrv->name      = "host";
        ci->roles[CI_ROLE_HOST] = rdrv;
 
+       return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
        ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
        orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
        ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-       return 0;
 }
index 5707bf3..0f12f13 100644 (file)
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
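
The chipidea hunks hoist the one-time ehci_init_driver() setup out of the per-controller ci_hdrc_host_init() path and run it once from module_init(), before the platform driver can bind. A compact userspace analogue of separating one-time global setup from per-instance setup; every name below is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static bool driver_template_ready;

    /* one-time work, analogous to ci_hdrc_host_driver_init() */
    static void host_driver_init(void)
    {
        driver_template_ready = true;
        puts("host driver template initialised once");
    }

    /* per-device work, analogous to ci_hdrc_host_init() */
    static int host_init(int dev_id)
    {
        if (!driver_template_ready)
            return -1;          /* would be a bug: probe before init */
        printf("device %d registered\n", dev_id);
        return 0;
    }

    int main(void)
    {
        host_driver_init();     /* module_init() equivalent */
        host_init(0);           /* later probes reuse the template */
        host_init(1);
        return 0;
    }
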
index 4b0448c..986abde 100644 (file)
@@ -513,7 +513,7 @@ static void async_completed(struct urb *urb)
        snoop(&urb->dev->dev, "urb complete\n");
        snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
                        as->status, COMPLETE, NULL, 0);
-       if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
+       if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
                snoop_urb_data(urb, urb->actual_length);
 
        if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
index 69398f9..ab14f84 100644 (file)
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
                                dev_name(&usb_dev->dev), retval);
                return (retval < 0) ? retval : -EMSGSIZE;
        }
-       if (usb_dev->speed == USB_SPEED_SUPER) {
+
+       if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
                retval = usb_get_bos_descriptor(usb_dev);
-               if (retval < 0) {
+               if (!retval) {
+                       usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+               } else if (usb_dev->speed == USB_SPEED_SUPER) {
                        mutex_unlock(&usb_bus_list_lock);
                        dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
                                        dev_name(&usb_dev->dev), retval);
index 3b71516..1e9a8c9 100644 (file)
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
        return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
 {
        /* USB 2.1 (and greater) devices indicate LPM support through
         * their USB 2.0 Extended Capabilities BOS descriptor.
@@ -2616,9 +2616,6 @@ static bool use_new_scheme(struct usb_device *udev, int retry)
        return USE_NEW_SCHEME(retry);
 }
 
-static int hub_port_reset(struct usb_hub *hub, int port1,
-                       struct usb_device *udev, unsigned int delay, bool warm);
-
 /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
  * Port warm reset is required to recover
  */
@@ -2706,44 +2703,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
        return 0;
 }
 
-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
-                       struct usb_device *udev, int *status)
-{
-       switch (*status) {
-       case 0:
-               /* TRSTRCY = 10 ms; plus some extra */
-               msleep(10 + 40);
-               if (udev) {
-                       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
-
-                       update_devnum(udev, 0);
-                       /* The xHC may think the device is already reset,
-                        * so ignore the status.
-                        */
-                       if (hcd->driver->reset_device)
-                               hcd->driver->reset_device(hcd, udev);
-               }
-               /* FALL THROUGH */
-       case -ENOTCONN:
-       case -ENODEV:
-               usb_clear_port_feature(hub->hdev,
-                               port1, USB_PORT_FEAT_C_RESET);
-               if (hub_is_superspeed(hub->hdev)) {
-                       usb_clear_port_feature(hub->hdev, port1,
-                                       USB_PORT_FEAT_C_BH_PORT_RESET);
-                       usb_clear_port_feature(hub->hdev, port1,
-                                       USB_PORT_FEAT_C_PORT_LINK_STATE);
-                       usb_clear_port_feature(hub->hdev, port1,
-                                       USB_PORT_FEAT_C_CONNECTION);
-               }
-               if (udev)
-                       usb_set_device_state(udev, *status
-                                       ? USB_STATE_NOTATTACHED
-                                       : USB_STATE_DEFAULT);
-               break;
-       }
-}
-
 /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm)
@@ -2767,13 +2726,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
                 * If the caller hasn't explicitly requested a warm reset,
                 * double check and see if one is needed.
                 */
-               status = hub_port_status(hub, port1,
-                                       &portstatus, &portchange);
-               if (status < 0)
-                       goto done;
-
-               if (hub_port_warm_reset_required(hub, port1, portstatus))
-                       warm = true;
+               if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
+                       if (hub_port_warm_reset_required(hub, port1,
+                                                       portstatus))
+                               warm = true;
        }
        clear_bit(port1, hub->warm_reset_bits);
 
@@ -2799,11 +2755,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
 
                /* Check for disconnect or reset */
                if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
-                       hub_port_finish_reset(hub, port1, udev, &status);
+                       usb_clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_RESET);
 
                        if (!hub_is_superspeed(hub->hdev))
                                goto done;
 
+                       usb_clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_BH_PORT_RESET);
+                       usb_clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_PORT_LINK_STATE);
+                       usb_clear_port_feature(hub->hdev, port1,
+                                       USB_PORT_FEAT_C_CONNECTION);
+
                        /*
                         * If a USB 3.0 device migrates from reset to an error
                         * state, re-issue the warm reset.
@@ -2836,6 +2800,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
        dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n");
 
 done:
+       if (status == 0) {
+               /* TRSTRCY = 10 ms; plus some extra */
+               msleep(10 + 40);
+               if (udev) {
+                       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+                       update_devnum(udev, 0);
+                       /* The xHC may think the device is already reset,
+                        * so ignore the status.
+                        */
+                       if (hcd->driver->reset_device)
+                               hcd->driver->reset_device(hcd, udev);
+
+                       usb_set_device_state(udev, USB_STATE_DEFAULT);
+               }
+       } else {
+               if (udev)
+                       usb_set_device_state(udev, USB_STATE_NOTATTACHED);
+       }
+
        if (!hub_is_superspeed(hub->hdev))
                up_read(&ehci_cf_port_reset_rwsem);
 
index 7eb1e26..457255a 100644 (file)
@@ -65,6 +65,7 @@ extern int  usb_hub_init(void);
 extern void usb_hub_cleanup(void);
 extern int usb_major_init(void);
 extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
 
 #ifdef CONFIG_PM
 
index 2ef3c8d..69e769c 100644 (file)
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
                dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
                ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
                break;
+       case USB_REQ_SET_INTERFACE:
+               dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+               dwc->start_config_issued = false;
+               /* Fall through */
        default:
                dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
index 8946c32..333a7c0 100644 (file)
@@ -291,6 +291,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
                        dwc3_trace(trace_dwc3_gadget,
                                        "Command Complete --> %d",
                                        DWC3_DGCMD_STATUS(reg));
+                       if (DWC3_DGCMD_STATUS(reg))
+                               return -EINVAL;
                        return 0;
                }
 
@@ -328,6 +330,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
                        dwc3_trace(trace_dwc3_gadget,
                                        "Command Complete --> %d",
                                        DWC3_DEPCMD_STATUS(reg));
+                       if (DWC3_DEPCMD_STATUS(reg))
+                               return -EINVAL;
                        return 0;
                }
 
@@ -1902,12 +1906,16 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
 {
        unsigned                status = 0;
        int                     clean_busy;
+       u32                     is_xfer_complete;
+
+       is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
 
        if (event->status & DEPEVT_STATUS_BUSERR)
                status = -ECONNRESET;
 
        clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
-       if (clean_busy)
+       if (clean_busy && (is_xfer_complete ||
+                               usb_endpoint_xfer_isoc(dep->endpoint.desc)))
                dep->flags &= ~DWC3_EP_BUSY;
 
        /*
index 4e3447b..58b4657 100644 (file)
@@ -1758,10 +1758,13 @@ unknown:
                 * take such requests too, if that's ever needed:  to work
                 * in config 0, etc.
                 */
-               list_for_each_entry(f, &cdev->config->functions, list)
-                       if (f->req_match && f->req_match(f, ctrl))
-                               goto try_fun_setup;
-               f = NULL;
+               if (cdev->config) {
+                       list_for_each_entry(f, &cdev->config->functions, list)
+                               if (f->req_match && f->req_match(f, ctrl))
+                                       goto try_fun_setup;
+                       f = NULL;
+               }
+
                switch (ctrl->bRequestType & USB_RECIP_MASK) {
                case USB_RECIP_INTERFACE:
                        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
index 40223b5..f51bb89 100644 (file)
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 
        kiocb->private = p;
 
-       kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+       if (p->aio)
+               kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
        res = ffs_epfile_io(kiocb->ki_filp, p);
        if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 
        kiocb->private = p;
 
-       kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+       if (p->aio)
+               kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
        res = ffs_epfile_io(kiocb->ki_filp, p);
        if (res == -EIOCBQUEUED)
index 3cc109f..15c3071 100644 (file)
@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
                return -EINVAL;
        }
 
-       curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+       curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
        if (unlikely(!curlun))
                return -ENOMEM;
 
@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
        common->luns = curlun;
        common->nluns = nluns;
 
-       pr_info("Number of LUNs=%d\n", common->nluns);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
        struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
        struct fsg_common *common = opts->common;
        struct fsg_dev *fsg;
+       unsigned nluns, i;
 
        fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
        if (unlikely(!fsg))
                return ERR_PTR(-ENOMEM);
 
        mutex_lock(&opts->lock);
+       if (!opts->refcnt) {
+               for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
+                       if (common->luns[i])
+                               nluns = i + 1;
+               if (!nluns)
+                       pr_warn("No LUNS defined, continuing anyway\n");
+               else
+                       common->nluns = nluns;
+               pr_info("Number of LUNs=%u\n", common->nluns);
+       }
        opts->refcnt++;
        mutex_unlock(&opts->lock);
+
        fsg->function.name      = FSG_DRIVER_DESC;
        fsg->function.bind      = fsg_bind;
        fsg->function.unbind    = fsg_unbind;
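
The f_mass_storage hunk makes fsg_alloc() recompute the LUN count from the highest populated slot of the FSG_MAX_LUNS-sized array the first time the function is bound, instead of printing the count at fsg_common_set_nluns() time. A standalone sketch of that scan; the array contents are made up:

    #include <stdio.h>

    #define FSG_MAX_LUNS 8

    int main(void)
    {
        /* NULL means "slot not configured"; only slots 0 and 2 are set */
        void *luns[FSG_MAX_LUNS] = { (void *)1, NULL, (void *)1 };
        unsigned nluns = 0, i;

        for (i = 0; i < FSG_MAX_LUNS; ++i)
            if (luns[i])
                nluns = i + 1;          /* highest populated index + 1 */

        printf("Number of LUNs=%u\n", nluns);   /* prints 3 */
        return 0;
    }
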
index 6d3eb8b..5318615 100644 (file)
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
                        factor = 1000;
                } else {
                        ep_desc = &hs_epin_desc;
-                       factor = 125;
+                       factor = 8000;
                }
 
                /* pre-compute some values for iso_complete() */
                uac2->p_framesize = opts->p_ssize *
                                    num_channels(opts->p_chmask);
                rate = opts->p_srate * uac2->p_framesize;
-               uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+               uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
                uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
                                        prm->max_psize);
 
index d32160d..5da37c9 100644 (file)
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       udc->phy_regs = ioremap(r->start, resource_size(r));
+       udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (udc->phy_regs == NULL) {
                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
                return -EBUSY;
index d69c355..7d69931 100644 (file)
@@ -321,6 +321,7 @@ err4:
 
 err3:
        put_device(&udc->dev);
+       device_del(&gadget->dev);
 
 err2:
        put_device(&gadget->dev);
index 1463c39..fe1d5fc 100644 (file)
@@ -980,10 +980,6 @@ rescan_all:
                int                     completed, modified;
                __hc32                  *prev;
 
-               /* Is this ED already invisible to the hardware? */
-               if (ed->state == ED_IDLE)
-                       goto ed_idle;
-
                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
@@ -1011,12 +1007,10 @@ skip_ed:
                }
 
                /* ED's now officially unlinked, hc doesn't see */
-               ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
                ed->hwNextED = 0;
                wmb();
                ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
 
                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
@@ -1087,6 +1081,7 @@ rescan_this:
                if (list_empty(&ed->td_list)) {
                        *last = ed->ed_next;
                        ed->ed_next = NULL;
+                       ed->state = ED_IDLE;
                        list_del(&ed->in_use_list);
                } else if (ohci->rh_state == OHCI_RH_RUNNING) {
                        *last = ed->ed_next;
index 0827d7c..ee07ba4 100644 (file)
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
        u32 pls = status_reg & PORT_PLS_MASK;
 
        /* resume state is a xHCI internal state.
-        * Do not report it to usb core.
+        * Do not report it to usb core, instead, pretend to be U3,
+        * thus usb core knows it's not ready for transfer
         */
-       if (pls == XDEV_RESUME)
+       if (pls == XDEV_RESUME) {
+               *status |= USB_SS_PORT_LS_U3;
                return;
+       }
 
        /* When the CAS bit is set then warm reset
         * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                status |= USB_PORT_STAT_C_RESET << 16;
        /* USB3.0 only */
        if (hcd->speed == HCD_USB3) {
-               if ((raw_port_status & PORT_PLC))
+               /* Port link change with port in resume state should not be
+                * reported to usbcore, as this is an internal state to be
+                * handled by xhci driver. Reporting PLC to usbcore may
+                * cause usbcore clearing PLC first and port change event
+                * irq won't be generated.
+                */
+               if ((raw_port_status & PORT_PLC) &&
+                       (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        spin_lock_irqsave(&xhci->lock, flags);
 
        if (hcd->self.root_hub->do_remote_wakeup) {
-               if (bus_state->resuming_ports) {
+               if (bus_state->resuming_ports ||        /* USB2 */
+                   bus_state->port_remote_wakeup) {    /* USB3 */
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "suspend failed because "
-                                               "a port is resuming\n");
+                       xhci_dbg(xhci, "suspend failed because a port is resuming\n");
                        return -EBUSY;
                }
        }
index f833640..9a8c936 100644 (file)
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
                        return -ENOMEM;
+               virt_dev->num_rings_cached--;
                virt_dev->eps[ep_index].new_ring =
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-               virt_dev->num_rings_cached--;
                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
                                        1, type);
        }
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int size;
        int i, j, num_ports;
 
-       del_timer_sync(&xhci->cmd_timer);
+       if (timer_pending(&xhci->cmd_timer))
+               del_timer_sync(&xhci->cmd_timer);
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
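
The xhci-mem hunk above reorders the ring-cache pop so the counter is decremented before it is used as an index; with the old order the code indexed the first unused slot instead of the last cached ring. A minimal array-backed cache pop showing the pre-decrement pattern, with simplified types:

    #include <stdio.h>

    #define CACHE_SIZE 4

    static int *cache[CACHE_SIZE];
    static unsigned num_cached;

    static int *cache_pop(void)
    {
        if (num_cached == 0)
            return NULL;
        num_cached--;                       /* decrement first ...            */
        int *ring = cache[num_cached];      /* ... then index the last entry  */
        cache[num_cached] = NULL;
        return ring;
    }

    int main(void)
    {
        static int a, b;
        cache[0] = &a;
        cache[1] = &b;
        num_cached = 2;

        int *ring = cache_pop();
        printf("popped %p, %u rings still cached\n", (void *)ring, num_cached);
        return 0;
    }
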
index 7d34cbf..b3a0a22 100644 (file)
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
-       if (segment_offset > TRBS_PER_SEGMENT)
+       if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
 }
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
                usb_hcd_resume_root_hub(hcd);
        }
 
+       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+               bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
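
The xhci_trb_virt_to_dma() change in this file is a textbook off-by-one fix: for an array of TRBS_PER_SEGMENT entries the valid offsets are 0 .. TRBS_PER_SEGMENT-1, so the reject condition must be >=, not >. In miniature (the segment size here is illustrative only):

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 4   /* illustrative value for the sketch */

    int main(void)
    {
        for (long off = 0; off <= TRBS_PER_SEGMENT; off++) {
            int old_ok = !(off >  TRBS_PER_SEGMENT); /* accepts off == 4 */
            int new_ok = !(off >= TRBS_PER_SEGMENT); /* rejects it       */
            printf("offset %ld: old=%d fixed=%d\n", off, old_ok, new_ok);
        }
        return 0;
    }
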
index 36bf089..c502c22 100644 (file)
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
                        return -EINVAL;
        }
 
+       if (virt_dev->tt_info)
+               old_active_eps = virt_dev->tt_info->active_eps;
+
        if (virt_dev->udev != udev) {
                /* If the virt_dev and the udev does not match, this virt_dev
                 * may belong to another udev.
index 6977f84..0f26dd2 100644 (file)
@@ -285,6 +285,7 @@ struct xhci_op_regs {
 #define XDEV_U0                (0x0 << 5)
 #define XDEV_U2                (0x2 << 5)
 #define XDEV_U3                (0x3 << 5)
+#define XDEV_INACTIVE  (0x6 << 5)
 #define XDEV_RESUME    (0xf << 5)
 /* true: port has power (see HCC_PPC) */
 #define PORT_POWER     (1 << 9)
index 86c4b53..4731bac 100644 (file)
@@ -273,9 +273,7 @@ static int musb_has_gadget(struct musb *musb)
 #ifdef CONFIG_USB_MUSB_HOST
        return 1;
 #else
-       if (musb->port_mode == MUSB_PORT_MODE_HOST)
-               return 1;
-       return musb->g.dev.driver != NULL;
+       return musb->port_mode == MUSB_PORT_MODE_HOST;
 #endif
 }
 
index 8f7cb06..3fcc048 100644 (file)
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
 {
        unsigned int vbus_value;
 
+       if (!mxs_phy->regmap_anatop)
+               return false;
+
        if (mxs_phy->port_id == 0)
                regmap_read(mxs_phy->regmap_anatop,
                        ANADIG_USB1_VBUS_DET_STAT,
index ffd739e..eac7cca 100644 (file)
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
        { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
        { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+       { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
index f0c0c53..876423b 100644 (file)
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
          .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1765,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { } /* Terminating entry */
index 9c63897..d156545 100644 (file)
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9041)},   /* Sierra Wireless MC7305/MC7355 */
        {DEVICE_SWI(0x1199, 0x9051)},   /* Netgear AirCard 340U */
        {DEVICE_SWI(0x1199, 0x9053)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9054)},   /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 46179a0..07d1ecd 100644 (file)
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
index 529066b..46f1f13 100644 (file)
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
        tty_unregister_driver(usb_serial_tty_driver);
        put_tty_driver(usb_serial_tty_driver);
        bus_unregister(&usb_serial_bus_type);
+       idr_destroy(&serial_minors);
 }
 
 
index caf1888..87898ca 100644 (file)
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+               "ZTE,Incorporated",
+               "ZTE WCDMA Technologies MSM",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by Sven Geggus <sven-usbst@geggus.net>
  * This encrypted pen drive returns bogus data for the initial READ(10).
  */
index 2ee2826..fa49d32 100644 (file)
@@ -886,6 +886,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                }
                if (eventfp != d->log_file) {
                        filep = d->log_file;
+                       d->log_file = eventfp;
                        ctx = d->log_ctx;
                        d->log_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
index 1f11a20..55eb86c 100644 (file)
@@ -59,16 +59,32 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
 static int w1_strong_pullup = 1;
 module_param_named(strong_pullup, w1_strong_pullup, int, 0);
 
+struct w1_therm_family_data {
+       uint8_t rom[9];
+       atomic_t refcnt;
+};
+
+/* return the address of the refcnt in the family data */
+#define THERM_REFCNT(family_data) \
+       (&((struct w1_therm_family_data*)family_data)->refcnt)
+
 static int w1_therm_add_slave(struct w1_slave *sl)
 {
-       sl->family_data = kzalloc(9, GFP_KERNEL);
+       sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
+               GFP_KERNEL);
        if (!sl->family_data)
                return -ENOMEM;
+       atomic_set(THERM_REFCNT(sl->family_data), 1);
        return 0;
 }
 
 static void w1_therm_remove_slave(struct w1_slave *sl)
 {
+       int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+       while(refcnt) {
+               msleep(1000);
+               refcnt = atomic_read(THERM_REFCNT(sl->family_data));
+       }
        kfree(sl->family_data);
        sl->family_data = NULL;
 }
@@ -194,13 +210,22 @@ static ssize_t w1_slave_show(struct device *device,
        struct w1_slave *sl = dev_to_w1_slave(device);
        struct w1_master *dev = sl->master;
        u8 rom[9], crc, verdict, external_power;
-       int i, max_trying = 10;
+       int i, ret, max_trying = 10;
        ssize_t c = PAGE_SIZE;
+       u8 *family_data = sl->family_data;
+
+       ret = mutex_lock_interruptible(&dev->bus_mutex);
+       if (ret != 0)
+               goto post_unlock;
 
-       i = mutex_lock_interruptible(&dev->bus_mutex);
-       if (i != 0)
-               return i;
+       if(!sl->family_data)
+       {
+               ret = -ENODEV;
+               goto pre_unlock;
+       }
 
+       /* prevent the slave from going away in sleep */
+       atomic_inc(THERM_REFCNT(family_data));
        memset(rom, 0, sizeof(rom));
 
        while (max_trying--) {
@@ -230,17 +255,19 @@ static ssize_t w1_slave_show(struct device *device,
                                mutex_unlock(&dev->bus_mutex);
 
                                sleep_rem = msleep_interruptible(tm);
-                               if (sleep_rem != 0)
-                                       return -EINTR;
+                               if (sleep_rem != 0) {
+                                       ret = -EINTR;
+                                       goto post_unlock;
+                               }
 
-                               i = mutex_lock_interruptible(&dev->bus_mutex);
-                               if (i != 0)
-                                       return i;
+                               ret = mutex_lock_interruptible(&dev->bus_mutex);
+                               if (ret != 0)
+                                       goto post_unlock;
                        } else if (!w1_strong_pullup) {
                                sleep_rem = msleep_interruptible(tm);
                                if (sleep_rem != 0) {
-                                       mutex_unlock(&dev->bus_mutex);
-                                       return -EINTR;
+                                       ret = -EINTR;
+                                       goto pre_unlock;
                                }
                        }
 
@@ -269,19 +296,24 @@ static ssize_t w1_slave_show(struct device *device,
        c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
                           crc, (verdict) ? "YES" : "NO");
        if (verdict)
-               memcpy(sl->family_data, rom, sizeof(rom));
+               memcpy(family_data, rom, sizeof(rom));
        else
                dev_warn(device, "Read failed CRC check\n");
 
        for (i = 0; i < 9; ++i)
                c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
-                             ((u8 *)sl->family_data)[i]);
+                             ((u8 *)family_data)[i]);
 
        c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
                w1_convert_temp(rom, sl->family->fid));
+       ret = PAGE_SIZE - c;
+
+pre_unlock:
        mutex_unlock(&dev->bus_mutex);
 
-       return PAGE_SIZE - c;
+post_unlock:
+       atomic_dec(THERM_REFCNT(family_data));
+       return ret;
 }
 
 static int __init w1_therm_init(void)
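
The w1_therm change wraps the 9-byte scratchpad in a family-data structure with a reference count so that remove_slave() can wait for an in-flight sysfs read to finish before freeing it. A rough userspace analogue of the take-reference / drop-reference / wait-for-drain pattern using C11 atomics; the sleep-poll loop mirrors the driver's msleep() wait and all names are invented:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct family_data {
        unsigned char rom[9];
        atomic_int refcnt;
    };

    static struct family_data *add_slave(void)
    {
        struct family_data *fd = calloc(1, sizeof(*fd));
        atomic_store(&fd->refcnt, 1);       /* reference held by the slave */
        return fd;
    }

    static void reader(struct family_data *fd)
    {
        atomic_fetch_add(&fd->refcnt, 1);   /* pin while we use fd->rom */
        /* ... read and convert the temperature ... */
        atomic_fetch_sub(&fd->refcnt, 1);
    }

    static void remove_slave(struct family_data *fd)
    {
        int refcnt = atomic_fetch_sub(&fd->refcnt, 1) - 1; /* drop our ref */
        while (refcnt) {                    /* wait for readers to finish */
            usleep(1000);
            refcnt = atomic_load(&fd->refcnt);
        }
        free(fd);
    }

    int main(void)
    {
        struct family_data *fd = add_slave();
        reader(fd);             /* a sysfs read while the slave exists */
        remove_slave(fd);       /* frees only once no reader holds a ref */
        puts("slave removed");
        return 0;
    }
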
index 1e6be9e..c9c97da 100644 (file)
@@ -132,6 +132,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
 
        pm_runtime_get_sync(wdev->dev);
 
+       /*
+        * Make sure the watchdog is disabled. This is unfortunately required
+        * because writing to various registers with the watchdog running has no
+        * effect.
+        */
+       omap_wdt_disable(wdev);
+
        /* initialize prescaler */
        while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
                cpu_relax();
index 8927485..4bd23bb 100644 (file)
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
        pr_debug("priv %p\n", priv);
 
+       mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
+       mutex_unlock(&priv->lock);
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
index 96b2011..658be6c 100644 (file)
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 
        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               addrs);
-       if (!rv)
+       if (!rv) {
                vunmap(vaddr);
+               free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+       }
        else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
                     node->nr_handles);
index 703342e..53f1e8a 100644 (file)
@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
        unlock_new_inode(inode);
        return inode;
 error:
-       unlock_new_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        return ERR_PTR(retval);
 
 }
index 9861c7c..4d3ecfb 100644 (file)
@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
        unlock_new_inode(inode);
        return inode;
 error:
-       unlock_new_inode(inode);
-       iput(inode);
+       iget_failed(inode);
        return ERR_PTR(retval);
 
 }
index f6a596d..d4a582a 100644 (file)
@@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
 {
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+       spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 count;
@@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
                return;
 
        while (1) {
+               bool add_to_ctl = true;
+
+               spin_lock(rbroot_lock);
                n = rb_first(rbroot);
-               if (!n)
+               if (!n) {
+                       spin_unlock(rbroot_lock);
                        break;
+               }
 
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                BUG_ON(info->bitmap); /* Logic error */
 
                if (info->offset > root->ino_cache_progress)
-                       goto free;
+                       add_to_ctl = false;
                else if (info->offset + info->bytes > root->ino_cache_progress)
                        count = root->ino_cache_progress - info->offset + 1;
                else
                        count = info->bytes;
 
-               __btrfs_add_free_space(ctl, info->offset, count);
-free:
                rb_erase(&info->offset_index, rbroot);
-               kfree(info);
+               spin_unlock(rbroot_lock);
+               if (add_to_ctl)
+                       __btrfs_add_free_space(ctl, info->offset, count);
+               kmem_cache_free(btrfs_free_space_cachep, info);
        }
 }
 
index 1c22c65..37d456a 100644 (file)
@@ -2413,8 +2413,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                goto out_unlock_inode;
        }
 
-       d_invalidate(dentry);
-
        down_write(&root->fs_info->subvol_sem);
 
        err = may_destroy_subvol(dest);
@@ -2508,7 +2506,7 @@ out_up_write:
 out_unlock_inode:
        mutex_unlock(&inode->i_mutex);
        if (!err) {
-               shrink_dcache_sb(root->fs_info->sb);
+               d_invalidate(dentry);
                btrfs_invalidate_inodes(dest);
                d_delete(dentry);
                ASSERT(dest->send_in_progress == 0);
@@ -2940,7 +2938,7 @@ out_unlock:
 static long btrfs_ioctl_file_extent_same(struct file *file,
                        struct btrfs_ioctl_same_args __user *argp)
 {
-       struct btrfs_ioctl_same_args *same;
+       struct btrfs_ioctl_same_args *same = NULL;
        struct btrfs_ioctl_same_extent_info *info;
        struct inode *src = file_inode(file);
        u64 off;
@@ -2970,6 +2968,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
 
        if (IS_ERR(same)) {
                ret = PTR_ERR(same);
+               same = NULL;
                goto out;
        }
 
@@ -3040,6 +3039,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
 
 out:
        mnt_drop_write_file(file);
+       kfree(same);
        return ret;
 }
 
@@ -3434,6 +3434,20 @@ process_slot:
                                u64 trim = 0;
                                u64 aligned_end = 0;
 
+                               /*
+                                * Don't copy an inline extent into an offset
+                                * greater than zero. Having an inline extent
+                                * at such an offset results in chaos as btrfs
+                                * isn't prepared for such cases. Just skip
+                                * this case for the same reasons as commented
+                                * at btrfs_ioctl_clone().
+                                */
+                               if (last_dest_end > 0) {
+                                       ret = -EOPNOTSUPP;
+                                       btrfs_end_transaction(trans, root);
+                                       goto out;
+                               }
+
                                if (off > key.offset) {
                                        skip = off - key.offset;
                                        new_key.offset += skip;
index 5628e25..94e909c 100644 (file)
@@ -758,7 +758,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        if (!list_empty(&trans->ordered)) {
                spin_lock(&info->trans_lock);
-               list_splice(&trans->ordered, &cur_trans->pending_ordered);
+               list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
                spin_unlock(&info->trans_lock);
        }
 
@@ -1848,7 +1848,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        }
 
        spin_lock(&root->fs_info->trans_lock);
-       list_splice(&trans->ordered, &cur_trans->pending_ordered);
+       list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
                spin_unlock(&root->fs_info->trans_lock);
                atomic_inc(&cur_trans->use_count);
index d049683..4920fce 100644 (file)
@@ -4161,6 +4161,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        u64 ino = btrfs_ino(inode);
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 logged_isize = 0;
+       bool need_log_inode_item = true;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4269,11 +4270,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                } else {
                        if (inode_only == LOG_INODE_ALL)
                                fast_search = true;
-                       ret = log_inode_item(trans, log, dst_path, inode);
-                       if (ret) {
-                               err = ret;
-                               goto out_unlock;
-                       }
                        goto log_extents;
                }
 
@@ -4296,6 +4292,9 @@ again:
                if (min_key.type > max_key.type)
                        break;
 
+               if (min_key.type == BTRFS_INODE_ITEM_KEY)
+                       need_log_inode_item = false;
+
                src = path->nodes[0];
                if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
                        ins_nr++;
@@ -4366,6 +4365,11 @@ next_slot:
 log_extents:
        btrfs_release_path(path);
        btrfs_release_path(dst_path);
+       if (need_log_inode_item) {
+               err = log_inode_item(trans, log, dst_path, inode);
+               if (err)
+                       goto out_unlock;
+       }
        if (fast_search) {
                /*
                 * Some ordered extents started by fsync might have completed
index e1d20e1..c1dad92 100644 (file)
@@ -643,7 +643,7 @@ static inline bool fast_dput(struct dentry *dentry)
 
        /*
         * If we have a d_op->d_delete() operation, we should not
-        * let the dentry count go to zero, so use "put__or_lock".
+        * let the dentry count go to zero, so use "put_or_lock".
         */
        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
                return lockref_put_or_lock(&dentry->d_lockref);
@@ -698,7 +698,7 @@ static inline bool fast_dput(struct dentry *dentry)
         */
        smp_rmb();
        d_flags = ACCESS_ONCE(dentry->d_flags);
-       d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+       d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 
        /* Nothing to do? Dropping the reference was all we needed? */
        if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -777,6 +777,9 @@ repeat:
        if (unlikely(d_unhashed(dentry)))
                goto kill_it;
 
+       if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+               goto kill_it;
+
        if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
                if (dentry->d_op->d_delete(dentry))
                        goto kill_it;
index e003a1e..87ba10d 100644 (file)
@@ -503,7 +503,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
        struct buffer_head              *bh;
        int                             err;
 
-       bh = sb_getblk(inode->i_sb, pblk);
+       bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);
 
@@ -1088,7 +1088,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                err = -EIO;
                goto cleanup;
        }
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh)) {
                err = -ENOMEM;
                goto cleanup;
@@ -1282,7 +1282,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        if (newblock == 0)
                return err;
 
-       bh = sb_getblk(inode->i_sb, newblock);
+       bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
        if (unlikely(!bh))
                return -ENOMEM;
        lock_buffer(bh);
index 9588240..94ae687 100644 (file)
@@ -565,7 +565,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
                EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
                                 "non-extent mapped inodes with bigalloc");
-               return -ENOSPC;
+               return -EUCLEAN;
        }
 
        /* Set up for the direct block allocation */
index 0554b0b..966c614 100644 (file)
@@ -1342,7 +1342,7 @@ static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned int offset,
                                             unsigned int length)
 {
-       int to_release = 0;
+       int to_release = 0, contiguous_blks = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
@@ -1363,14 +1363,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
+                       contiguous_blks++;
                        clear_buffer_delay(bh);
+               } else if (contiguous_blks) {
+                       lblk = page->index <<
+                              (PAGE_CACHE_SHIFT - inode->i_blkbits);
+                       lblk += (curr_off >> inode->i_blkbits) -
+                               contiguous_blks;
+                       ext4_es_remove_extent(inode, lblk, contiguous_blks);
+                       contiguous_blks = 0;
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
 
-       if (to_release) {
+       if (contiguous_blks) {
                lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-               ext4_es_remove_extent(inode, lblk, to_release);
+               lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+               ext4_es_remove_extent(inode, lblk, contiguous_blks);
        }
 
        /* If we have released all the blocks belonging to a cluster, then we
@@ -1701,19 +1710,32 @@ static int __ext4_journalled_writepage(struct page *page,
                ext4_walk_page_buffers(handle, page_bufs, 0, len,
                                       NULL, bget_one);
        }
-       /* As soon as we unlock the page, it can go away, but we have
-        * references to buffers so we are safe */
+       /*
+        * We need to release the page lock before we start the
+        * journal, so grab a reference so the page won't disappear
+        * out from under us.
+        */
+       get_page(page);
        unlock_page(page);
 
        handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
                                    ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
-               goto out;
+               put_page(page);
+               goto out_no_pagelock;
        }
-
        BUG_ON(!ext4_handle_valid(handle));
 
+       lock_page(page);
+       put_page(page);
+       if (page->mapping != mapping) {
+               /* The page got truncated from under us */
+               ext4_journal_stop(handle);
+               ret = 0;
+               goto out;
+       }
+
        if (inline_data) {
                BUFFER_TRACE(inode_bh, "get write access");
                ret = ext4_journal_get_write_access(handle, inode_bh);
@@ -1739,6 +1761,8 @@ static int __ext4_journalled_writepage(struct page *page,
                                       NULL, bput_one);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 out:
+       unlock_page(page);
+out_no_pagelock:
        brelse(inode_bh);
        return ret;
 }
@@ -4345,7 +4369,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
        int inode_size = EXT4_INODE_SIZE(sb);
 
        oi.orig_ino = orig_ino;
-       ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+       /*
+        * Calculate the first inode in the inode table block.  Inode
+        * numbers are one-based.  That is, the first inode in a block
+        * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+        */
+       ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
        for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
                if (ino == orig_ino)
                        continue;
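
The ext4_update_other_inodes_time() fix accounts for inode numbers being one-based: the first inode of the containing table block is ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1. A quick check with made-up numbers, using the 16-inodes-per-block case from the comment, shows where the old formula went wrong:

    #include <stdio.h>

    int main(void)
    {
        unsigned long inodes_per_block = 16;
        unsigned long orig_ino = 16;   /* last inode of the first block */

        /* old, off-by-one formula */
        unsigned long old   = (orig_ino & ~(inodes_per_block - 1)) + 1;
        /* fixed formula from the hunk above */
        unsigned long fixed = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;

        /* old lands in the *next* block (17); fixed gives inode 1 */
        printf("old=%lu fixed=%lu\n", old, fixed);
        return 0;
    }
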
index 8d1e602..4126048 100644 (file)
@@ -4800,18 +4800,12 @@ do_more:
                /*
                 * blocks being freed are metadata. these blocks shouldn't
                 * be used until this transaction is committed
+                *
+                * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+                * to fail.
                 */
-       retry:
-               new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-               if (!new_entry) {
-                       /*
-                        * We use a retry loop because
-                        * ext4_free_blocks() is not allowed to fail.
-                        */
-                       cond_resched();
-                       congestion_wait(BLK_RW_ASYNC, HZ/50);
-                       goto retry;
-               }
+               new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+                               GFP_NOFS|__GFP_NOFAIL);
                new_entry->efd_start_cluster = bit;
                new_entry->efd_group = block_group;
                new_entry->efd_count = count_clusters;
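
The mballoc hunk replaces an open-coded allocate/wait/retry loop with a single GFP_NOFS|__GFP_NOFAIL allocation, pushing the "this must not fail" retry policy down into the allocator. As a rough userspace analogue (malloc rarely fails this way, so the loop is purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Keep retrying until the allocation succeeds, like __GFP_NOFAIL. */
    static void *alloc_nofail(size_t size)
    {
        void *p;

        while (!(p = malloc(size)))
            usleep(20000);      /* back off, as congestion_wait() did */
        return p;
    }

    int main(void)
    {
        void *entry = alloc_nofail(64);
        printf("got %p\n", entry);
        free(entry);
        return 0;
    }
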
index b52374e..6163ad2 100644 (file)
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
        struct ext4_inode_info          *ei = EXT4_I(inode);
        struct ext4_extent              *ex;
        unsigned int                    i, len;
+       ext4_lblk_t                     start, end;
        ext4_fsblk_t                    blk;
        handle_t                        *handle;
        int                             ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
                                       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
                return -EOPNOTSUPP;
 
+       /*
+        * In order to get correct extent info, force all delayed allocation
+        * blocks to be allocated, otherwise delayed allocation blocks may not
+        * be reflected and bypass the checks on extent header.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               ext4_alloc_da_blocks(inode);
+
        handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
                goto errout;
        }
        if (eh->eh_entries == 0)
-               blk = len = 0;
+               blk = len = start = end = 0;
        else {
                len = le16_to_cpu(ex->ee_len);
                blk = ext4_ext_pblock(ex);
-               if (len > EXT4_NDIR_BLOCKS) {
+               start = le32_to_cpu(ex->ee_block);
+               end = start + len - 1;
+               if (end >= EXT4_NDIR_BLOCKS) {
                        ret = -EOPNOTSUPP;
                        goto errout;
                }
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 
        ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
        memset(ei->i_data, 0, sizeof(ei->i_data));
-       for (i=0; i < len; i++)
+       for (i = start; i <= end; i++)
                ei->i_data[i] = cpu_to_le32(blk++);
        ext4_mark_inode_dirty(handle, inode);
 errout:
index ca9d4a2..ca12aff 100644 (file)
@@ -807,6 +807,7 @@ static void ext4_put_super(struct super_block *sb)
                dump_orphan_list(sb, sbi);
        J_ASSERT(list_empty(&sbi->s_orphan));
 
+       sync_blockdev(sb->s_bdev);
        invalidate_bdev(sb->s_bdev);
        if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
                /*
@@ -4943,6 +4944,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
        }
 
+       if (*flags & MS_LAZYTIME)
+               sb->s_flags |= MS_LAZYTIME;
+
        if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
                if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
                        err = -EROFS;
index 18dacf9..708d697 100644 (file)
@@ -1026,6 +1026,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                goto err_fput;
 
        fuse_conn_init(fc);
+       fc->release = fuse_free_conn;
 
        fc->dev = sb->s_dev;
        fc->sb = sb;
@@ -1040,7 +1041,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
                fc->dont_mask = 1;
        sb->s_flags |= MS_POSIXACL;
 
-       fc->release = fuse_free_conn;
        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
index 7cd00d3..8685c65 100644 (file)
@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
 }
 
 /* Filesystem error... */
-static char err_buf[1024];
-
 void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
+       struct va_format vaf;
        va_list args;
 
        va_start(args, fmt);
-       vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       pr_err("filesystem error: %pV", &vaf);
+
        va_end(args);
 
-       pr_err("filesystem error: %s", err_buf);
        if (!hpfs_sb(s)->sb_was_error) {
                if (hpfs_sb(s)->sb_err == 2) {
                        pr_cont("; crashing the system because you wanted it\n");
@@ -424,11 +427,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        int o;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        char *new_opts = kstrdup(data, GFP_KERNEL);
-       
+
+       if (!new_opts)
+               return -ENOMEM;
+
        sync_filesystem(s);
 
        *flags |= MS_NOATIME;
-       
+
        hpfs_lock(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
        umask = 0777 & ~sbi->sb_mode;
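
The hpfs_error() rework above drops the shared 1 KiB static buffer, which two concurrent errors could scribble on, and hands the va_list straight to the printk core via %pV. Userspace has no %pV, but the same no-intermediate-buffer idea looks roughly like this (illustrative only):

    #include <stdarg.h>
    #include <stdio.h>

    /* Forward the varargs directly instead of formatting into a shared
     * static buffer that concurrent callers could corrupt. */
    static void fs_error(const char *fmt, ...)
    {
        va_list args;

        fputs("filesystem error: ", stderr);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
    }

    int main(void)
    {
        fs_error("bad sector %d on %s\n", 42, "hda1");
        return 0;
    }
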
index 6f691c2..9c00e2e 100644 (file)
@@ -392,7 +392,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
        unsigned long   blocknr;
 
        if (is_journal_aborted(journal))
-               return 1;
+               return -EIO;
 
        if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
                return 1;
@@ -407,10 +407,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
         * jbd2_cleanup_journal_tail() doesn't get called all that often.
         */
        if (journal->j_flags & JBD2_BARRIER)
-               blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+               blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
 
-       __jbd2_update_log_tail(journal, first_tid, blocknr);
-       return 0;
+       return __jbd2_update_log_tail(journal, first_tid, blocknr);
 }
 
 
index b96bd80..112fad9 100644 (file)
@@ -885,9 +885,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
  *
  * Requires j_checkpoint_mutex
  */
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 {
        unsigned long freed;
+       int ret;
 
        BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
 
@@ -897,7 +898,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
         * space and if we lose sb update during power failure we'd replay
         * old transaction with possibly newly overwritten data.
         */
-       jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+       ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+       if (ret)
+               goto out;
+
        write_lock(&journal->j_state_lock);
        freed = block - journal->j_tail;
        if (block < journal->j_tail)
@@ -913,6 +917,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
        journal->j_tail_sequence = tid;
        journal->j_tail = block;
        write_unlock(&journal->j_state_lock);
+
+out:
+       return ret;
 }
 
 /*
@@ -1331,7 +1338,7 @@ static int journal_reset(journal_t *journal)
        return jbd2_journal_start_thread(journal);
 }
 
-static void jbd2_write_superblock(journal_t *journal, int write_op)
+static int jbd2_write_superblock(journal_t *journal, int write_op)
 {
        struct buffer_head *bh = journal->j_sb_buffer;
        journal_superblock_t *sb = journal->j_superblock;
@@ -1370,7 +1377,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
                printk(KERN_ERR "JBD2: Error %d detected when updating "
                       "journal superblock for %s.\n", ret,
                       journal->j_devname);
+               jbd2_journal_abort(journal, ret);
        }
+
+       return ret;
 }
 
 /**
@@ -1383,10 +1393,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
  * Update a journal's superblock information about log tail and write it to
  * disk, waiting for the IO to complete.
  */
-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
                                     unsigned long tail_block, int write_op)
 {
        journal_superblock_t *sb = journal->j_superblock;
+       int ret;
 
        BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
        jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
@@ -1395,13 +1406,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
        sb->s_sequence = cpu_to_be32(tail_tid);
        sb->s_start    = cpu_to_be32(tail_block);
 
-       jbd2_write_superblock(journal, write_op);
+       ret = jbd2_write_superblock(journal, write_op);
+       if (ret)
+               goto out;
 
        /* Log is no longer empty */
        write_lock(&journal->j_state_lock);
        WARN_ON(!sb->s_sequence);
        journal->j_flags &= ~JBD2_FLUSHED;
        write_unlock(&journal->j_state_lock);
+
+out:
+       return ret;
 }
 
 /**
@@ -1950,7 +1966,14 @@ int jbd2_journal_flush(journal_t *journal)
                return -EIO;
 
        mutex_lock(&journal->j_checkpoint_mutex);
-       jbd2_cleanup_journal_tail(journal);
+       if (!err) {
+               err = jbd2_cleanup_journal_tail(journal);
+               if (err < 0) {
+                       mutex_unlock(&journal->j_checkpoint_mutex);
+                       goto out;
+               }
+               err = 0;
+       }
 
        /* Finally, mark the journal as really needing no recovery.
         * This sets s_start==0 in the underlying superblock, which is
@@ -1966,7 +1989,8 @@ int jbd2_journal_flush(journal_t *journal)
        J_ASSERT(journal->j_head == journal->j_tail);
        J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
        write_unlock(&journal->j_state_lock);
-       return 0;
+out:
+       return err;
 }
 
 /**
index c246b29..2893702 100644 (file)
@@ -1354,6 +1354,36 @@ enum umount_tree_flags {
        UMOUNT_PROPAGATE = 2,
        UMOUNT_CONNECTED = 4,
 };
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+       /* Leaving mounts connected is only valid for lazy umounts */
+       if (how & UMOUNT_SYNC)
+               return true;
+
+       /* A mount without a parent has nothing to be connected to */
+       if (!mnt_has_parent(mnt))
+               return true;
+
+       /* Because the reference counting rules change when mounts are
+        * unmounted and connected, umounted mounts may not be
+        * connected to mounted mounts.
+        */
+       if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+               return true;
+
+       /* Has it been requested that the mount remain connected? */
+       if (how & UMOUNT_CONNECTED)
+               return false;
+
+       /* Is the mount locked such that it needs to remain connected? */
+       if (IS_MNT_LOCKED(mnt))
+               return false;
+
+       /* By default disconnect the mount */
+       return true;
+}
+
 /*
  * mount_lock must be held
  * namespace_sem must be held for write
@@ -1391,10 +1421,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
                if (how & UMOUNT_SYNC)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 
-               disconnect = !(((how & UMOUNT_CONNECTED) &&
-                               mnt_has_parent(p) &&
-                               (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-                              IS_MNT_LOCKED_AND_LAZY(p));
+               disconnect = disconnect_mount(p, how);
 
                pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
                                 disconnect ? &unmounted : NULL);
@@ -1531,11 +1558,8 @@ void __detach_mounts(struct dentry *dentry)
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-                       struct mount *p, *tmp;
-                       list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
-                               hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-                               umount_mnt(p);
-                       }
+                       hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+                       umount_mnt(mnt);
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
index 7d05089..6f5f0f4 100644 (file)
@@ -631,7 +631,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
                        nfs_direct_set_resched_writes(hdr->dreq);
                        /* fake unstable write to let common nfs resend pages */
                        hdr->verf.committed = NFS_UNSTABLE;
-                       hdr->good_bytes = 0;
+                       hdr->good_bytes = hdr->args.count;
                }
                return;
        }
index 77a2d02..f13e196 100644 (file)
@@ -324,7 +324,8 @@ static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
                                __func__, PTR_ERR(cred));
                        return PTR_ERR(cred);
                } else {
-                       mirror->cred = cred;
+                       if (cmpxchg(&mirror->cred, NULL, cred))
+                               put_rpccred(cred);
                }
        }
        return 0;
@@ -386,7 +387,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
        smp_rmb();
        if (ds->ds_clp)
-               goto out;
+               goto out_update_creds;
 
        flavor = nfs4_ff_layout_choose_authflavor(mirror);
 
@@ -430,7 +431,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        }
                }
        }
-
+out_update_creds:
        if (ff_layout_update_mirror_cred(mirror, ds))
                ds = NULL;
 out:
index f734562..5d25b9d 100644 (file)
@@ -1242,9 +1242,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
        if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
                cur_size = i_size_read(inode);
                new_isize = nfs_size_to_loff_t(fattr->size);
-               if (cur_size != new_isize && nfsi->nrequests == 0)
+               if (cur_size != new_isize)
                        invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
        }
+       if (nfsi->nrequests != 0)
+               invalid &= ~NFS_INO_REVAL_PAGECACHE;
 
        /* Have any file permissions changed? */
        if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1682,8 +1684,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        invalid |= NFS_INO_INVALID_ATTR
                                | NFS_INO_INVALID_DATA
                                | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL
-                               | NFS_INO_REVAL_PAGECACHE;
+                               | NFS_INO_INVALID_ACL;
                        if (S_ISDIR(inode->i_mode))
                                nfs_force_lookup_revalidate(inode);
                        inode->i_version = fattr->change_attr;
@@ -1715,7 +1716,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-                               invalid &= ~NFS_INO_REVAL_PAGECACHE;
                        }
                        dprintk("NFS: isize change on server for file %s/%ld "
                                        "(%Ld to %Ld)\n",
index 53852a4..9b04c2e 100644 (file)
@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
        if (args->npages != 0)
                xdr_write_pages(xdr, args->pages, 0, args->len);
        else
-               xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
+               xdr_reserve_space(xdr, args->len);
 
        error = nfsacl_encode(xdr->buf, base, args->inode,
                            (args->mask & NFS_ACL) ?
index 55e1e3a..d3f2051 100644 (file)
@@ -1204,12 +1204,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
 
 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 {
+       if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
+               return;
        if (state->n_wronly)
                set_bit(NFS_O_WRONLY_STATE, &state->flags);
        if (state->n_rdonly)
                set_bit(NFS_O_RDONLY_STATE, &state->flags);
        if (state->n_rdwr)
                set_bit(NFS_O_RDWR_STATE, &state->flags);
+       set_bit(NFS_OPEN_STATE, &state->flags);
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
index 2782cfc..ddef1dc 100644 (file)
@@ -1482,6 +1482,8 @@ restart:
                                        spin_unlock(&state->state_lock);
                                }
                                nfs4_put_open_state(state);
+                               clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
+                                       &state->flags);
                                spin_lock(&sp->so_lock);
                                goto restart;
                        }
index 282b393..7b45526 100644 (file)
@@ -1110,8 +1110,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
-                       if (desc->pg_error < 0)
+                       if (desc->pg_error < 0) {
+                               list_splice_tail(&head, &mirror->pg_list);
+                               mirror->pg_recoalesce = 1;
                                return 0;
+                       }
                        break;
                }
        } while (mirror->pg_recoalesce);
index 2306062..d47c188 100644 (file)
@@ -1821,6 +1821,7 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
                              hdr->completion_ops);
+       set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
        return nfs_pageio_resend(&pgio, hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
@@ -1865,6 +1866,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
                mirror->pg_recoalesce = 1;
        }
        nfs_pgio_data_destroy(hdr);
+       hdr->release(hdr);
 }
 
 static enum pnfs_try_status
@@ -1979,6 +1981,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                mirror->pg_recoalesce = 1;
        }
        nfs_pgio_data_destroy(hdr);
+       hdr->release(hdr);
 }
 
 /*
index dfc19f1..daf3556 100644 (file)
@@ -1289,6 +1289,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 static void nfs_redirty_request(struct nfs_page *req)
 {
        nfs_mark_request_dirty(req);
+       set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
        nfs_unlock_request(req);
        nfs_end_page_writeback(req);
        nfs_release_request(req);
index 039f9c8..6e13504 100644 (file)
@@ -4397,9 +4397,9 @@ laundromat_main(struct work_struct *laundry)
        queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 {
-       if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+       if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
                return nfserr_bad_stateid;
        return nfs_ok;
 }
@@ -4574,20 +4574,48 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        return nfs_ok;
 }
 
+static struct file *
+nfs4_find_file(struct nfs4_stid *s, int flags)
+{
+       switch (s->sc_type) {
+       case NFS4_DELEG_STID:
+               if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
+                       return NULL;
+               return get_file(s->sc_file->fi_deleg_file);
+       case NFS4_OPEN_STID:
+       case NFS4_LOCK_STID:
+               if (flags & RD_STATE)
+                       return find_readable_file(s->sc_file);
+               else
+                       return find_writeable_file(s->sc_file);
+               break;
+       }
+
+       return NULL;
+}
+
+static __be32
+nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
+{
+       __be32 status;
+
+       status = nfsd4_check_openowner_confirmed(ols);
+       if (status)
+               return status;
+       return nfs4_check_openmode(ols, flags);
+}
+
 /*
-* Checks for stateid operations
-*/
+ * Checks for stateid operations
+ */
 __be32
 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                           stateid_t *stateid, int flags, struct file **filpp)
 {
-       struct nfs4_stid *s;
-       struct nfs4_ol_stateid *stp = NULL;
-       struct nfs4_delegation *dp = NULL;
-       struct svc_fh *current_fh = &cstate->current_fh;
-       struct inode *ino = d_inode(current_fh->fh_dentry);
+       struct svc_fh *fhp = &cstate->current_fh;
+       struct inode *ino = d_inode(fhp->fh_dentry);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-       struct file *file = NULL;
+       struct nfs4_stid *s;
        __be32 status;
 
        if (filpp)
@@ -4597,60 +4625,39 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                return nfserr_grace;
 
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
-               return check_special_stateids(net, current_fh, stateid, flags);
+               return check_special_stateids(net, fhp, stateid, flags);
 
        status = nfsd4_lookup_stateid(cstate, stateid,
                                NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
                                &s, nn);
        if (status)
                return status;
-       status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
+       status = check_stateid_generation(stateid, &s->sc_stateid,
+                       nfsd4_has_session(cstate));
        if (status)
                goto out;
+
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
-               dp = delegstateid(s);
-               status = nfs4_check_delegmode(dp, flags);
-               if (status)
-                       goto out;
-               if (filpp) {
-                       file = dp->dl_stid.sc_file->fi_deleg_file;
-                       if (!file) {
-                               WARN_ON_ONCE(1);
-                               status = nfserr_serverfault;
-                               goto out;
-                       }
-                       get_file(file);
-               }
+               status = nfs4_check_delegmode(delegstateid(s), flags);
                break;
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
-               stp = openlockstateid(s);
-               status = nfs4_check_fh(current_fh, stp);
-               if (status)
-                       goto out;
-               status = nfsd4_check_openowner_confirmed(stp);
-               if (status)
-                       goto out;
-               status = nfs4_check_openmode(stp, flags);
-               if (status)
-                       goto out;
-               if (filpp) {
-                       struct nfs4_file *fp = stp->st_stid.sc_file;
-
-                       if (flags & RD_STATE)
-                               file = find_readable_file(fp);
-                       else
-                               file = find_writeable_file(fp);
-               }
+               status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
                break;
        default:
                status = nfserr_bad_stateid;
+               break;
+       }
+       if (status)
                goto out;
+       status = nfs4_check_fh(fhp, s);
+
+       if (!status && filpp) {
+               *filpp = nfs4_find_file(s, flags);
+               if (!*filpp)
+                       status = nfserr_serverfault;
        }
-       status = nfs_ok;
-       if (file)
-               *filpp = file;
 out:
        nfs4_put_stid(s);
        return status;
@@ -4754,7 +4761,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
                return status;
-       return nfs4_check_fh(current_fh, stp);
+       return nfs4_check_fh(current_fh, &stp->st_stid);
 }
 
 /* 
index 158badf..d4d8445 100644 (file)
@@ -2142,6 +2142,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
                              FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 static inline __be32
@@ -2170,7 +2171,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 { return 0; }
 #endif
 
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
 {
        /* As per referral draft:  */
        if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2183,6 +2184,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
        }
        *bmval0 &= WORD0_ABSENT_FS_ATTRS;
        *bmval1 &= WORD1_ABSENT_FS_ATTRS;
+       *bmval2 &= WORD2_ABSENT_FS_ATTRS;
        return 0;
 }
 
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 
        if (exp->ex_fslocs.migrated) {
-               BUG_ON(bmval[2]);
-               status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+               status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
                if (status)
                        goto out;
        }
@@ -2290,8 +2291,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        }
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-       if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-                       bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+            bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
                err = security_inode_getsecctx(d_inode(dentry),
                                                &context, &contextlen);
                contextsupport = (err == 0);
index 92e48c7..39ddcaf 100644 (file)
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
 {
        struct fsnotify_mark *lmark, *mark;
+       LIST_HEAD(to_free);
 
+       /*
+        * We have to be really careful here. Any time we drop mark_mutex, e.g.
+        * fsnotify_clear_marks_by_inode() can come in and free marks, even ones
+        * already on our to_free list, so we have to hold mark_mutex even when
+        * accessing that list. And freeing a mark requires us to drop mark_mutex.
+        * So we can reliably free only the first mark in the list. That's why we
+        * first move the marks to be freed onto the to_free list in one go and
+        * then free the marks from to_free one by one.
+        */
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-               if (mark->flags & flags) {
-                       fsnotify_get_mark(mark);
-                       fsnotify_destroy_mark_locked(mark, group);
-                       fsnotify_put_mark(mark);
-               }
+               if (mark->flags & flags)
+                       list_move(&mark->g_list, &to_free);
        }
        mutex_unlock(&group->mark_mutex);
+
+       while (1) {
+               mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+               if (list_empty(&to_free)) {
+                       mutex_unlock(&group->mark_mutex);
+                       break;
+               }
+               mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+               fsnotify_get_mark(mark);
+               fsnotify_destroy_mark_locked(mark, group);
+               mutex_unlock(&group->mark_mutex);
+               fsnotify_put_mark(mark);
+       }
 }
 
 /*
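
The comment in the hunk above describes a two-phase teardown: matching marks are first moved onto a private to_free list while mark_mutex is held, and are then destroyed one at a time, re-taking the mutex for each because destruction itself has to drop it. A reduced userspace sketch of that shape follows; the list type, predicate and names are invented, and in the kernel the to_free list really can be reached by other paths, which is why the second phase keeps taking the lock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mark {
            int flags;
            struct mark *next;
    };

    static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct mark *marks_list;         /* protected by mark_mutex */

    static void clear_marks_by_flags(int flags)
    {
            struct mark *to_free = NULL, *m, **pp;

            /* Phase 1: under the lock, move every matching mark to to_free. */
            pthread_mutex_lock(&mark_mutex);
            for (pp = &marks_list; (m = *pp) != NULL; ) {
                    if (m->flags & flags) {
                            *pp = m->next;
                            m->next = to_free;
                            to_free = m;
                    } else {
                            pp = &m->next;
                    }
            }
            pthread_mutex_unlock(&mark_mutex);

            /* Phase 2: free one mark per lock round trip, mirroring the way
             * fsnotify_destroy_mark_locked() needs the mutex dropped. */
            for (;;) {
                    pthread_mutex_lock(&mark_mutex);
                    if (!to_free) {
                            pthread_mutex_unlock(&mark_mutex);
                            break;
                    }
                    m = to_free;
                    to_free = m->next;
                    pthread_mutex_unlock(&mark_mutex);
                    free(m);
            }
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 4; i++) {
                    struct mark *m = malloc(sizeof(*m));

                    m->flags = i & 1;
                    m->next = marks_list;
                    marks_list = m;
            }
            clear_marks_by_flags(1);
            printf("marks with bit 0 set have been freed\n");
            return 0;
    }
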
index f906a25..9ea7012 100644 (file)
@@ -686,7 +686,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                u64 s = i_size_read(inode);
-               sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+               sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
                        (do_div(s, osb->s_clustersize) >> 9);
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -911,7 +911,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-                               p_cpos << (osb->s_clustersize_bits - 9),
+                               (u64)p_cpos << (osb->s_clustersize_bits - 9),
                                zero_len_head >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);
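
Both ocfs2 hunks above are the same class of fix: p_cpos is a 32-bit cluster number, so without the (u64) cast the left shift is evaluated in 32-bit arithmetic and the upper bits of the resulting sector number are silently lost before the value is widened. A minimal standalone sketch of that failure mode, with types and values that are illustrative rather than taken from ocfs2:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t p_cpos = 0x00400000;   /* a large cluster offset */
            unsigned int shift = 12;        /* stands in for s_clustersize_bits - 9 */

            /* Shift evaluated in 32 bits: the result wraps and the high
             * bits are dropped before the assignment widens it. */
            uint64_t wrong = p_cpos << shift;

            /* Widen first, then shift: the full 64-bit result survives. */
            uint64_t right = (uint64_t)p_cpos << shift;

            printf("wrong = 0x%llx, right = 0x%llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }

With these inputs the uncast version prints 0 while the cast version prints 0x400000000, which is exactly the kind of bogus sector value that would otherwise be handed to blkdev_issue_zeroout().
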
index 8b23aa2..23157e4 100644 (file)
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
        osb->dc_work_sequence = osb->dc_wake_sequence;
 
        processed = osb->blocked_lock_count;
-       while (processed) {
-               BUG_ON(list_empty(&osb->blocked_lock_list));
-
+       /*
+        * blocked lock processing in this loop might call iput which can
+        * remove items off osb->blocked_lock_list. Downconvert up to
+        * 'processed' number of locks, but stop short if we had some
+        * removed in ocfs2_mark_lockres_freeing when downconverting.
+        */
+       while (processed && !list_empty(&osb->blocked_lock_list)) {
                lockres = list_entry(osb->blocked_lock_list.next,
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
index 907870e..70e9af5 100644 (file)
@@ -23,6 +23,7 @@ struct ovl_cache_entry {
        u64 ino;
        struct list_head l_node;
        struct rb_node node;
+       struct ovl_cache_entry *next_maybe_whiteout;
        bool is_whiteout;
        char name[];
 };
@@ -39,7 +40,7 @@ struct ovl_readdir_data {
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
-       struct dentry *dir;
+       struct ovl_cache_entry *first_maybe_whiteout;
        int count;
        int err;
 };
@@ -79,7 +80,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
        return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                                                   const char *name, int len,
                                                   u64 ino, unsigned int d_type)
 {
@@ -98,29 +99,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
        p->is_whiteout = false;
 
        if (d_type == DT_CHR) {
-               struct dentry *dentry;
-               const struct cred *old_cred;
-               struct cred *override_cred;
-
-               override_cred = prepare_creds();
-               if (!override_cred) {
-                       kfree(p);
-                       return NULL;
-               }
-
-               /*
-                * CAP_DAC_OVERRIDE for lookup
-                */
-               cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-               old_cred = override_creds(override_cred);
-
-               dentry = lookup_one_len(name, dir, len);
-               if (!IS_ERR(dentry)) {
-                       p->is_whiteout = ovl_is_whiteout(dentry);
-                       dput(dentry);
-               }
-               revert_creds(old_cred);
-               put_cred(override_cred);
+               p->next_maybe_whiteout = rdd->first_maybe_whiteout;
+               rdd->first_maybe_whiteout = p;
        }
        return p;
 }
@@ -148,7 +128,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                        return 0;
        }
 
-       p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
+       p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;
 
@@ -169,7 +149,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
-               p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
+               p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
@@ -219,6 +199,43 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
 }
 
+static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
+{
+       int err;
+       struct ovl_cache_entry *p;
+       struct dentry *dentry;
+       const struct cred *old_cred;
+       struct cred *override_cred;
+
+       override_cred = prepare_creds();
+       if (!override_cred)
+               return -ENOMEM;
+
+       /*
+        * CAP_DAC_OVERRIDE for lookup
+        */
+       cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+       old_cred = override_creds(override_cred);
+
+       err = mutex_lock_killable(&dir->d_inode->i_mutex);
+       if (!err) {
+               while (rdd->first_maybe_whiteout) {
+                       p = rdd->first_maybe_whiteout;
+                       rdd->first_maybe_whiteout = p->next_maybe_whiteout;
+                       dentry = lookup_one_len(p->name, dir, p->len);
+                       if (!IS_ERR(dentry)) {
+                               p->is_whiteout = ovl_is_whiteout(dentry);
+                               dput(dentry);
+                       }
+               }
+               mutex_unlock(&dir->d_inode->i_mutex);
+       }
+       revert_creds(old_cred);
+       put_cred(override_cred);
+
+       return err;
+}
+
 static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
 {
@@ -229,7 +246,7 @@ static inline int ovl_dir_read(struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       rdd->dir = realpath->dentry;
+       rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
@@ -238,6 +255,10 @@ static inline int ovl_dir_read(struct path *realpath,
                if (err >= 0)
                        err = rdd->err;
        } while (!err && rdd->count);
+
+       if (!err && rdd->first_maybe_whiteout)
+               err = ovl_check_whiteouts(realpath->dentry, rdd);
+
        fput(realfile);
 
        return err;
index 7114ce6..0fcdbe7 100644 (file)
@@ -20,8 +20,6 @@
 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
-       (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
 
 #define CL_EXPIRE              0x01
 #define CL_SLAVE               0x02
index 7e412ad..270221f 100644 (file)
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (kinfo->si_code == BUS_MCEERR_AR ||
-                   kinfo->si_code == BUS_MCEERR_AO)
+               if (kinfo->si_signo == SIGBUS &&
+                   (kinfo->si_code == BUS_MCEERR_AR ||
+                    kinfo->si_code == BUS_MCEERR_AO))
                        err |= __put_user((short) kinfo->si_addr_lsb,
                                          &uinfo->ssi_addr_lsb);
 #endif
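
The added si_signo test matters because si_code values are only meaningful relative to the delivering signal: the numeric values reserved for the SIGBUS machine-check codes are reused by other signals for unrelated meanings, so a non-SIGBUS siginfo could previously satisfy the check and a value that was never meant as an address LSB would be copied to userspace. A tiny sketch of the overlap; the constants are restated from include/uapi/asm-generic/siginfo.h purely for illustration and should be verified against your own headers:

    #include <stdio.h>

    /* Restated here for illustration only. */
    #define ILL_ILLTRP      4       /* SIGILL: illegal trap */
    #define BUS_MCEERR_AR   4       /* SIGBUS: memory error, action required */
    #define BUS_MCEERR_AO   5       /* SIGBUS: memory error, action optional */

    int main(void)
    {
            /* A SIGILL carrying ILL_ILLTRP has the same si_code value as a
             * SIGBUS carrying BUS_MCEERR_AR, so testing si_code alone is
             * ambiguous; the signal number has to be checked as well. */
            printf("ILL_ILLTRP == BUS_MCEERR_AR: %s\n",
                   ILL_ILLTRP == BUS_MCEERR_AR ? "yes" : "no");
            return 0;
    }
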
index 20de88d..dd71403 100644 (file)
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
        struct xfs_buf  *bp)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       int             blksize = mp->m_attr_geo->blksize;
        char            *ptr;
        int             len;
        xfs_daddr_t     bno;
-       int             blksize = mp->m_attr_geo->blksize;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
        ASSERT(len >= blksize);
 
        while (len > 0) {
+               struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+
                if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
                        xfs_buf_ioerror(bp, -EFSCORRUPTED);
                        xfs_verifier_error(bp);
                        return;
                }
-               if (bip) {
-                       struct xfs_attr3_rmt_hdr *rmt;
 
-                       rmt = (struct xfs_attr3_rmt_hdr *)ptr;
-                       rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               /*
+                * Ensure we aren't writing bogus LSNs to disk. See
+                * xfs_attr3_rmt_hdr_set() for the explanation.
+                */
+               if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
+                       xfs_buf_ioerror(bp, -EFSCORRUPTED);
+                       xfs_verifier_error(bp);
+                       return;
                }
                xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
 
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
        rmt->rm_owner = cpu_to_be64(ino);
        rmt->rm_blkno = cpu_to_be64(bno);
 
+       /*
+        * Remote attribute blocks are written synchronously, so we don't
+        * have an LSN that we can stamp in them that makes any sense to log
+        * recovery. To ensure that log recovery handles overwrites of these
+        * blocks sanely (i.e. once they've been freed and reallocated as some
+        * other type of metadata) we need to ensure that the LSN has a value
+        * that tells log recovery to ignore the LSN and overwrite the buffer
+        * with whatever is in it's log. To do this, we use the magic
+        * with whatever is in its log. To do this, we use the magic
+        */
+       rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
+
        return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
 
                /*
                 * Allocate a single extent, up to the size of the value.
+                *
+                * Note that we have to consider this a data allocation as we
+                * write the remote attribute without logging the contents.
+                * Hence we must ensure that we aren't using blocks that are on
+                * recently been freed but whose transactions are not yet
+                * recently been freed but their transactions are not yet
+                * committed to disk. If we overwrite the contents of a busy
+                * extent and then crash then the block may not contain the
+                * correct metadata after log recovery occurs.
                 */
                xfs_bmap_init(args->flist, args->firstblock);
                nmap = 1;
                error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
-                                 blkcnt,
-                                 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-                                 args->firstblock, args->total, &map, &nmap,
-                                 args->flist);
+                                 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
+                                 args->total, &map, &nmap, args->flist);
                if (!error) {
                        error = xfs_bmap_finish(&args->trans, args->flist,
                                                &committed);
index 3fbf167..73e75a8 100644 (file)
@@ -435,8 +435,14 @@ xfs_attr_inactive(
         */
        xfs_trans_ijoin(trans, dp, 0);
 
-       /* invalidate and truncate the attribute fork extents */
-       if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+       /*
+        * Invalidate and truncate the attribute fork extents. Make sure the
+        * fork actually has attributes as otherwise the invalidation has no
+        * blocks to read and returns an error. In this case, just do the fork
+        * removal below.
+        */
+       if (xfs_inode_hasattr(dp) &&
+           dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
                error = xfs_attr3_root_inactive(&trans, dp);
                if (error)
                        goto out_cancel;
index 539a85f..fec4bfb 100644 (file)
@@ -164,7 +164,7 @@ xfs_ilock(
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
@@ -212,7 +212,7 @@ xfs_ilock_nowait(
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_iolock))
@@ -281,7 +281,7 @@ xfs_iunlock(
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+       ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        ASSERT(lock_flags != 0);
 
        if (lock_flags & XFS_IOLOCK_EXCL)
@@ -364,30 +364,38 @@ int xfs_lock_delays;
 
 /*
  * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
- * value. This shouldn't be called for page fault locking, but we also need to
- * ensure we don't overrun the number of lockdep subclasses for the iolock or
- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
+ * value. This can be called for any type of inode lock combination, including
+ * parent locking. Care must be taken to ensure we don't overrun the subclass
+ * storage fields in the class mask we build.
  */
 static inline int
 xfs_lock_inumorder(int lock_mode, int subclass)
 {
+       int     class = 0;
+
+       ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
+                             XFS_ILOCK_RTSUM)));
+
        if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-               ASSERT(subclass + XFS_LOCK_INUMORDER <
-                       (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
-               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+               ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
+               ASSERT(subclass + XFS_IOLOCK_PARENT_VAL <
+                                               MAX_LOCKDEP_SUBCLASSES);
+               class += subclass << XFS_IOLOCK_SHIFT;
+               if (lock_mode & XFS_IOLOCK_PARENT)
+                       class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
        }
 
        if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
-               ASSERT(subclass + XFS_LOCK_INUMORDER <
-                       (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
-               lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
-                                                       XFS_MMAPLOCK_SHIFT;
+               ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
+               class += subclass << XFS_MMAPLOCK_SHIFT;
        }
 
-       if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
+       if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
+               ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
+               class += subclass << XFS_ILOCK_SHIFT;
+       }
 
-       return lock_mode;
+       return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
 }
 
 /*
@@ -399,6 +407,11 @@ xfs_lock_inumorder(int lock_mode, int subclass)
  * transaction (such as truncate). This can result in deadlock since the long
  * running trans might need to wait for the inode we just locked in order to
  * push the tail and free space in the log.
+ *
+ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
+ * lock more than one at a time, lockdep will report false positives saying we
+ * have violated locking orders.
  */
 void
 xfs_lock_inodes(
@@ -409,8 +422,29 @@ xfs_lock_inodes(
        int             attempts = 0, i, j, try_lock;
        xfs_log_item_t  *lp;
 
-       /* currently supports between 2 and 5 inodes */
+       /*
+        * Currently supports between 2 and 5 inodes with exclusive locking.  We
+        * support an arbitrary depth of locking here, but absolute limits on
+        * inodes depend on the type of locking and the limits placed by
+        * lockdep annotations in xfs_lock_inumorder.  These are all checked by
+        * the asserts.
+        */
        ASSERT(ips && inodes >= 2 && inodes <= 5);
+       ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
+                           XFS_ILOCK_EXCL));
+       ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
+                             XFS_ILOCK_SHARED)));
+       ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
+               inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
+       ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
+               inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
+       ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
+               inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
+
+       if (lock_mode & XFS_IOLOCK_EXCL) {
+               ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
+       } else if (lock_mode & XFS_MMAPLOCK_EXCL)
+               ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
 
        try_lock = 0;
        i = 0;
index 8f22d20..ee26a60 100644 (file)
@@ -284,9 +284,9 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * Flags for lockdep annotations.
  *
  * XFS_LOCK_PARENT - for directory operations that require locking a
- * parent directory inode and a child entry inode.  The parent gets locked
- * with this flag so it gets a lockdep subclass of 1 and the child entry
- * lock will have a lockdep subclass of 0.
+ * parent directory inode and a child entry inode. IOLOCK requires nesting,
+ * MMAPLOCK does not support this class, ILOCK requires a single subclass
+ * to differentiate parent from child.
  *
  * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
  * inodes do not participate in the normal lock order, and thus have their
@@ -295,30 +295,63 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * XFS_LOCK_INUMORDER - for locking several inodes at the some time
  * with xfs_lock_inodes().  This flag is used as the starting subclass
  * and each subsequent lock acquired will increment the subclass by one.
- * So the first lock acquired will have a lockdep subclass of 4, the
- * second lock will have a lockdep subclass of 5, and so on. It is
- * the responsibility of the class builder to shift this to the correct
- * portion of the lock_mode lockdep mask.
+ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
+ * limited to the subclasses we can represent via nesting. We need at least
+ * 5 inodes nest depth for the ILOCK through rename, and we also have to support
+ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
+ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
+ * 8 subclasses supported by lockdep.
+ *
+ * This also means we have to number the sub-classes in the lowest bits of
+ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
+ * mask and we can't use bit-masking to build the subclasses. What a mess.
+ *
+ * Bit layout:
+ *
+ * Bit         Lock Region
+ * 16-19       XFS_IOLOCK_SHIFT dependencies
+ * 20-23       XFS_MMAPLOCK_SHIFT dependencies
+ * 24-31       XFS_ILOCK_SHIFT dependencies
+ *
+ * IOLOCK values
+ *
+ * 0-3         subclass value
+ * 4-7         PARENT subclass values
+ *
+ * MMAPLOCK values
+ *
+ * 0-3         subclass value
+ * 4-7         unused
+ *
+ * ILOCK values
+ * 0-4         subclass values
+ * 5           PARENT subclass (not nestable)
+ * 6           RTBITMAP subclass (not nestable)
+ * 7           RTSUM subclass (not nestable)
+ *
  */
-#define XFS_LOCK_PARENT                1
-#define XFS_LOCK_RTBITMAP      2
-#define XFS_LOCK_RTSUM         3
-#define XFS_LOCK_INUMORDER     4
-
-#define XFS_IOLOCK_SHIFT       16
-#define        XFS_IOLOCK_PARENT       (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
-
-#define XFS_MMAPLOCK_SHIFT     20
-
-#define XFS_ILOCK_SHIFT                24
-#define        XFS_ILOCK_PARENT        (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
-#define        XFS_ILOCK_RTBITMAP      (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
-#define        XFS_ILOCK_RTSUM         (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-
-#define XFS_IOLOCK_DEP_MASK    0x000f0000
-#define XFS_MMAPLOCK_DEP_MASK  0x00f00000
-#define XFS_ILOCK_DEP_MASK     0xff000000
-#define XFS_LOCK_DEP_MASK      (XFS_IOLOCK_DEP_MASK | \
+#define XFS_IOLOCK_SHIFT               16
+#define XFS_IOLOCK_PARENT_VAL          4
+#define XFS_IOLOCK_MAX_SUBCLASS                (XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_DEP_MASK            0x000f0000
+#define        XFS_IOLOCK_PARENT               (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
+
+#define XFS_MMAPLOCK_SHIFT             20
+#define XFS_MMAPLOCK_NUMORDER          0
+#define XFS_MMAPLOCK_MAX_SUBCLASS      3
+#define XFS_MMAPLOCK_DEP_MASK          0x00f00000
+
+#define XFS_ILOCK_SHIFT                        24
+#define XFS_ILOCK_PARENT_VAL           5
+#define XFS_ILOCK_MAX_SUBCLASS         (XFS_ILOCK_PARENT_VAL - 1)
+#define XFS_ILOCK_RTBITMAP_VAL         6
+#define XFS_ILOCK_RTSUM_VAL            7
+#define XFS_ILOCK_DEP_MASK             0xff000000
+#define        XFS_ILOCK_PARENT                (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
+#define        XFS_ILOCK_RTBITMAP              (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
+#define        XFS_ILOCK_RTSUM                 (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
+
+#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
                                 XFS_MMAPLOCK_DEP_MASK | \
                                 XFS_ILOCK_DEP_MASK)
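
To make the new layout concrete, the sketch below reproduces just the ILOCK arithmetic of the reworked xfs_lock_inumorder() using the constants defined in the hunk above; the helper name, the lock_mode value of 0 and the sample subclasses are only illustrative:

    #include <stdio.h>

    /* Constants as defined in the hunk above. */
    #define XFS_IOLOCK_DEP_MASK     0x000f0000
    #define XFS_MMAPLOCK_DEP_MASK   0x00f00000
    #define XFS_ILOCK_DEP_MASK      0xff000000
    #define XFS_LOCK_SUBCLASS_MASK  (XFS_IOLOCK_DEP_MASK | \
                                     XFS_MMAPLOCK_DEP_MASK | \
                                     XFS_ILOCK_DEP_MASK)
    #define XFS_ILOCK_SHIFT         24
    #define XFS_ILOCK_PARENT_VAL    5

    /* ILOCK branch of xfs_lock_inumorder(), restated for illustration. */
    static unsigned int ilock_inumorder(unsigned int lock_mode,
                                        unsigned int subclass)
    {
            unsigned int class = subclass << XFS_ILOCK_SHIFT;

            return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
    }

    int main(void)
    {
            /* Each inode locked by xfs_lock_inodes() gets the next subclass,
             * so lockdep sees a distinct class in bits 24-31 per inode. */
            printf("subclass 0 -> 0x%08x\n", ilock_inumorder(0, 0));
            printf("subclass 2 -> 0x%08x\n", ilock_inumorder(0, 2));
            printf("parent     -> 0x%08x\n",
                   ilock_inumorder(0, XFS_ILOCK_PARENT_VAL));
            return 0;
    }

Running it prints 0x00000000, 0x02000000 and 0x05000000, matching the ILOCK value range documented in the bit-layout comment above.
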
 
index 4f5784f..a5d0339 100644 (file)
@@ -1887,9 +1887,14 @@ xlog_recover_get_buf_lsn(
                uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
                break;
        case XFS_ATTR3_RMT_MAGIC:
-               lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
-               uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
-               break;
+               /*
+                * Remote attr blocks are written synchronously, rather than
+                * being logged. That means they do not contain a valid LSN
+                * (i.e. transactionally ordered) in them, and hence any time we
+                * see a buffer to replay over the top of a remote attribute
+                * block we should simply do so.
+                */
+               goto recover_immediately;
        case XFS_SB_MAGIC:
                lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
                uuid = &((struct xfs_dsb *)blk)->sb_uuid;
index 3df411e..40c0765 100644 (file)
@@ -104,7 +104,7 @@ xfs_readlink_bmap(
                        cur_chunk += sizeof(struct xfs_dsymlink_hdr);
                }
 
-               memcpy(link + offset, bp->b_addr, byte_cnt);
+               memcpy(link + offset, cur_chunk, byte_cnt);
 
                pathlen -= byte_cnt;
                offset += byte_cnt;
index 08ef57b..f5ed1f1 100644 (file)
@@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
  * address. Although ACPICA adheres to the ACPI specification which
  * requires the use of the corresponding 64-bit address if it is non-zero,
  * some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is TRUE, favor the 32-bit addresses.
+ * address. Default is FALSE, do not favor the 32-bit addresses.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+
+/*
+ * Optionally use 32-bit FACS table addresses.
+ * It is reported that some platforms fail to resume from system suspending
+ * if 64-bit FACS table address is selected:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
+ * Default is TRUE, favor the 32-bit addresses.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
index 1c3002e..181427e 100644 (file)
@@ -572,6 +572,7 @@ typedef u64 acpi_integer;
 #define ACPI_NO_ACPI_ENABLE             0x10
 #define ACPI_NO_DEVICE_INIT             0x20
 #define ACPI_NO_OBJECT_INIT             0x40
+#define ACPI_NO_FACS_INIT               0x80
 
 /*
  * Initialization state
index c157103..3f13b91 100644 (file)
@@ -77,26 +77,26 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
 
 #define for_each_connector_in_state(state, connector, connector_state, __i) \
        for ((__i) = 0;                                                 \
-            (connector) = (state)->connectors[__i],                    \
-            (connector_state) = (state)->connector_states[__i],        \
-            (__i) < (state)->num_connector;                            \
+            (__i) < (state)->num_connector &&                          \
+            ((connector) = (state)->connectors[__i],                   \
+            (connector_state) = (state)->connector_states[__i], 1);    \
             (__i)++)                                                   \
                if (connector)
 
 #define for_each_crtc_in_state(state, crtc, crtc_state, __i)   \
        for ((__i) = 0;                                         \
-            (crtc) = (state)->crtcs[__i],                      \
-            (crtc_state) = (state)->crtc_states[__i],          \
-            (__i) < (state)->dev->mode_config.num_crtc;        \
+            (__i) < (state)->dev->mode_config.num_crtc &&      \
+            ((crtc) = (state)->crtcs[__i],                     \
+            (crtc_state) = (state)->crtc_states[__i], 1);      \
             (__i)++)                                           \
                if (crtc_state)
 
-#define for_each_plane_in_state(state, plane, plane_state, __i)        \
-       for ((__i) = 0;                                         \
-            (plane) = (state)->planes[__i],                    \
-            (plane_state) = (state)->plane_states[__i],        \
-            (__i) < (state)->dev->mode_config.num_total_plane; \
-            (__i)++)                                           \
+#define for_each_plane_in_state(state, plane, plane_state, __i)                \
+       for ((__i) = 0;                                                 \
+            (__i) < (state)->dev->mode_config.num_total_plane &&       \
+            ((plane) = (state)->planes[__i],                           \
+            (plane_state) = (state)->plane_states[__i], 1);            \
+            (__i)++)                                                   \
                if (plane_state)
 
 #endif /* DRM_ATOMIC_H_ */
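
The reordering in these three macros matters because the old loop condition used the comma operator to load connectors[__i] (or crtcs/planes) before comparing __i against the bound, so the terminating iteration read one element past the end of the array. A reduced sketch of the corrected pattern, with a made-up array and macro name:

    #include <stdio.h>

    #define N_ITEMS 3

    /* Bounds check first; the trailing ", 1" folds the load into a truthy
     * expression so the && only evaluates it for valid indices. */
    #define for_each_item(arr, item, i)                       \
            for ((i) = 0;                                     \
                 (i) < N_ITEMS && ((item) = (arr)[(i)], 1);   \
                 (i)++)

    int main(void)
    {
            int values[N_ITEMS] = { 10, 20, 30 };
            int item, i;

            for_each_item(values, item, i)
                    printf("values[%d] = %d\n", i, item);

            /* With the pre-fix ordering the condition would have read
             * values[N_ITEMS] on the iteration that ends the loop. */
            return 0;
    }
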
index ca71c03..5423358 100644 (file)
@@ -731,6 +731,8 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
+
+       struct list_head destroy_list;
 };
 
 /**
index a250781..86d0b25 100644 (file)
@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
        struct work_struct work;
 
        struct work_struct tx_work;
+
+       struct list_head destroy_connector_list;
+       struct mutex destroy_connector_lock;
+       struct work_struct destroy_connector_work;
 };
 
 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
index 45c39a3..8bc073d 100644 (file)
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 5da2d2e..4550be3 100644 (file)
@@ -332,9 +332,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 
 int acpi_resources_are_enforced(void);
 
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-                       unsigned long flags, char *desc);
-
 #ifdef CONFIG_HIBERNATION
 void __init acpi_no_s4_hw_signature(void);
 #endif
@@ -530,13 +527,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
        return 0;
 }
 
-static inline int acpi_reserve_region(u64 start, unsigned int length,
-                                     u8 space_id, unsigned long flags,
-                                     char *desc)
-{
-       return -ENXIO;
-}
-
 struct acpi_table_header;
 static inline int acpi_table_parse(char *id,
                                int (*handler)(struct acpi_table_header *))
index b666b77..5dfbcd8 100644 (file)
@@ -45,6 +45,7 @@ enum {
        ATA_SECT_SIZE           = 512,
        ATA_MAX_SECTORS_128     = 128,
        ATA_MAX_SECTORS         = 256,
+       ATA_MAX_SECTORS_1024    = 1024,
        ATA_MAX_SECTORS_LBA48   = 65535,/* TODO: 65536? */
        ATA_MAX_SECTORS_TAPE    = 65535,
 
@@ -384,8 +385,6 @@ enum {
        SATA_SSP                = 0x06, /* Software Settings Preservation */
        SATA_DEVSLP             = 0x09, /* Device Sleep */
 
-       SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
        ATA_SET_MAX_PASSWD      = 0x01,
@@ -529,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-                               ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -709,20 +706,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
        return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
index ea72f52..6d25afd 100644 (file)
@@ -361,6 +361,13 @@ sb_getblk(struct super_block *sb, sector_t block)
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 }
 
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+       return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
 static inline struct buffer_head *
 sb_find_get_block(struct super_block *sb, sector_t block)
 {
index b6a52a4..51bb653 100644 (file)
 /**
  * struct can_skb_priv - private additional data inside CAN sk_buffs
  * @ifindex:   ifindex of the first interface the CAN frame appeared on
+ * @skbcnt:    atomic counter to have a unique id together with the skb pointer
  * @cf:                align to the following CAN frame at skb->data
  */
 struct can_skb_priv {
        int ifindex;
+       int skbcnt;
        struct can_frame cf[0];
 };
 
index 0c9a2f2..d4c7113 100644 (file)
 /* Intel ECC compiler doesn't support gcc specific asm stmts.
  * It uses intrinsics to do the equivalent things.
  */
+#undef barrier
 #undef barrier_data
 #undef RELOC_HIDE
 #undef OPTIMIZER_HIDE_VAR
 
+#define barrier() __memory_barrier()
 #define barrier_data(ptr) barrier()
 
 #define RELOC_HIDE(ptr, off)                                   \
index 76abba4..dcacb1a 100644 (file)
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
        __u64   mm_reg_addr;
 };
 
-/* Memory Error Section */
+/* Old Memory Error Section UEFI 2.1, 2.2 */
+struct cper_sec_mem_err_old {
+       __u64   validation_bits;
+       __u64   error_status;
+       __u64   physical_addr;
+       __u64   physical_addr_mask;
+       __u16   node;
+       __u16   card;
+       __u16   module;
+       __u16   bank;
+       __u16   device;
+       __u16   row;
+       __u16   column;
+       __u16   bit_pos;
+       __u64   requestor_id;
+       __u64   responder_id;
+       __u64   target_id;
+       __u8    error_type;
+};
+
+/* Memory Error Section UEFI >= 2.3 */
 struct cper_sec_mem_err {
        __u64   validation_bits;
        __u64   error_status;
index 1da6029..6cd8c0e 100644 (file)
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -132,6 +133,7 @@ enum {
        FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
+       FTRACE_OPS_FL_PID                       = 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
        struct ftrace_ops               *next;
        unsigned long                   flags;
        void                            *private;
+       ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
index 3a7c9ff..da04265 100644 (file)
@@ -406,6 +406,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
        return -EINVAL;
 }
 
+/* Child properties interface */
+struct fwnode_handle;
+
+static inline struct gpio_desc *fwnode_get_named_gpiod(
+       struct fwnode_handle *fwnode, const char *propname)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *devm_get_gpiod_from_child(
+       struct device *dev, const char *con_id, struct fwnode_handle *child)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB */
 
 /*
index 0042bf3..c02b5ce 100644 (file)
@@ -230,6 +230,7 @@ struct hid_sensor_common {
        struct platform_device *pdev;
        unsigned usage_id;
        atomic_t data_ready;
+       atomic_t user_requested_state;
        struct iio_trigger *trigger;
        struct hid_sensor_hub_attribute_info poll;
        struct hid_sensor_hub_attribute_info report_state;
index d89b5e0..06bae5a 100644 (file)
@@ -66,7 +66,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
-       preempt_disable();
+       preempt_disable_nort();
        pagefault_disable();
        return page_address(page);
 }
@@ -75,7 +75,7 @@ static inline void *kmap_atomic(struct page *page)
 static inline void __kunmap_atomic(void *addr)
 {
        pagefault_enable();
-       preempt_enable();
+       preempt_enable_nort();
 }
 
 #define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
index 67f2af8..fe25455 100644 (file)
@@ -104,6 +104,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags:     flags (see IRQF_* above)
  * @thread_fn: interrupt handler function for threaded interrupts
  * @thread:    thread pointer for threaded interrupts
+ * @secondary: pointer to secondary irqaction (force threading)
  * @thread_flags:      flags related to @thread
  * @thread_mask:       bitmask for keeping track of @thread activity
  * @dir:       pointer to the proc/irq/NN/name entry
@@ -115,6 +116,7 @@ struct irqaction {
        struct irqaction        *next;
        irq_handler_t           thread_fn;
        struct task_struct      *thread;
+       struct irqaction        *secondary;
        unsigned int            irq;
        unsigned int            flags;
        unsigned long           thread_flags;
index 7ec31b5..38ecc0c 100644 (file)
@@ -469,6 +469,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
                                        const struct cpumask *dest,
                                        bool force);
 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
index 20e7f78..edb640a 100644 (file)
@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
 int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
 int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
                              unsigned long *block);
-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
 
 /* Commit management */
@@ -1157,7 +1157,7 @@ extern int           jbd2_journal_recover    (journal_t *journal);
 extern int        jbd2_journal_wipe       (journal_t *, int);
 extern int        jbd2_journal_skip_recovery   (journal_t *);
 extern void       jbd2_journal_update_sb_errno(journal_t *);
-extern void       jbd2_journal_update_sb_log_tail      (journal_t *, tid_t,
+extern int        jbd2_journal_update_sb_log_tail      (journal_t *, tid_t,
                                unsigned long, int);
 extern void       __jbd2_journal_abort_hard    (journal_t *);
 extern void       jbd2_journal_abort      (journal_t *, int);
index 28aeae4..e0e3378 100644 (file)
@@ -431,6 +431,9 @@ enum {
        ATA_HORKAGE_NOLPM       = (1 << 20),    /* don't use LPM */
        ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),  /* some WDs have broken LPM */
        ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
+       ATA_HORKAGE_NO_NCQ_LOG  = (1 << 23),    /* don't use NCQ for log read */
+       ATA_HORKAGE_NOTRIM      = (1 << 24),    /* don't use TRIM */
+       ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),   /* Limit max sects to 1024 */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
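
The new ATA_MAX_SECTORS_1024 value and ATA_HORKAGE_MAX_SEC_1024 bit work as a pair: a quirk entry sets the bit and device setup clamps the transfer size accordingly. A small standalone sketch of that pattern, with the constants copied from the hunks above (the clamp function is illustrative, not the libata implementation):

#include <stdio.h>

#define ATA_MAX_SECTORS          256
#define ATA_MAX_SECTORS_1024     1024
#define ATA_MAX_SECTORS_LBA48    65535

#define ATA_HORKAGE_MAX_SEC_1024 (1u << 25)     /* Limit max sects to 1024 */

static unsigned int max_sectors_for(unsigned int horkage, int lba48)
{
        unsigned int max = lba48 ? ATA_MAX_SECTORS_LBA48 : ATA_MAX_SECTORS;

        if (horkage & ATA_HORKAGE_MAX_SEC_1024)
                max = ATA_MAX_SECTORS_1024;
        return max;
}

int main(void)
{
        printf("plain lba48 device: %u sectors\n", max_sectors_for(0, 1));
        printf("quirked device:     %u sectors\n",
               max_sectors_for(ATA_HORKAGE_MAX_SEC_1024, 1));
        return 0;
}
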
index 3d4ea7e..12b75f3 100644 (file)
@@ -175,11 +175,6 @@ typedef enum {
 #define NAND_OWN_BUFFERS       0x00020000
 /* Chip may not exist, so silence any errors in scan */
 #define NAND_SCAN_SILENT_NODEV 0x00040000
-/*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
 /*
  * Autodetect nand buswidth with readid/onfi.
  * This suppose the driver will configure the hardware in 8 bits mode
@@ -187,6 +182,11 @@ typedef enum {
  * before calling nand_scan_tail.
  */
 #define NAND_BUSWIDTH_AUTO      0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
 
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
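
The renumbering matters because the removed NAND_USE_BOUNCE_BUFFER definition shared bit 19 (0x00080000) with NAND_BUSWIDTH_AUTO, so setting either flag looked like setting both. A tiny standalone check of that collision; the constants are copied from the hunk and the _OLD/_NEW suffixes are mine:

#include <stdio.h>

#define NAND_BUSWIDTH_AUTO          0x00080000
#define NAND_USE_BOUNCE_BUFFER_OLD  0x00080000  /* colliding value before the fix */
#define NAND_USE_BOUNCE_BUFFER_NEW  0x00100000  /* value after the renumbering */

int main(void)
{
        /* Driver only asked for automatic buswidth detection... */
        unsigned int options = NAND_BUSWIDTH_AUTO;

        printf("old layout: bounce buffer falsely detected: %s\n",
               (options & NAND_USE_BOUNCE_BUFFER_OLD) ? "yes" : "no");
        printf("new layout: bounce buffer falsely detected: %s\n",
               (options & NAND_USE_BOUNCE_BUFFER_NEW) ? "yes" : "no");
        return 0;
}
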
index 93ab607..e9e9a8d 100644 (file)
@@ -1142,7 +1142,7 @@ struct nfs41_state_protection {
        struct nfs4_op_map allow;
 };
 
-#define NFS4_EXCHANGE_ID_LEN   (48)
+#define NFS4_EXCHANGE_ID_LEN   (127)
 struct nfs41_exchange_id_args {
        struct nfs_client               *client;
        nfs4_verifier                   *verifier;
index b871ff9..8135d50 100644 (file)
@@ -673,7 +673,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
 #if defined(CONFIG_OF) && defined(CONFIG_NUMA)
 extern int of_node_to_nid(struct device_node *np);
 #else
-static inline int of_node_to_nid(struct device_node *device) { return 0; }
+static inline int of_node_to_nid(struct device_node *device)
+{
+       return NUMA_NO_NODE;
+}
 #endif
 
 static inline struct device_node *of_find_matching_node(
index 5a4bb5b..1e1421b 100644 (file)
@@ -59,7 +59,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
                                   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
index 54e7af3..73abbc5 100644 (file)
@@ -606,6 +606,7 @@ struct iscsi_conn {
        int                     bitmap_id;
        int                     rx_thread_active;
        struct task_struct      *rx_thread;
+       struct completion       rx_login_comp;
        int                     tx_thread_active;
        struct task_struct      *tx_thread;
        /* list_head for session connection list */
index 551b673..a7e41fb 100644 (file)
@@ -1065,6 +1065,14 @@ struct drm_i915_reg_read {
        __u64 offset;
        __u64 val; /* Return value */
 };
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read, in order to workaround that use
+ *   offset (0x2538 | 1) instead.
+ *
+ */
 
 struct drm_i915_reset_stats {
        __u32 ctx_id;
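
A hedged userspace sketch of the ioctl this comment documents: reading the gen7+ render engine timestamp through struct drm_i915_reg_read. The offset used is 0x2358 from the first line of the comment, with the low bit set to request the workaround read path (the "0x2538" later in the comment looks like a transposed digit, but treat that as an assumption). Requires an Intel GPU and the kernel/libdrm UAPI headers on the include path; error handling is minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
        struct drm_i915_reg_read reg;
        int fd = open("/dev/dri/card0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/dri/card0");
                return 1;
        }

        memset(&reg, 0, sizeof(reg));
        reg.offset = 0x2358 | 1;        /* render ring timestamp, workaround read */

        if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
                printf("render timestamp: 0x%llx\n", (unsigned long long)reg.val);
        else
                perror("DRM_IOCTL_I915_REG_READ");

        close(fd);
        return 0;
}
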
index efe3443..413417f 100644 (file)
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X Table entry format */
index a24ba9f..161a180 100644 (file)
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
-               info->qsize += sizeof(*leaf);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
-                       info->qsize -= sizeof(*leaf);
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
-                               info->qsize -= sizeof(*leaf);
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }
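
The dropped qsize adjustments stop the cached rbtree node from being counted as queued bytes, which userspace can observe as the QSIZE field of the queue's entry in the mqueue filesystem. A small sketch to look at it, assuming that filesystem is mounted at /dev/mqueue (the queue name is arbitrary; build with -lrt):

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        const char *name = "/qsize_demo";
        char line[256];
        FILE *f;
        mqd_t q = mq_open(name, O_CREAT | O_RDWR, 0600, NULL);

        if (q == (mqd_t)-1) {
                perror("mq_open");
                return 1;
        }
        if (mq_send(q, "hello", 5, 0) != 0)
                perror("mq_send");

        /* First line of this file contains "QSIZE:..."; with the fix it
         * should reflect only the 5 queued bytes. */
        f = fopen("/dev/mqueue/qsize_demo", "r");
        if (f && fgets(line, sizeof(line), f))
                printf("%s", line);
        if (f)
                fclose(f);

        mq_close(q);
        mq_unlink(name);
        return 0;
}
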
index 8f99bac..d6261be 100644 (file)
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
-                        * The ipc object lock check must be visible on all
-                        * cores before rechecking the complex count.  Otherwise
-                        * we can race with  another thread that does:
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       smp_rmb();
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /*
                         * Now repeat the test of complex_count:
@@ -2084,17 +2094,28 @@ void exit_sem(struct task_struct *tsk)
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
-               if (&un->list_proc == &ulp->list_proc)
-                       semid = -1;
-                else
-                       semid = un->semid;
+               if (&un->list_proc == &ulp->list_proc) {
+                       /*
+                        * We must wait for freeary() before freeing this ulp,
+                        * in case we raced with last sem_undo. There is a small
+                        * possibility where we exit while freeary() didn't
+                        * finish unlocking sem_undo_list.
+                        */
+                       spin_unlock_wait(&ulp->lock);
+                       rcu_read_unlock();
+                       break;
+               }
+               spin_lock(&ulp->lock);
+               semid = un->semid;
+               spin_unlock(&ulp->lock);
 
+               /* exit_sem raced with IPC_RMID, nothing to do */
                if (semid == -1) {
                        rcu_read_unlock();
-                       break;
+                       continue;
                }
 
-               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
                /* exit_sem raced with IPC_RMID, nothing to do */
                if (IS_ERR(sma)) {
                        rcu_read_unlock();
index ee14e3a..f0acff0 100644 (file)
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->mems_allowed as a temp variable */
-       update_nodemasks_hier(cs, &cs->mems_allowed);
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
index e05d1dd..5146610 100644 (file)
@@ -1886,8 +1886,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1899,6 +1897,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -3976,28 +3976,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -4016,11 +4009,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4766,12 +4801,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6117,7 +6160,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
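
The rework above routes period updates through a cross-call so __perf_event_period() runs with the context locked on the owning CPU. A hedged userspace sketch of the interface it serves: a sampling event created with perf_event_open(2) whose period is changed on the fly via PERF_EVENT_IOC_PERIOD (the event type and period values are arbitrary choices):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
                           int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t new_period = 250000;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        /* With the patch, this runs __perf_event_period() on the CPU that
         * owns the context instead of poking an active event locally. */
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period) != 0)
                perror("PERF_EVENT_IOC_PERIOD");

        close(fd);
        return 0;
}
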
index 725c416..a7604c8 100644 (file)
@@ -547,11 +547,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
index eb9a4ea..94bbd8f 100644 (file)
@@ -933,6 +933,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
        return -ENOSYS;
 }
 
+/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ * @type:      IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+       data = data->parent_data;
+
+       if (data->chip->irq_set_type)
+               return data->chip->irq_set_type(data, type);
+
+       return -ENOSYS;
+}
+
 /**
  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
  * @data:      Pointer to interrupt specific data
@@ -946,7 +963,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
                if (data->chip && data->chip->irq_retrigger)
                        return data->chip->irq_retrigger(data);
 
-       return -ENOSYS;
+       return 0;
 }
 
 /**
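
A hedged kernel-side sketch of how a hierarchical irqchip would use the new helper next to the existing *_parent callbacks; the chip name is hypothetical and the snippet belongs in a driver that includes <linux/irq.h>:

static struct irq_chip example_wakeup_chip = {
        .name              = "EXAMPLE-WAKEUP",
        .irq_mask          = irq_chip_mask_parent,
        .irq_unmask        = irq_chip_unmask_parent,
        .irq_eoi           = irq_chip_eoi_parent,
        .irq_retrigger     = irq_chip_retrigger_hierarchy,
        /* Forward trigger-type configuration to the parent domain. */
        .irq_set_type      = irq_chip_set_type_parent,
        .irq_set_affinity  = irq_chip_set_affinity_parent,
};
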
index 1b5c50a..79c55c2 100644 (file)
@@ -772,6 +772,12 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
+{
+       WARN(1, "Secondary action handler called for irq %d\n", irq);
+       return IRQ_NONE;
+}
+
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
        set_current_state(TASK_INTERRUPTIBLE);
@@ -798,7 +804,8 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action)
 {
-       if (!(desc->istate & IRQS_ONESHOT))
+       if (!(desc->istate & IRQS_ONESHOT) ||
+           action->handler == irq_forced_secondary_handler)
                return;
 again:
        chip_bus_lock(desc);
@@ -960,6 +967,18 @@ static void irq_thread_dtor(struct callback_head *unused)
        irq_finalize_oneshot(desc, action);
 }
 
+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
+{
+       struct irqaction *secondary = action->secondary;
+
+       if (WARN_ON_ONCE(!secondary))
+               return;
+
+       raw_spin_lock_irq(&desc->lock);
+       __irq_wake_thread(desc, secondary);
+       raw_spin_unlock_irq(&desc->lock);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -990,6 +1009,8 @@ static int irq_thread(void *data)
                action_ret = handler_fn(desc, action);
                if (action_ret == IRQ_HANDLED)
                        atomic_inc(&desc->threads_handled);
+               if (action_ret == IRQ_WAKE_THREAD)
+                       irq_wake_secondary(desc, action);
 
 #ifdef CONFIG_PREEMPT_RT_FULL
                migrate_disable();
@@ -1040,20 +1061,36 @@ void irq_wake_thread(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL_GPL(irq_wake_thread);
 
-static void irq_setup_forced_threading(struct irqaction *new)
+static int irq_setup_forced_threading(struct irqaction *new)
 {
        if (!force_irqthreads)
-               return;
+               return 0;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-               return;
+               return 0;
 
        new->flags |= IRQF_ONESHOT;
 
-       if (!new->thread_fn) {
-               set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-               new->thread_fn = new->handler;
-               new->handler = irq_default_primary_handler;
+       /*
+        * Handle the case where we have a real primary handler and a
+        * thread handler. We force thread them as well by creating a
+        * secondary action.
+        */
+       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+               /* Allocate the secondary action */
+               new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+               if (!new->secondary)
+                       return -ENOMEM;
+               new->secondary->handler = irq_forced_secondary_handler;
+               new->secondary->thread_fn = new->thread_fn;
+               new->secondary->dev_id = new->dev_id;
+               new->secondary->irq = new->irq;
+               new->secondary->name = new->name;
        }
+       /* Deal with the primary handler */
+       set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+       new->thread_fn = new->handler;
+       new->handler = irq_default_primary_handler;
+       return 0;
 }
 
 static int irq_request_resources(struct irq_desc *desc)
@@ -1073,6 +1110,48 @@ static void irq_release_resources(struct irq_desc *desc)
                c->irq_release_resources(d);
 }
 
+static int
+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+{
+       struct task_struct *t;
+       struct sched_param param = {
+               .sched_priority = MAX_USER_RT_PRIO/2,
+       };
+
+       if (!secondary) {
+               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+                                  new->name);
+       } else {
+               t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
+                                  new->name);
+               param.sched_priority += 1;
+       }
+
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+
+       /*
+        * We keep the reference to the task struct even if
+        * the thread dies to avoid that the interrupt code
+        * references an already freed task_struct.
+        */
+       get_task_struct(t);
+       new->thread = t;
+       /*
+        * Tell the thread to set its affinity. This is
+        * important for shared interrupt handlers as we do
+        * not invoke setup_affinity() for the secondary
+        * handlers as everything is already set up. Even for
+        * interrupts marked with IRQF_NO_BALANCE this is
+        * correct as we want the thread to move to the cpu(s)
+        * on which the requesting code placed the interrupt.
+        */
+       set_bit(IRQTF_AFFINITY, &new->thread_flags);
+       return 0;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -1093,6 +1172,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        if (!try_module_get(desc->owner))
                return -ENODEV;
 
+       new->irq = irq;
+
        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
@@ -1110,8 +1191,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 */
                new->handler = irq_nested_primary_handler;
        } else {
-               if (irq_settings_can_thread(desc))
-                       irq_setup_forced_threading(new);
+               if (irq_settings_can_thread(desc)) {
+                       ret = irq_setup_forced_threading(new);
+                       if (ret)
+                               goto out_mput;
+               }
        }
 
        /*
@@ -1120,37 +1204,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         * thread.
         */
        if (new->thread_fn && !nested) {
-               struct task_struct *t;
-               static const struct sched_param param = {
-                       .sched_priority = MAX_USER_RT_PRIO/2,
-               };
-
-               t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-                                  new->name);
-               if (IS_ERR(t)) {
-                       ret = PTR_ERR(t);
+               ret = setup_irq_thread(new, irq, false);
+               if (ret)
                        goto out_mput;
+               if (new->secondary) {
+                       ret = setup_irq_thread(new->secondary, irq, true);
+                       if (ret)
+                               goto out_thread;
                }
-
-               sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-
-               /*
-                * We keep the reference to the task struct even if
-                * the thread dies to avoid that the interrupt code
-                * references an already freed task_struct.
-                */
-               get_task_struct(t);
-               new->thread = t;
-               /*
-                * Tell the thread to set its affinity. This is
-                * important for shared interrupt handlers as we do
-                * not invoke setup_affinity() for the secondary
-                * handlers as everything is already set up. Even for
-                * interrupts marked with IRQF_NO_BALANCE this is
-                * correct as we want the thread to move to the cpu(s)
-                * on which the requesting code placed the interrupt.
-                */
-               set_bit(IRQTF_AFFINITY, &new->thread_flags);
        }
 
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1326,7 +1387,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                   irq, nmsk, omsk);
        }
 
-       new->irq = irq;
        *old_ptr = new;
 
        irq_pm_install_action(desc, new);
@@ -1352,6 +1412,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
         */
        if (new->thread)
                wake_up_process(new->thread);
+       if (new->secondary)
+               wake_up_process(new->secondary->thread);
 
        register_irq_proc(irq, desc);
        new->dir = NULL;
@@ -1382,6 +1444,13 @@ out_thread:
                kthread_stop(t);
                put_task_struct(t);
        }
+       if (new->secondary && new->secondary->thread) {
+               struct task_struct *t = new->secondary->thread;
+
+               new->secondary->thread = NULL;
+               kthread_stop(t);
+               put_task_struct(t);
+       }
 out_mput:
        module_put(desc->owner);
        return ret;
@@ -1489,9 +1558,14 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
+               if (action->secondary && action->secondary->thread) {
+                       kthread_stop(action->secondary->thread);
+                       put_task_struct(action->secondary->thread);
+               }
        }
 
        module_put(desc->owner);
+       kfree(action->secondary);
        return action;
 }
 
@@ -1635,8 +1709,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);
 
-       if (retval)
+       if (retval) {
+               kfree(action->secondary);
                kfree(action);
+       }
 
 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
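
The secondary irqaction exists for drivers that register both a real primary handler and a threaded handler; under forced threading (the threadirqs boot option, default on PREEMPT_RT) the primary is now pushed into its own irq/N-name thread, and its IRQ_WAKE_THREAD return wakes the irq/N-s-name thread running the original thread_fn. A hedged sketch of such a driver, for code that includes <linux/interrupt.h>; the foo_* helpers and device structure are hypothetical:

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        if (!foo_irq_pending(foo))      /* hypothetical register check */
                return IRQ_NONE;

        foo_ack_irq(foo);               /* hypothetical ack */
        return IRQ_WAKE_THREAD;         /* defer the heavy work */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        foo_process_events(dev_id);     /* may sleep */
        return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *foo, int irq)
{
        return request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
                                    IRQF_ONESHOT, "foo", foo);
}
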
index 9065107..7a5237a 100644 (file)
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
                    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
                        /*
-                        * If the interrupt has a parent irq and runs
-                        * in the thread context of the parent irq,
-                        * retrigger the parent.
+                        * If the interrupt is running in the thread
+                        * context of the parent irq we need to be
+                        * careful, because we cannot trigger it
+                        * directly.
                         */
-                       if (desc->parent_irq &&
-                           irq_settings_is_nested_thread(desc))
+                       if (irq_settings_is_nested_thread(desc)) {
+                               /*
+                                * If the parent_irq is valid, we
+                                * retrigger the parent, otherwise we
+                                * do nothing.
+                                */
+                               if (!desc->parent_irq)
+                                       return;
                                irq = desc->parent_irq;
+                       }
                        /* Set it pending and activate the softirq: */
                        set_bit(irq, irqs_resend);
                        tasklet_schedule(&resend_tasklet);
index 7e01f78..9e30231 100644 (file)
@@ -187,7 +187,7 @@ config DPM_WATCHDOG
 config DPM_WATCHDOG_TIMEOUT
        int "Watchdog timeout in seconds"
        range 1 120
-       default 12
+       default 60
        depends on DPM_WATCHDOG
 
 config PM_TRACE
index 6062a04..c2d5877 100644 (file)
@@ -484,11 +484,11 @@ int check_syslog_permissions(int type, bool from_file)
         * already done the capabilities checks at open time.
         */
        if (from_file && type != SYSLOG_ACTION_OPEN)
-               return 0;
+               goto ok;
 
        if (syslog_action_restricted(type)) {
                if (capable(CAP_SYSLOG))
-                       return 0;
+                       goto ok;
                /*
                 * For historical reasons, accept CAP_SYS_ADMIN too, with
                 * a warning.
@@ -498,10 +498,11 @@ int check_syslog_permissions(int type, bool from_file)
                                     "CAP_SYS_ADMIN but no CAP_SYSLOG "
                                     "(deprecated).\n",
                                 current->comm, task_pid_nr(current));
-                       return 0;
+                       goto ok;
                }
                return -EPERM;
        }
+ok:
        return security_syslog(type);
 }
 
@@ -1288,10 +1289,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
        if (error)
                goto out;
 
-       error = security_syslog(type);
-       if (error)
-               return error;
-
        switch (type) {
        case SYSLOG_ACTION_CLOSE:       /* Close log */
                break;
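
A hedged userspace sketch of the interface whose checks were reshuffled above: reading the kernel log with klogctl(3)/syslog(2). With this change security_syslog() is consulted for every caller, including /proc/kmsg readers and capable ones, while the capability test still gates unprivileged access when dmesg_restrict is set. Action numbers follow the syslog(2) man page:

#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
        int len = klogctl(10 /* SYSLOG_ACTION_SIZE_BUFFER */, NULL, 0);
        char *buf;
        int n;

        if (len <= 0) {
                perror("klogctl(SIZE_BUFFER)");
                return 1;
        }

        buf = malloc(len);
        if (!buf)
                return 1;

        n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, len);
        if (n < 0)
                perror("klogctl(READ_ALL)");    /* EPERM when restricted */
        else
                printf("read %d bytes of kernel log\n", n);

        free(buf);
        return 0;
}
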
index 7dc2ebe..1336e4c 100644 (file)
@@ -2854,12 +2854,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-               err |= __put_user(from->si_lower, &to->si_lower);
-               err |= __put_user(from->si_upper, &to->si_upper);
+               if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+                       err |= __put_user(from->si_lower, &to->si_lower);
+                       err |= __put_user(from->si_upper, &to->si_upper);
+               }
 #endif
                break;
        case __SI_CHLD:
@@ -3123,7 +3126,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
        int ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;
@@ -3167,7 +3170,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
 
        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
index 02bece4..eb11011 100644 (file)
@@ -98,6 +98,13 @@ struct ftrace_pid {
        struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+       return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-       /* do not set ftrace_pid_function to itself! */
-       if (func != ftrace_pid_func)
-               ftrace_pid_function = func;
+       op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       /* Always save the function, and reset at unregistering */
+       ops->saved_func = ops->func;
+
+       if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+               ops->func = ftrace_pid_func;
+
        ftrace_update_trampoline(ops);
 
        if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
+       ops->func = ops->saved_func;
+
        return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+       bool enabled = ftrace_pids_enabled();
+       struct ftrace_ops *op;
+
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       op->func = enabled ? ftrace_pid_func :
+                               op->saved_func;
+                       ftrace_update_trampoline(op);
+               }
+       } while_for_each_ftrace_op(op);
+
        update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                         FTRACE_OPS_FL_INITIALIZED,
+                                         FTRACE_OPS_FL_INITIALIZED |
+                                         FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                 FTRACE_OPS_FL_INITIALIZED |
+                                 FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
                if (WARN_ON(tr->ops->func != ftrace_stub))
                        printk("ftrace ops had %pS for function\n",
                               tr->ops->func);
-               /* Only the top level instance does pid tracing */
-               if (!list_empty(&ftrace_pids)) {
-                       set_ftrace_pid_function(func);
-                       func = ftrace_pid_func;
-               }
        }
        tr->ops->func = func;
        tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&ftrace_lock);
 
-       if (list_empty(&ftrace_pids) && (!*pos))
+       if (!ftrace_pids_enabled() && (!*pos))
                return (void *) 1;
 
        return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                   FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
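
FTRACE_OPS_FL_PID hooks these ops into set_ftrace_pid filtering. A hedged userspace sketch of that interface via tracefs, assuming the mount point /sys/kernel/debug/tracing (newer kernels also expose /sys/kernel/tracing); run as root:

#include <stdio.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s", val);
        return fclose(f);
}

int main(void)
{
        char pid[32];

        snprintf(pid, sizeof(pid), "%d", getpid());

        /* Trace only this process, then enable the function tracer. */
        if (write_str("/sys/kernel/debug/tracing/set_ftrace_pid", pid) ||
            write_str("/sys/kernel/debug/tracing/current_tracer", "function")) {
                perror("tracefs");
                return 1;
        }

        usleep(100 * 1000);     /* do some work worth tracing */

        /* Results can be read from .../trace; an empty write to
         * set_ftrace_pid clears the filter again. */
        write_str("/sys/kernel/debug/tracing/current_tracer", "nop");
        return 0;
}
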
index a6784ca..c0f3c56 100644 (file)
@@ -446,6 +446,7 @@ enum {
 
        TRACE_CONTROL_BIT,
 
+       TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
@@ -1314,7 +1315,7 @@ void trace_event_init(void);
 void trace_event_enum_update(struct trace_enum_map **map, int len);
 #else
 static inline void __init trace_event_init(void) { }
-static inlin void trace_event_enum_update(struct trace_enum_map **map, int len) { }
+static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
 #endif
 
 extern struct trace_iterator *tracepoint_print_iter;
index 57cbf1e..1879980 100644 (file)
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
-       int cpu, pc;
+       int pc;
        const char *p;
 
+       if (current->trace_recursion & TRACE_BRANCH_BIT)
+               return;
+
        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (unlikely(!tr))
                return;
 
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-       if (atomic_inc_return(&data->disabled) != 1)
+       raw_local_irq_save(flags);
+       current->trace_recursion |= TRACE_BRANCH_BIT;
+       data = this_cpu_ptr(tr->trace_buffer.data);
+       if (atomic_read(&data->disabled))
                goto out;
 
        pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                __buffer_unlock_commit(buffer, event);
 
  out:
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       current->trace_recursion &= ~TRACE_BRANCH_BIT;
+       raw_local_irq_restore(flags);
 }
 
 static inline
index 7f2e97c..52adf02 100644 (file)
@@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
 
 static char infix_next(struct filter_parse_state *ps)
 {
+       if (!ps->infix.cnt)
+               return 0;
+
        ps->infix.cnt--;
 
        return ps->infix.string[ps->infix.tail++];
@@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
 
 static void infix_advance(struct filter_parse_state *ps)
 {
+       if (!ps->infix.cnt)
+               return;
+
        ps->infix.cnt--;
        ps->infix.tail++;
 }
@@ -1385,7 +1391,9 @@ static int check_preds(struct filter_parse_state *ps)
                if (elt->op != OP_NOT)
                        cnt--;
                n_normal_preds++;
-               WARN_ON_ONCE(cnt < 0);
+               /* all ops should have operands */
+               if (cnt < 0)
+                       break;
        }
 
        if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
index aaade2e..d0e1d0e 100644 (file)
@@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
 #ifdef CONFIG_PROVE_LOCKING
 void time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-       trace_preemptirqsoff_hist(IRQS_ON, 0);
+       trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
 }
@@ -459,7 +459,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
-       trace_preemptirqsoff_hist(IRQS_OFF, 1);
+       trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
 }
 
 #else /* !CONFIG_PROVE_LOCKING */
index 64c0926..40162f8 100644 (file)
@@ -506,12 +506,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
        unsigned a, b;
        int c, old_c, totaldigits;
        const char __user __force *ubuf = (const char __user __force *)buf;
-       int exp_digit, in_range;
+       int at_start, in_range;
 
        totaldigits = c = 0;
        bitmap_zero(maskp, nmaskbits);
        do {
-               exp_digit = 1;
+               at_start = 1;
                in_range = 0;
                a = b = 0;
 
@@ -540,11 +540,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                                break;
 
                        if (c == '-') {
-                               if (exp_digit || in_range)
+                               if (at_start || in_range)
                                        return -EINVAL;
                                b = 0;
                                in_range = 1;
-                               exp_digit = 1;
                                continue;
                        }
 
@@ -554,16 +553,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                        b = b * 10 + (c - '0');
                        if (!in_range)
                                a = b;
-                       exp_digit = 0;
+                       at_start = 0;
                        totaldigits++;
                }
                if (!(a <= b))
                        return -EINVAL;
                if (b >= nmaskbits)
                        return -ERANGE;
-               while (a <= b) {
-                       set_bit(a, maskp);
-                       a++;
+               if (!at_start) {
+                       while (a <= b) {
+                               set_bit(a, maskp);
+                               a++;
+                       }
                }
        } while (buflen && c == ',');
        return 0;
index ae4b65e..dace71f 100644 (file)
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
        unsigned long flags;
        phys_addr_t cln;
 
+       if (dma_debug_disabled())
+               return;
+
        if (!page)
                return;
 
index 6745c62..7ccbc6f 100644 (file)
@@ -33,7 +33,7 @@ asmlinkage __visible void dump_stack(void)
         * Permit this cpu to perform nested stack dumps while serialising
         * against other CPUs
         */
-       preempt_disable();
+       migrate_disable();
 
 retry:
        cpu = smp_processor_id();
@@ -52,7 +52,7 @@ retry:
        if (!was_locked)
                atomic_set(&dump_lock, -1);
 
-       preempt_enable();
+       migrate_enable();
 }
 #else
 asmlinkage __visible void dump_stack(void)
index 271e443..8c4c1f9 100644 (file)
@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned int order = 8 * sizeof(void *);
        unsigned long pfn;
-       struct hstate *h;
 
        if (!hugepages_supported())
                return;
 
-       /* Set scan step to minimum hugepage size */
-       for_each_hstate(h)
-               if (order > huge_page_order(h))
-                       order = huge_page_order(h);
-       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
                dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
        struct hstate *h;
 
        for_each_hstate(h) {
+               if (minimum_order > huge_page_order(h))
+                       minimum_order = huge_page_order(h);
+
                /* oversize hugepages were init'ed in early boot */
                if (!hstate_is_gigantic(h))
                        hugetlb_hstate_alloc_pages(h);
        }
+       VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
index 501820c..9f48145 100644 (file)
@@ -1558,6 +1558,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 */
                ret = __get_any_page(page, pfn, 0);
                if (!PageLRU(page)) {
+                       /* Drop page reference which is from __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
@@ -1587,13 +1589,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        ret = isolate_huge_page(hpage, &pagelist);
-       if (ret) {
-               /*
-                * get_any_page() and isolate_huge_page() takes a refcount each,
-                * so need to drop one here.
-                */
-               put_page(hpage);
-       } else {
+       /*
+        * get_any_page() and isolate_huge_page() takes a refcount each,
+        * so need to drop one here.
+        */
+       put_page(hpage);
+       if (!ret) {
                pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
                return -EBUSY;
        }
index 17734c3..3fc6efd 100644 (file)
@@ -2669,6 +2669,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        pte_unmap(page_table);
 
+       /* File mapping without ->vm_ops ? */
+       if (vma->vm_flags & VM_SHARED)
+               return VM_FAULT_SIGBUS;
+
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
                return VM_FAULT_SIGSEGV;
@@ -3097,6 +3101,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
        pte_unmap(page_table);
+       /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+       if (!vma->vm_ops->fault)
+               return VM_FAULT_SIGBUS;
        if (!(flags & FAULT_FLAG_WRITE))
                return do_read_fault(mm, vma, address, pmd, pgoff, flags,
                                orig_pte);
@@ -3242,13 +3249,12 @@ static int handle_pte_fault(struct mm_struct *mm,
        barrier();
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
-                       if (vma->vm_ops) {
-                               if (likely(vma->vm_ops->fault))
-                                       return do_fault(mm, vma, address, pte,
-                                                       pmd, flags, entry);
-                       }
-                       return do_anonymous_page(mm, vma, address,
-                                                pte, pmd, flags);
+                       if (vma->vm_ops)
+                               return do_fault(mm, vma, address, pte, pmd,
+                                               flags, entry);
+
+                       return do_anonymous_page(mm, vma, address, pte, pmd,
+                                       flags);
                }
                return do_swap_page(mm, vma, address,
                                        pte, pmd, flags, entry);
index 5e8eadd..0d024fc 100644 (file)
@@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 *
                 * 2) Global reclaim encounters a page, memcg encounters a
                 *    page that is not marked for immediate reclaim or
-                *    the caller does not have __GFP_IO. In this case mark
+                *    the caller does not have __GFP_FS (or __GFP_IO if it's
+                *    simply going to swap, not to fs). In this case mark
                 *    the page for immediate reclaim and continue scanning.
                 *
-                *    __GFP_IO is checked  because a loop driver thread might
+                *    Require may_enter_fs because we would wait on fs, which
+                *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
-                *    Don't require __GFP_FS, since we're not going into the
-                *    FS, just waiting on its writeback completion. Worryingly,
-                *    ext4 gfs2 and xfs allocate pages with
-                *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-                *    may_enter_fs here is liable to OOM on them.
-                *
                 * 3) memcg encounters a page that is not already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
@@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                        /* Case 2 above */
                        } else if (global_reclaim(sc) ||
-                           !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+                           !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
index 6f4c4c8..fcf6fe0 100644 (file)
@@ -843,7 +843,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
        if (err < 0) {
                if (err == -EIO)
                        c->status = Disconnected;
-               goto reterr;
+               if (err != -ERESTARTSYS)
+                       goto reterr;
        }
        if (req->status == REQ_STATUS_ERROR) {
                p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
@@ -1540,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
                   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1615,6 +1617,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
                                fid->fid, (unsigned long long) offset,
@@ -1647,6 +1650,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
                if (*err) {
                        trace_9p_protocol_dump(clnt, req->rc);
                        p9_free_req(clnt, req);
+                       break;
                }
 
                p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
index 56f9edb..e11a5cf 100644 (file)
@@ -741,10 +741,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                        goto done;
                }
 
-               if (test_bit(HCI_UP, &hdev->flags) ||
-                   test_bit(HCI_INIT, &hdev->flags) ||
+               if (test_bit(HCI_INIT, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_SETUP) ||
-                   hci_dev_test_flag(hdev, HCI_CONFIG)) {
+                   hci_dev_test_flag(hdev, HCI_CONFIG) ||
+                   (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
+                    test_bit(HCI_UP, &hdev->flags))) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
@@ -760,10 +761,21 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
                err = hci_dev_open(hdev->id);
                if (err) {
-                       hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
-                       mgmt_index_added(hdev);
-                       hci_dev_put(hdev);
-                       goto done;
+                       if (err == -EALREADY) {
+                               /* In case the transport is already up and
+                                * running, clear the error here.
+                                *
+                                * This can happen when opening a user
+                                * channel and HCI_AUTO_OFF grace period
+                                * is still active.
+                                */
+                               err = 0;
+                       } else {
+                               hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
+                               mgmt_index_added(hdev);
+                               hci_dev_put(hdev);
+                               goto done;
+                       }
                }
 
                atomic_inc(&hdev->promisc);
index 1ab3dc9..7b815bc 100644 (file)
@@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
                return 1;
 
        chan = conn->smp;
+       if (!chan) {
+               BT_ERR("SMP security requested but not available");
+               return 1;
+       }
 
        if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
                return 1;
index 689c818..62c635f 100644 (file)
@@ -89,6 +89,8 @@ struct timer_list can_stattimer;   /* timer for statistics update */
 struct s_stats    can_stats;       /* packet statistics */
 struct s_pstats   can_pstats;      /* receive list statistics */
 
+static atomic_t skbcounter = ATOMIC_INIT(0);
+
 /*
  * af_can socket functions
  */
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
                return err;
        }
 
-       if (newskb) {
-               if (!(newskb->tstamp.tv64))
-                       __net_timestamp(newskb);
-
+       if (newskb)
                netif_rx_ni(newskb);
-       }
 
        /* update statistics */
        can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
        can_stats.rx_frames++;
        can_stats.rx_frames_delta++;
 
+       /* create non-zero unique skb identifier together with *skb */
+       while (!(can_skb_prv(skb)->skbcnt))
+               can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
+
        rcu_read_lock();
 
        /* deliver the packet to sockets listening on all devices */
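
The new skbcnt gives every frame passing can_receive() a non-zero identifier that the raw socket code below uses to detect duplicate filter matches without relying on timestamps. A small userspace model of that counter using C11 atomics (illustrative only):

#include <stdatomic.h>

static atomic_uint skbcounter;

/* 0 means "no identifier assigned yet", so skip the value produced when
 * the counter wraps around to zero. */
static unsigned int next_skbcnt(void)
{
        unsigned int id;

        do {
                id = atomic_fetch_add(&skbcounter, 1) + 1;
        } while (id == 0);
        return id;
}
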
index b523453..a1ba687 100644 (file)
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
 
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
        }
 
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
        skb->dev = dev;
        can_skb_set_owner(skb, sk);
        err = can_send(skb, 1); /* send with loopback */
index 31b9748..2e67b14 100644 (file)
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
  */
 
 struct uniqframe {
-       ktime_t tstamp;
+       int skbcnt;
        const struct sk_buff *skb;
        unsigned int join_rx_count;
 };
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 
        /* eliminate multiple filter matches for the same skb */
        if (this_cpu_ptr(ro->uniq)->skb == oskb &&
-           ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
+           this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
                if (ro->join_filters) {
                        this_cpu_inc(ro->uniq->join_rx_count);
                        /* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
                }
        } else {
                this_cpu_ptr(ro->uniq)->skb = oskb;
-               this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
+               this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
                this_cpu_ptr(ro->uniq)->join_rx_count = 1;
                /* drop first frame to check all enabled filters? */
                if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
+       can_skb_prv(skb)->skbcnt = 0;
 
        err = memcpy_from_msg(skb_put(skb, size), msg, size);
        if (err < 0)
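
raw_rcv() now deduplicates on the (skb pointer, skbcnt) pair instead of the timestamp, which the af_can.c hunk above no longer sets for looped-back frames. A sketch of that check with invented names (per-CPU handling and join_filters accounting omitted):

#include <stdbool.h>

struct uniq {
        const void *last_skb;
        int         last_cnt;
};

/* True when a second filter matched the very same frame, in which case the
 * duplicate delivery is suppressed. */
static bool seen_before(struct uniq *u, const void *skb, int cnt)
{
        if (u->last_skb == skb && u->last_cnt == cnt)
                return true;
        u->last_skb = skb;
        u->last_cnt = cnt;
        return false;
}
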
index 1579669..4a31258 100644 (file)
@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
 {
        int j;
        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
-       ceph_decode_32_safe(p, end, b->num_nodes, bad);
+       ceph_decode_8_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
index b60c65f..627a253 100644 (file)
@@ -739,6 +739,12 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (saddr) {
+               /* Clear the implicit padding in struct sockaddr_ieee802154
+                * (16 bits between 'family' and 'addr') and in struct
+                * ieee802154_addr_sa (16 bits at the end of the structure).
+                */
+               memset(saddr, 0, sizeof(*saddr));
+
                saddr->family = AF_IEEE802154;
                ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
                *addr_len = sizeof(*saddr);
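
Because of alignment, the sockaddr returned here contains holes that would otherwise carry stale kernel stack bytes to userspace. A self-contained illustration of the fix, using a made-up structure rather than struct sockaddr_ieee802154:

#include <string.h>

struct addr_out {
        unsigned short family;  /* 2 bytes of implicit padding follow */
        unsigned int   addr;
};

static void fill_addr(struct addr_out *sa)
{
        memset(sa, 0, sizeof(*sa));     /* clears the padding too */
        sa->family = 36;                /* stand-in for AF_IEEE802154 */
        sa->addr   = 0;
}
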
index ff347a0..f06d422 100644 (file)
@@ -3356,6 +3356,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        /* Update CSA counters */
        if (sdata->vif.csa_active &&
            (sdata->vif.type == NL80211_IFTYPE_AP ||
+            sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
             sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
            params->n_csa_offsets) {
                int i;
index 29236e8..c09c013 100644 (file)
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
 
        debugfs_remove_recursive(sdata->vif.debugfs_dir);
        sdata->vif.debugfs_dir = NULL;
+       sdata->debugfs.subdir_stations = NULL;
 }
 
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
index bfef1b2..a9c9d96 100644 (file)
@@ -146,6 +146,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                                csa_settings->chandef.chan->center_freq);
                presp->csa_counter_offsets[0] = (pos - presp->head);
                *pos++ = csa_settings->count;
+               presp->csa_current_counter = csa_settings->count;
        }
 
        /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
index df3051d..e86daed 100644 (file)
@@ -249,6 +249,7 @@ static void ieee80211_restart_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, restart_work);
+       struct ieee80211_sub_if_data *sdata;
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
@@ -257,6 +258,8 @@ static void ieee80211_restart_work(struct work_struct *work)
             "%s called with hardware scan in progress\n", __func__);
 
        rtnl_lock();
+       list_for_each_entry(sdata, &local->interfaces, list)
+               flush_delayed_work(&sdata->dec_tailroom_needed_wk);
        ieee80211_scan_cancel(local);
        ieee80211_reconfig(local);
        rtnl_unlock();
index d468424..817098a 100644 (file)
@@ -680,6 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
                *pos++ = 0x0;
                *pos++ = ieee80211_frequency_to_channel(
                                csa->settings.chandef.chan->center_freq);
+               bcn->csa_current_counter = csa->settings.count;
                bcn->csa_counter_offsets[0] = hdr_len + 6;
                *pos++ = csa->settings.count;
                *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
index 247552a..3ece7d1 100644 (file)
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-       int j = MAX_THR_RATES;
-       struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+       int j;
+       struct minstrel_rate_stats *tmp_mrs;
        struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-       while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-              minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-               j--;
+       for (j = MAX_THR_RATES; j > 0; --j) {
                tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+               if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+                   minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+                       break;
        }
 
        if (j < MAX_THR_RATES - 1)
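
The rewritten loop fixes the sort: tmp_mrs is now refreshed for every slot before the comparison, so the new rate is checked against each entry it might displace. Stripped down to plain integers (not the minstrel structures), the search for the insertion slot looks like:

#define MAX_THR_RATES 4

/* tp_list[] is ordered best-first; walk from the tail and stop at the
 * first entry the new throughput does not beat. The caller then shifts
 * the lower entries down and stores the new rate at slot j. */
static int find_insert_pos(const int tp_list[MAX_THR_RATES], int new_tp)
{
        int j;

        for (j = MAX_THR_RATES; j > 0; --j) {
                if (new_tp <= tp_list[j - 1])
                        break;
        }
        return j;
}
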
index f64b6e8..c8faaf3 100644 (file)
@@ -760,8 +760,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
        }
 
        ibmr = rds_ib_alloc_fmr(rds_ibdev);
-       if (IS_ERR(ibmr))
+       if (IS_ERR(ibmr)) {
+               rds_ib_dev_put(rds_ibdev);
                return ibmr;
+       }
 
        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
index 9dd0ea8..28504df 100644 (file)
@@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
 
        dprintk("RPC:        free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
-       xbufp = &req->rq_private_buf;
+       xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
index 70051ab..7e4e3ff 100644 (file)
@@ -944,7 +944,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype && netif_running(dev)) {
+       if (ntype != otype) {
                dev->ieee80211_ptr->use_4addr = false;
                dev->ieee80211_ptr->mesh_id_up_len = 0;
                wdev_lock(dev->ieee80211_ptr);
index 8965d1b..125d640 100644 (file)
  *
  *      For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
  *            Use __get_dynamic_array_len(foo) to get the length of the array
- *            saved.
+ *            saved. Note, __get_dynamic_array_len() returns the total allocated
+ *            length of the dynamic array; __print_array() expects the second
+ *            parameter to be the number of elements. To get that, the array length
+ *            needs to be divided by the element size.
  *
  *      For __string(foo, bar) use __get_str(foo)
  *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
  *    This prints out the array that is defined by __array in a nice format.
  */
                  __print_array(__get_dynamic_array(list),
-                               __get_dynamic_array_len(list),
+                               __get_dynamic_array_len(list) / sizeof(int),
                                sizeof(int)),
                  __get_str(str), __get_bitmask(cpus))
 );
index 9cb8522..f3d3fb4 100755 (executable)
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
index 10f9943..5820914 100644 (file)
@@ -296,6 +296,17 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
                iint = integrity_iint_find(d_backing_inode(dentry));
                if (iint && (iint->flags & IMA_NEW_FILE))
                        return 0;
+
+               /* exception for pseudo filesystems */
+               if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
+                   || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
+                       return 0;
+
+               integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
+                                   dentry->d_inode, dentry->d_name.name,
+                                   "update_metadata",
+                                   integrity_status_msg[evm_status],
+                                   -EPERM, 0);
        }
 out:
        if (evm_status != INTEGRITY_PASS)
index 8ee997d..fc56d4d 100644 (file)
@@ -106,7 +106,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
                       const char *op, const char *cause);
 int ima_init_crypto(void);
 void ima_putc(struct seq_file *m, void *data, int datalen);
-void ima_print_digest(struct seq_file *m, u8 *digest, int size);
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
 struct ima_template_desc *ima_template_desc_current(void);
 int ima_init_template(void);
 
index 461215e..816d175 100644 (file)
@@ -190,9 +190,9 @@ static const struct file_operations ima_measurements_ops = {
        .release = seq_release,
 };
 
-void ima_print_digest(struct seq_file *m, u8 *digest, int size)
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < size; i++)
                seq_printf(m, "%02x", *(digest + i));
index d1eefb9..3997e20 100644 (file)
@@ -27,6 +27,8 @@
 #define IMA_UID                0x0008
 #define IMA_FOWNER     0x0010
 #define IMA_FSUUID     0x0020
+#define IMA_INMASK     0x0040
+#define IMA_EUID       0x0080
 
 #define UNKNOWN                0
 #define MEASURE                0x0001  /* same as IMA_MEASURE */
@@ -42,6 +44,8 @@ enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
        LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
 };
 
+enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
 struct ima_rule_entry {
        struct list_head list;
        int action;
@@ -70,7 +74,7 @@ struct ima_rule_entry {
  * normal users can easily run the machine out of memory simply building
  * and running executables.
  */
-static struct ima_rule_entry default_rules[] = {
+static struct ima_rule_entry dont_measure_rules[] = {
        {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
@@ -79,12 +83,31 @@ static struct ima_rule_entry default_rules[] = {
        {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+       {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
+        .flags = IMA_FSMAGIC},
+       {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
+};
+
+static struct ima_rule_entry original_measurement_rules[] = {
        {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
         .flags = IMA_FUNC | IMA_MASK},
        {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
         .flags = IMA_FUNC | IMA_MASK},
-       {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID,
-        .flags = IMA_FUNC | IMA_MASK | IMA_UID},
+       {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+        .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID},
+       {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+       {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_measurement_rules[] = {
+       {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+        .flags = IMA_FUNC | IMA_MASK},
+       {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+        .flags = IMA_FUNC | IMA_MASK},
+       {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+        .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
+       {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+        .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
        {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
        {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
 };
@@ -99,6 +122,7 @@ static struct ima_rule_entry default_appraise_rules[] = {
        {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+       {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
        {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
 #ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
        {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
@@ -115,14 +139,29 @@ static struct list_head *ima_rules;
 
 static DEFINE_MUTEX(ima_rules_mutex);
 
-static bool ima_use_tcb __initdata;
+static int ima_policy __initdata;
 static int __init default_measure_policy_setup(char *str)
 {
-       ima_use_tcb = 1;
+       if (ima_policy)
+               return 1;
+
+       ima_policy = ORIGINAL_TCB;
        return 1;
 }
 __setup("ima_tcb", default_measure_policy_setup);
 
+static int __init policy_setup(char *str)
+{
+       if (ima_policy)
+               return 1;
+
+       if (strcmp(str, "tcb") == 0)
+               ima_policy = DEFAULT_TCB;
+
+       return 1;
+}
+__setup("ima_policy=", policy_setup);
+
 static bool ima_use_appraise_tcb __initdata;
 static int __init default_appraise_policy_setup(char *str)
 {
@@ -182,6 +221,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
        if ((rule->flags & IMA_MASK) &&
            (rule->mask != mask && func != POST_SETATTR))
                return false;
+       if ((rule->flags & IMA_INMASK) &&
+           (!(rule->mask & mask) && func != POST_SETATTR))
+               return false;
        if ((rule->flags & IMA_FSMAGIC)
            && rule->fsmagic != inode->i_sb->s_magic)
                return false;
@@ -190,6 +232,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
                return false;
        if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
                return false;
+       if (rule->flags & IMA_EUID) {
+               if (has_capability_noaudit(current, CAP_SETUID)) {
+                       if (!uid_eq(rule->uid, cred->euid)
+                           && !uid_eq(rule->uid, cred->suid)
+                           && !uid_eq(rule->uid, cred->uid))
+                               return false;
+               } else if (!uid_eq(rule->uid, cred->euid))
+                       return false;
+       }
+
        if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
                return false;
        for (i = 0; i < MAX_LSM_RULES; i++) {
@@ -333,21 +385,31 @@ void __init ima_init_policy(void)
 {
        int i, measure_entries, appraise_entries;
 
-       /* if !ima_use_tcb set entries = 0 so we load NO default rules */
-       measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
+       /* if !ima_policy set entries = 0 so we load NO default rules */
+       measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
        appraise_entries = ima_use_appraise_tcb ?
                         ARRAY_SIZE(default_appraise_rules) : 0;
 
-       for (i = 0; i < measure_entries + appraise_entries; i++) {
-               if (i < measure_entries)
-                       list_add_tail(&default_rules[i].list,
-                                     &ima_default_rules);
-               else {
-                       int j = i - measure_entries;
+       for (i = 0; i < measure_entries; i++)
+               list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
 
-                       list_add_tail(&default_appraise_rules[j].list,
+       switch (ima_policy) {
+       case ORIGINAL_TCB:
+               for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++)
+                       list_add_tail(&original_measurement_rules[i].list,
                                      &ima_default_rules);
-               }
+               break;
+       case DEFAULT_TCB:
+               for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++)
+                       list_add_tail(&default_measurement_rules[i].list,
+                                     &ima_default_rules);
+       default:
+               break;
+       }
+
+       for (i = 0; i < appraise_entries; i++) {
+               list_add_tail(&default_appraise_rules[i].list,
+                             &ima_default_rules);
        }
 
        ima_rules = &ima_default_rules;
@@ -373,7 +435,8 @@ enum {
        Opt_audit,
        Opt_obj_user, Opt_obj_role, Opt_obj_type,
        Opt_subj_user, Opt_subj_role, Opt_subj_type,
-       Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
+       Opt_func, Opt_mask, Opt_fsmagic,
+       Opt_uid, Opt_euid, Opt_fowner,
        Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
 };
 
@@ -394,6 +457,7 @@ static match_table_t policy_tokens = {
        {Opt_fsmagic, "fsmagic=%s"},
        {Opt_fsuuid, "fsuuid=%s"},
        {Opt_uid, "uid=%s"},
+       {Opt_euid, "euid=%s"},
        {Opt_fowner, "fowner=%s"},
        {Opt_appraise_type, "appraise_type=%s"},
        {Opt_permit_directio, "permit_directio"},
@@ -435,6 +499,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
 static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
 {
        struct audit_buffer *ab;
+       char *from;
        char *p;
        int result = 0;
 
@@ -525,18 +590,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
                        if (entry->mask)
                                result = -EINVAL;
 
-                       if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
+                       from = args[0].from;
+                       if (*from == '^')
+                               from++;
+
+                       if ((strcmp(from, "MAY_EXEC")) == 0)
                                entry->mask = MAY_EXEC;
-                       else if (strcmp(args[0].from, "MAY_WRITE") == 0)
+                       else if (strcmp(from, "MAY_WRITE") == 0)
                                entry->mask = MAY_WRITE;
-                       else if (strcmp(args[0].from, "MAY_READ") == 0)
+                       else if (strcmp(from, "MAY_READ") == 0)
                                entry->mask = MAY_READ;
-                       else if (strcmp(args[0].from, "MAY_APPEND") == 0)
+                       else if (strcmp(from, "MAY_APPEND") == 0)
                                entry->mask = MAY_APPEND;
                        else
                                result = -EINVAL;
                        if (!result)
-                               entry->flags |= IMA_MASK;
+                               entry->flags |= (*args[0].from == '^')
+                                    ? IMA_INMASK : IMA_MASK;
                        break;
                case Opt_fsmagic:
                        ima_log_string(ab, "fsmagic", args[0].from);
@@ -566,6 +636,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
                        break;
                case Opt_uid:
                        ima_log_string(ab, "uid", args[0].from);
+               case Opt_euid:
+                       if (token == Opt_euid)
+                               ima_log_string(ab, "euid", args[0].from);
 
                        if (uid_valid(entry->uid)) {
                                result = -EINVAL;
@@ -574,11 +647,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
 
                        result = kstrtoul(args[0].from, 10, &lnum);
                        if (!result) {
-                               entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
-                               if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
+                               entry->uid = make_kuid(current_user_ns(),
+                                                      (uid_t) lnum);
+                               if (!uid_valid(entry->uid) ||
+                                   (uid_t)lnum != lnum)
                                        result = -EINVAL;
                                else
-                                       entry->flags |= IMA_UID;
+                                       entry->flags |= (token == Opt_uid)
+                                           ? IMA_UID : IMA_EUID;
                        }
                        break;
                case Opt_fowner:
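
The new IMA_INMASK flag distinguishes "mask must equal MAY_READ" from "mask must contain MAY_READ", selected by a leading '^' in the rule. A reduced sketch of the parsing side, handling only one token and using illustrative flag values where the diff does not show them:

#include <string.h>

#define IMA_MASK    0x0002      /* exact match (value is illustrative) */
#define IMA_INMASK  0x0040      /* subset match, as added above */
#define MAY_READ    0x4

static int parse_mask(const char *arg, int *mask, unsigned int *flags)
{
        const char *from = arg;

        if (*from == '^')               /* "^MAY_READ" means "contains" */
                from++;
        if (strcmp(from, "MAY_READ") != 0)
                return -1;
        *mask = MAY_READ;
        *flags |= (*arg == '^') ? IMA_INMASK : IMA_MASK;
        return 0;
}
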
index bcfc36c..61fbd0c 100644 (file)
@@ -70,7 +70,8 @@ static void ima_show_template_data_ascii(struct seq_file *m,
                                         enum data_formats datafmt,
                                         struct ima_field_data *field_data)
 {
-       u8 *buf_ptr = field_data->data, buflen = field_data->len;
+       u8 *buf_ptr = field_data->data;
+       u32 buflen = field_data->len;
 
        switch (datafmt) {
        case DATA_FMT_DIGEST_WITH_ALGO:
index e72548b..d334370 100644 (file)
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
        if (index_key->type == &key_type_keyring)
                up_write(&keyring_serialise_link_sem);
 
-       if (edit && !edit->dead_leaf) {
-               key_payload_reserve(keyring,
-                                   keyring->datalen - KEYQUOTA_LINK_BYTES);
+       if (edit) {
+               if (!edit->dead_leaf) {
+                       key_payload_reserve(keyring,
+                               keyring->datalen - KEYQUOTA_LINK_BYTES);
+               }
                assoc_array_cancel_edit(edit);
        }
        up_write(&keyring->sem);
index 212070e..7f8d7f1 100644 (file)
@@ -3288,7 +3288,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
        int rc = 0;
 
        if (default_noexec &&
-           (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+           (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+                                  (!shared && (prot & PROT_WRITE)))) {
                /*
                 * We are making executable an anonymous mapping or a
                 * private file mapping that will also be writable.
index afe6a26..57644b1 100644 (file)
@@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
                if (offset == (u32)-1)
                        return 0;
 
+               /* don't waste ebitmap space if the netlabel bitmap is empty */
+               if (bitmap == 0) {
+                       offset += EBITMAP_UNIT_SIZE;
+                       continue;
+               }
+
                if (e_iter == NULL ||
                    offset >= e_iter->startbit + EBITMAP_SIZE) {
                        e_prev = e_iter;
index 5dce6d8..ec8486c 100644 (file)
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
-               down_read(&snd_pcm_link_rwsem);
+               down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
                mutex_lock(&substream->self_group.mutex);
        } else {
                read_lock(&snd_pcm_link_rwlock);
index e061355..bf20593 100644 (file)
@@ -730,8 +730,9 @@ static void handle_in_packet(struct amdtp_stream *s,
            s->data_block_counter != UINT_MAX)
                data_block_counter = s->data_block_counter;
 
-       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-           (s->data_block_counter == UINT_MAX)) {
+       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+            data_block_counter == s->tx_first_dbc) ||
+           s->data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                lost = data_block_counter != s->data_block_counter;
index 8a03a91..25c9055 100644 (file)
@@ -153,6 +153,8 @@ struct amdtp_stream {
 
        /* quirk: fixed interval of dbc between previos/current packets. */
        unsigned int tx_dbc_interval;
+       /* quirk: indicate the value of dbc field in a first packet. */
+       unsigned int tx_first_dbc;
 
        bool callbacked;
        wait_queue_head_t callback_wait;
index 2682e7e..c94a432 100644 (file)
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
+       /* AudioFire8 (since 2009) and AudioFirePre8 */
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
+       /* These models use the same firmware. */
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+           entry->model_id == MODEL_GIBSON_RIP ||
+           entry->model_id == MODEL_GIBSON_GOLDTOP)
+               efw->is_fireworks3 = true;
 
        snd_efw_proc_init(efw);
 
index 4f0201a..084d414 100644 (file)
@@ -71,6 +71,7 @@ struct snd_efw {
 
        /* for quirks */
        bool is_af9;
+       bool is_fireworks3;
        u32 firmware_version;
 
        unsigned int midi_in_ports;
index c55db1b..7e353f1 100644 (file)
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks reset dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
+       /*
+        * But recent firmware starts packets with non-zero dbc.
+        * Driver version 5.7.6 installs firmware version 5.7.3.
+        */
+       if (efw->is_fireworks3 &&
+           (efw->firmware_version == 0x5070000 ||
+            efw->firmware_version == 0x5070300 ||
+            efw->firmware_version == 0x5080000))
+               efw->tx_stream.tx_first_dbc = 0x02;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
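
With the tx_first_dbc quirk above, the "lost packets" detection compares the first data block counter against a per-model expected value instead of hard-coding zero. A boolean sketch of that continuity check, simplified and with the end-event branch left out:

#include <stdbool.h>
#include <limits.h>

/* skip_zero_check: model resets dbc at bus reset; first_dbc: the value the
 * firmware is known to start with (0x02 for the listed Fireworks versions). */
static bool packet_lost(unsigned int dbc, unsigned int expected,
                        bool skip_zero_check, unsigned int first_dbc)
{
        if ((skip_zero_check && dbc == first_dbc) || expected == UINT_MAX)
                return false;           /* stream just (re)started */
        return dbc != expected;
}
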
index 5645481..36e8f12 100644 (file)
@@ -3259,7 +3259,7 @@ static int add_std_chmaps(struct hda_codec *codec)
                        struct snd_pcm_chmap *chmap;
                        const struct snd_pcm_chmap_elem *elem;
 
-                       if (!pcm || pcm->own_chmap ||
+                       if (!pcm || !pcm->pcm || pcm->own_chmap ||
                            !hinfo->substreams)
                                continue;
                        elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
index ac0db16..5bc7f2e 100644 (file)
@@ -671,7 +671,8 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
                }
                for (i = 0; i < path->depth; i++) {
                        if (path->path[i] == nid) {
-                               if (dir == HDA_OUTPUT || path->idx[i] == idx)
+                               if (dir == HDA_OUTPUT || idx == -1 ||
+                                   path->idx[i] == idx)
                                        return true;
                                break;
                        }
@@ -682,7 +683,7 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
 
 /* check whether the NID is referred by any active paths */
 #define is_active_nid_for_any(codec, nid) \
-       is_active_nid(codec, nid, HDA_OUTPUT, 0)
+       is_active_nid(codec, nid, HDA_OUTPUT, -1)
 
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
@@ -883,8 +884,7 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
        struct hda_gen_spec *spec = codec->spec;
        int i;
 
-       if (!enable)
-               path->active = false;
+       path->active = enable;
 
        /* make sure the widget is powered up */
        if (enable && (spec->power_down_unused || codec->power_save_node))
@@ -902,9 +902,6 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
                if (has_amp_out(codec, path, i))
                        activate_amp_out(codec, path, i, enable);
        }
-
-       if (enable)
-               path->active = true;
 }
 EXPORT_SYMBOL_GPL(snd_hda_activate_path);
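
is_active_nid_for_any() now passes idx == -1 as a wildcard, so a widget referenced at any connection index counts as active, not only at index 0. Ignoring the output-direction shortcut, the check reduces to this sketch:

#include <stdbool.h>

static bool path_uses_nid(const int path_nids[], const int path_idx[],
                          int depth, int nid, int idx)
{
        for (int i = 0; i < depth; i++) {
                if (path_nids[i] == nid)
                        /* idx == -1 means "any input index matches" */
                        return idx == -1 || path_idx[i] == idx;
        }
        return false;
}
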
 
index c403dd1..44dfc7b 100644 (file)
@@ -2056,6 +2056,8 @@ static const struct pci_device_id azx_ids[] = {
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x157a),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
@@ -2110,8 +2112,14 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaad8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaae8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 50e9dd6..3a24f77 100644 (file)
@@ -1001,9 +1001,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
 
        spec->spdif_present = spdif_present;
        /* SPDIF TX on/off */
-       if (spdif_present)
-               snd_hda_set_pin_ctl(codec, spdif_pin,
-                                   spdif_present ? PIN_OUT : 0);
+       snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
 
        cs_automute(codec);
 }
index 78b719b..06cc9d5 100644 (file)
@@ -200,12 +200,33 @@ static int cx_auto_init(struct hda_codec *codec)
        return 0;
 }
 
-#define cx_auto_free   snd_hda_gen_free
+static void cx_auto_reboot_notify(struct hda_codec *codec)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       if (codec->core.vendor_id != 0x14f150f2)
+               return;
+
+       /* Turn the CX20722 codec into D3 to avoid spurious noises
+          from the internal speaker during (and after) reboot */
+       cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+
+       snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+       snd_hda_codec_write(codec, codec->core.afg, 0,
+                           AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+}
+
+static void cx_auto_free(struct hda_codec *codec)
+{
+       cx_auto_reboot_notify(codec);
+       snd_hda_gen_free(codec);
+}
 
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = cx_auto_init,
+       .reboot_notify = cx_auto_reboot_notify,
        .free = cx_auto_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
index 5f44f60..225b78b 100644 (file)
@@ -3333,6 +3333,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3396,6 +3397,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de0072");
+MODULE_ALIAS("snd-hda-codec-id:10de007d");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index 0e75998..91f6928 100644 (file)
@@ -2224,7 +2224,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
-       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
+       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -5004,7 +5004,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { 0x14, 0x90170110 },
                        { 0x17, 0x40000008 },
                        { 0x18, 0x411111f0 },
-                       { 0x19, 0x411111f0 },
+                       { 0x19, 0x01a1913c },
                        { 0x1a, 0x411111f0 },
                        { 0x1b, 0x411111f0 },
                        { 0x1d, 0x40f89b2d },
@@ -5114,9 +5114,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5381,6 +5384,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170130},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221103f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
index 6c66d7e..25f0f45 100644 (file)
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
                      "HP Mini", STAC_92HD83XXX_HP_LED),
        SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+       /* match both for 0xfa91 and 0xfa93 */
+       SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
                      "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
        {} /* terminator */
 };
index 9b5a17d..aad6642 100644 (file)
@@ -346,7 +346,7 @@ static int max98925_dai_set_fmt(struct snd_soc_dai *codec_dai,
        }
 
        regmap_update_bits(max98925->regmap, MAX98925_FORMAT,
-                       M98925_DAI_BCI_MASK, invert);
+                       M98925_DAI_BCI_MASK | M98925_DAI_WCI_MASK, invert);
        return 0;
 }
 
index 477e13d..e7ba557 100644 (file)
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
 
        if (val != -1) {
                regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
-                                       PCM1681_DEEMPH_RATE_MASK, val);
+                                  PCM1681_DEEMPH_RATE_MASK, val << 3);
                enable = 1;
        } else
                enable = 0;
index be4d741..2ee44ab 100644 (file)
@@ -2837,6 +2837,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                }
        }
 
+       INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+
        if (rt5645->i2c->irq) {
                ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
@@ -2855,8 +2857,6 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                        dev_err(&i2c->dev, "Fail gpio_direction hp_det_gpio\n");
        }
 
-       INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
-
        return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
                                      rt5645_dai, ARRAY_SIZE(rt5645_dai));
 }
index a984485..f7549cc 100644 (file)
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        if (invert_fclk)
                ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
 
-       return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+       return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+                       SSM4567_SAI_CTRL_1_BCLK |
+                       SSM4567_SAI_CTRL_1_FSYNC |
+                       SSM4567_SAI_CTRL_1_LJ |
+                       SSM4567_SAI_CTRL_1_TDM |
+                       SSM4567_SAI_CTRL_1_PDM,
+                       ctrl1);
 }
 
 static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
index dfb4ff5..1855859 100644 (file)
@@ -120,6 +120,9 @@ static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
 {
        u8 cfg1_reg;
 
+       if (!tas_data->codec)
+               return;
+
        if (sw_shutdown)
                cfg1_reg = 0;
        else
@@ -335,7 +338,6 @@ static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 24);
 static const struct snd_kcontrol_new tas2552_snd_controls[] = {
        SOC_SINGLE_TLV("Speaker Driver Playback Volume",
                         TAS2552_PGA_GAIN, 0, 0x1f, 1, dac_tlv),
-       SOC_DAPM_SINGLE("Playback AMP", SND_SOC_NOPM, 0, 1, 0),
 };
 
 static const struct reg_default tas2552_init_regs[] = {
index 0c6d1bc..d476221 100644 (file)
@@ -42,7 +42,7 @@ struct wm5102_priv {
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
 
 static const struct wm_adsp_region wm5102_dsp1_regions[] = {
index fbaeddb..3ee6cfd 100644 (file)
@@ -167,7 +167,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
 
 #define WM5110_NG_SRC(name, base) \
index ada9ac1..51171e4 100644 (file)
@@ -483,7 +483,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
 
                        /* Fast VMID ramp at 2*2.5k */
                        snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
-                                           WM8737_VMIDSEL_MASK, 0x4);
+                                           WM8737_VMIDSEL_MASK,
+                                           2 << WM8737_VMIDSEL_SHIFT);
 
                        /* Bring VMID up */
                        snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
@@ -497,7 +498,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
 
                /* VMID at 2*300k */
                snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
-                                   WM8737_VMIDSEL_MASK, 2);
+                                   WM8737_VMIDSEL_MASK,
+                                   1 << WM8737_VMIDSEL_SHIFT);
 
                break;
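
Both hunks fix the same class of bug: the VMIDSEL value must be shifted into its field before being handed to the mask/update helper. A generic sketch of why (shift and width are made up, not the WM8737's real register layout):

#define VMIDSEL_SHIFT 4u                        /* assumed position */
#define VMIDSEL_MASK  (0x3u << VMIDSEL_SHIFT)

static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                unsigned int val)
{
        return (reg & ~mask) | (val & mask);
}

/* Selecting setting 2: the raw value 0x2 would be masked away entirely,
 * so it must be shifted to the field position first. */
static unsigned int select_fast_vmid(unsigned int reg)
{
        return update_bits(reg, VMIDSEL_MASK, 2u << VMIDSEL_SHIFT);
}
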
 
index db94931..0bb4a64 100644 (file)
@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
 #define WM8903_VMID_BUF_ENA_WIDTH                    1  /* VMID_BUF_ENA */
 
 #define WM8903_VMID_RES_50K                          2
-#define WM8903_VMID_RES_250K                         3
+#define WM8903_VMID_RES_250K                         4
 #define WM8903_VMID_RES_5K                           6
 
 /*
index 00bec91..03e04bf 100644 (file)
@@ -298,7 +298,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
                snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
                                    WM8955_K_17_9_MASK,
                                    (pll.k >> 9) & WM8955_K_17_9_MASK);
-               snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+               snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
                                    WM8955_K_8_0_MASK,
                                    pll.k & WM8955_K_8_0_MASK);
                if (pll.k)
index e97a761..8d7f632 100644 (file)
@@ -245,7 +245,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
 SOC_ENUM("ADC Polarity", wm8960_enum[0]),
 SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
 
-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
+SOC_ENUM("DAC Polarity", wm8960_enum[1]),
 SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
                    wm8960_get_deemph, wm8960_put_deemph),
 
index a4d1177..e7c81ba 100644 (file)
@@ -40,7 +40,7 @@ struct wm8997_priv {
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
 static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
 
 static const struct reg_default wm8997_sysclk_reva_patch[] = {
index cd146d4..b38b98c 100644 (file)
@@ -190,7 +190,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "audmux internal port setup failed\n");
                return ret;
        }
-       imx_audmux_v2_configure_port(ext_port,
+       ret = imx_audmux_v2_configure_port(ext_port,
                        IMX_AUDMUX_V2_PTCR_SYN,
                        IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
        if (ret) {
index 7b50a9d..edc1869 100644 (file)
 #define MIN_FRAGMENT_SIZE (50 * 1024)
 #define MAX_FRAGMENT_SIZE (1024 * 1024)
 #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
+#ifdef CONFIG_PM
+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
+#else
+#define GET_USAGE_COUNT(dev) 1
+#endif
 
 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
 {
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
        int ret = 0;
        int usage_count = 0;
 
-#ifdef CONFIG_PM
-       usage_count = atomic_read(&dev->power.usage_count);
-#else
-       usage_count = 1;
-#endif
-
        if (state == true) {
                ret = pm_runtime_get_sync(dev);
-
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
                if (ret < 0) {
                        dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
                        }
                }
        } else {
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
                return sst_pm_runtime_put(ctx);
        }
index 6768e4f..30d0109 100644 (file)
@@ -100,12 +100,13 @@ config SND_OMAP_SOC_OMAP_TWL4030
 
 config SND_OMAP_SOC_OMAP_ABE_TWL6040
        tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
-       depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST)
+       depends on TWL6040_CORE && SND_OMAP_SOC
+       depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
        select SND_OMAP_SOC_DMIC
        select SND_OMAP_SOC_MCPDM
        select SND_SOC_TWL6040
        select SND_SOC_DMIC
-       select COMMON_CLK_PALMAS if MFD_PALMAS
+       select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
        help
          Say Y if you want to add support for SoC audio on OMAP boards using
          ABE and twl6040 codec. This driver currently supports:
index 5f58e4f..b07f183 100644 (file)
@@ -6,12 +6,10 @@ config SND_SOC_QCOM
 
 config SND_SOC_LPASS_CPU
        tristate
-       depends on SND_SOC_QCOM
        select REGMAP_MMIO
 
 config SND_SOC_LPASS_PLATFORM
        tristate
-       depends on SND_SOC_QCOM
        select REGMAP_MMIO
 
 config SND_SOC_STORM
index 158204d..b6c12dc 100644 (file)
@@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                           size_t count, loff_t *ppos)
 {
        struct snd_soc_dapm_widget *w = file->private_data;
+       struct snd_soc_card *card = w->dapm->card;
        char *buf;
        int in, out;
        ssize_t ret;
@@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&card->dapm_mutex);
+
        /* Supply widgets are not handled by is_connected_{input,output}_ep() */
        if (w->is_supply) {
                in = 0;
@@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                        p->sink->name);
        }
 
+       mutex_unlock(&card->dapm_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct device *dev,
        struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
        int i, count = 0;
 
+       mutex_lock(&rtd->card->dapm_mutex);
+
        for (i = 0; i < rtd->num_codecs; i++) {
                struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
                count += dapm_widget_show_codec(codec, buf + count);
        }
 
+       mutex_unlock(&rtd->card->dapm_mutex);
+
        return count;
 }
 
@@ -3100,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix) {
+       if (prefix)
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-                                            widget->sname);
-       } else {
+       else
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
@@ -3557,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                                break;
                        }
 
-                       if (!w->sname || !strstr(w->sname, dai_w->name))
+                       if (!w->sname || !strstr(w->sname, dai_w->sname))
                                continue;
 
                        if (dai_w->id == snd_soc_dapm_dai_in) {
index 1fab977..0450593 100644 (file)
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing && chip->in_pm)
+       if (chip->probing || chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
index 8461d6b..204cc07 100644 (file)
@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
        int ret = 0;
 
        spin_lock_irqsave(&pstr->lock, flags);
-       if (!test_and_set_bit(type, &pstr->running)) {
-               if (pstr->active_urbs || pstr->unlink_urbs) {
-                       ret = -EBUSY;
-                       goto error;
-               }
-
+       if (!test_and_set_bit(type, &pstr->running) &&
+           !(pstr->active_urbs || pstr->unlink_urbs)) {
                pstr->count = 0;
                /* Submit all currently available URBs */
                if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
                else
                        ret = line6_submit_audio_in_all_urbs(line6pcm);
        }
- error:
        if (ret < 0)
                clear_bit(type, &pstr->running);
        spin_unlock_irqrestore(&pstr->lock, flags);
index e5000da..6a803ef 100644 (file)
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
        { 0 }
 };
 
+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+       { 3, NULL, .dB = &bose_companion5_dB },
+       { 0 }   /* terminator */
+};
+
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+       { 7, NULL, .dB = &dragonfly_1_2_dB },
+       { 0 }   /* terminator */
+};
+
 /*
  * Control map entries
  */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x25c4, 0x0003),
                .map = scms_usb3318_map,
        },
+       {
+               /* Bose Companion 5 */
+               .id = USB_ID(0x05a7, 0x1020),
+               .map = bose_companion5_map,
+       },
+       {
+               /* Dragonfly DAC 1.2 */
+               .id = USB_ID(0x21b4, 0x0081),
+               .map = dragonfly_1_2_map,
+       },
        { 0 } /* terminator */
 };
 
index 2f6d3e9..e475665 100644 (file)
@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        }
 },
 
+/* Steinberg devices */
+{
+       /* Steinberg MI2 */
+       USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = & (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+{
+       /* Steinberg MI4 */
+       USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = & (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 /* TerraTec devices */
 {
        USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
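The new Steinberg MI2/MI4 entries are QUIRK_COMPOSITE records: .data points to an array of per-interface sub-quirks (three standard audio interfaces plus one fixed MIDI endpoint) that is walked until the .ifnum == -1 terminator. A simplified sketch of that terminator-driven walk, with stand-in types rather than the driver's real quirk structures:

#include <stdio.h>

enum quirk_type { QUIRK_AUDIO_STANDARD_INTERFACE, QUIRK_MIDI_FIXED_ENDPOINT };

struct sub_quirk { int ifnum; enum quirk_type type; };

/* Visit each sub-entry of a composite quirk until the -1 terminator. */
static void apply_composite(const struct sub_quirk *q)
{
        for (; q->ifnum != -1; q++)
                printf("interface %d: quirk type %d\n", q->ifnum, q->type);
}

int main(void)
{
        static const struct sub_quirk steinberg_like[] = {
                { 0, QUIRK_AUDIO_STANDARD_INTERFACE },
                { 1, QUIRK_AUDIO_STANDARD_INTERFACE },
                { 2, QUIRK_AUDIO_STANDARD_INTERFACE },
                { 3, QUIRK_MIDI_FIXED_ENDPOINT },
                { -1 }
        };

        apply_composite(steinberg_like);
        return 0;
}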
index 754e689..00ebc0c 100644 (file)
@@ -1268,6 +1268,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
+       case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
        case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
                if (fp->altsetting == 3)
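The added case puts the Gustard DAC-X20U into the existing fall-through list of 0x20b1 devices that report DSD_U32_BE when altsetting 3 is selected. A small standalone sketch of a packed-ID switch with stacked cases; the USB_ID packing and the returned bit are illustrative stand-ins:

#include <stdio.h>

#define USB_ID(vendor, product) (((vendor) << 16) | (product))

/* Report a DSD format bit for known IDs on altsetting 3,
 * mirroring the fall-through structure of the quirk above. */
static unsigned long dsd_quirk(unsigned int id, int altsetting)
{
        switch (id) {
        case USB_ID(0x20b1, 0x000a):    /* Gustard DAC-X20U */
        case USB_ID(0x20b1, 0x2009):    /* DIYINHK DSD DXD */
        case USB_ID(0x20b1, 0x2023):    /* JLsounds I2SoverUSB */
                if (altsetting == 3)
                        return 1UL << 0;    /* stand-in for DSD_U32_BE */
                break;
        }
        return 0;
}

int main(void)
{
        printf("%lu\n", dsd_quirk(USB_ID(0x20b1, 0x000a), 3));
        return 0;
}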
index 995b7a8..658b0a8 100644 (file)
@@ -45,7 +45,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
 
 static bool hist_browser__has_filter(struct hist_browser *hb)
 {
-       return hists__has_filter(hb->hists) || hb->min_pcnt;
+       return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
 }
 
 static int hist_browser__get_folding(struct hist_browser *browser)
index 85b5238..2babdda 100644 (file)
@@ -7,11 +7,15 @@
 
 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
 
+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 6)
 int __weak sched_getcpu(void)
 {
        errno = ENOSYS;
        return -1;
 }
+#endif
+#endif
 
 static int perf_flag_probe(void)
 {
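Wrapping perf's weak sched_getcpu() stub in __GLIBC_PREREQ guards means the fallback is only compiled on glibc builds older than 2.6, which lack the real function; on newer glibc the stub disappears instead of colliding with the C library's own symbol. A standalone illustration of the same guarded weak-fallback idiom (not perf's build, just the pattern):

#define _GNU_SOURCE
#include <errno.h>
#include <features.h>   /* __GLIBC_PREREQ on glibc */
#include <sched.h>
#include <stdio.h>

#ifdef __GLIBC_PREREQ
#if !__GLIBC_PREREQ(2, 6)
/* Old glibc: provide a weak stub so the call still links;
 * any strong definition elsewhere takes precedence. */
int __attribute__((weak)) sched_getcpu(void)
{
        errno = ENOSYS;
        return -1;
}
#endif
#endif

int main(void)
{
        printf("cpu: %d\n", sched_getcpu());
        return 0;
}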
index 201f6c4..99378a5 100644 (file)
@@ -1893,6 +1893,8 @@ int setup_intlist(struct intlist **list, const char *list_str,
                pr_err("problems parsing %s list\n", list_name);
                return -1;
        }
+
+       symbol_conf.has_filter = true;
        return 0;
 }
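Together with the hist_browser__has_filter() hunk above, this makes a successfully parsed intlist option count as an active filter: setup_intlist() raises symbol_conf.has_filter, and the browser check now also reports true when that flag is set even if no per-hists filter or min_pcnt threshold is installed. A compact sketch of the flag-plus-check pattern; the structures are simplified stand-ins for perf's:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for perf's symbol_conf and hist_browser. */
static struct { bool has_filter; } symbol_conf;

struct hist_browser { bool hists_filtered; unsigned long long min_pcnt; };

static bool hist_browser__has_filter(const struct hist_browser *hb)
{
        return hb->hists_filtered || hb->min_pcnt || symbol_conf.has_filter;
}

static int setup_intlist(const char *list_str)
{
        if (!list_str)
                return 0;
        /* ... parse the comma-separated list here ... */
        symbol_conf.has_filter = true;  /* remember that a filter exists */
        return 0;
}

int main(void)
{
        struct hist_browser hb = { 0 };

        setup_intlist("1234,5678");
        printf("filtered view: %d\n", hist_browser__has_filter(&hb));
        return 0;
}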
 
index 0956150..be02179 100644 (file)
@@ -105,7 +105,8 @@ struct symbol_conf {
                        demangle_kernel,
                        filter_relative,
                        show_hist_headers,
-                       branch_callstack;
+                       branch_callstack,
+                       has_filter;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,