author     Linux Build Service Account <lnxbuild@localhost>   2019-10-13 05:45:39 -0700
committer  Linux Build Service Account <lnxbuild@localhost>   2019-10-13 05:45:39 -0700
commit     7befd0b86ee75ba7e7ff7f20df1e55d25daba7a8 (patch)
tree       f6c79b2d424ded53823160b89e041f686ecd01b6
parent     b6bcdb74c182560e136ea585e0ba28e12d0b1b6c (diff)
parent     3842f7d8bedf92b0985c996206e235a212f853af (diff)

Merge 3842f7d8bedf92b0985c996206e235a212f853af on remote branch (LA.UM.7.11.r1-04000-NICOBAR.0)

Change-Id: Iaa900edd6040e42b220b17d60b09df3fe0cd72c3
-rw-r--r--  Documentation/devicetree/bindings/sound/wcd_codec.txt        | 19
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi                 |  6
-rw-r--r--  arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi           |  4
-rw-r--r--  arch/arm64/boot/dts/qcom/trinket-audio-overlay.dtsi          |  4
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c                          | 56
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.h                          | 12
-rw-r--r--  drivers/gpu/msm/kgsl.h                                       |  4
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c                               | 24
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c                                 | 60
-rw-r--r--  drivers/iommu/arm-smmu.c                                     | 77
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c       | 59
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h       |  5
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.c           | 60
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h           |  5
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp48.c         |  6
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c  | 15
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c      | 36
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h      |  2
-rw-r--r--  drivers/media/platform/msm/npu/npu_hw_access.c               |  2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c                        |  4
-rw-r--r--  drivers/soc/qcom/icnss_qmi.c                                 |  2
-rw-r--r--  drivers/soc/qcom/mem-offline.c                               |  3
-rw-r--r--  drivers/soc/qcom/qmi_interface.c                             | 42
-rw-r--r--  drivers/usb/dwc3/core.c                                      |  8
-rw-r--r--  drivers/usb/dwc3/core.h                                      |  1
-rw-r--r--  drivers/usb/dwc3/debug.h                                     | 20
-rw-r--r--  drivers/usb/dwc3/debug_ipc.c                                 | 46
-rw-r--r--  drivers/usb/dwc3/gadget.c                                    | 14
-rw-r--r--  include/linux/soc/qcom/qmi.h                                 |  4
-rw-r--r--  include/uapi/media/msmb_isp.h                                | 20
-rw-r--r--  include/uapi/sound/compress_params.h                         |  5
-rw-r--r--  kernel/sched/walt.c                                          |  9
-rw-r--r--  mm/memory.c                                                  | 21
-rw-r--r--  mm/page_owner.c                                              | 18
34 files changed, 520 insertions(+), 153 deletions(-)
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index 7f3183e8a97f..75eb86009ae0 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -460,6 +460,16 @@ Required properties:
required to be configured to receive interrupts
in BCL block of WSA macro
+WSA slave device as child of Bolero codec
+
+Required properties:
+ - compatible = "qcom,wsa881x";
+ - reg: Specifies the WSA slave device base address.
+ - qcom,spkr-sd-n-gpio: speaker reset gpio
+
+Optional properties:
+ - qcom,bolero-handle: phandle to bolero codec
+
Example:
&bolero {
@@ -471,6 +481,15 @@ Example:
<&clock_audio_wsa_2 0>;
qcom,wsa-swr-gpios = &wsa_swr_gpios;
qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+ swr_0: wsa_swr_master {
+ compatible = "qcom,swr-mstr";
+ wsa881x_1: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170212>;
+ qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
+ qcom,bolero-handle = <&bolero>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi
index cbcfc52bea80..4b421f592d64 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,24 +38,28 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0212: wsa881x@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0213: wsa881x@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0214: wsa881x@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>;
+ qcom,bolero-handle = <&bolero>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi
index 4316c8b35d56..c7ad8e030eba 100644
--- a/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6150-audio-overlay.dtsi
@@ -116,24 +116,28 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0212: wsa881x@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0213: wsa881x@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0214: wsa881x@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ qcom,bolero-handle = <&bolero>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/trinket-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/trinket-audio-overlay.dtsi
index 2568fab8077b..af8ecc159b6d 100644
--- a/arch/arm64/boot/dts/qcom/trinket-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/trinket-audio-overlay.dtsi
@@ -114,24 +114,28 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0212: wsa881x@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0213: wsa881x@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
wsa881x_0214: wsa881x@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,bolero-handle = <&bolero>;
};
};
};
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 596a613eda45..ded68b43a5e9 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -309,6 +309,11 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc");
if (ret)
return ret;
+
+ /* allocate a chunk of memory to create user profiling IB1s */
+ kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->profile_desc,
+ PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0, "profile_desc");
+
return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc,
KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY,
0, "ringbuffer");
@@ -323,7 +328,7 @@ int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
if (!adreno_is_a3xx(adreno_dev)) {
status = kgsl_allocate_global(device, &device->scratch,
- PAGE_SIZE, 0, 0, "scratch");
+ PAGE_SIZE, 0, KGSL_MEMDESC_RANDOM, "scratch");
if (status != 0)
return status;
}
@@ -354,7 +359,7 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
kgsl_free_global(device, &rb->pagetable_desc);
kgsl_free_global(device, &rb->preemption_desc);
-
+ kgsl_free_global(device, &rb->profile_desc);
kgsl_free_global(device, &rb->buffer_desc);
kgsl_del_event_group(&rb->events);
memset(rb, 0, sizeof(struct adreno_ringbuffer));
@@ -855,6 +860,37 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
return (unsigned int)(p - cmds);
}
+/* This is the maximum possible size for 64 bit targets */
+#define PROFILE_IB_DWORDS 4
+#define PROFILE_IB_SLOTS (PAGE_SIZE / (PROFILE_IB_DWORDS << 2))
+
+static int set_user_profiling(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, u32 *cmds, u64 gpuaddr)
+{
+ int dwords, index = 0;
+ u64 ib_gpuaddr;
+ u32 *ib;
+
+ if (!rb->profile_desc.hostptr)
+ return 0;
+
+ ib = ((u32 *) rb->profile_desc.hostptr) +
+ (rb->profile_index * PROFILE_IB_DWORDS);
+ ib_gpuaddr = rb->profile_desc.gpuaddr +
+ (rb->profile_index * (PROFILE_IB_DWORDS << 2));
+
+ dwords = _get_alwayson_counter(adreno_dev, ib, gpuaddr);
+
+ /* Make an indirect buffer for the request */
+ cmds[index++] = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
+ index += cp_gpuaddr(adreno_dev, &cmds[index], ib_gpuaddr);
+ cmds[index++] = dwords;
+
+ rb->profile_index = (rb->profile_index + 1) % PROFILE_IB_SLOTS;
+
+ return index;
+}
+
/* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_drawobj_cmd *cmdobj,
@@ -955,14 +991,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
!adreno_is_a3xx(adreno_dev) &&
(cmdobj->profiling_buf_entry != NULL)) {
user_profiling = true;
- dwords += 6;
/*
- * REG_TO_MEM packet on A5xx and above needs another ordinal.
- * Add 2 more dwords since we do profiling before and after.
+ * User side profiling uses two IB1s, one before and one after, with
+ * 4 dwords per INDIRECT_BUFFER_PFE call
*/
- if (!ADRENO_LEGACY_PM4(adreno_dev))
- dwords += 2;
+ dwords += 8;
/*
* we want to use an adreno_submit_time struct to get the
@@ -1021,11 +1055,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
}
/*
- * Add cmds to read the GPU ticks at the start of command obj and
+ * Add IB1 to read the GPU ticks at the start of command obj and
* write it into the appropriate command obj profiling buffer offset
*/
if (user_profiling) {
- cmds += _get_alwayson_counter(adreno_dev, cmds,
+ cmds += set_user_profiling(adreno_dev, rb, cmds,
cmdobj->profiling_buffer_gpuaddr +
offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
@@ -1073,11 +1107,11 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
}
/*
- * Add cmds to read the GPU ticks at the end of command obj and
+ * Add IB1 to read the GPU ticks at the end of command obj and
* write it into the appropriate command obj profiling buffer offset
*/
if (user_profiling) {
- cmds += _get_alwayson_counter(adreno_dev, cmds,
+ cmds += set_user_profiling(adreno_dev, rb, cmds,
cmdobj->profiling_buffer_gpuaddr +
offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
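
The fixed dwords += 8 and the PROFILE_IB_SLOTS value above follow from simple arithmetic; below is a small standalone check, not part of the patch, assuming 4 KB pages and a 64-bit target (where cp_gpuaddr emits two dwords):

#include <stdio.h>

/*
 * Standalone sketch of the sizing used above: each profiling IB1 slot is
 * PROFILE_IB_DWORDS = 4 dwords (16 bytes), so one page holds 256 slots,
 * and each submission adds two 4-dword CP_INDIRECT_BUFFER_PFE launches
 * (8 dwords total) to the ringbuffer.
 */
int main(void)
{
	const unsigned int page_size = 4096;
	const unsigned int profile_ib_dwords = 4;
	const unsigned int slot_bytes = profile_ib_dwords * 4;
	const unsigned int launch_dwords = 1 /* packet header */ +
					   2 /* 64-bit gpuaddr  */ +
					   1 /* IB size in dwords */;

	printf("profile IB slots per page: %u\n", page_size / slot_bytes);
	printf("ringbuffer dwords per submission: %u\n", 2 * launch_dwords);
	return 0;
}
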
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 5b53fcfd21a2..eae211f93dea 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -138,6 +138,18 @@ struct adreno_ringbuffer {
enum adreno_rb_starve_states starve_state;
spinlock_t preempt_lock;
bool skip_inline_wptr;
+ /**
+ * @profile_desc: global memory to construct IB1s to do user side
+ * profiling
+ */
+ struct kgsl_memdesc profile_desc;
+ /**
+ * @profile_index: Pointer to the next "slot" in profile_desc for a user
+ * profiling IB1. This allows for PAGE_SIZE / 16 = 256 simultaneous
+ * commands per ringbuffer with user profiling enabled, which is more than
+ * enough.
+ */
+ u32 profile_index;
};
/* Returns the current ringbuffer */
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index dfb5050c734b..e5e071e03314 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -201,6 +201,8 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_CONTIG BIT(8)
/* This is an instruction buffer */
#define KGSL_MEMDESC_UCODE BIT(9)
+/* For global buffers, randomly assign an address from the region */
+#define KGSL_MEMDESC_RANDOM BIT(10)
/**
* struct kgsl_memdesc - GPU memory object descriptor
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index c927fdccb66d..05c41362e11d 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -598,13 +598,29 @@ static void add_profiling_buffer(struct kgsl_device *device,
return;
}
- cmdobj->profiling_buf_entry = entry;
- if (id != 0)
+ if (!id) {
+ cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+ } else {
+ u64 off = offset + sizeof(struct kgsl_drawobj_profiling_buffer);
+
+ /*
+ * Make sure there is enough room in the object to store the
+ * entire profiling buffer object
+ */
+ if (off < offset || off >= entry->memdesc.size) {
+ dev_err(device->dev,
+ "ignore invalid profile offset ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
+ drawobj->context->id, id, offset, gpuaddr, size);
+ kgsl_mem_entry_put(entry);
+ return;
+ }
+
cmdobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
- else
- cmdobj->profiling_buffer_gpuaddr = gpuaddr;
+ }
+
+ cmdobj->profiling_buf_entry = entry;
}
/**
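
The off < offset test in the hunk above is the standard unsigned-wraparound guard; a minimal standalone illustration, not from the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration of the overflow check used in add_profiling_buffer(): if
 * offset + sizeof(...) wraps around a u64, the sum is guaranteed to be
 * smaller than the original offset, so the request is rejected.
 */
int main(void)
{
	const uint64_t obj_size = 64;           /* stand-in for sizeof(struct ...) */
	const uint64_t offset = UINT64_MAX - 8; /* hostile, near-wrapping offset   */
	const uint64_t off = offset + obj_size; /* wraps around                    */

	if (off < offset)
		printf("offset 0x%llx rejected: sum wrapped to 0x%llx\n",
		       (unsigned long long)offset, (unsigned long long)off);
	return 0;
}
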
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 4afbd3de988b..35895b2f93a1 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -20,6 +20,7 @@
#include <linux/msm_kgsl.h>
#include <linux/ratelimit.h>
#include <linux/of_platform.h>
+#include <linux/random.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/compat.h>
@@ -90,15 +91,8 @@ static struct kmem_cache *addr_entry_cache;
*
* Here we define an array and a simple allocator to keep track of the currently
* active global entries. Each entry is assigned a unique address inside of a
- * MMU implementation specific "global" region. The addresses are assigned
- * sequentially and never re-used to avoid having to go back and reprogram
- * existing pagetables. The entire list of active entries are mapped and
- * unmapped into every new pagetable as it is created and destroyed.
- *
- * Because there are relatively few entries and they are defined at boot time we
- * don't need to go over the top to define a dynamic allocation scheme. It will
- * be less wasteful to pick a static number with a little bit of growth
- * potential.
+ * MMU implementation specific "global" region. We use a simple bitmap based
+ * allocator for the region to allow for both fixed and dynamic addressing.
*/
#define GLOBAL_PT_ENTRIES 32
@@ -108,13 +102,17 @@ struct global_pt_entry {
char name[32];
};
+#define GLOBAL_MAP_PAGES (KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT)
+
static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
+static DECLARE_BITMAP(global_map, GLOBAL_MAP_PAGES);
+
static int secure_global_size;
static int global_pt_count;
-uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
static struct kgsl_memdesc gpu_qtimer_desc;
static unsigned int context_bank_number;
+
void kgsl_print_global_pt_entries(struct seq_file *s)
{
int i;
@@ -209,6 +207,12 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
for (i = 0; i < global_pt_count; i++) {
if (global_pt_entries[i].memdesc == memdesc) {
+ u64 offset = memdesc->gpuaddr -
+ KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
+
+ bitmap_clear(global_map, offset >> PAGE_SHIFT,
+ kgsl_memdesc_footprint(memdesc) >> PAGE_SHIFT);
+
memdesc->gpuaddr = 0;
memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
global_pt_entries[i].memdesc = NULL;
@@ -220,19 +224,43 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
struct kgsl_memdesc *memdesc, const char *name)
{
+ u32 bit, start = 0;
+ u64 size = kgsl_memdesc_footprint(memdesc);
+
if (memdesc->gpuaddr != 0)
return;
- /*Check that we can fit the global allocations */
- if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES) ||
- WARN_ON((global_pt_alloc + memdesc->size) >=
- KGSL_IOMMU_GLOBAL_MEM_SIZE))
+ if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES))
+ return;
+
+ if (WARN_ON(size > KGSL_IOMMU_GLOBAL_MEM_SIZE))
+ return;
+
+ if (memdesc->priv & KGSL_MEMDESC_RANDOM) {
+ u32 range = GLOBAL_MAP_PAGES - (size >> PAGE_SHIFT);
+
+ start = get_random_int() % range;
+ }
+
+ while (start >= 0) {
+ bit = bitmap_find_next_zero_area(global_map, GLOBAL_MAP_PAGES,
+ start, size >> PAGE_SHIFT, 0);
+
+ if (bit < GLOBAL_MAP_PAGES)
+ break;
+
+ start--;
+ }
+
+ if (WARN_ON(start < 0))
return;
- memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
+ memdesc->gpuaddr =
+ KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (bit << PAGE_SHIFT);
+
+ bitmap_set(global_map, bit, size >> PAGE_SHIFT);
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
- global_pt_alloc += kgsl_memdesc_footprint(memdesc);
global_pt_entries[global_pt_count].memdesc = memdesc;
strlcpy(global_pt_entries[global_pt_count].name, name,
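
A userspace model of the bitmap placement scheme this hunk introduces may help; everything below (the demo names, the 64-page region, 4 KB pages) is illustrative only and not from the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Model of the global-region allocator added above: one bit per page, and
 * KGSL_MEMDESC_RANDOM allocations pick a random starting page, then keep
 * lowering the search start until a free run of the requested size is found.
 */
#define GLOBAL_PAGES 64	/* stand-in for KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT */

static unsigned char page_map[GLOBAL_PAGES];	/* 0 = free, 1 = in use */

static int run_is_free(unsigned int start, unsigned int npages)
{
	for (unsigned int i = 0; i < npages; i++)
		if (start + i >= GLOBAL_PAGES || page_map[start + i])
			return 0;
	return 1;
}

/* First free run of npages at or after 'from', or -1 if there is none. */
static int find_zero_area(unsigned int from, unsigned int npages)
{
	for (unsigned int s = from; s + npages <= GLOBAL_PAGES; s++)
		if (run_is_free(s, npages))
			return (int)s;
	return -1;
}

static int alloc_random(unsigned int npages)
{
	for (int start = rand() % (GLOBAL_PAGES - npages + 1); start >= 0; start--) {
		int bit = find_zero_area((unsigned int)start, npages);

		if (bit >= 0) {
			memset(&page_map[bit], 1, npages);
			return bit;
		}
	}
	return -1;
}

int main(void)
{
	printf("allocated 4 pages starting at page %d\n", alloc_random(4));
	return 0;
}
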
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e870b6496dc4..03ab8b9bc7c9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1163,10 +1163,32 @@ static struct qsmmuv500_tbu_device *qsmmuv500_find_tbu(
return NULL;
}
+static void arm_smmu_testbus_dump(struct arm_smmu_device *smmu, u16 sid)
+{
+ if (smmu->model == QCOM_SMMUV500 &&
+ IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS_DUMP)) {
+ struct qsmmuv500_archdata *data;
+ struct qsmmuv500_tbu_device *tbu;
+
+ data = smmu->archdata;
+ tbu = qsmmuv500_find_tbu(smmu, sid);
+ spin_lock(&testbus_lock);
+ if (tbu)
+ arm_smmu_debug_dump_tbu_testbus(tbu->dev,
+ tbu->base,
+ data->tcu_base,
+ tbu_testbus_sel,
+ data->testbus_version);
+
+ arm_smmu_debug_dump_tcu_testbus(smmu->dev, ARM_SMMU_GR0(smmu),
+ data->tcu_base,
+ tcu_testbus_sel);
+ spin_unlock(&testbus_lock);
+ }
+}
/* Wait for any pending TLB invalidations to complete */
static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
- void __iomem *sync, void __iomem *status,
- struct arm_smmu_domain *smmu_domain)
+ void __iomem *sync, void __iomem *status)
{
unsigned int spin_cnt, delay;
u32 sync_inv_ack, tbu_pwr_status, sync_inv_progress;
@@ -1190,46 +1212,6 @@ static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
dev_err_ratelimited(smmu->dev,
"TLB sync timed out -- SMMU may be deadlocked ack 0x%x pwr 0x%x sync and invalidation progress 0x%x\n",
sync_inv_ack, tbu_pwr_status, sync_inv_progress);
-
- if (smmu->model == QCOM_SMMUV500 &&
- IS_ENABLED(CONFIG_ARM_SMMU_TESTBUS_DUMP)) {
-
- struct qsmmuv500_archdata *data;
-
- spin_lock(&testbus_lock);
- data = smmu->archdata;
-
- if (smmu_domain) {
- struct qsmmuv500_tbu_device *tbu;
- int i, idx;
- u16 sid = U16_MAX;
- struct device *dev = smmu->dev;
- struct iommu_fwspec *fwspec = dev->iommu_fwspec;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_smr *smrs = smmu->smrs;
-
- mutex_lock(&smmu->stream_map_mutex);
- for_each_cfg_sme(fwspec, i, idx) {
- if (smmu->s2crs[idx].cbndx == cfg->cbndx) {
- sid = smrs[idx].id;
- break;
- }
- }
- mutex_unlock(&smmu->stream_map_mutex);
-
- tbu = qsmmuv500_find_tbu(smmu, sid);
- if (tbu)
- arm_smmu_debug_dump_tbu_testbus(tbu->dev,
- tbu->base, data->tcu_base,
- tbu_testbus_sel,
- data->testbus_version);
- }
- arm_smmu_debug_dump_tcu_testbus(smmu->dev, ARM_SMMU_GR0(smmu),
- data->tcu_base, tcu_testbus_sel);
- spin_unlock(&testbus_lock);
- }
-
- BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
return -EINVAL;
}
@@ -1240,9 +1222,12 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_lock_irqsave(&smmu->global_sync_lock, flags);
if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
- base + ARM_SMMU_GR0_sTLBGSTATUS, NULL))
+ base + ARM_SMMU_GR0_sTLBGSTATUS)) {
dev_err_ratelimited(smmu->dev,
"TLB global sync failed!\n");
+ arm_smmu_testbus_dump(smmu, U16_MAX);
+ BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
+ }
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
@@ -1254,6 +1239,7 @@ static void arm_smmu_tlb_sync_context(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct device *dev = smmu_domain->dev;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
unsigned long flags;
size_t ret;
@@ -1274,11 +1260,14 @@ static void arm_smmu_tlb_sync_context(void *cookie)
spin_lock_irqsave(&smmu_domain->sync_lock, flags);
if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
- base + ARM_SMMU_CB_TLBSTATUS, smmu_domain))
+ base + ARM_SMMU_CB_TLBSTATUS)) {
dev_err_ratelimited(smmu->dev,
"TLB sync on cb%d failed for device %s\n",
smmu_domain->cfg.cbndx,
dev_name(smmu_domain->dev));
+ arm_smmu_testbus_dump(smmu, (u16)fwspec->ids[0]);
+ BUG_ON(IS_ENABLED(CONFIG_IOMMU_TLBSYNC_DEBUG));
+ }
spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 34235d41ec84..d53c81016e1c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -755,6 +755,62 @@ static int msm_isp_buf_divert(struct msm_isp_buf_mgr *buf_mgr,
return 0;
}
+static int msm_isp_buf_err(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ enum msm_isp_buffer_state state;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return -EINVAL;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ state = buf_info->state;
+
+ if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_HAL) {
+ if (state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ buf_mgr->vb2_ops->buf_error(buf_info->vb2_v4l2_buf,
+ bufq->session_id, bufq->stream_id,
+ frame_id, tv, output_format);
+ } else {
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+ goto done;
+ }
+
+ /*
+ * For a native buffer, put the diverted buffer back on the queue since
+ * the caller is not going to send it to CPP; this is an error case such
+ * as drop_frame/empty_buffer
+ */
+ if (state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ rc = msm_isp_put_buf_unsafe(buf_mgr, buf_info->bufq_handle,
+ buf_info->buf_idx);
+ if (rc < 0)
+ pr_err("%s: Buf put failed\n", __func__);
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+done:
+ return rc;
+}
+
+
static int msm_isp_buf_done(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index,
struct timeval *tv, uint32_t frame_id, uint32_t output_format)
@@ -1503,6 +1559,7 @@ static struct msm_isp_buf_ops isp_buf_ops = {
.buf_mgr_debug = msm_isp_buf_mgr_debug,
.get_bufq = msm_isp_get_bufq,
.buf_divert = msm_isp_buf_divert,
+ .buf_err = msm_isp_buf_err,
};
int msm_isp_create_isp_buf_mgr(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index e88534c49783..de985839141f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -180,6 +180,9 @@ struct msm_isp_buf_ops {
int (*buf_divert)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index,
struct timeval *tv, uint32_t frame_id);
+ int (*buf_err)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format);
};
struct msm_isp_buf_mgr {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 097046d30ff0..c159b144c42d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -357,26 +357,48 @@ static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
file->f_flags & O_NONBLOCK);
if (rc)
return rc;
- event_data = (struct msm_isp_event_data *)
- isp_event.u.data;
- isp_event_user = (struct v4l2_event *)arg;
- memcpy(isp_event_user, &isp_event,
+ if (isp_event.type == ISP_EVENT_SOF_UPDATE_NANOSEC) {
+ struct msm_isp_event_data_nanosec *event_data_nanosec;
+ struct msm_isp_event_data_nanosec
+ *event_data_nanosec_user;
+
+ event_data_nanosec =
+ (struct msm_isp_event_data_nanosec *)
+ isp_event.u.data;
+ isp_event_user = (struct v4l2_event *)arg;
+ memcpy(isp_event_user, &isp_event,
sizeof(*isp_event_user));
- event_data32 = (struct msm_isp_event_data32 *)
- isp_event_user->u.data;
- memset(event_data32, 0,
- sizeof(struct msm_isp_event_data32));
- event_data32->timestamp.tv_sec =
- event_data->timestamp.tv_sec;
- event_data32->timestamp.tv_usec =
- event_data->timestamp.tv_usec;
- event_data32->mono_timestamp.tv_sec =
- event_data->mono_timestamp.tv_sec;
- event_data32->mono_timestamp.tv_usec =
- event_data->mono_timestamp.tv_usec;
- event_data32->frame_id = event_data->frame_id;
- memcpy(&(event_data32->u), &(event_data->u),
- sizeof(event_data32->u));
+ event_data_nanosec_user =
+ (struct msm_isp_event_data_nanosec *)
+ isp_event_user->u.data;
+ memset(event_data_nanosec_user, 0,
+ sizeof(struct msm_isp_event_data_nanosec));
+ event_data_nanosec_user->nano_timestamp =
+ event_data_nanosec->nano_timestamp;
+ event_data_nanosec_user->frame_id =
+ event_data_nanosec->frame_id;
+ } else {
+ event_data = (struct msm_isp_event_data *)
+ isp_event.u.data;
+ isp_event_user = (struct v4l2_event *)arg;
+ memcpy(isp_event_user, &isp_event,
+ sizeof(*isp_event_user));
+ event_data32 = (struct msm_isp_event_data32 *)
+ isp_event_user->u.data;
+ memset(event_data32, 0,
+ sizeof(struct msm_isp_event_data32));
+ event_data32->timestamp.tv_sec =
+ event_data->timestamp.tv_sec;
+ event_data32->timestamp.tv_usec =
+ event_data->timestamp.tv_usec;
+ event_data32->mono_timestamp.tv_sec =
+ event_data->mono_timestamp.tv_sec;
+ event_data32->mono_timestamp.tv_usec =
+ event_data->mono_timestamp.tv_usec;
+ event_data32->frame_id = event_data->frame_id;
+ memcpy(&(event_data32->u), &(event_data->u),
+ sizeof(event_data32->u));
+ }
} else {
rc = v4l2_event_dequeue(vfh, arg,
file->f_flags & O_NONBLOCK);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 233e61f45880..bd54899f6454 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -131,6 +131,8 @@ struct msm_isp_timestamp {
struct timeval vt_time;
/*Wall clock for userspace event*/
struct timeval event_time;
+ /* event time in nanosec*/
+ uint64_t buf_time_ns;
};
struct msm_vfe_irq_ops {
@@ -874,6 +876,9 @@ struct vfe_device {
/* irq info */
uint32_t dual_irq_mask;
uint32_t irq_sof_id;
+
+ /* nano sec timestamp */
+ uint32_t nanosec_ts_enable;
};
struct vfe_parent_device {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
index c5347780e907..0126c3719ca7 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp48.c
@@ -30,7 +30,7 @@
#define MSM_VFE48_BUS_CLIENT_INIT 0xABAB
#define VFE48_STATS_BURST_LEN 3
#define VFE48_UB_SIZE_VFE 2048 /* 2048 * 256 bits = 64KB */
-#define VFE48_UB_STATS_SIZE 288
+#define VFE48_UB_STATS_SIZE 352
#define MSM_ISP48_TOTAL_IMAGE_UB_VFE (VFE48_UB_SIZE_VFE - VFE48_UB_STATS_SIZE)
@@ -321,8 +321,8 @@ void msm_vfe48_stats_cfg_ub(struct vfe_device *vfe_dev)
int i;
uint32_t ub_offset = 0, stats_burst_len;
uint32_t ub_size[VFE47_NUM_STATS_TYPE] = {
- 32, /* MSM_ISP_STATS_HDR_BE */
- 32, /* MSM_ISP_STATS_BG */
+ 64, /* MSM_ISP_STATS_HDR_BE */
+ 64, /* MSM_ISP_STATS_BG */
32, /* MSM_ISP_STATS_BF */
32, /* MSM_ISP_STATS_HDR_BHIST */
32, /* MSM_ISP_STATS_RS */
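
A quick consistency check on the numbers, not part of the patch: raising the MSM_ISP_STATS_HDR_BE and MSM_ISP_STATS_BG entries from 32 to 64 words each adds 2 * 32 = 64 words of stats UB, and 288 + 64 = 352 matches the new VFE48_UB_STATS_SIZE earlier in this file, so MSM_ISP48_TOTAL_IMAGE_UB_VFE shrinks by the same 64 words.
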
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index fd14672a89cc..e2b1fa282f92 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1150,6 +1150,19 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
break;
}
+ if ((vfe_dev->nanosec_ts_enable) &&
+ (event_type == ISP_EVENT_SOF) &&
+ (frame_src == VFE_PIX_0)) {
+ struct msm_isp_event_data_nanosec event_data_nanosec;
+
+ event_data_nanosec.frame_id =
+ vfe_dev->axi_data.src_info[frame_src].frame_id;
+ event_data_nanosec.nano_timestamp = ts->buf_time_ns;
+ msm_isp_send_event_update_nanosec(vfe_dev,
+ ISP_EVENT_SOF_UPDATE_NANOSEC,
+ &event_data_nanosec);
+ }
+
event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
event_data.timestamp = ts->event_time;
event_data.mono_timestamp = ts->buf_time;
@@ -3656,7 +3669,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
buf->buf_debug.put_state[buf->buf_debug.put_state_last] =
MSM_ISP_BUFFER_STATE_DROP_REG;
buf->buf_debug.put_state_last ^= 1;
- rc = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ rc = vfe_dev->buf_mgr->ops->buf_err(vfe_dev->buf_mgr,
buf->bufq_handle, buf->buf_idx,
&timestamp.buf_time, frame_id,
stream_info->runtime_output_format);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index e38cf5eb1081..c1bc1c2f0177 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -218,6 +218,8 @@ void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp,
get_monotonic_boottime(&ts);
time_stamp->buf_time.tv_sec = ts.tv_sec;
time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
+ time_stamp->buf_time_ns =
+ ((uint64_t)ts.tv_sec * 1000000000) + ts.tv_nsec;
}
}
@@ -265,6 +267,9 @@ static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
case ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR:
evt_id = ISP_EVENT_BUF_FATAL_ERROR;
break;
+ case ISP_EVENT_MASK_INDEX_SOF_UPDATE_NANOSEC:
+ evt_id = ISP_EVENT_SOF_UPDATE_NANOSEC;
+ break;
default:
evt_id = ISP_EVENT_SUBS_MASK_NONE;
break;
@@ -294,6 +299,7 @@ static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
}
}
} else if (evt_mask_index == ISP_EVENT_MASK_INDEX_SOF ||
+ evt_mask_index == ISP_EVENT_MASK_INDEX_SOF_UPDATE_NANOSEC ||
evt_mask_index == ISP_EVENT_MASK_INDEX_REG_UPDATE ||
evt_mask_index == ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE) {
for (interface = 0; interface < VFE_SRC_MAX; interface++) {
@@ -339,7 +345,7 @@ static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
}
for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
- evt_mask_index <= ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR;
+ evt_mask_index <= ISP_EVENT_MASK_INDEX_SOF_UPDATE_NANOSEC;
evt_mask_index++) {
if (evt_mask & (1<<evt_mask_index)) {
evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
@@ -537,6 +543,14 @@ static int msm_isp_set_dual_vfe_sync_mode(
vfe_dev->dual_vfe_sync_enable = mode->enable;
return 0;
}
+static int msm_isp_nano_sec_timestamp(
+ struct vfe_device *vfe_dev, void *arg)
+{
+ struct msm_vfe_nano_sec_timestamp *mode = arg;
+
+ vfe_dev->nanosec_ts_enable = mode->enable;
+ return 0;
+}
int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0;
@@ -1072,6 +1086,11 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
rc = msm_isp_set_dual_vfe_sync_mode(vfe_dev, arg);
mutex_unlock(&vfe_dev->core_mutex);
break;
+ case VIDIOC_MSM_ISP_NANOSEC_TIMESTAMP:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_nano_sec_timestamp(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
default:
pr_err_ratelimited("%s: Invalid ISP command %x\n", __func__,
cmd);
@@ -1572,6 +1591,21 @@ int msm_isp_send_event(struct vfe_device *vfe_dev,
return 0;
}
+int msm_isp_send_event_update_nanosec(struct vfe_device *vfe_dev,
+ uint32_t event_type,
+ struct msm_isp_event_data_nanosec *event_data)
+{
+ struct v4l2_event isp_event;
+
+ memset(&isp_event, 0, sizeof(struct v4l2_event));
+ isp_event.id = 0;
+ isp_event.type = event_type;
+ memcpy(&isp_event.u.data[0], event_data,
+ sizeof(struct msm_isp_event_data_nanosec));
+ v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+ return 0;
+}
+
#define CAL_WORD(width, M, N) ((width * M + N - 1) / N)
int msm_isp_cal_word_per_line(uint32_t output_format,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 638697b6d635..032ede501bc7 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -54,6 +54,8 @@ int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
int msm_isp_send_event(struct vfe_device *vfe_dev,
uint32_t type, struct msm_isp_event_data *event_data);
+int msm_isp_send_event_update_nanosec(struct vfe_device *vfe_dev,
+ uint32_t type, struct msm_isp_event_data_nanosec *event_data);
int msm_isp_cal_word_per_line(uint32_t output_format,
uint32_t pixel_per_line);
int msm_isp_get_bit_per_pixel(uint32_t output_format);
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index 8727ab89097d..d14a6d487554 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -298,8 +298,6 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
goto map_end;
}
- dma_sync_sg_for_device(&(npu_dev->pdev->dev), ion_buf->table->sgl,
- ion_buf->table->nents, DMA_BIDIRECTIONAL);
ion_buf->iova = ion_buf->table->sgl->dma_address;
ion_buf->size = ion_buf->dma_buf->size;
*addr = ion_buf->iova;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7b299a88ecbf..e003603b823c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1882,6 +1882,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+
+ /* null terminate the string */
+ fst_switch.netdev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
retval = ipa_wigig_send_msg(WIGIG_FST_SWITCH,
fst_switch.netdev_name,
fst_switch.client_mac_addr,
diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c
index 57a30421d9da..5b06f15d3d6d 100644
--- a/drivers/soc/qcom/icnss_qmi.c
+++ b/drivers/soc/qcom/icnss_qmi.c
@@ -141,7 +141,7 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv)
for (i = 0; i < resp->mem_region_info_len; i++) {
if (resp->mem_region_info[i].size > priv->msa_mem_size ||
- resp->mem_region_info[i].region_addr > max_mapped_addr ||
+ resp->mem_region_info[i].region_addr >= max_mapped_addr ||
resp->mem_region_info[i].region_addr < priv->msa_pa ||
resp->mem_region_info[i].size +
resp->mem_region_info[i].region_addr > max_mapped_addr) {
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index f6e8cfee3f9c..a54c2b7b0ee9 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -22,6 +22,7 @@
#include <linux/kobject.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/bootmem.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/qmp.h>
#include <soc/qcom/rpm-smd.h>
@@ -455,6 +456,8 @@ static int mem_online_remaining_blocks(void)
fail = 1;
}
}
+
+ max_pfn = PFN_DOWN(memblock_end_of_DRAM());
return fail;
}
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 6a5bed9935a5..cc1bcaa8da93 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -317,7 +317,6 @@ int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
memset(txn, 0, sizeof(*txn));
- mutex_init(&txn->lock);
init_completion(&txn->completion);
txn->qmi = qmi;
txn->ei = ei;
@@ -353,17 +352,12 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
ret = wait_for_completion_timeout(&txn->completion, timeout);
- mutex_lock(&txn->lock);
if (txn->result == -ENETRESET) {
- mutex_unlock(&txn->lock);
return txn->result;
}
- mutex_unlock(&txn->lock);
mutex_lock(&qmi->txn_lock);
- mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
- mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
if (ret == 0)
@@ -382,9 +376,7 @@ void qmi_txn_cancel(struct qmi_txn *txn)
struct qmi_handle *qmi = txn->qmi;
mutex_lock(&qmi->txn_lock);
- mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
- mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
}
EXPORT_SYMBOL(qmi_txn_cancel);
@@ -505,24 +497,22 @@ static void qmi_handle_message(struct qmi_handle *qmi,
if (hdr->type == QMI_RESPONSE) {
mutex_lock(&qmi->txn_lock);
txn = idr_find(&qmi->txns, hdr->txn_id);
- if (txn)
- mutex_lock(&txn->lock);
- mutex_unlock(&qmi->txn_lock);
- }
-
- if (txn && txn->dest && txn->ei) {
- ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
- if (ret < 0)
- pr_err("failed to decode incoming message\n");
-
- txn->result = ret;
- complete(&txn->completion);
-
- mutex_unlock(&txn->lock);
- } else if (txn) {
- qmi_invoke_handler(qmi, sq, txn, buf, len);
+ /* Ignore unexpected responses */
+ if (!txn) {
+ mutex_unlock(&qmi->txn_lock);
+ return;
+ }
+ if (txn->dest && txn->ei) {
+ ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
- mutex_unlock(&txn->lock);
+ txn->result = ret;
+ complete(&txn->completion);
+ } else {
+ qmi_invoke_handler(qmi, sq, txn, buf, len);
+ }
+ mutex_unlock(&qmi->txn_lock);
} else {
/* Create a txn based on the txn_id of the incoming message */
memset(&tmp_txn, 0, sizeof(tmp_txn));
@@ -718,11 +708,9 @@ void qmi_handle_release(struct qmi_handle *qmi)
mutex_lock(&qmi->txn_lock);
idr_for_each_entry(&qmi->txns, txn, txn_id) {
- mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
txn->result = -ENETRESET;
complete(&txn->completion);
- mutex_unlock(&txn->lock);
}
mutex_unlock(&qmi->txn_lock);
idr_destroy(&qmi->txns);
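
The caller pattern that this simplified locking has to keep correct is the usual init/send/wait transaction flow, where the response handler now completes the transaction entirely under qmi->txn_lock. A hedged kernel-style sketch, not from this patch, with demo_req/demo_resp, their *_ei tables and the DEMO_* constants standing in for a real QMI service definition:

#include <linux/soc/qcom/qmi.h>

/* Sketch of a typical requester; all demo_* names are placeholders. */
static int demo_send_request(struct qmi_handle *qmi,
			     struct demo_req *req, struct demo_resp *resp)
{
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(qmi, &txn, demo_resp_ei, resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, NULL, &txn, DEMO_REQ_MSG_ID,
			       DEMO_REQ_MAX_LEN, demo_req_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Blocks until qmi_handle_message() completes the txn or 5 s elapse */
	return qmi_txn_wait(&txn, 5 * HZ);
}
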
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 82e804560bd2..cf0dc5bd0c80 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1246,6 +1246,7 @@ static int dwc3_probe(struct platform_device *pdev)
void __iomem *regs;
int irq;
+ char dma_ipc_log_ctx_name[40];
if (count >= DWC_CTRL_COUNT) {
dev_err(dev, "Err dwc instance %d >= %d available\n",
@@ -1349,6 +1350,13 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc->dwc_ipc_log_ctxt)
dev_err(dwc->dev, "Error getting ipc_log_ctxt\n");
+ snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name),
+ "%s.ep_events", dev_name(dwc->dev));
+ dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES,
+ dma_ipc_log_ctx_name, 0);
+ if (!dwc->dwc_dma_ipc_log_ctxt)
+ dev_err(dwc->dev, "Error getting ipc_log_ctxt for ep_events\n");
+
dwc3_instance[count] = dwc;
dwc->index = count;
count++;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 8d1a8d9d3132..7caee2635308 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1178,6 +1178,7 @@ struct dwc3 {
unsigned int index;
void *dwc_ipc_log_ctxt;
+ void *dwc_dma_ipc_log_ctxt;
struct dwc3_gadget_events dbg_gadget_events;
u32 xhci_imod_value;
int core_id;
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 2e00951fd785..92662ac095c8 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -44,6 +44,18 @@
#define dbg_setup(ep_num, req) \
dwc3_dbg_setup(dwc, ep_num, req)
+#define dbg_ep_queue(ep_num, req) \
+ dwc3_dbg_dma_queue(dwc, ep_num, req)
+
+#define dbg_ep_dequeue(ep_num, req) \
+ dwc3_dbg_dma_dequeue(dwc, ep_num, req)
+
+#define dbg_ep_unmap(ep_num, req) \
+ dwc3_dbg_dma_unmap(dwc, ep_num, req)
+
+#define dbg_ep_map(ep_num, req) \
+ dwc3_dbg_dma_map(dwc, ep_num, req)
+
#define dbg_log_string(fmt, ...) \
ipc_log_string(dwc->dwc_ipc_log_ctxt,\
"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -643,6 +655,14 @@ void dwc3_dbg_setup(struct dwc3 *dwc, u8 ep_num,
const struct usb_ctrlrequest *req);
void dwc3_dbg_print_reg(struct dwc3 *dwc,
const char *name, int reg);
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/debug_ipc.c b/drivers/usb/dwc3/debug_ipc.c
index e5341a609762..13319f72a28c 100644
--- a/drivers/usb/dwc3/debug_ipc.c
+++ b/drivers/usb/dwc3/debug_ipc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -144,3 +144,47 @@ void dwc3_dbg_print_reg(struct dwc3 *dwc, const char *name, int reg)
ipc_log_string(dwc->dwc_ipc_log_ctxt, "%s = 0x%08x", name, reg);
}
+
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "UNMAP", &req->request,
+ req->request.dma, req->request.length, req->trb_dma,
+ req->trb->ctrl & DWC3_TRB_CTRL_HWO);
+}
+
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "MAP", &req->request, req->request.dma,
+ req->request.length, req->trb_dma);
+}
+
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx 0x%lx", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "DEQUEUE", &req->request,
+ req->request.dma, req->trb_dma);
+}
+
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "QUEUE", &req->request);
+}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f02a5de4e95f..2af481b841b7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -294,9 +294,11 @@ void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (req->trb)
+ if (req->trb) {
+ dbg_ep_unmap(dep->number, req);
usb_gadget_unmap_request_by_dev(dwc->sysdev,
&req->request, req->direction);
+ }
req->trb = NULL;
trace_dwc3_gadget_giveback(req);
@@ -1137,6 +1139,12 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+ /*
+ * Ensure that updates of the buffer address and size happen
+ * before we set DWC3_TRB_CTRL_HWO so that the core
+ * does not process any stale TRB.
+ */
+ mb();
trb->ctrl |= DWC3_TRB_CTRL_HWO;
trace_dwc3_prepare_trb(dep, trb);
@@ -1359,6 +1367,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
else
dwc3_prepare_one_trb_linear(dep, req);
+ dbg_ep_map(dep->number, req);
if (!dwc3_calc_trbs_left(dep))
return;
}
@@ -1522,6 +1531,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
+ dbg_ep_queue(dep->number, req);
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1698,7 +1708,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
out1:
- dbg_event(dep->number, "DEQUEUE", 0);
+ dbg_ep_dequeue(dep->number, req);
/* giveback the request */
dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index df41a615b9f0..f150107df208 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017, Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -166,7 +166,6 @@ struct qmi_ops {
* struct qmi_txn - transaction context
* @qmi: QMI handle this transaction is associated with
* @id: transaction id
- * @lock: for synchronization between handler and waiter of messages
* @completion: completion object as the transaction receives a response
* @result: result code for the completed transaction
* @ei: description of the QMI encoded response (optional)
@@ -177,7 +176,6 @@ struct qmi_txn {
u16 id;
- struct mutex lock;
struct completion completion;
int result;
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index c77824ae6d66..5ef82a4c8de9 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -657,6 +657,7 @@ enum msm_isp_event_mask_index {
ISP_EVENT_MASK_INDEX_REG_UPDATE_MISSING = 10,
ISP_EVENT_MASK_INDEX_PING_PONG_MISMATCH = 11,
ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR = 12,
+ ISP_EVENT_MASK_INDEX_SOF_UPDATE_NANOSEC = 13,
};
@@ -701,6 +702,9 @@ enum msm_isp_event_mask_index {
#define ISP_EVENT_SUBS_MASK_BUF_FATAL_ERROR \
(1 << ISP_EVENT_MASK_INDEX_BUF_FATAL_ERROR)
+#define ISP_EVENT_SUBS_MASK_SOF_UPDATE_NANOSEC \
+ (1 << ISP_EVENT_MASK_INDEX_SOF_UPDATE_NANOSEC)
+
enum msm_isp_event_idx {
ISP_REG_UPDATE = 0,
ISP_EPOCH_0 = 1,
@@ -738,6 +742,7 @@ enum msm_isp_event_idx {
#define ISP_EVENT_ERROR (ISP_EVENT_BASE + ISP_ERROR)
#define ISP_EVENT_SOF (ISP_CAMIF_EVENT_BASE)
#define ISP_EVENT_EOF (ISP_CAMIF_EVENT_BASE + 1)
+#define ISP_EVENT_SOF_UPDATE_NANOSEC (ISP_CAMIF_EVENT_BASE + 512)
#define ISP_EVENT_BUF_DONE (ISP_EVENT_BASE + ISP_BUF_DONE)
#define ISP_EVENT_BUF_DIVERT (ISP_BUF_EVENT_BASE)
#define ISP_EVENT_STATS_NOTIFY (ISP_STATS_EVENT_BASE)
@@ -872,6 +877,12 @@ struct msm_isp_event_data {
} u; /* union can have max 52 bytes */
};
+struct msm_isp_event_data_nanosec {
+ /* nano second timestamp */
+ uint64_t nano_timestamp;
+ uint32_t frame_id;
+};
+
struct msm_isp32_event_data {
/*Wall clock except for buffer divert events
*which use monotonic clock
@@ -923,6 +934,10 @@ struct msm_vfe_dual_vfe_sync_mode {
uint32_t enable;
};
+struct msm_vfe_nano_sec_timestamp {
+ uint32_t enable;
+};
+
#define V4L2_PIX_FMT_QBGGR8 v4l2_fourcc('Q', 'B', 'G', '8')
#define V4L2_PIX_FMT_QGBRG8 v4l2_fourcc('Q', 'G', 'B', '8')
#define V4L2_PIX_FMT_QGRBG8 v4l2_fourcc('Q', 'G', 'R', '8')
@@ -991,6 +1006,7 @@ enum msm_isp_ioctl_cmd_code {
MSM_ISP32_REQUEST_STREAM,
MSM_ISP_DUAL_SYNC_CFG,
MSM_ISP_DUAL_SYNC_CFG_VER2,
+ MSM_ISP_NANOSEC_TIMESTAMP,
};
#define VIDIOC_MSM_VFE_REG_CFG \
@@ -1129,4 +1145,8 @@ enum msm_isp_ioctl_cmd_code {
_IOWR('V', MSM_ISP_DUAL_SYNC_CFG_VER2, \
struct msm_vfe_dual_vfe_sync_mode)
+#define VIDIOC_MSM_ISP_NANOSEC_TIMESTAMP \
+ _IOW('V', MSM_ISP_NANOSEC_TIMESTAMP, \
+ struct msm_vfe_nano_sec_timestamp)
+
#endif /* __MSMB_ISP__ */
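
For the new UAPI pieces added above, a hypothetical userspace sketch, not from this patch, showing how a client might enable nanosecond SOF timestamps and dequeue the new event; the subdev node path and the installed header location are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msmb_isp.h>

/* Enable nanosecond SOF reporting on a VFE subdev and read one event. */
int main(void)
{
	struct msm_vfe_nano_sec_timestamp cfg = { .enable = 1 };
	struct v4l2_event_subscription sub = {
		.type = ISP_EVENT_SOF_UPDATE_NANOSEC,
	};
	struct v4l2_event ev;
	struct msm_isp_event_data_nanosec ns;
	int fd = open("/dev/v4l-subdev1", O_RDWR);	/* assumed VFE subdev */

	if (fd < 0)
		return 1;

	ioctl(fd, VIDIOC_MSM_ISP_NANOSEC_TIMESTAMP, &cfg);
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0) {
		memcpy(&ns, ev.u.data, sizeof(ns));
		printf("frame %u SOF at %llu ns\n", ns.frame_id,
		       (unsigned long long)ns.nano_timestamp);
	}
	return 0;
}
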
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index d28d79d5d2fd..a9f8cc24a357 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -433,6 +433,10 @@ struct snd_dec_pcm {
__u8 ch_map[MAX_PCM_DECODE_CHANNELS];
} __attribute__((packed, aligned(4)));
+struct snd_dec_amrwb_plus {
+ __u32 bit_stream_fmt;
+};
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
@@ -447,6 +451,7 @@ union snd_codec_options {
struct snd_dec_aptx aptx_dec;
struct snd_dec_thd truehd;
struct snd_dec_pcm pcm_dec;
+ struct snd_dec_amrwb_plus amrwbplus;
};
/** struct snd_codec_desc - description of codec capabilities
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 0bda756f5e62..8e340258536d 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2022,11 +2022,10 @@ void init_new_task_load(struct task_struct *p)
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
- p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
- p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
-
- /* Don't have much choice. CPU frequency would be bogus */
- BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
+ p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
+ GFP_KERNEL | __GFP_NOFAIL);
+ p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32),
+ GFP_KERNEL | __GFP_NOFAIL);
if (init_load_pct) {
init_load_windows = div64_u64((u64)init_load_pct *
diff --git a/mm/memory.c b/mm/memory.c
index e463b6bbb844..626234dfa4c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3036,6 +3036,8 @@ int do_swap_page(struct vm_fault *vmf)
struct page *page = NULL, *swapcache;
struct mem_cgroup *memcg;
swp_entry_t entry;
+ struct swap_info_struct *si;
+ bool skip_swapcache = false;
pte_t pte;
int locked;
int exclusive = 0;
@@ -3077,15 +3079,24 @@ int do_swap_page(struct vm_fault *vmf)
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+
+ /*
+ * lookup_swap_cache below can fail and before the SWP_SYNCHRONOUS_IO
+ * check is made, another process can populate the swapcache, delete
+ * the swap entry and decrement the swap count. So decide on taking
+ * the SWP_SYNCHRONOUS_IO path before the lookup. In the event of the
+ * race described, the victim process will find a swap_count > 1
+ * and can then take the readahead path instead of SWP_SYNCHRONOUS_IO.
+ */
+ si = swp_swap_info(entry);
+ if (si->flags & SWP_SYNCHRONOUS_IO && __swap_count(si, entry) == 1)
+ skip_swapcache = true;
+
page = lookup_swap_cache(entry, vma, vmf->address);
swapcache = page;
if (!page) {
- struct swap_info_struct *si = swp_swap_info(entry);
-
- if (si->flags & SWP_SYNCHRONOUS_IO &&
- __swap_count(si, entry) == 1) {
- /* skip swapcache */
+ if (skip_swapcache) {
page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vmf->address);
if (page) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index c553b2cf5a6c..a4726f8eec6f 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -10,6 +10,8 @@
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include "internal.h"
@@ -24,6 +26,8 @@ struct page_owner {
gfp_t gfp_mask;
int last_migrate_reason;
depot_stack_handle_t handle;
+ int pid;
+ u64 ts_nsec;
};
static bool page_owner_disabled =
@@ -183,6 +187,8 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
page_owner->order = order;
page_owner->gfp_mask = gfp_mask;
page_owner->last_migrate_reason = -1;
+ page_owner->pid = current->pid;
+ page_owner->ts_nsec = local_clock();
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
@@ -243,6 +249,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
new_page_owner->last_migrate_reason =
old_page_owner->last_migrate_reason;
new_page_owner->handle = old_page_owner->handle;
+ new_page_owner->pid = old_page_owner->pid;
+ new_page_owner->ts_nsec = old_page_owner->ts_nsec;
/*
* We don't clear the bit on the oldpage as it's going to be freed
@@ -360,9 +368,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
ret = snprintf(kbuf, count,
- "Page allocated via order %u, mask %#x(%pGg)\n",
+ "Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns\n",
page_owner->order, page_owner->gfp_mask,
- &page_owner->gfp_mask);
+ &page_owner->gfp_mask, page_owner->pid,
+ page_owner->ts_nsec);
if (ret >= count)
goto err;
@@ -445,8 +454,9 @@ void __dump_page_owner(struct page *page)
}
depot_fetch_stack(handle, &trace);
- pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
- page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+ pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu ns\n",
+ page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
+ page_owner->pid, page_owner->ts_nsec);
print_stack_trace(&trace, 0);
if (page_owner->last_migrate_reason != -1)