author     Kevin Hilman <khilman@linaro.org>   2015-05-12 15:27:07 -0700
committer  Kevin Hilman <khilman@linaro.org>   2015-05-12 15:27:07 -0700
commit     0ed41f1a9564c0733ad155b742ac66fb41d2f9d3 (patch)
tree       fb32ef8d494ee1952b99a65f876a9e28da004a1b /tools/gator/daemon/PerfBuffer.cpp
parent     a3d64deb4ee72f27441dd5363d74c294b7dd34a7 (diff)
parent     b93dcce48cc6182fcf66cb6d72665b10c775f4c3 (diff)
Merge branch 'lsk-3.10-gator' of git://git.linaro.org/landing-teams/working/arm/kernel into v3.10/topic/gator

* 'lsk-3.10-gator' of git://git.linaro.org/landing-teams/working/arm/kernel:
  gator: Enable multiple source copies to exist in Android build environments
  gator: Add config for building the module in-tree
  gator: Version 5.21.1
Diffstat (limited to 'tools/gator/daemon/PerfBuffer.cpp')
-rw-r--r--   tools/gator/daemon/PerfBuffer.cpp   129
1 file changed, 97 insertions(+), 32 deletions(-)
diff --git a/tools/gator/daemon/PerfBuffer.cpp b/tools/gator/daemon/PerfBuffer.cpp
index f127c996d43b..3b9da1dc6592 100644
--- a/tools/gator/daemon/PerfBuffer.cpp
+++ b/tools/gator/daemon/PerfBuffer.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) ARM Limited 2013-2014. All rights reserved.
+ * Copyright (C) ARM Limited 2013-2015. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -35,14 +35,14 @@ PerfBuffer::~PerfBuffer() {
bool PerfBuffer::useFd(const int cpu, const int fd) {
if (mFds[cpu] < 0) {
if (mBuf[cpu] != MAP_FAILED) {
- logg->logMessage("%s(%s:%i): cpu %i already online or not correctly cleaned up", __FUNCTION__, __FILE__, __LINE__, cpu);
+ logg->logMessage("cpu %i already online or not correctly cleaned up", cpu);
return false;
}
// The buffer isn't mapped yet
mBuf[cpu] = mmap(NULL, gSessionData->mPageSize + BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (mBuf[cpu] == MAP_FAILED) {
- logg->logMessage("%s(%s:%i): mmap failed", __FUNCTION__, __FILE__, __LINE__);
+ logg->logMessage("mmap failed");
return false;
}
mFds[cpu] = fd;
@@ -50,17 +50,17 @@ bool PerfBuffer::useFd(const int cpu, const int fd) {
// Check the version
struct perf_event_mmap_page *pemp = static_cast<struct perf_event_mmap_page *>(mBuf[cpu]);
if (pemp->compat_version != 0) {
- logg->logMessage("%s(%s:%i): Incompatible perf_event_mmap_page compat_version", __FUNCTION__, __FILE__, __LINE__);
+ logg->logMessage("Incompatible perf_event_mmap_page compat_version");
return false;
}
} else {
if (mBuf[cpu] == MAP_FAILED) {
- logg->logMessage("%s(%s:%i): cpu already online or not correctly cleaned up", __FUNCTION__, __FILE__, __LINE__);
+ logg->logMessage("cpu already online or not correctly cleaned up");
return false;
}
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, mFds[cpu]) < 0) {
- logg->logMessage("%s(%s:%i): ioctl failed", __FUNCTION__, __FILE__, __LINE__);
+ logg->logMessage("ioctl failed");
return false;
}
}
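
(Aside: useFd follows the standard perf_event mmap pattern — the first fd on a cpu maps one metadata page plus a power-of-two data area, and every further fd on that cpu is redirected into the same buffer with PERF_EVENT_IOC_SET_OUTPUT. A minimal stand-alone sketch of that pattern; the helper names and arguments are illustrative, not taken from this file:)

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <cstddef>

    // First event on a cpu: map its ring buffer, one metadata page plus a
    // power-of-two data area, as useFd() above does with BUF_SIZE.
    static void *mapRing(const int fd, const size_t pageSize, const size_t dataSize) {
        return mmap(NULL, pageSize + dataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }

    // Later events on the same cpu: redirect their samples into the first
    // event's buffer instead of mapping a second one.
    static bool redirectInto(const int extraFd, const int primaryFd) {
        return ioctl(extraFd, PERF_EVENT_IOC_SET_OUTPUT, primaryFd) == 0;
    }
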
@@ -79,8 +79,8 @@ bool PerfBuffer::isEmpty() {
if (mBuf[cpu] != MAP_FAILED) {
// Take a snapshot of the positions
struct perf_event_mmap_page *pemp = static_cast<struct perf_event_mmap_page *>(mBuf[cpu]);
- const __u64 head = pemp->data_head;
- const __u64 tail = pemp->data_tail;
+ const __u64 head = ACCESS_ONCE(pemp->data_head);
+ const __u64 tail = ACCESS_ONCE(pemp->data_tail);
if (head != tail) {
return false;
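
(Aside: the ACCESS_ONCE change matters because data_head is advanced concurrently by the kernel, so each read must be a single real load that the compiler cannot fold away or re-read. The daemon presumably carries its own copy of the kernel macro; a minimal user-space equivalent, with illustrative names, looks like:)

    #include <linux/perf_event.h>

    // Force exactly one volatile access, mirroring the kernel's ACCESS_ONCE macro.
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    // Snapshot the positions of a perf mmap ring buffer: data_head is advanced
    // by the kernel, data_tail by the reader once it has consumed records.
    static inline bool ringHasData(struct perf_event_mmap_page *const pemp) {
        const __u64 head = ACCESS_ONCE(pemp->data_head);
        const __u64 tail = ACCESS_ONCE(pemp->data_tail);
        return head != tail;
    }
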
@@ -91,42 +91,105 @@ bool PerfBuffer::isEmpty() {
return true;
}
-static void compressAndSend(const int cpu, const __u64 head, __u64 tail, const uint8_t *const b, Sender *const sender) {
- // Pick a big size but something smaller than the chunkSize in Sender::writeData which is 100k
- char buf[1<<16];
- int writePos = 0;
- const int typeLength = gSessionData->mLocalCapture ? 0 : 1;
+bool PerfBuffer::isFull() {
+ for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
+ if (mBuf[cpu] != MAP_FAILED) {
+ // Take a snapshot of the positions
+ struct perf_event_mmap_page *pemp = static_cast<struct perf_event_mmap_page *>(mBuf[cpu]);
+ const __u64 head = ACCESS_ONCE(pemp->data_head);
- while (head > tail) {
- writePos = 0;
- if (!gSessionData->mLocalCapture) {
- buf[writePos++] = RESPONSE_APC_DATA;
+ if (head + 2000 <= (unsigned int)BUF_SIZE) {
+ return true;
+ }
}
- // Reserve space for size
- writePos += sizeof(uint32_t);
- Buffer::packInt(buf, sizeof(buf), writePos, FRAME_PERF);
- Buffer::packInt(buf, sizeof(buf), writePos, cpu);
+ }
+
+ return false;
+}
+
+class PerfFrame {
+public:
+ PerfFrame(Sender *const sender) : mSender(sender), mWritePos(-1), mCpuSizePos(-1) {}
+
+ void add(const int cpu, const __u64 head, __u64 tail, const uint8_t *const b) {
+ cpuHeader(cpu);
while (head > tail) {
const int count = reinterpret_cast<const struct perf_event_header *>(b + (tail & BUF_MASK))->size/sizeof(uint64_t);
// Can this whole message be written as Streamline assumes events are not split between frames
- if (sizeof(buf) <= writePos + count*Buffer::MAXSIZE_PACK64) {
- break;
+ if (sizeof(mBuf) <= mWritePos + count*Buffer::MAXSIZE_PACK64) {
+ send();
+ cpuHeader(cpu);
}
for (int i = 0; i < count; ++i) {
// Must account for message size
- Buffer::packInt64(buf, sizeof(buf), writePos, *reinterpret_cast<const uint64_t *>(b + (tail & BUF_MASK)));
+ Buffer::packInt64(mBuf, sizeof(mBuf), mWritePos, *reinterpret_cast<const uint64_t *>(b + (tail & BUF_MASK)));
tail += sizeof(uint64_t);
}
}
+ }
+
+ void send() {
+ if (mWritePos > 0) {
+ writeFrameSize();
+ mSender->writeData(mBuf, mWritePos, RESPONSE_APC_DATA);
+ mWritePos = -1;
+ mCpuSizePos = -1;
+ }
+ }
- // Write size
- Buffer::writeLEInt(reinterpret_cast<unsigned char *>(buf + typeLength), writePos - typeLength - sizeof(uint32_t));
- sender->writeData(buf, writePos, RESPONSE_APC_DATA);
+private:
+ void writeFrameSize() {
+ writeCpuSize();
+ const int typeLength = gSessionData->mLocalCapture ? 0 : 1;
+ Buffer::writeLEInt(reinterpret_cast<unsigned char *>(mBuf + typeLength), mWritePos - typeLength - sizeof(uint32_t));
}
-}
+
+ void frameHeader() {
+ if (mWritePos < 0) {
+ mWritePos = 0;
+ mCpuSizePos = -1;
+ if (!gSessionData->mLocalCapture) {
+ mBuf[mWritePos++] = RESPONSE_APC_DATA;
+ }
+ // Reserve space for frame size
+ mWritePos += sizeof(uint32_t);
+ Buffer::packInt(mBuf, sizeof(mBuf), mWritePos, FRAME_PERF);
+ }
+ }
+
+ void writeCpuSize() {
+ if (mCpuSizePos >= 0) {
+ Buffer::writeLEInt(reinterpret_cast<unsigned char *>(mBuf + mCpuSizePos), mWritePos - mCpuSizePos - sizeof(uint32_t));
+ }
+ }
+
+ void cpuHeader(const int cpu) {
+ if (sizeof(mBuf) <= mWritePos + Buffer::MAXSIZE_PACK32 + sizeof(uint32_t)) {
+ send();
+ }
+ frameHeader();
+ writeCpuSize();
+ Buffer::packInt(mBuf, sizeof(mBuf), mWritePos, cpu);
+ mCpuSizePos = mWritePos;
+ // Reserve space for cpu size
+ mWritePos += sizeof(uint32_t);
+ }
+
+ // Pick a big size but something smaller than the chunkSize in Sender::writeData which is 100k
+ char mBuf[1<<16];
+ Sender *const mSender;
+ int mWritePos;
+ int mCpuSizePos;
+
+ // Intentionally unimplemented
+ PerfFrame(const PerfFrame &);
+ PerfFrame& operator=(const PerfFrame &);
+};
bool PerfBuffer::send(Sender *const sender) {
+ PerfFrame frame(sender);
+
for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
if (mBuf[cpu] == MAP_FAILED) {
continue;
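
(Aside: the recurring trick in PerfFrame is length back-patching — frameHeader() and cpuHeader() reserve a four-byte hole when a frame or per-cpu section starts, and writeFrameSize()/writeCpuSize() fill it in once the final write position is known. A stripped-down sketch of that pattern with hypothetical names; gator's Buffer::writeLEInt performs the equivalent store in explicit little-endian order:)

    #include <cstdint>
    #include <cstring>

    // Reserve a four-byte length field at the current position and skip past it.
    static int reserveLength(int &writePos) {
        const int lengthPos = writePos;
        writePos += sizeof(uint32_t);
        return lengthPos;
    }

    // Back-patch the reserved field with the number of bytes written after it.
    static void patchLength(char *const buf, const int lengthPos, const int writePos) {
        const uint32_t size = writePos - lengthPos - sizeof(uint32_t);
        memcpy(buf + lengthPos, &size, sizeof(size)); // host order here; gator uses writeLEInt
    }
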
@@ -134,12 +197,12 @@ bool PerfBuffer::send(Sender *const sender) {
// Take a snapshot of the positions
struct perf_event_mmap_page *pemp = static_cast<struct perf_event_mmap_page *>(mBuf[cpu]);
- const __u64 head = pemp->data_head;
- const __u64 tail = pemp->data_tail;
+ const __u64 head = ACCESS_ONCE(pemp->data_head);
+ const __u64 tail = ACCESS_ONCE(pemp->data_tail);
if (head > tail) {
const uint8_t *const b = static_cast<uint8_t *>(mBuf[cpu]) + gSessionData->mPageSize;
- compressAndSend(cpu, head, tail, b, sender);
+ frame.add(cpu, head, tail, b);
// Update tail with the data read
pemp->data_tail = head;
@@ -150,9 +213,11 @@ bool PerfBuffer::send(Sender *const sender) {
mBuf[cpu] = MAP_FAILED;
mDiscard[cpu] = false;
mFds[cpu] = -1;
- logg->logMessage("%s(%s:%i): Unmaped cpu %i", __FUNCTION__, __FILE__, __LINE__, cpu);
+ logg->logMessage("Unmaped cpu %i", cpu);
}
}
+ frame.send();
+
return true;
}
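
(Aside: taken together, the API touched by this commit expects a caller to attach one fd per cpu with useFd() and then drain everything into the sender, which now emits a single APC frame per send() call. A hypothetical one-shot driver; the fds array and core count are assumptions, the classes come from the daemon's own headers:)

    #include "PerfBuffer.h"
    #include "Sender.h"

    // Attach one perf fd per cpu, then drain any pending records in one pass.
    static bool drainOnce(PerfBuffer &perfBuffer, Sender *const sender, const int *const fds, const int cores) {
        for (int cpu = 0; cpu < cores; ++cpu) {
            if (!perfBuffer.useFd(cpu, fds[cpu])) { // first fd on a cpu mmaps, later fds redirect
                return false;
            }
        }
        if (!perfBuffer.isEmpty()) {
            return perfBuffer.send(sender);         // batches every cpu into a single APC frame
        }
        return true;
    }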