DispSync.cpp

/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <inttypes.h> // PRId64, used by DispSync::dump()
#include <math.h>

#include <cutils/iosched_policy.h>
#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;
class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
        mCond.signal();
    }

    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }
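
    // Main loop: compute the earliest upcoming event across all listeners,
    // sleep until that time, then fire the callbacks that have come due.
    // The average wakeup latency is tracked with a 63/64 exponential moving
    // average (clamped to 500us) for tracing purposes.  Callbacks are invoked
    // outside of mMutex so listeners can call back into DispSync without
    // deadlocking.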
    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }
    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without allowing
        // any past events to fire.  Because computeListenerNextEventTimeLocked
        // filters out events within half a period of the last event time, we
        // need to initialize the last event time to half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the kIgnorePresentFences case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };
    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }
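
    // Collect the callbacks that have come due.  Each listener's next event
    // time is computed relative to one period before 'now' so that an event
    // that fell due while the thread was asleep is still delivered; anything
    // earlier than 'now' is queued and the listener's last event time is
    // advanced to it.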
    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }
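
    // Compute the first event time for this listener that comes after both
    // 'ref' and the listener's last event time.  The candidate is the next
    // multiple of mPeriod past the listener's effective phase
    // (mReferenceTime + mPhase + listener.mPhase); if it lands within half a
    // period of the last fired event it is pushed out by one more period so
    // the same vsync is not reported twice.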
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        nsecs_t phase = mReferenceTime + mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};
DispSync::DispSync() :
        mRefreshSkipCount(0),
        mThread(new DispSyncThread()) {

    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);
    android_set_rt_ioprio(mThread->getTid(), 1);

    reset();
    beginResync();

    if (kTraceDetailedInfo) {
        // If we're not getting present fences then the ZeroPhaseTracer
        // would prevent HW vsync events from ever being turned off.
        // Even if we're just ignoring the fences, the zero-phase tracing is
        // not needed because any time there is an event registered we will
        // turn on the HW vsync events.
        if (!kIgnorePresentFences) {
            addEventListener(0, new ZeroPhaseTracer());
        }
    }
}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mPhase = 0;
    mReferenceTime = 0;
    mModelUpdated = false;
    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}
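
// Record a present fence in the ring buffer.  Fences that have signaled are
// converted to present timestamps (shifted by kPresentTimeOffset so they line
// up with vsync) and released; unsignaled fences are kept and retried on the
// next call.  Returns true when HW vsync should stay enabled, i.e. when the
// model has not been updated yet or its error exceeds kErrorThreshold.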
bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + kPresentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mModelUpdated = false;
    mNumResyncSamples = 0;
}
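
// Add a raw HW vsync timestamp to the circular resync sample buffer and
// refit the model.  The first sample establishes mReferenceTime so later
// arithmetic operates on small offsets.  Returns true while HW vsync should
// stay enabled: when present fences are ignored this is whenever any listener
// is registered; otherwise it is while the model is not yet updated or the
// error is still above threshold.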
bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        mReferenceTime = timestamp;
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (kIgnorePresentFences) {
        // If we don't have the sync framework we will never have
        // addPresentFence called.  This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::endResync() {
}
status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}

void DispSync::setRefreshSkipCount(int count) {
    Mutex::Autolock lock(mMutex);
    ALOGD("setRefreshSkipCount(%d)", count);
    mRefreshSkipCount = count;
    updateModelLocked();
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mReferenceTime = 0;
    mThread->updateModel(mPeriod, mPhase, mReferenceTime);
}

nsecs_t DispSync::getPeriod() {
    // lock mutex as mPeriod changes multiple times in updateModelLocked
    Mutex::Autolock lock(mMutex);
    return mPeriod;
}
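
// Fit the software vsync model to the resync samples.  The period is the
// mean of the deltas between successive samples.  The phase is a circular
// mean: each sample's offset within the period is mapped to a point on the
// unit circle, the points are averaged, and atan2 recovers the mean angle.
// This keeps samples that straddle a period boundary (e.g. offsets at 1% and
// 99% of the period) from averaging out to half a period.  Finally the
// period is multiplied by (1 + mRefreshSkipCount) so SW vsync runs at a
// reduced rate when refresh skipping is requested.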
void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);

        mModelUpdated = true;
    }
}
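
// Recompute the model error: for each known present time, measure its
// distance to the nearest predicted vsync (offset modulo the un-skipped
// period, folded into [-period/2, period/2]) and average the squared
// distances.  The result is compared against kErrorThreshold (400us squared)
// by addPresentFence() and addResyncSample() to decide whether HW vsync is
// still needed.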
void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i] - mReferenceTime;
        if (sample > mPhase) {
            nsecs_t sampleErr = (sample - mPhase) % period;
            if (sampleErr > period / 2) {
                sampleErr -= period;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (kTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}
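
// Predict the time of a future hardware refresh.  With periodOffset == 0
// this is the first modeled vsync after 'now'; each increment of
// periodOffset moves the prediction one period later.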
nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    Mutex::Autolock lock(mMutex);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    nsecs_t phase = mReferenceTime + mPhase;
    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
}

void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n",
            kIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
            mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat(" %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat(" %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
            NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        bool signaled = mPresentFences[idx] == NULL;
        nsecs_t presentTime = mPresentTimes[idx];
        if (!signaled) {
            result.appendFormat(" [unsignaled fence]\n");
        } else if (presentTime == 0) {
            result.appendFormat(" 0\n");
        } else if (previous == 0) {
            result.appendFormat(" %" PRId64 " (%.3f ms ago)\n", presentTime,
                    (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat(" %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod,
                    (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}

} // namespace android