adreno.h 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955
  1. /* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #ifndef __ADRENO_H
  14. #define __ADRENO_H
  15. #include "kgsl_device.h"
  16. #include "adreno_drawctxt.h"
  17. #include "adreno_ringbuffer.h"
  18. #include "adreno_profile.h"
  19. #include "kgsl_iommu.h"
  20. #include <mach/ocmem.h>
  21. #include "a3xx_reg.h"
  22. #define DEVICE_3D_NAME "kgsl-3d"
  23. #define DEVICE_3D0_NAME "kgsl-3d0"
  24. #define ADRENO_DEVICE(device) \
  25. KGSL_CONTAINER_OF(device, struct adreno_device, dev)
  26. #define ADRENO_CONTEXT(device) \
  27. KGSL_CONTAINER_OF(device, struct adreno_context, base)
  28. #define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
  29. #define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
  30. #define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
  31. #define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
  32. /* Flags to control command packet settings */
  33. #define KGSL_CMD_FLAGS_NONE 0
  34. #define KGSL_CMD_FLAGS_PMODE BIT(0)
  35. #define KGSL_CMD_FLAGS_INTERNAL_ISSUE BIT(1)
  36. #define KGSL_CMD_FLAGS_WFI BIT(2)
  37. #define KGSL_CMD_FLAGS_PROFILE BIT(3)
  38. #define KGSL_CMD_FLAGS_PWRON_FIXUP BIT(4)
  39. #define KGSL_CMD_FLAGS_MEMLIST BIT(5)
  40. /* Command identifiers */
  41. #define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
  42. #define KGSL_CMD_IDENTIFIER 0x2EEDFACE
  43. #define KGSL_CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
  44. #define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
  45. #define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
  46. #define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
  47. #define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
  48. #define KGSL_START_OF_PROFILE_IDENTIFIER 0x2DEFADE1
  49. #define KGSL_END_OF_PROFILE_IDENTIFIER 0x2DEFADE2
  50. #define KGSL_PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA
  51. #ifdef CONFIG_MSM_SCM
  52. #define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
  53. #elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
  54. #define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_idlestats)
  55. #else
  56. #define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
  57. #endif
  58. void adreno_debugfs_init(struct kgsl_device *device);
  59. #define ADRENO_ISTORE_START 0x5000 /* Istore offset */
  60. #define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50
  61. /* One cannot wait forever for the core to idle, so set an upper limit to the
  62. * amount of time to wait for the core to go idle
  63. */
  64. #define ADRENO_IDLE_TIMEOUT (20 * 1000)
/*
 * enum adreno_gpurev - numeric revision codes for the supported Adreno cores.
 * The values mostly match the marketing name (A200 = 200, etc.); A305C and
 * A305B use internal codes (306, 335) that keep the family range ordering
 * used by adreno_is_a2xx()/adreno_is_a3xx() (<= 299 is A2XX, >= 300 is A3XX).
 */
enum adreno_gpurev {
	ADRENO_REV_UNKNOWN = 0,
	ADRENO_REV_A200 = 200,
	ADRENO_REV_A203 = 203,
	ADRENO_REV_A205 = 205,
	ADRENO_REV_A220 = 220,
	ADRENO_REV_A225 = 225,
	ADRENO_REV_A305 = 305,
	ADRENO_REV_A305C = 306,
	ADRENO_REV_A320 = 320,
	ADRENO_REV_A330 = 330,
	ADRENO_REV_A305B = 335,
};
  78. #define ADRENO_SOFT_FAULT BIT(0)
  79. #define ADRENO_HARD_FAULT BIT(1)
  80. #define ADRENO_TIMEOUT_FAULT BIT(2)
  81. #define ADRENO_IOMMU_PAGE_FAULT BIT(3)
  82. /*
  83. * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
  84. * smaller then this but this size will allow for a larger range of inflight
  85. * sizes that can be chosen at runtime
  86. */
  87. #define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
/**
 * struct adreno_dispatcher - container for the adreno GPU dispatcher
 * @mutex: Mutex to protect the structure
 * @priv: Private flag bits for the dispatcher (see
 *        enum adreno_dispatcher_flags)
 * @timer: Timer to monitor the progress of the command batches
 * @fault_timer: Timer used for fault detection (NOTE(review): exact arming
 *               semantics are defined where the timer is set up - confirm)
 * @inflight: Number of command batch operations pending in the ringbuffer
 * @fault: Non-zero if a fault was detected (atomic)
 * @pending: Priority list of contexts waiting to submit command batches
 * @plist_lock: Spin lock to protect the pending queue
 * @cmdqueue: Queue of command batches currently in flight
 * @head: Index of the head of cmdqueue. This is the oldest pending
 * operation
 * @tail: Index of the tail of cmdqueue. This is the most recently
 * submitted operation
 * @work: kthread_work to run the dispatcher on a worker thread
 * @kobj: kobject for the dispatcher directory in the device sysfs node
 */
struct adreno_dispatcher {
	struct mutex mutex;
	unsigned long priv;
	struct timer_list timer;
	struct timer_list fault_timer;
	unsigned int inflight;
	atomic_t fault;
	struct plist_head pending;
	spinlock_t plist_lock;
	struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
	unsigned int head;
	unsigned int tail;
	struct kthread_work work;
	struct kobject kobj;
};
/*
 * enum adreno_dispatcher_flags - bit numbers for adreno_dispatcher.priv
 * @ADRENO_DISPATCHER_POWER: dispatcher currently holds a power reference
 * (NOTE(review): inferred from the flag name - confirm against the users)
 */
enum adreno_dispatcher_flags {
	ADRENO_DISPATCHER_POWER = 0,
};
struct adreno_gpudev;
/*
 * struct adreno_busy_data - raw busy/utilization counter samples, filled by
 * the gpudev->busy_cycles() hook
 * @gpu_busy: GPU busy cycle count
 * @vbif_ram_cycles: VBIF RAM cycle count
 * @vbif_starved_ram: VBIF cycles starved waiting on RAM
 */
struct adreno_busy_data {
	unsigned int gpu_busy;
	unsigned int vbif_ram_cycles;
	unsigned int vbif_starved_ram;
};
/*
 * struct adreno_device - Adreno-specific wrapper around the generic KGSL
 * device.  ADRENO_DEVICE() recovers this structure from an embedded
 * struct kgsl_device pointer, which is why @dev must stay first.
 */
struct adreno_device {
	struct kgsl_device dev;    /* Must be first field in this struct */
	/* Private flag bits (see enum adreno_device_flags) */
	unsigned long priv;
	/* Chip identification, decoded with the ADRENO_CHIPID_* macros */
	unsigned int chip_id;
	enum adreno_gpurev gpurev;
	/* On-chip GMEM base address and size */
	unsigned long gmem_base;
	unsigned int gmem_size;
	/* Currently active draw context, if any */
	struct adreno_context *drawctxt_active;
	/* PFP microcode: firmware file name, image, size and version */
	const char *pfp_fwfile;
	unsigned int *pfp_fw;
	size_t pfp_fw_size;
	unsigned int pfp_fw_version;
	/* PM4 microcode: firmware file name, image, size and version */
	const char *pm4_fwfile;
	unsigned int *pm4_fw;
	size_t pm4_fw_size;
	unsigned int pm4_fw_version;
	struct adreno_ringbuffer ringbuffer;
	unsigned int mharb;
	struct adreno_gpudev *gpudev;
	unsigned int wait_timeout;
	/* Jump table indices/addresses used when bootstrapping the microcode */
	unsigned int pm4_jt_idx;
	unsigned int pm4_jt_addr;
	unsigned int pm4_bstrp_size;
	unsigned int pfp_jt_idx;
	unsigned int pfp_jt_addr;
	unsigned int pfp_bstrp_size;
	unsigned int pfp_bstrp_ver;
	/* Instruction store geometry (see adreno_encode_istore_size()) */
	unsigned int istore_size;
	unsigned int pix_shader_start;
	unsigned int instruction_size;
	/* Level of indirect-buffer checking performed on submission */
	unsigned int ib_check_level;
	/* Fault tolerance configuration */
	unsigned int fast_hang_detect;
	unsigned int ft_policy;		/* KGSL_FT_* policy bits */
	unsigned int long_ib_detect;
	unsigned int ft_pf_policy;	/* KGSL_FT_PAGEFAULT_* policy bits */
	unsigned int gpulist_index;
	/* OCMEM handle/base when GMEM is backed by on-chip memory */
	struct ocmem_buf *ocmem_hdl;
	unsigned int ocmem_base;
	struct adreno_profile profile;
	/* Shader fixup executed after power collapse (A3XX) */
	struct kgsl_memdesc pwron_fixup;
	unsigned int pwron_fixup_dwords;
	struct adreno_dispatcher dispatcher;
	struct adreno_busy_data busy_data;
	struct work_struct input_work;
	unsigned int ram_cycles_lo;
};
/**
 * enum adreno_device_flags - Private flags for the adreno_device
 * @ADRENO_DEVICE_PWRON - Set during init after a power collapse
 * @ADRENO_DEVICE_PWRON_FIXUP - Set if the target requires the shader fixup
 * after power collapse
 * @ADRENO_DEVICE_INITIALIZED - Set once the device has been initialized
 * @ADRENO_DEVICE_STARTED - Set once the device has been started
 * @ADRENO_DEVICE_HANG_INTR - Set when a hang interrupt has been raised
 * (NOTE(review): the last three descriptions are inferred from the flag
 * names - confirm against the code that sets/tests them)
 */
enum adreno_device_flags {
	ADRENO_DEVICE_PWRON = 0,
	ADRENO_DEVICE_PWRON_FIXUP = 1,
	ADRENO_DEVICE_INITIALIZED = 2,
	ADRENO_DEVICE_STARTED = 3,
	ADRENO_DEVICE_HANG_INTR = 4,
};
  188. #define PERFCOUNTER_FLAG_NONE 0x0
  189. #define PERFCOUNTER_FLAG_KERNEL 0x1
  190. /* Structs to maintain the list of active performance counters */
/**
 * struct adreno_perfcount_register: register state
 * @countable: countable the register holds
 * @kernelcount: number of kernel users of the register
 * @usercount: number of user space users of the register
 * @offset: register hardware offset (low 32 bits of the counter)
 * @offset_hi: register hardware offset for the high 32 bits of the counter
 * @load_bit: The bit number in LOAD register which corresponds to this counter
 * @select: The countable register offset
 * @value: The 64 bit countable register value
 */
struct adreno_perfcount_register {
	unsigned int countable;
	unsigned int kernelcount;
	unsigned int usercount;
	unsigned int offset;
	unsigned int offset_hi;
	int load_bit;
	unsigned int select;
	uint64_t value;
};
/**
 * struct adreno_perfcount_group: registers for a hardware group
 * @regs: available registers for this group
 * @reg_count: total registers for this group
 * @name: group name for this group
 * @flags: ADRENO_PERFCOUNTER_GROUP_* flags for this group
 */
struct adreno_perfcount_group {
	struct adreno_perfcount_register *regs;
	unsigned int reg_count;
	const char *name;
	unsigned long flags;
};
  223. /*
  224. * ADRENO_PERFCOUNTER_GROUP_FIXED indicates that a perfcounter group is fixed -
  225. * instead of having configurable countables like the other groups, registers in
  226. * fixed groups have a hardwired countable. So when the user requests a
  227. * countable in one of these groups, that countable should be used as the
  228. * register offset to return
  229. */
  230. #define ADRENO_PERFCOUNTER_GROUP_FIXED BIT(0)
/**
 * struct adreno_perfcounters: all available perfcounter groups
 * @groups: available groups for this device
 * @group_count: total groups for this device
 */
struct adreno_perfcounters {
	struct adreno_perfcount_group *groups;
	unsigned int group_count;
};
  240. #define ADRENO_PERFCOUNTER_GROUP(core, name) { core##_perfcounters_##name, \
  241. ARRAY_SIZE(core##_perfcounters_##name), __stringify(name), 0 }
  242. #define ADRENO_PERFCOUNTER_GROUP_FLAGS(core, name, flags) \
  243. { core##_perfcounters_##name, \
  244. ARRAY_SIZE(core##_perfcounters_##name), __stringify(name), flags }
/**
 * enum adreno_regs - List of registers that are used in the kgsl driver for
 * all 3D devices. Each device type has a different offset value for the same
 * register, so an array of register offsets is declared for every device
 * and is indexed by the enumeration values defined in this enum
 * (see struct adreno_reg_offsets and adreno_readreg()/adreno_writereg()).
 */
enum adreno_regs {
	ADRENO_REG_CP_DEBUG,
	ADRENO_REG_CP_ME_RAM_WADDR,
	ADRENO_REG_CP_ME_RAM_DATA,
	ADRENO_REG_CP_PFP_UCODE_DATA,
	ADRENO_REG_CP_PFP_UCODE_ADDR,
	ADRENO_REG_CP_WFI_PEND_CTR,
	ADRENO_REG_CP_RB_BASE,
	ADRENO_REG_CP_RB_RPTR_ADDR,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_RB_WPTR,
	ADRENO_REG_CP_PROTECT_CTRL,
	ADRENO_REG_CP_ME_CNTL,
	ADRENO_REG_CP_RB_CNTL,
	ADRENO_REG_CP_IB1_BASE,
	ADRENO_REG_CP_IB1_BUFSZ,
	ADRENO_REG_CP_IB2_BASE,
	ADRENO_REG_CP_IB2_BUFSZ,
	ADRENO_REG_CP_TIMESTAMP,
	ADRENO_REG_CP_HW_FAULT,
	ADRENO_REG_SCRATCH_ADDR,
	ADRENO_REG_SCRATCH_UMSK,
	ADRENO_REG_SCRATCH_REG2,
	ADRENO_REG_RBBM_STATUS,
	ADRENO_REG_RBBM_PERFCTR_CTL,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
	ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
	ADRENO_REG_RBBM_INT_0_MASK,
	ADRENO_REG_RBBM_INT_0_STATUS,
	ADRENO_REG_RBBM_AHB_ERROR_STATUS,
	ADRENO_REG_RBBM_PM_OVERRIDE2,
	ADRENO_REG_VPC_VPC_DEBUG_RAM_SEL,
	ADRENO_REG_VPC_VPC_DEBUG_RAM_READ,
	ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
	ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
	ADRENO_REG_VSC_SIZE_ADDRESS,
	ADRENO_REG_VFD_CONTROL_0,
	ADRENO_REG_VFD_FETCH_INSTR_0_0,
	ADRENO_REG_VFD_FETCH_INSTR_1_F,
	ADRENO_REG_VFD_INDEX_MAX,
	ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
	ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
	ADRENO_REG_SP_VS_OBJ_START_REG,
	ADRENO_REG_SP_FS_OBJ_START_REG,
	ADRENO_REG_PA_SC_AA_CONFIG,
	ADRENO_REG_SQ_GPR_MANAGEMENT,
	ADRENO_REG_SQ_INST_STORE_MANAGMENT,
	ADRENO_REG_TC_CNTL_STATUS,
	ADRENO_REG_TP0_CHICKEN,
	ADRENO_REG_RBBM_RBBM_CTL,
	ADRENO_REG_UCHE_INVALIDATE0,
	ADRENO_REG_REGISTER_MAX,
};
/**
 * struct adreno_reg_offsets - Holds an array of register offsets
 * @offsets: Offset array of size defined by enum adreno_regs
 * @offset_0: This is the index of the register in the offset array whose
 * value is 0. 0 is a valid register offset and during initialization of the
 * offset array we need to know if an offset value is correctly defined to 0
 */
struct adreno_reg_offsets {
	unsigned int *offsets;
	enum adreno_regs offset_0;
};
  317. #define ADRENO_REG_UNUSED 0xFFFFFFFF
  318. #define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
/*
 * struct adreno_gpudev - per-core (A2XX/A3XX) register offset table,
 * perfcounter description and function hooks.  One instance exists per
 * GPU family (adreno_a2xx_gpudev, adreno_a3xx_gpudev).
 */
struct adreno_gpudev {
	/*
	 * These registers are in a different location on different devices,
	 * so define them in the structure and use them as variables.
	 */
	struct adreno_reg_offsets *reg_offsets;
	/* keeps track of when we need to execute the draw workaround code */
	int ctx_switches_since_last_draw;
	struct adreno_perfcounters *perfcounters;
	/* GPU specific function hooks */
	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
	irqreturn_t (*irq_handler)(struct adreno_device *);
	void (*irq_control)(struct adreno_device *, int);
	unsigned int (*irq_pending)(struct adreno_device *);
	void * (*snapshot)(struct adreno_device *, void *, int *, int);
	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
	int (*perfcounter_init)(struct adreno_device *);
	void (*perfcounter_close)(struct adreno_device *);
	void (*perfcounter_save)(struct adreno_device *);
	void (*perfcounter_restore)(struct adreno_device *);
	void (*fault_detect_start)(struct adreno_device *);
	void (*fault_detect_stop)(struct adreno_device *);
	void (*start)(struct adreno_device *);
	int (*perfcounter_enable)(struct adreno_device *, unsigned int group,
		unsigned int counter, unsigned int countable);
	void (*busy_cycles)(struct adreno_device *, struct adreno_busy_data *);
	uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
		unsigned int group, unsigned int counter);
	void (*perfcounter_write)(struct adreno_device *adreno_dev,
		unsigned int group, unsigned int counter);
	void (*soft_reset)(struct adreno_device *device);
	void (*postmortem_dump)(struct adreno_device *adreno_dev);
};
  352. #define FT_DETECT_REGS_COUNT 14
/*
 * struct log_field - a show/text pair consumed by adreno_dump_fields()
 * @show: true if the field should be printed
 * @display: text to print for the field
 */
struct log_field {
	bool show;
	const char *display;
};
  357. /* Fault Tolerance policy flags */
  358. #define KGSL_FT_OFF 0
  359. #define KGSL_FT_REPLAY 1
  360. #define KGSL_FT_SKIPIB 2
  361. #define KGSL_FT_SKIPFRAME 3
  362. #define KGSL_FT_DISABLE 4
  363. #define KGSL_FT_TEMP_DISABLE 5
  364. #define KGSL_FT_THROTTLE 6
  365. #define KGSL_FT_SKIPCMD 7
  366. #define KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPCMD) \
  367. + BIT(KGSL_FT_THROTTLE))
  368. /* This internal bit is used to skip the PM dump on replayed command batches */
  369. #define KGSL_FT_SKIP_PMDUMP 31
  370. /* Pagefault policy flags */
  371. #define KGSL_FT_PAGEFAULT_INT_ENABLE BIT(0)
  372. #define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE BIT(1)
  373. #define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE BIT(2)
  374. #define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT BIT(3)
  375. #define KGSL_FT_PAGEFAULT_DEFAULT_POLICY KGSL_FT_PAGEFAULT_INT_ENABLE
  376. #define ADRENO_FT_TYPES \
  377. { BIT(KGSL_FT_OFF), "off" }, \
  378. { BIT(KGSL_FT_REPLAY), "replay" }, \
  379. { BIT(KGSL_FT_SKIPIB), "skipib" }, \
  380. { BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
  381. { BIT(KGSL_FT_DISABLE), "disable" }, \
  382. { BIT(KGSL_FT_TEMP_DISABLE), "temp" }, \
  383. { BIT(KGSL_FT_THROTTLE), "throttle"}, \
  384. { BIT(KGSL_FT_SKIPCMD), "skipcmd" }
  385. #define ADRENO_CMDBATCH_FLAGS \
  386. { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
  387. { KGSL_CMDBATCH_SYNC, "SYNC" }, \
  388. { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
  389. { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }
  390. extern struct adreno_gpudev adreno_a2xx_gpudev;
  391. extern struct adreno_gpudev adreno_a3xx_gpudev;
  392. /* A2XX register sets defined in adreno_a2xx.c */
  393. extern const unsigned int a200_registers[];
  394. extern const unsigned int a220_registers[];
  395. extern const unsigned int a225_registers[];
  396. extern const unsigned int a200_registers_count;
  397. extern const unsigned int a220_registers_count;
  398. extern const unsigned int a225_registers_count;
  399. /* A3XX register set defined in adreno_a3xx.c */
  400. extern const unsigned int a3xx_registers[];
  401. extern const unsigned int a3xx_registers_count;
  402. extern const unsigned int a3xx_hlsq_registers[];
  403. extern const unsigned int a3xx_hlsq_registers_count;
  404. extern const unsigned int a330_registers[];
  405. extern const unsigned int a330_registers_count;
  406. extern unsigned int ft_detect_regs[];
  407. bool adreno_hw_isidle(struct kgsl_device *device);
  408. int adreno_idle(struct kgsl_device *device);
  409. bool adreno_isidle(struct kgsl_device *device);
  410. void adreno_shadermem_regread(struct kgsl_device *device,
  411. unsigned int offsetwords,
  412. unsigned int *value);
  413. int adreno_dump(struct kgsl_device *device, int manual);
  414. void adreno_dump_fields(struct kgsl_device *device,
  415. const char *start, const struct log_field *lines,
  416. int num);
  417. unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
  418. *adreno_dev);
  419. struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
  420. phys_addr_t pt_base,
  421. unsigned int gpuaddr,
  422. unsigned int size,
  423. struct kgsl_mem_entry **entry);
  424. uint8_t *adreno_convertaddr(struct kgsl_device *device,
  425. phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size,
  426. struct kgsl_mem_entry **entry);
  427. struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
  428. phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size);
  429. void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
  430. int hang);
  431. void adreno_dispatcher_start(struct kgsl_device *device);
  432. int adreno_dispatcher_init(struct adreno_device *adreno_dev);
  433. void adreno_dispatcher_close(struct adreno_device *adreno_dev);
  434. int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
  435. unsigned int timeout);
  436. void adreno_dispatcher_irq_fault(struct kgsl_device *device);
  437. void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
  438. int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
  439. struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
  440. uint32_t *timestamp);
  441. void adreno_dispatcher_schedule(struct kgsl_device *device);
  442. void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
  443. void adreno_dispatcher_queue_context(struct kgsl_device *device,
  444. struct adreno_context *drawctxt);
  445. int adreno_reset(struct kgsl_device *device);
  446. int adreno_ft_init_sysfs(struct kgsl_device *device);
  447. void adreno_ft_uninit_sysfs(struct kgsl_device *device);
  448. void adreno_fault_skipcmd_detached(struct kgsl_device *device,
  449. struct adreno_context *drawctxt,
  450. struct kgsl_cmdbatch *cmdbatch);
  451. int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
  452. const char *name);
  453. const char *adreno_perfcounter_get_name(struct adreno_device
  454. *adreno_dev, unsigned int groupid);
  455. int adreno_perfcounter_get(struct adreno_device *adreno_dev,
  456. unsigned int groupid, unsigned int countable, unsigned int *offset,
  457. unsigned int *offset_hi, unsigned int flags);
  458. int adreno_perfcounter_put(struct adreno_device *adreno_dev,
  459. unsigned int groupid, unsigned int countable, unsigned int flags);
  460. int adreno_soft_reset(struct kgsl_device *device);
  461. int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev);
  462. static inline int adreno_is_a200(struct adreno_device *adreno_dev)
  463. {
  464. return (adreno_dev->gpurev == ADRENO_REV_A200);
  465. }
  466. static inline int adreno_is_a203(struct adreno_device *adreno_dev)
  467. {
  468. return (adreno_dev->gpurev == ADRENO_REV_A203);
  469. }
  470. static inline int adreno_is_a205(struct adreno_device *adreno_dev)
  471. {
  472. return (adreno_dev->gpurev == ADRENO_REV_A205);
  473. }
  474. static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
  475. {
  476. return (adreno_dev->gpurev <= 209);
  477. }
  478. static inline int adreno_is_a220(struct adreno_device *adreno_dev)
  479. {
  480. return (adreno_dev->gpurev == ADRENO_REV_A220);
  481. }
  482. static inline int adreno_is_a225(struct adreno_device *adreno_dev)
  483. {
  484. return (adreno_dev->gpurev == ADRENO_REV_A225);
  485. }
  486. static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
  487. {
  488. return (adreno_dev->gpurev == ADRENO_REV_A220 ||
  489. adreno_dev->gpurev == ADRENO_REV_A225);
  490. }
  491. static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
  492. {
  493. return (adreno_dev->gpurev <= 299);
  494. }
  495. bool adreno_hw_isidle(struct kgsl_device *device);
  496. static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
  497. {
  498. return (adreno_dev->gpurev >= 300);
  499. }
  500. static inline int adreno_is_a305(struct adreno_device *adreno_dev)
  501. {
  502. return (adreno_dev->gpurev == ADRENO_REV_A305);
  503. }
  504. static inline int adreno_is_a305b(struct adreno_device *adreno_dev)
  505. {
  506. return (adreno_dev->gpurev == ADRENO_REV_A305B);
  507. }
  508. static inline int adreno_is_a305c(struct adreno_device *adreno_dev)
  509. {
  510. return (adreno_dev->gpurev == ADRENO_REV_A305C);
  511. }
  512. static inline int adreno_is_a320(struct adreno_device *adreno_dev)
  513. {
  514. return (adreno_dev->gpurev == ADRENO_REV_A320);
  515. }
  516. static inline int adreno_is_a330(struct adreno_device *adreno_dev)
  517. {
  518. return (adreno_dev->gpurev == ADRENO_REV_A330);
  519. }
  520. static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
  521. {
  522. return ((adreno_dev->gpurev == ADRENO_REV_A330) &&
  523. (ADRENO_CHIPID_PATCH(adreno_dev->chip_id) > 0));
  524. }
  525. static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
  526. {
  527. return (cmd[0] == cp_nop_packet(1) &&
  528. cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
  529. }
/**
 * adreno_context_timestamp() - Return the last queued timestamp for the context
 * @k_ctxt: Pointer to the KGSL context to query (may be NULL)
 * @rb: Pointer to the ringbuffer structure for the GPU
 *
 * Return the last queued timestamp for the given context, or the ringbuffer's
 * global timestamp when @k_ctxt is NULL. This is used to verify that incoming
 * requests are not using an invalid (unsubmitted) timestamp.
 */
static inline int adreno_context_timestamp(struct kgsl_context *k_ctxt,
	struct adreno_ringbuffer *rb)
{
	if (k_ctxt) {
		struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
		return a_ctxt->timestamp;
	}
	return rb->global_ts;
}
/**
 * adreno_encode_istore_size - encode istore size in CP format
 * @adreno_dev - The 3D device.
 *
 * Encode the istore size into the format expected by the
 * CP_SET_SHADER_BASES and CP_ME_INIT commands:
 * bits 31:29 - istore size as encoded by this function
 * bits 27:16 - vertex shader start offset in instructions
 * bits 11:0 - pixel shader start offset in instructions.
 */
static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
{
	unsigned int size;
	/* in a225 the CP microcode multiplies the encoded
	 * value by 3 while decoding.
	 */
	if (adreno_is_a225(adreno_dev))
		size = adreno_dev->istore_size/3;
	else
		size = adreno_dev->istore_size;
	return (ilog2(size) - 5) << 29;
}
  569. static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
  570. unsigned int nop_gpuaddr)
  571. {
  572. /* Adding an indirect buffer ensures that the prefetch stalls until
  573. * the commands in indirect buffer have completed. We need to stall
  574. * prefetch with a nop indirect buffer when updating pagetables
  575. * because it provides stabler synchronization */
  576. *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
  577. *cmds++ = nop_gpuaddr;
  578. *cmds++ = 2;
  579. *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
  580. *cmds++ = 0x00000000;
  581. return 5;
  582. }
  583. static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
  584. unsigned int new_phys_limit,
  585. unsigned int nop_gpuaddr)
  586. {
  587. unsigned int *start = cmds;
  588. *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
  589. *cmds++ = new_phys_limit;
  590. cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
  591. return cmds - start;
  592. }
  593. static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
  594. int cur_ctx_bank,
  595. unsigned int nop_gpuaddr)
  596. {
  597. unsigned int *start = cmds;
  598. *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
  599. *cmds++ = (cur_ctx_bank ? 0 : 0x20);
  600. cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
  601. return cmds - start;
  602. }
/*
 * adreno_add_read_cmds - Add pm4 packets to perform read
 * @device - Pointer to device structure (not referenced by the current
 * implementation, kept for symmetry with the other command builders)
 * @cmds - Pointer to memory where read commands need to be added
 * @addr - gpu address of the read
 * @val - The GPU will wait until the data at address addr becomes
 * equal to value
 * @nop_gpuaddr - gpu address of a nop indirect buffer used to stall prefetch
 *
 * Returns the number of dwords written to @cmds.
 */
static inline int adreno_add_read_cmds(struct kgsl_device *device,
				unsigned int *cmds, unsigned int addr,
				unsigned int val, unsigned int nop_gpuaddr)
{
	unsigned int *start = cmds;
	*cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
	/* MEM SPACE = memory, FUNCTION = equals */
	*cmds++ = 0x13;
	*cmds++ = addr;
	*cmds++ = val;
	*cmds++ = 0xFFFFFFFF;
	*cmds++ = 0xFFFFFFFF;
	/* WAIT_REG_MEM turns back on protected mode - push it off */
	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 0;
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	return cmds - start;
}
/*
 * adreno_add_idle_cmds - Add pm4 packets for GPU idle
 * @adreno_dev - Pointer to the adreno device structure
 * @cmds - Pointer to memory where idle commands need to be added
 *
 * A3XX targets additionally need a WAIT_FOR_ME after the WAIT_FOR_IDLE.
 * Returns the number of dwords written to @cmds.
 */
static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
				unsigned int *cmds)
{
	unsigned int *start = cmds;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;
	if (adreno_is_a3xx(adreno_dev)) {
		*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
		*cmds++ = 0;
	}
	return cmds - start;
}
/*
 * adreno_wait_reg_eq() - Add a CP_WAIT_REG_EQ command
 * @cmds: Pointer to memory where commands are to be added
 * @addr: Register address to poll for
 * @val: Value to poll for
 * @mask: The value against which register value is masked
 * @interval: wait interval
 *
 * Returns the number of dwords written to @cmds (always 5).
 */
static inline int adreno_wait_reg_eq(unsigned int *cmds, unsigned int addr,
				unsigned int val, unsigned int mask,
				unsigned int interval)
{
	unsigned int *start = cmds;
	*cmds++ = cp_type3_packet(CP_WAIT_REG_EQ, 4);
	*cmds++ = addr;
	*cmds++ = val;
	*cmds++ = mask;
	*cmds++ = interval;
	return cmds - start;
}
/*
 * adreno_checkreg_off() - Checks the validity of a register enum
 * @adreno_dev: Pointer to adreno device
 * @offset_name: The register enum that is checked
 *
 * BUG()s if the enum is out of range or if the device's offset table marks
 * the register as ADRENO_REG_UNUSED; otherwise returns true.  Note it never
 * returns false - callers that test the result depend on the BUG_ON to stop
 * invalid accesses.
 */
static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
	enum adreno_regs offset_name)
{
	if (offset_name >= ADRENO_REG_REGISTER_MAX ||
		ADRENO_REG_UNUSED ==
			adreno_dev->gpudev->reg_offsets->offsets[offset_name]) {
		BUG_ON(1);
	}
	return true;
}
/*
 * adreno_readreg() - Read a register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be read
 * @val: Register value read is placed here
 */
static inline void adreno_readreg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int *val)
{
	struct kgsl_device *device = &adreno_dev->dev;
	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regread(device,
			adreno_dev->gpudev->reg_offsets->offsets[offset_name],
			val);
}
  697. /*
  698. * adreno_writereg() - Write a register by getting its offset from the
  699. * offset array defined in gpudev node
  700. * @adreno_dev: Pointer to the the adreno device
  701. * @offset_name: The register enum that is to be written
  702. * @val: Value to write
  703. */
  704. static inline void adreno_writereg(struct adreno_device *adreno_dev,
  705. enum adreno_regs offset_name, unsigned int val)
  706. {
  707. struct kgsl_device *device = &adreno_dev->dev;
  708. if (adreno_checkreg_off(adreno_dev, offset_name))
  709. kgsl_regwrite(device,
  710. adreno_dev->gpudev->reg_offsets->offsets[offset_name], val);
  711. }
  712. /*
  713. * adreno_getreg() - Returns the offset value of a register from the
  714. * register offset array in the gpudev node
  715. * @adreno_dev: Pointer to the the adreno device
  716. * @offset_name: The register enum whore offset is returned
  717. */
  718. static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
  719. enum adreno_regs offset_name)
  720. {
  721. if (!adreno_checkreg_off(adreno_dev, offset_name))
  722. return ADRENO_REG_REGISTER_MAX;
  723. return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
  724. }
  725. #ifdef CONFIG_DEBUG_FS
  726. void adreno_debugfs_init(struct kgsl_device *device);
  727. #else
  728. static inline void adreno_debugfs_init(struct kgsl_device *device) { }
  729. #endif
  730. /**
  731. * adreno_gpu_fault() - Return the current state of the GPU
  732. * @adreno_dev: A ponter to the adreno_device to query
  733. *
  734. * Return 0 if there is no fault or positive with the last type of fault that
  735. * occurred
  736. */
  737. static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
  738. {
  739. smp_rmb();
  740. return atomic_read(&adreno_dev->dispatcher.fault);
  741. }
  742. /**
  743. * adreno_set_gpu_fault() - Set the current fault status of the GPU
  744. * @adreno_dev: A pointer to the adreno_device to set
  745. * @state: fault state to set
  746. *
  747. */
  748. static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
  749. int state)
  750. {
  751. /* only set the fault bit w/o overwriting other bits */
  752. atomic_add(state, &adreno_dev->dispatcher.fault);
  753. smp_wmb();
  754. }
  755. /**
  756. * adreno_clear_gpu_fault() - Clear the GPU fault register
  757. * @adreno_dev: A pointer to an adreno_device structure
  758. *
  759. * Clear the GPU fault status for the adreno device
  760. */
  761. static inline void adreno_clear_gpu_fault(struct adreno_device *adreno_dev)
  762. {
  763. atomic_set(&adreno_dev->dispatcher.fault, 0);
  764. smp_wmb();
  765. }
  766. /*
  767. * adreno_bootstrap_ucode() - Checks if Ucode bootstrapping is supported
  768. * @adreno_dev: Pointer to the the adreno device
  769. */
  770. static inline int adreno_bootstrap_ucode(struct adreno_device *adreno_dev)
  771. {
  772. if ((adreno_dev->pfp_bstrp_size) && (adreno_dev->pm4_bstrp_size)
  773. && (adreno_dev->pfp_fw_version >= adreno_dev->pfp_bstrp_ver))
  774. return 1;
  775. else
  776. return 0;
  777. }
  778. /**
  779. * adreno_get_rptr() - Get the current ringbuffer read pointer
  780. * @rb: Pointer the ringbuffer to query
  781. *
  782. * Get the current read pointer from the GPU register.
  783. */
  784. static inline unsigned int
  785. adreno_get_rptr(struct adreno_ringbuffer *rb)
  786. {
  787. struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
  788. unsigned int result;
  789. adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &result);
  790. return result;
  791. }
  792. /*
  793. * adreno_set_protected_registers() - Protect the specified range of registers
  794. * from being accessed by the GPU
  795. * @device: pointer to the KGSL device
  796. * @index: Pointer to the index of the protect mode register to write to
  797. * @reg: Starting dword register to write
  798. * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
  799. *
  800. * Add the range of registers to the list of protected mode registers that will
  801. * cause an exception if the GPU accesses them. There are 16 available
  802. * protected mode registers. Index is used to specify which register to write
  803. * to - the intent is to call this function multiple times with the same index
  804. * pointer for each range and the registers will be magically programmed in
  805. * incremental fashion
  806. */
  807. static inline void adreno_set_protected_registers(struct kgsl_device *device,
  808. unsigned int *index, unsigned int reg, int mask_len)
  809. {
  810. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  811. unsigned int val;
  812. /* This function is only for adreno A3XX and beyond */
  813. BUG_ON(adreno_is_a2xx(adreno_dev));
  814. /* There are only 16 registers available */
  815. BUG_ON(*index >= 16);
  816. val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0x1FFFF);
  817. /*
  818. * Write the protection range to the next available protection
  819. * register
  820. */
  821. kgsl_regwrite(device, A3XX_CP_PROTECT_REG_0 + *index, val);
  822. *index = *index + 1;
  823. }
  824. #ifdef CONFIG_DEBUG_FS
  825. void adreno_debugfs_init(struct kgsl_device *device);
  826. #else
  827. static inline void adreno_debugfs_init(struct kgsl_device *device) { }
  828. #endif
  829. #endif /*__ADRENO_H */