unaligned.h

/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *  Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions. There is no hardware
 * support for unaligned halfword accesses, so the 16-bit helpers
 * below are assembled from byte accesses.
 */
static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}
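
/*
 * Illustrative use of the 16-bit helpers (an added example, not part
 * of the original header; the buffer contents are hypothetical):
 *
 *	u8 buf[2] = { 0x34, 0x12 };
 *	u16 v = get_unaligned_le16(buf);	// v == 0x1234
 *	put_unaligned_be16(v, buf);		// buf becomes { 0x12, 0x34 }
 */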

/*
 * LDNW/LDNDW are the C64x+ non-aligned load instructions. Loads have
 * four delay slots, hence the "nop 4" before the result is usable.
 * In get_unaligned32 the same register carries the pointer in and the
 * loaded value out (the "+a" constraint).
 */
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop	4\n"
	     : "+a"(val));
	return val;
}

static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop	4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

static inline void put_unaligned64(u64 val, void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}
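
/*
 * Illustrative use of the hardware helpers (an added example, not
 * part of the original header; the buffer and offsets are
 * hypothetical):
 *
 *	u8 pkt[8];
 *	u32 w = get_unaligned32(pkt + 3);	// single LDNW, any alignment
 *	put_unaligned32(w, pkt + 1);		// single STNW
 */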

/*
 * The 32/64-bit helpers above access memory in native byte order, so
 * only the non-native variants need a byte swap.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif
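
/*
 * For example (an added illustration, not part of the original
 * header), on a little-endian kernel:
 *
 *	u32 a = get_unaligned_le32(p);	// bare LDNW
 *	u32 b = get_unaligned_be32(p);	// LDNW followed by __swab32
 */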

/*
 * Cause a link-time error if we try an unaligned access other than
 * 1, 2, 4 or 8 bytes long
 */
extern int __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) (__force typeof(*(ptr)))({	\
	sizeof(*(ptr)) == 1 ? *(ptr) :				\
	(sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) :	\
	(sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) :	\
	(sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) :	\
	__bad_unaligned_access_size())));			\
})

#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({	\
	sizeof(*(ptr)) == 1 ? *(ptr) :				\
	(sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) :	\
	(sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) :	\
	(sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) :	\
	__bad_unaligned_access_size())));			\
})
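
/*
 * sizeof(*(ptr)) is a compile-time constant, so the chain above
 * collapses to a single call; e.g. (an added illustration, the
 * pointer is hypothetical):
 *
 *	u16 *p = ...;
 *	u16 v = __get_unaligned_be(p);	// becomes get_unaligned_be16(p)
 */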

#define __put_unaligned_le(val, ptr) ({				\
	void *__gu_p = (ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		*(u8 *)__gu_p = (__force u8)(val);		\
		break;						\
	case 2:							\
		put_unaligned_le16((__force u16)(val), __gu_p);	\
		break;						\
	case 4:							\
		put_unaligned_le32((__force u32)(val), __gu_p);	\
		break;						\
	case 8:							\
		put_unaligned_le64((__force u64)(val), __gu_p);	\
		break;						\
	default:						\
		__bad_unaligned_access_size();			\
		break;						\
	}							\
	(void)0; })

#define __put_unaligned_be(val, ptr) ({				\
	void *__gu_p = (ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		*(u8 *)__gu_p = (__force u8)(val);		\
		break;						\
	case 2:							\
		put_unaligned_be16((__force u16)(val), __gu_p);	\
		break;						\
	case 4:							\
		put_unaligned_be32((__force u32)(val), __gu_p);	\
		break;						\
	case 8:							\
		put_unaligned_be64((__force u64)(val), __gu_p);	\
		break;						\
	default:						\
		__bad_unaligned_access_size();			\
		break;						\
	}							\
	(void)0; })
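
/*
 * Illustrative use of the generic accessors (an added example, not
 * part of the original header; the pointer is hypothetical):
 *
 *	u64 *p = ...;			// possibly unaligned
 *	u64 v = get_unaligned(p);	// native-endian 64-bit load
 *	put_unaligned(v + 1, p);	// native-endian 64-bit store
 */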

#endif /* _ASM_C6X_UNALIGNED_H */