// Copied from the Go stdlib, with modifications.
// https://github.com/golang/go/raw/master/src/internal/diff/diff.go
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package diff

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
)
  13. // A pair is a pair of values tracked for both the x and y side of a diff.
  14. // It is typically a pair of line indexes.
  15. type pair struct{ x, y int }
  16. // Diff returns an anchored diff of the two texts old and new
  17. // in the “unified diff” format. If old and new are identical,
  18. // Diff returns a nil slice (no output).
  19. //
  20. // Unix diff implementations typically look for a diff with
  21. // the smallest number of lines inserted and removed,
  22. // which can in the worst case take time quadratic in the
  23. // number of lines in the texts. As a result, many implementations
  24. // either can be made to run for a long time or cut off the search
  25. // after a predetermined amount of work.
  26. //
  27. // In contrast, this implementation looks for a diff with the
  28. // smallest number of “unique” lines inserted and removed,
  29. // where unique means a line that appears just once in both old and new.
  30. // We call this an “anchored diff” because the unique lines anchor
  31. // the chosen matching regions. An anchored diff is usually clearer
  32. // than a standard diff, because the algorithm does not try to
  33. // reuse unrelated blank lines or closing braces.
  34. // The algorithm also guarantees to run in O(n log n) time
  35. // instead of the standard O(n²) time.
  36. //
  37. // Some systems call this approach a “patience diff,” named for
  38. // the “patience sorting” algorithm, itself named for a solitaire card game.
  39. // We avoid that name for two reasons. First, the name has been used
  40. // for a few different variants of the algorithm, so it is imprecise.
  41. // Second, the name is frequently interpreted as meaning that you have
  42. // to wait longer (to be patient) for the diff, meaning that it is a slower algorithm,
  43. // when in fact the algorithm is faster than the standard one.
  44. func Diff(oldName, old, newName, new string, num_of_context_lines int) []byte {
  45. if old == new {
  46. return nil
  47. }
  48. x := lines(old)
  49. y := lines(new)
  50. // Print diff header.
  51. var out bytes.Buffer
  52. fmt.Fprintf(&out, "diff %s %s\n", oldName, newName)
  53. fmt.Fprintf(&out, "--- %s\n", oldName)
  54. fmt.Fprintf(&out, "+++ %s\n", newName)
  55. // Loop over matches to consider,
  56. // expanding each match to include surrounding lines,
  57. // and then printing diff chunks.
  58. // To avoid setup/teardown cases outside the loop,
  59. // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair
  60. // in the sequence of matches.
  61. var (
  62. done pair // printed up to x[:done.x] and y[:done.y]
  63. chunk pair // start lines of current chunk
  64. count pair // number of lines from each side in current chunk
  65. ctext []string // lines for current chunk
  66. )
  67. for _, m := range tgs(x, y) {
  68. if m.x < done.x {
  69. // Already handled scanning forward from earlier match.
  70. continue
  71. }
  72. // Expand matching lines as far possible,
  73. // establishing that x[start.x:end.x] == y[start.y:end.y].
  74. // Note that on the first (or last) iteration we may (or definitey do)
  75. // have an empty match: start.x==end.x and start.y==end.y.
  76. start := m
  77. for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] {
  78. start.x--
  79. start.y--
  80. }
  81. end := m
  82. for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] {
  83. end.x++
  84. end.y++
  85. }
  86. // Emit the mismatched lines before start into this chunk.
  87. // (No effect on first sentinel iteration, when start = {0,0}.)
  88. for _, s := range x[done.x:start.x] {
  89. ctext = append(ctext, "-"+s)
  90. count.x++
  91. }
  92. for _, s := range y[done.y:start.y] {
  93. ctext = append(ctext, "+"+s)
  94. count.y++
  95. }
  96. // If we're not at EOF and have too few common lines,
  97. // the chunk includes all the common lines and continues.
  98. C := num_of_context_lines // number of context lines
  99. if (end.x < len(x) || end.y < len(y)) &&
  100. (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) {
  101. for _, s := range x[start.x:end.x] {
  102. ctext = append(ctext, " "+s)
  103. count.x++
  104. count.y++
  105. }
  106. done = end
  107. continue
  108. }
  109. // End chunk with common lines for context.
  110. if len(ctext) > 0 {
  111. n := end.x - start.x
  112. if n > C {
  113. n = C
  114. }
  115. for _, s := range x[start.x : start.x+n] {
  116. ctext = append(ctext, " "+s)
  117. count.x++
  118. count.y++
  119. }
  120. done = pair{start.x + n, start.y + n}
  121. // Format and emit chunk.
  122. // Convert line numbers to 1-indexed.
  123. // Special case: empty file shows up as 0,0 not 1,0.
  124. if count.x > 0 {
  125. chunk.x++
  126. }
  127. if count.y > 0 {
  128. chunk.y++
  129. }
  130. fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y)
  131. for _, s := range ctext {
  132. out.WriteString(s)
  133. }
  134. count.x = 0
  135. count.y = 0
  136. ctext = ctext[:0]
  137. }
  138. // If we reached EOF, we're done.
  139. if end.x >= len(x) && end.y >= len(y) {
  140. break
  141. }
  142. // Otherwise start a new chunk.
  143. chunk = pair{end.x - C, end.y - C}
  144. for _, s := range x[chunk.x:end.x] {
  145. ctext = append(ctext, " "+s)
  146. count.x++
  147. count.y++
  148. }
  149. done = end
  150. }
  151. return out.Bytes()
  152. }
  153. // lines returns the lines in the file x, including newlines.
  154. // If the file does not end in a newline, one is supplied
  155. // along with a warning about the missing newline.
  156. func lines(x string) []string {
  157. l := strings.SplitAfter(x, "\n")
  158. if l[len(l)-1] == "" {
  159. l = l[:len(l)-1]
  160. } else {
  161. // Treat last line as having a message about the missing newline attached,
  162. // using the same text as BSD/GNU diff (including the leading backslash).
  163. l[len(l)-1] += "\n\\ No newline at end of file\n"
  164. }
  165. return l
  166. }
  167. // tgs returns the pairs of indexes of the longest common subsequence
  168. // of unique lines in x and y, where a unique line is one that appears
  169. // once in x and once in y.
  170. //
  171. // The longest common subsequence algorithm is as described in
  172. // Thomas G. Szymanski, “A Special Case of the Maximal Common
  173. // Subsequence Problem,” Princeton TR #170 (January 1975),
  174. // available at https://research.swtch.com/tgs170.pdf.
  175. func tgs(x, y []string) []pair {
  176. // Count the number of times each string appears in a and b.
  177. // We only care about 0, 1, many, counted as 0, -1, -2
  178. // for the x side and 0, -4, -8 for the y side.
  179. // Using negative numbers now lets us distinguish positive line numbers later.
  180. m := make(map[string]int)
  181. for _, s := range x {
  182. if c := m[s]; c > -2 {
  183. m[s] = c - 1
  184. }
  185. }
  186. for _, s := range y {
  187. if c := m[s]; c > -8 {
  188. m[s] = c - 4
  189. }
  190. }
  191. // Now unique strings can be identified by m[s] = -1+-4.
  192. //
  193. // Gather the indexes of those strings in x and y, building:
  194. // xi[i] = increasing indexes of unique strings in x.
  195. // yi[i] = increasing indexes of unique strings in y.
  196. // inv[i] = index j such that x[xi[i]] = y[yi[j]].
  197. var xi, yi, inv []int
  198. for i, s := range y {
  199. if m[s] == -1+-4 {
  200. m[s] = len(yi)
  201. yi = append(yi, i)
  202. }
  203. }
  204. for i, s := range x {
  205. if j, ok := m[s]; ok && j >= 0 {
  206. xi = append(xi, i)
  207. inv = append(inv, j)
  208. }
  209. }
  210. // Apply Algorithm A from Szymanski's paper.
  211. // In those terms, A = J = inv and B = [0, n).
  212. // We add sentinel pairs {0,0}, and {len(x),len(y)}
  213. // to the returned sequence, to help the processing loop.
  214. J := inv
  215. n := len(xi)
  216. T := make([]int, n)
  217. L := make([]int, n)
  218. for i := range T {
  219. T[i] = n + 1
  220. }
  221. for i := 0; i < n; i++ {
  222. k := sort.Search(n, func(k int) bool {
  223. return T[k] >= J[i]
  224. })
  225. T[k] = J[i]
  226. L[i] = k + 1
  227. }
  228. k := 0
  229. for _, v := range L {
  230. if k < v {
  231. k = v
  232. }
  233. }
  234. seq := make([]pair, 2+k)
  235. seq[1+k] = pair{len(x), len(y)} // sentinel at end
  236. lastj := n
  237. for i := n - 1; i >= 0; i-- {
  238. if L[i] == k && J[i] < lastj {
  239. seq[k] = pair{xi[i], yi[J[i]]}
  240. k--
  241. }
  242. }
  243. seq[0] = pair{0, 0} // sentinel at start
  244. return seq
  245. }