erase_memory.rb

def udev_watchdog_monitored_device
  ps_output = $vm.execute_successfully('ps -wweo cmd').stdout
  udev_watchdog_cmd = '/usr/local/sbin/udev-watchdog'
  # The regex below looks for a line like the following:
  # /usr/local/sbin/udev-watchdog /devices/pci0000:00/0000:00:01.1/ata2/host1/target1:0:0/1:0:0:0/block/sr0 cd
  # We're only interested in the device itself, not the type
  ps_output_scan = ps_output.scan(/^#{Regexp.escape(udev_watchdog_cmd)}\s(\S+)\s(?:cd|disk)$/)
  assert_equal(1, ps_output_scan.count,
               "There should be exactly one udev-watchdog running.")
  monitored_out = ps_output_scan.flatten[0]
  assert(!monitored_out.nil?)
  monitored_device_id = $vm.file_content('/sys' + monitored_out + '/dev').chomp
  monitored_device =
    $vm.execute_successfully(
      "readlink -f /dev/block/'#{monitored_device_id}'").stdout.chomp
  return monitored_device
end
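
# Worked illustration (hypothetical values, not part of the suite): for
# the example ps line in the comment above, the captured path would be
# /devices/pci0000:00/.../block/sr0, whose sysfs `dev` file holds the
# device's major:minor pair, e.g.:
#
#   $vm.file_content('/sys/devices/pci0000:00/.../block/sr0/dev')  # => "11:0\n"
#   # and, inside the guest: `readlink -f /dev/block/11:0` => /dev/sr0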

Given /^udev-watchdog is monitoring the correct device$/ do
  assert_equal(udev_watchdog_monitored_device, boot_device)
end

Given /^the computer is a modern 64-bit system$/ do
  $vm.set_arch("x86_64")
  $vm.drop_hypervisor_feature("nonpae")
  $vm.add_hypervisor_feature("pae")
end

Given /^the computer is an old pentium without the PAE extension$/ do
  $vm.set_arch("i686")
  $vm.drop_hypervisor_feature("pae")
  # libvirt claims the following feature doesn't exist even though
  # it's listed in the hvm i686 capabilities...
  # $vm.add_hypervisor_feature("nonpae")
  # ... so we use a workaround until we can figure this one out.
  $vm.disable_pae_workaround
end

def which_kernel
  kernel_path = $vm.execute_successfully("tails-get-bootinfo kernel").stdout.chomp
  return File.basename(kernel_path)
end

Given /^the PAE kernel is running$/ do
  kernel = which_kernel
  assert_equal("vmlinuz2", kernel)
end

Given /^the non-PAE kernel is running$/ do
  kernel = which_kernel
  assert_equal("vmlinuz", kernel)
end

def used_ram_in_MiB
  return $vm.execute_successfully("free -m | awk '/^Mem:/ { print $3 }'").stdout.chomp.to_i
end

def detected_ram_in_MiB
  return $vm.execute_successfully("free -m | awk '/^Mem:/ { print $2 }'").stdout.chomp.to_i
end
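
# convert_to_MiB() and convert_to_bytes() are unit-conversion helpers
# defined elsewhere in this test suite. A minimal sketch of the
# behaviour this file relies on (illustrative only, not the suite's
# actual implementation):
#
#   def convert_to_MiB(size, unit)
#     case unit
#     when 'b'        then size / 2**20
#     when 'k', 'KiB' then size / 2**10
#     when 'MiB'      then size
#     when 'GiB'      then size * 2**10
#     end
#   end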

Given /^at least (\d+) ([[:alpha:]]+) of RAM was detected$/ do |min_ram, unit|
  @detected_ram_m = detected_ram_in_MiB
  puts "Detected #{@detected_ram_m} MiB of RAM"
  min_ram_m = convert_to_MiB(min_ram.to_i, unit)
  # Not all RAM is reported by `free`, so we allow a 256 MiB gap
  gap = convert_to_MiB(256, "MiB")
  assert(@detected_ram_m + gap >= min_ram_m, "Didn't detect enough RAM")
end

def pattern_coverage_in_guest_ram
  assert_not_nil(
    @free_mem_before_fill_b,
    "@free_mem_before_fill_b is not set; probably the required 'I fill the " +
    "guest's memory ...' step was not run")
  free_mem_before_fill_m = convert_to_MiB(@free_mem_before_fill_b, 'b')
  dump = "#{$config["TMPDIR"]}/memdump"
  # Workaround: when dumping the guest's memory via core_dump(), libvirt
  # will create files that only root can read. We therefore pre-create
  # them with more permissive permissions, which libvirt will preserve
  # (although it will change ownership), so that the user running the
  # script can grep the dump for the fillram pattern, and delete it.
  if File.exist?(dump)
    File.delete(dump)
  end
  FileUtils.touch(dump)
  FileUtils.chmod(0666, dump)
  $vm.domain.core_dump(dump)
  patterns = IO.popen(['grep', '--text', '-c', 'wipe_didnt_work', dump]).gets.to_i
  File.delete(dump)
  # The pattern is 16 bytes long
  patterns_b = patterns * 16
  patterns_m = convert_to_MiB(patterns_b, 'b')
  coverage = patterns_b.to_f / @free_mem_before_fill_b
  puts "Pattern coverage: #{"%.3f" % (coverage * 100)}% (#{patterns_m} MiB " +
       "out of #{free_mem_before_fill_m} MiB initial free memory)"
  return coverage
end
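
# Worked example (illustrative numbers): if grep reports 13,107,200
# matches, that is 13,107,200 * 16 bytes = 200 MiB of patterned memory;
# with 1,000 MiB of initially free memory, the returned coverage would
# be 0.2, printed as 20.000%.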

Given /^I fill the guest's memory with a known pattern(| without verifying)$/ do |dont_verify|
  verify = dont_verify.empty?
  # Free some more memory by dropping the caches etc.
  $vm.execute_successfully("echo 3 > /proc/sys/vm/drop_caches")
  # The (guest) kernel may freeze when approaching full memory without
  # adjusting the OOM killer and memory overcommitment limitations.
  kernel_mem_reserved_k = 64*1024  # Duplicated in /usr/share/initramfs-tools/scripts/init-premount/memory_wipe
  kernel_mem_reserved_m = convert_to_MiB(kernel_mem_reserved_k, 'k')
  admin_mem_reserved_k = 128*1024  # Duplicated in /usr/share/initramfs-tools/scripts/init-premount/memory_wipe
  admin_mem_reserved_m = convert_to_MiB(admin_mem_reserved_k, 'k')
  kernel_mem_settings = [
    # Let's avoid killing other random processes, and instead focus on
    # the hoggers, which will be our fillram instances.
    ["vm.oom_kill_allocating_task", 0],
    # Let's not print stuff to the terminal.
    ["vm.oom_dump_tasks", 0],
    # From tests, the 'guess' heuristic seems to allow us to safely
    # (i.e. no kernel freezes) fill the maximum amount of RAM.
    ["vm.overcommit_memory", 0],
    # Make sure the kernel doesn't starve...
    ["vm.min_free_kbytes", kernel_mem_reserved_k],
    # ... and also some core privileged processes, e.g. the remote
    # shell.
    ["vm.admin_reserve_kbytes", admin_mem_reserved_k],
  ]
  kernel_mem_settings.each do |key, val|
    $vm.execute_successfully("sysctl #{key}=#{val}")
  end
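  # The loop above amounts to running, inside the guest:
  #
  #   sysctl vm.oom_kill_allocating_task=0
  #   sysctl vm.oom_dump_tasks=0
  #   sysctl vm.overcommit_memory=0
  #   sysctl vm.min_free_kbytes=65536
  #   sysctl vm.admin_reserve_kbytes=131072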
  # We exclude the memory we reserve for the kernel and admin
  # processes above from the free memory since fillram will be run by
  # an unprivileged user in user-space.
  used_mem_before_fill_m = used_ram_in_MiB
  free_mem_before_fill_m = @detected_ram_m - used_mem_before_fill_m -
                           kernel_mem_reserved_m - admin_mem_reserved_m
  @free_mem_before_fill_b = convert_to_bytes(free_mem_before_fill_m, 'MiB')
  # To be sure that we fill all memory we run one fillram instance for
  # each GiB of detected memory, rounded up. To maintain stability we
  # prioritize the fillram instances to be OOM killed. We also kill
  # all instances after the first one has finished, i.e. when the
  # memory is full, since the others otherwise may continue re-filling
  # the same memory unnecessarily. Note that we leave the `killall`
  # call outside of the OOM-adjusted shell so it will not be OOM
  # killed too.
  nr_instances = (@detected_ram_m.to_f / 2**10).ceil
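  # For example, with 2048 MiB of detected RAM this starts
  # ceil(2048 / 1024) = 2 fillram instances.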
  nr_instances.times do
    oom_adjusted_fillram_cmd =
      "echo 1000 > /proc/$$/oom_score_adj && exec /usr/local/sbin/fillram"
    $vm.spawn("sh -c '#{oom_adjusted_fillram_cmd}'; killall fillram",
              :user => LIVE_USER)
  end
  # We make sure that all fillram processes have started...
  try_for(10, :msg => "not all fillram processes started", :delay => 0.1) do
    nr_fillram_procs = $vm.pidof("fillram").size
    nr_instances == nr_fillram_procs
  end
  prev_used_ram_ratio = -1
  # ... and that the memory fill finishes
  try_for(nr_instances*2*60, :msg => "fillram didn't complete, probably the VM crashed") do
    used_ram_ratio = (used_ram_in_MiB.to_f / @detected_ram_m) * 100
    # Round to the closest multiple of 10 to limit the logging a bit.
    used_ram_ratio = (used_ram_ratio / 10).round * 10
    if used_ram_ratio - prev_used_ram_ratio >= 10
      debug_log("Memory fill progress: %3d%%" % used_ram_ratio)
      prev_used_ram_ratio = used_ram_ratio
    end
    ! $vm.has_process?("fillram")
  end
  debug_log("Memory fill progress: finished")
  if verify
    coverage = pattern_coverage_in_guest_ram()
    min_coverage = 0.90
    assert(coverage > min_coverage,
           "#{"%.3f" % (coverage*100)}% of the free memory was filled with " +
           "the pattern, but more than #{"%.3f" % (min_coverage*100)}% was " +
           "expected")
  end
end

Then /^I find very few patterns in the guest's memory$/ do
  coverage = pattern_coverage_in_guest_ram()
  max_coverage = 0.008
  assert(coverage < max_coverage,
         "#{"%.3f" % (coverage*100)}% of the free memory still has the " +
         "pattern, but less than #{"%.3f" % (max_coverage*100)}% was expected")
end

Then /^I find many patterns in the guest's memory$/ do
  coverage = pattern_coverage_in_guest_ram()
  min_coverage = 0.9
  assert(coverage > min_coverage,
         "#{"%.3f" % (coverage*100)}% of the free memory still has the " +
         "pattern, but more than #{"%.3f" % (min_coverage*100)}% was expected")
end

When /^I reboot without wiping the memory$/ do
  $vm.reset
end

When /^I stop the boot at the bootloader menu$/ do
  step "Tails is at the boot menu's cmdline"
end

When /^I shutdown and wait for Tails to finish wiping the memory$/ do
  $vm.spawn("halt")
  try_for(memory_wipe_timeout, :msg => "memory wipe didn't finish, probably the VM crashed") do
    # We spam keypresses to prevent console blanking from hiding the
    # image we're waiting for.
    @screen.type(" ")
    @screen.find('MemoryWipeCompleted.png')
  end
end