coalesced_mmio.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr, len) must be fully included in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)	/* the access wraps around the address space */
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned int avail;

	/* Are we able to batch it?
	 * last is the first free entry;
	 * check that we don't collide with the first used entry.
	 * There is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}
	return 1;
}

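/*
 * Worked example of the ring arithmetic (assuming 4 KiB pages, where
 * KVM_COALESCED_MMIO_MAX works out to 170 entries): with first == 5 and
 * last == 4, avail == (5 - 4 - 1) % 170 == 0, so the ring is full. The
 * one permanently unused slot is what lets "first == last" mean empty
 * rather than full.
 */
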
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	smp_wmb();	/* commit the entry before publishing the new 'last' */
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

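/*
 * For context, a sketch of the consumer side. This lives in userspace
 * (e.g. the VMM's run loop), not in this file; handle_mmio_write() is a
 * hypothetical callback, and the barrier placement mirrors what a VMM
 * such as QEMU does when it drains the ring:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();	// consume the entry before freeing the slot
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */
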
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);
	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

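/*
 * Note: this page is the one userspace sees. kvm_vcpu_fault() in
 * kvm_main.c maps it into the vcpu mmap area at
 * KVM_COALESCED_MMIO_PAGE_OFFSET (page 1 on x86, right after
 * struct kvm_run).
 */
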
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);
	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	/* Drop every registered zone that fully contains the requested range. */
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
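
/*
 * Illustrative userspace usage (a minimal sketch; vm_fd, vcpu_fd, kvm_fd
 * and the addresses are placeholder values, error handling omitted):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfe000000,	// hypothetical device MMIO base
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 *	// The ring shares the vcpu mmap area with struct kvm_run.
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	struct kvm_coalesced_mmio_ring *ring = (void *)run +
 *		KVM_COALESCED_MMIO_PAGE_OFFSET * page_size;
 */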