3 Commits c057116da8 ... 51cf3e5e7d

Author SHA1 Message Date
  Agustina Arzille 51cf3e5e7d Improve flag management in pmaps 4 months ago
  Agustina Arzille 3972355d76 Improve checking of userspace iovecs 4 months ago
  Agustina Arzille 2aa334c918 Fix receiving of alerts when allocating 4 months ago
6 changed files with 35 additions and 24 deletions
  1. +7 -3  kern/capability.c
  2. +1 -0  kern/capability.h
  3. +1 -2  kern/spinlock.h
  4. +20 -13  kern/user.c
  5. +1 -0  test/test_cap.c
  6. +5 -6  vm/map.c

+ 7 - 3
kern/capability.c

@@ -603,6 +603,13 @@ int
         cap_alert_type(alert) = CAP_ALERT_USER;
         cap_fill_ids (&alert->thread_id, &alert->task_id, thread_self ());
         alert->tag = tag;
+
+        /*
+         * Allocating an alert temporarily drops the flow lock. Since a
+         * receiver could have been added in the meantime, we need to
+         * check again before returning.
+         */
+        cap_recv_wakeup_fast (flow);
         return (0);
       }
 
@@ -660,9 +667,6 @@ cap_iters_copy (struct cap_iters *dst, const struct cap_iters *src)
   d->type.cur = s->type.cur;   \
   d->type.end = s->type.end
 
-  for (uint32_t i = src->iov.cache_idx; i < IPC_IOV_ITER_CACHE_SIZE; ++i)
-    dst->iov.cache[i] = src->iov.cache[i];
-
   dst->iov.cache_idx = src->iov.cache_idx;
   dst->iov.head = src->iov.head;
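
The new comment in cap_send_alert documents a classic lost-wakeup hazard: the allocation may sleep, the flow lock is dropped around it, and a receiver that blocks during that window would otherwise never be woken. A minimal user-space analogue of the recheck-after-reacquire pattern, using POSIX threads (all names below are illustrative stand-ins, not kernel code):

#include <pthread.h>
#include <stdlib.h>

struct flow
{
  pthread_mutex_t lock;
  pthread_cond_t recv_cond;
  void *pending_alert;
};

/* Entered with flow->lock held, like the kernel path; returns with it held. */
static int
flow_push_alert (struct flow *flow)
{
  pthread_mutex_unlock (&flow->lock);   // Allocation may sleep; drop the lock.
  void *alert = malloc (64);
  pthread_mutex_lock (&flow->lock);

  if (! alert)
    return (-1);

  flow->pending_alert = alert;

  /*
   * A receiver may have blocked while the lock was dropped; signal
   * again, mirroring the cap_recv_wakeup_fast() call added above.
   */
  pthread_cond_signal (&flow->recv_cond);
  return (0);
}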
 

+ 1 - 0
kern/capability.h

@@ -252,6 +252,7 @@ int cap_send_alert (struct cap_base *cap, const void *buf,
 #define cap_send_alert(cap, buf, flags, prio)   \
   (cap_send_alert) (CAP (cap), buf, flags, prio)
 
+// Add a port to, or remove one from, a flow.
 int cap_flow_add_port (struct cap_flow *flow, void *stack, size_t size,
                        struct ipc_msg *msg, struct ipc_msg_data *mdata,
                        struct cap_thread_info *info);

+ 1 - 2
kern/spinlock.h

@@ -101,8 +101,7 @@ spinlock_unlock_common (struct spinlock *lock)
 static inline bool
 spinlock_locked (const struct spinlock *lock)
 {
-  uint32_t value = atomic_load_rlx (&lock->value);
-  return (value != SPINLOCK_UNLOCKED);
+  return (atomic_load_rlx (&lock->value) != SPINLOCK_UNLOCKED);
 }
 
 #ifdef SPINLOCK_TRACK_OWNER
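
The rewrite folds the temporary into the return expression; the semantics are unchanged. For reference, a standalone C11 rendering of the same check (the struct layout and SPINLOCK_UNLOCKED value are stand-ins for the kernel's): a relaxed load suffices because the result is only advisory, e.g. for assertions, and never drives a synchronization decision.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPINLOCK_UNLOCKED   0u

struct spinlock
{
  _Atomic uint32_t value;
};

// Advisory check: relaxed ordering, no acquire or release semantics.
static inline bool
spinlock_locked (const struct spinlock *lock)
{
  return (atomic_load_explicit (&lock->value, memory_order_relaxed)
          != SPINLOCK_UNLOCKED);
}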

+ 20 - 13
kern/user.c

@@ -55,24 +55,28 @@ user_iov_next (struct ipc_iov_iter *it, int check, int *errp)
     {
       if (it->head.iov_len)
         return (&it->head);
-      else if (it->cur < it->end)
+      else if (it->cur >= it->end)
+        return (NULL);
+
+      _Auto iov = it->begin + it->cur;
+      if (check && !user_check_range (iov->iov_base, iov->iov_len))
         {
-          _Auto iov = it->begin + it->cur;
-          if (check && (!user_check_range (iov, sizeof (*iov)) ||
-                        !user_check_range (iov->iov_base, iov->iov_len)))
-            {
-              *errp = -EFAULT;
-              return (NULL);
-            }
-
-          it->head = *iov;
-          ++it->cur;
+          *errp = -EFAULT;
+          return (NULL);
         }
-      else
-        return (NULL);
+
+      it->head = *iov;
+      ++it->cur;
     }
 }
 
+static bool
+user_check_iov_iter (struct ipc_iov_iter *iov)
+{
+  return (iov->begin == &iov->head ||
+          user_check_range (iov->begin, iov->end * sizeof (*iov->begin)));
+}
+
 ssize_t
 user_copyv_impl (struct ipc_iov_iter *dst,
                  struct ipc_iov_iter *src, int to_user)
@@ -81,6 +85,9 @@ user_copyv_impl (struct ipc_iov_iter *dst,
   int error = unw_fixup_save (&fixup);
   if (unlikely (error))
     return (-error);
+  else if ((to_user && !user_check_iov_iter (dst)) ||
+           (!to_user && !user_check_iov_iter (src)))
+    return (-EFAULT);
 
   for (ssize_t ret = 0 ; ; )
     {
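
The restructure also changes the validation strategy: rather than range-checking each struct iovec inside the iteration loop, the new user_check_iov_iter validates the whole user-supplied array once before the copy starts (the begin == &iov->head case is the single-buffer iterator, whose storage is kernel memory and needs no check), leaving user_iov_next to validate only each element's payload. A rough user-space sketch of the same split, with check_range standing in for the kernel's user_check_range:

#include <stdbool.h>
#include <stddef.h>
#include <sys/uio.h>

// Stand-in for user_check_range(): accept any plausible address range.
static bool
check_range (const void *ptr, size_t len)
{
  return (ptr != NULL || len == 0);
}

/*
 * Upfront pass: the iovec array itself lives in user memory, so
 * validate it as a whole before dereferencing any element.
 */
static bool
check_iov_array (const struct iovec *begin, const struct iovec *head,
                 size_t end)
{
  return (begin == head || check_range (begin, end * sizeof (*begin)));
}

// Per-element step: only the payload range still needs checking.
static const struct iovec *
next_iov (const struct iovec *begin, size_t *cur, size_t end)
{
  if (*cur >= end)
    return (NULL);

  const struct iovec *iov = &begin[(*cur)++];
  return (check_range (iov->iov_base, iov->iov_len) ? iov : NULL);
}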

+ 1 - 0
test/test_cap.c

@@ -201,6 +201,7 @@ test_cap_receiver (void *arg)
   vm_page_unref (page);
   cap_base_rel (data->ch);
 
+  // Test that we receive an alert when the channel is closed.
   error = cap_recv_alert (flow, &vars->alert, 0, &vars->mdata);
   assert (! error);
   assert (vars->mdata.task_id == 0);

+ 5 - 6
vm/map.c

@@ -551,6 +551,8 @@ vm_map_clip_end (struct vm_map *map, struct vm_map_entry *entry,
   vm_map_link (map, new_entry, next);
 }
 
+#define VM_MAP_PMAP_FLAGS   (PMAP_PEF_GLOBAL | PMAP_IGNORE_ERRORS)
+
 static int
 vm_map_remove_impl (struct vm_map *map, uintptr_t start,
                     uintptr_t end, struct list *list, bool clear)
@@ -603,8 +605,7 @@ vm_map_remove_impl (struct vm_map *map, uintptr_t start,
   if (clear)
     { // Don't prevent lookups and page faults from here on.
       sxlock_share (&map->lock);
-      pmap_remove_range (map->pmap, start, end,
-                         PMAP_PEF_GLOBAL | PMAP_IGNORE_ERRORS);
+      pmap_remove_range (map->pmap, start, end, VM_MAP_PMAP_FLAGS);
       pmap_update (map->pmap);
     }
 
@@ -647,8 +648,6 @@ vm_map_try_merge_entries (struct vm_map *map, struct vm_map_entry *prev,
   list_insert_tail (dead, &prev->list_node);
 }
 
-#define VM_MAP_PROT_PFLAGS   (PMAP_PEF_GLOBAL | PMAP_IGNORE_ERRORS)
-
 static int
 vm_map_protect_entry (struct vm_map *map, struct vm_map_entry *entry,
                       uintptr_t start, uintptr_t end,
@@ -682,9 +681,9 @@ vm_map_protect_entry (struct vm_map *map, struct vm_map_entry *entry,
 
   if (prot == VM_PROT_NONE &&
       (VM_MAP_PROT (entry->flags) & VM_PROT_WRITE) == 0)
-    pmap_remove_range (map->pmap, start, end, VM_MAP_PROT_PFLAGS);
+    pmap_remove_range (map->pmap, start, end, VM_MAP_PMAP_FLAGS);
   else
-    pmap_protect_range (map->pmap, start, end, prot, VM_MAP_PROT_PFLAGS);
+    pmap_protect_range (map->pmap, start, end, prot, VM_MAP_PMAP_FLAGS);
 
   return (0);
 }