- /*
- * VMware VMCI Driver
- *
- * Copyright (C) 2012 VMware, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation version 2 and no later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- */
- #include <linux/vmw_vmci_defs.h>
- #include <linux/vmw_vmci_api.h>
- #include <linux/highmem.h>
- #include <linux/kernel.h>
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <linux/pagemap.h>
- #include <linux/pci.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/uio.h>
- #include <linux/wait.h>
- #include <linux/vmalloc.h>
- #include <linux/skbuff.h>
- #include "vmci_handle_array.h"
- #include "vmci_queue_pair.h"
- #include "vmci_datagram.h"
- #include "vmci_resource.h"
- #include "vmci_context.h"
- #include "vmci_driver.h"
- #include "vmci_event.h"
- #include "vmci_route.h"
- /*
- * In the following, we will distinguish between two kinds of VMX processes -
- * the ones with versions lower than VMCI_VERSION_NOVMVM, which use specialized
- * VMCI page files in the VMX and support VM to VM communication, and the
- * newer ones that use the guest memory directly. We will in the following
- * refer to the older VMX versions as old-style VMX'en, and the newer ones as
- * new-style VMX'en.
- *
- * The state transition diagram is as follows (the VMCIQPB_ prefix has been
- * removed for readability) - see below for more details on the transitions:
- *
- * -------------- NEW -------------
- * | |
- * \_/ \_/
- * CREATED_NO_MEM <-----------------> CREATED_MEM
- * | | |
- * | o-----------------------o |
- * | | |
- * \_/ \_/ \_/
- * ATTACHED_NO_MEM <----------------> ATTACHED_MEM
- * | | |
- * | o----------------------o |
- * | | |
- * \_/ \_/ \_/
- * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
- * | |
- * | |
- * -------------> gone <-------------
- *
- * In more detail. When a VMCI queue pair is first created, it will be in the
- * VMCIQPB_NEW state. It will then move into one of the following states:
- *
- * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
- *
- * - the create was performed by a host endpoint, in which case there is
- * no backing memory yet.
- *
- * - the create was initiated by an old-style VMX, that uses
- * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
- * a later point in time. This state can be distinguished from the one
- * above by the context ID of the creator. A host side is not allowed to
- * attach until the page store has been set.
- *
- * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
- * is created by a VMX using the queue pair device backend that
- * sets the UVAs of the queue pair immediately and stores the
- * information for later attachers. At this point, it is ready for
- * the host side to attach to it.
- *
- * Once the queue pair is in one of the created states (with the exception of
- * the case mentioned for older VMX'en above), it is possible to attach to the
- * queue pair. Again we have two new states possible:
- *
- * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
- * paths:
- *
- * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
- * pair, and attaches to a queue pair previously created by the host side.
- *
- * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
- * already created by a guest.
- *
- * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
- * vmci_qp_broker_set_page_store (see below).
- *
- * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
- * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
- * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
- * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
- * will be entered.
- *
- * From the attached queue pair, the queue pair can enter the shutdown states
- * when either side of the queue pair detaches. If the guest side detaches
- * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
- * the content of the queue pair will no longer be available. If the host
- * side detaches first, the queue pair will either enter the
- * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
- * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
- * (e.g., the host detaches while a guest is stunned).
- *
- * New-style VMX'en will also unmap guest memory, if the guest is
- * quiesced, e.g., during a snapshot operation. In that case, the guest
- * memory will no longer be available, and the queue pair will transition from
- * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
- * in which case the queue pair will transition from the *_NO_MEM state at that
- * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
- * since the peer may have either attached or detached in the meantime. The
- * values are laid out such that ++ on a state will move from a *_NO_MEM to a
- * *_MEM state, and vice versa.
- */
- /*
- * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these
- * types are passed around to enqueue and dequeue routines. Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side. In
- * ESX, that parameter holds a buffer type.
- */
- typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
- u64 queue_offset, const void *src,
- size_t src_offset, size_t size);
- typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size);
- /* The Kernel specific component of the struct vmci_queue structure. */
- struct vmci_queue_kern_if {
- struct mutex __mutex; /* Protects the queue. */
- struct mutex *mutex; /* Shared by producer and consumer queues. */
- size_t num_pages; /* Number of pages incl. header. */
- bool host; /* Host or guest? */
- union {
- struct {
- dma_addr_t *pas;
- void **vas;
- } g; /* Used by the guest. */
- struct {
- struct page **page;
- struct page **header_page;
- } h; /* Used by the host. */
- } u;
- };
- /*
- * This structure is opaque to the clients.
- */
- struct vmci_qp {
- struct vmci_handle handle;
- struct vmci_queue *produce_q;
- struct vmci_queue *consume_q;
- u64 produce_q_size;
- u64 consume_q_size;
- u32 peer;
- u32 flags;
- u32 priv_flags;
- bool guest_endpoint;
- unsigned int blocked;
- unsigned int generation;
- wait_queue_head_t event;
- };
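- /*
- * Illustrative sketch, not part of the driver: since struct vmci_qp is
- * opaque, a kernel client only ever holds the pointer handed back by the
- * public vmci_qpair_alloc() API declared in vmw_vmci_api.h and passes it
- * on to the other vmci_qpair_*() calls. The example_* name, the page-sized
- * queues and the zero flags below are arbitrary choices for the sketch.
- */
- static inline int example_qpair_client(struct vmci_qp **qpair,
- struct vmci_handle *handle, u32 peer)
- {
- /* An invalid handle asks the broker to pick a resource id for us. */
- *handle = vmci_make_handle(VMCI_INVALID_ID, VMCI_INVALID_ID);
- return vmci_qpair_alloc(qpair, handle,
- PAGE_SIZE, /* produce queue size */
- PAGE_SIZE, /* consume queue size */
- peer, 0, VMCI_NO_PRIVILEGE_FLAGS);
- }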
- enum qp_broker_state {
- VMCIQPB_NEW,
- VMCIQPB_CREATED_NO_MEM,
- VMCIQPB_CREATED_MEM,
- VMCIQPB_ATTACHED_NO_MEM,
- VMCIQPB_ATTACHED_MEM,
- VMCIQPB_SHUTDOWN_NO_MEM,
- VMCIQPB_SHUTDOWN_MEM,
- VMCIQPB_GONE
- };
- #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
- _qpb->state == VMCIQPB_ATTACHED_MEM || \
- _qpb->state == VMCIQPB_SHUTDOWN_MEM)
- /*
- * In the queue pair broker, we always use the guest point of view for
- * the produce and consume queue values and references, e.g., the
- * produce queue size stored is the guest's produce queue size. The
- * host endpoint will need to swap these around. The only exception is
- * the local queue pairs on the host, in which case the host endpoint
- * that creates the queue pair will have the right orientation, and
- * the attaching host endpoint will need to swap.
- */
- struct qp_entry {
- struct list_head list_item;
- struct vmci_handle handle;
- u32 peer;
- u32 flags;
- u64 produce_size;
- u64 consume_size;
- u32 ref_count;
- };
- struct qp_broker_entry {
- struct vmci_resource resource;
- struct qp_entry qp;
- u32 create_id;
- u32 attach_id;
- enum qp_broker_state state;
- bool require_trusted_attach;
- bool created_by_trusted;
- bool vmci_page_files; /* Created by VMX using VMCI page files */
- struct vmci_queue *produce_q;
- struct vmci_queue *consume_q;
- struct vmci_queue_header saved_produce_q;
- struct vmci_queue_header saved_consume_q;
- vmci_event_release_cb wakeup_cb;
- void *client_data;
- void *local_mem; /* Kernel memory for local queue pair */
- };
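- /*
- * Illustrative sketch, not part of the driver: the broker states above are
- * laid out so that each *_NO_MEM value is immediately followed by its *_MEM
- * counterpart, which is why the state diagram comment at the top of the
- * file says that ++ on a state moves from *_NO_MEM to *_MEM and vice versa.
- * The hypothetical helper below only illustrates that layout; the real
- * transitions happen in the broker map/unmap and set_page_store paths.
- */
- static inline void example_qp_broker_set_mem(struct qp_broker_entry *entry,
- bool mem_available)
- {
- /* Only meaningful for the CREATED/ATTACHED/SHUTDOWN states. */
- if (mem_available && !QPBROKERSTATE_HAS_MEM(entry))
- entry->state++; /* e.g. VMCIQPB_ATTACHED_NO_MEM -> VMCIQPB_ATTACHED_MEM */
- else if (!mem_available && QPBROKERSTATE_HAS_MEM(entry))
- entry->state--; /* e.g. VMCIQPB_ATTACHED_MEM -> VMCIQPB_ATTACHED_NO_MEM */
- }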
- struct qp_guest_endpoint {
- struct vmci_resource resource;
- struct qp_entry qp;
- u64 num_ppns;
- void *produce_q;
- void *consume_q;
- struct ppn_set ppn_set;
- };
- struct qp_list {
- struct list_head head;
- struct mutex mutex; /* Protect queue list. */
- };
- static struct qp_list qp_broker_list = {
- .head = LIST_HEAD_INIT(qp_broker_list.head),
- .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
- };
- static struct qp_list qp_guest_endpoints = {
- .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
- .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
- };
- #define INVALID_VMCI_GUEST_MEM_ID 0
- #define QPE_NUM_PAGES(_QPE) ((u32) \
- (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
- DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
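- /*
- * Worked example (illustrative, assuming 4 KiB pages): a qp_entry with a
- * 64 KiB produce queue and a 32 KiB consume queue needs
- * DIV_ROUND_UP(65536, 4096) + DIV_ROUND_UP(32768, 4096) + 2 = 16 + 8 + 2
- * = 26 pages, where the "+ 2" accounts for the two queue header pages.
- */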
- /*
- * Frees kernel VA space for a given queue and its queue header, and
- * frees physical data pages.
- */
- static void qp_free_queue(void *q, u64 size)
- {
- struct vmci_queue *queue = q;
- if (queue) {
- u64 i;
- /* Given size does not include header, so add in a page here. */
- for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
- dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
- queue->kernel_if->u.g.vas[i],
- queue->kernel_if->u.g.pas[i]);
- }
- vfree(queue);
- }
- }
- /*
- * Allocates kernel queue pages of specified size with IOMMU mappings,
- * plus space for the queue structure/kernel interface and the queue
- * header.
- */
- static void *qp_alloc_queue(u64 size, u32 flags)
- {
- u64 i;
- struct vmci_queue *queue;
- size_t pas_size;
- size_t vas_size;
- size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
- if (num_pages >
- (SIZE_MAX - queue_size) /
- (sizeof(*queue->kernel_if->u.g.pas) +
- sizeof(*queue->kernel_if->u.g.vas)))
- return NULL;
- pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
- vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
- queue_size += pas_size + vas_size;
- queue = vmalloc(queue_size);
- if (!queue)
- return NULL;
- queue->q_header = NULL;
- queue->saved_header = NULL;
- queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
- queue->kernel_if->mutex = NULL;
- queue->kernel_if->num_pages = num_pages;
- queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
- queue->kernel_if->u.g.vas =
- (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
- queue->kernel_if->host = false;
- for (i = 0; i < num_pages; i++) {
- queue->kernel_if->u.g.vas[i] =
- dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
- &queue->kernel_if->u.g.pas[i],
- GFP_KERNEL);
- if (!queue->kernel_if->u.g.vas[i]) {
- /* Size excl. the header. */
- qp_free_queue(queue, i * PAGE_SIZE);
- return NULL;
- }
- }
- /* Queue header is the first page. */
- queue->q_header = queue->kernel_if->u.g.vas[0];
- return queue;
- }
- /*
- * Copies from a given buffer or iovector to a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
- * by traversing the offset -> page translation structure for the queue.
- * Assumes that offset + size does not wrap around in the queue.
- */
- static int __qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src,
- size_t size,
- bool is_iovec)
- {
- struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
- size_t bytes_copied = 0;
- while (bytes_copied < size) {
- const u64 page_index =
- (queue_offset + bytes_copied) / PAGE_SIZE;
- const size_t page_offset =
- (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
- void *va;
- size_t to_copy;
- if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
- else
- va = kernel_if->u.g.vas[page_index + 1];
- /* Skip header. */
- if (size - bytes_copied > PAGE_SIZE - page_offset)
- /* Enough payload to fill up this page. */
- to_copy = PAGE_SIZE - page_offset;
- else
- to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = (struct msghdr *)src;
- int err;
- /* The iovec will track bytes_copied internally. */
- err = memcpy_from_msg((u8 *)va + page_offset,
- msg, to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)va + page_offset,
- (u8 *)src + bytes_copied, to_copy);
- }
- bytes_copied += to_copy;
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- }
- return VMCI_SUCCESS;
- }
- /*
- * Copies to a given buffer or iovector from a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
- * by traversing the offset -> page translation structure for the queue.
- * Assumes that offset + size does not wrap around in the queue.
- */
- static int __qp_memcpy_from_queue(void *dest,
- const struct vmci_queue *queue,
- u64 queue_offset,
- size_t size,
- bool is_iovec)
- {
- struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
- size_t bytes_copied = 0;
- while (bytes_copied < size) {
- const u64 page_index =
- (queue_offset + bytes_copied) / PAGE_SIZE;
- const size_t page_offset =
- (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
- void *va;
- size_t to_copy;
- if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
- else
- va = kernel_if->u.g.vas[page_index + 1];
- /* Skip header. */
- if (size - bytes_copied > PAGE_SIZE - page_offset)
- /* Enough payload to read from this page. */
- to_copy = PAGE_SIZE - page_offset;
- else
- to_copy = size - bytes_copied;
- if (is_iovec) {
- struct msghdr *msg = dest;
- int err;
- /* The iovec will track bytes_copied internally. */
- err = memcpy_to_msg(msg, (u8 *)va + page_offset,
- to_copy);
- if (err != 0) {
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else {
- memcpy((u8 *)dest + bytes_copied,
- (u8 *)va + page_offset, to_copy);
- }
- bytes_copied += to_copy;
- if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
- }
- return VMCI_SUCCESS;
- }
- /*
- * Allocates two lists of PPNs --- one for the pages in the produce queue,
- * and the other for the pages in the consume queue. Initializes the lists
- * of PPNs with the page frame numbers of the KVA for the two queues (and
- * the queue headers).
- */
- static int qp_alloc_ppn_set(void *prod_q,
- u64 num_produce_pages,
- void *cons_q,
- u64 num_consume_pages, struct ppn_set *ppn_set)
- {
- u32 *produce_ppns;
- u32 *consume_ppns;
- struct vmci_queue *produce_q = prod_q;
- struct vmci_queue *consume_q = cons_q;
- u64 i;
- if (!produce_q || !num_produce_pages || !consume_q ||
- !num_consume_pages || !ppn_set)
- return VMCI_ERROR_INVALID_ARGS;
- if (ppn_set->initialized)
- return VMCI_ERROR_ALREADY_EXISTS;
- produce_ppns =
- kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
- if (!produce_ppns)
- return VMCI_ERROR_NO_MEM;
- consume_ppns =
- kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
- if (!consume_ppns) {
- kfree(produce_ppns);
- return VMCI_ERROR_NO_MEM;
- }
- for (i = 0; i < num_produce_pages; i++) {
- unsigned long pfn;
- /* Compute the full PFN first so truncation into the u32 is detectable. */
- pfn = produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- produce_ppns[i] = pfn;
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*produce_ppns)
- && pfn != produce_ppns[i])
- goto ppn_error;
- }
- for (i = 0; i < num_consume_pages; i++) {
- unsigned long pfn;
- /* Compute the full PFN first so truncation into the u32 is detectable. */
- pfn = consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
- consume_ppns[i] = pfn;
- /* Fail allocation if PFN isn't supported by hypervisor. */
- if (sizeof(pfn) > sizeof(*consume_ppns)
- && pfn != consume_ppns[i])
- goto ppn_error;
- }
- ppn_set->num_produce_pages = num_produce_pages;
- ppn_set->num_consume_pages = num_consume_pages;
- ppn_set->produce_ppns = produce_ppns;
- ppn_set->consume_ppns = consume_ppns;
- ppn_set->initialized = true;
- return VMCI_SUCCESS;
- ppn_error:
- kfree(produce_ppns);
- kfree(consume_ppns);
- return VMCI_ERROR_INVALID_ARGS;
- }
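- /*
- * Worked example (illustrative) for the truncation check above: with
- * PAGE_SHIFT == 12 on a 64-bit host, a DMA address of 0x100000000 gives
- * pfn 0x100000, which fits in the u32 PPN arrays, so the check passes.
- * A hypothetical address of 0x10000000000000 gives pfn 0x10000000000;
- * storing that in a u32 drops the high bits, pfn no longer matches the
- * stored PPN, and the allocation fails with VMCI_ERROR_INVALID_ARGS.
- */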
- /*
- * Frees the two lists of PPNs for a queue pair.
- */
- static void qp_free_ppn_set(struct ppn_set *ppn_set)
- {
- if (ppn_set->initialized) {
- /* Do not call these functions on NULL inputs. */
- kfree(ppn_set->produce_ppns);
- kfree(ppn_set->consume_ppns);
- }
- memset(ppn_set, 0, sizeof(*ppn_set));
- }
- /*
- * Populates the list of PPNs in the hypercall structure with the PPNs
- * of the produce queue and the consume queue.
- */
- static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
- {
- memcpy(call_buf, ppn_set->produce_ppns,
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
- memcpy(call_buf +
- ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
- ppn_set->consume_ppns,
- ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
- return VMCI_SUCCESS;
- }
- static int qp_memcpy_to_queue(struct vmci_queue *queue,
- u64 queue_offset,
- const void *src, size_t src_offset, size_t size)
- {
- return __qp_memcpy_to_queue(queue, queue_offset,
- (u8 *)src + src_offset, size, false);
- }
- static int qp_memcpy_from_queue(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
- {
- return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
- queue, queue_offset, size, false);
- }
- /*
- * Copies from a given iovec into a VMCI Queue.
- */
- static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
- u64 queue_offset,
- const void *msg,
- size_t src_offset, size_t size)
- {
- /*
- * We ignore src_offset because src is really a struct msghdr * and will
- * maintain the offset internally.
- */
- return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
- }
- /*
- * Copies to a given iovec from a VMCI Queue.
- */
- static int qp_memcpy_from_queue_iov(void *dest,
- size_t dest_offset,
- const struct vmci_queue *queue,
- u64 queue_offset, size_t size)
- {
- /*
- * We ignore dest_offset because dest is really a struct msghdr * and
- * will maintain the offset internally.
- */
- return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
- }
- /*
- * Allocates kernel VA space of specified size plus space for the queue
- * and kernel interface. This is different from the guest queue allocator,
- * because we do not allocate our own queue header/data pages here but
- * share those of the guest.
- */
- static struct vmci_queue *qp_host_alloc_queue(u64 size)
- {
- struct vmci_queue *queue;
- size_t queue_page_size;
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
- const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
- if (num_pages > (SIZE_MAX - queue_size) /
- sizeof(*queue->kernel_if->u.h.page))
- return NULL;
- queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
- queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
- if (queue) {
- queue->q_header = NULL;
- queue->saved_header = NULL;
- queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
- queue->kernel_if->host = true;
- queue->kernel_if->mutex = NULL;
- queue->kernel_if->num_pages = num_pages;
- queue->kernel_if->u.h.header_page =
- (struct page **)((u8 *)queue + queue_size);
- queue->kernel_if->u.h.page =
- &queue->kernel_if->u.h.header_page[1];
- }
- return queue;
- }
- /*
- * Frees kernel memory for a given queue (header plus translation
- * structure).
- */
- static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
- {
- kfree(queue);
- }
- /*
- * Initialize the mutex for the pair of queues. This mutex is used to
- * protect the q_header and the buffer from changing out from under any
- * users of either queue. Of course, it's only any good if the mutexes
- * are actually acquired. Queue structure must lie on non-paged memory
- * or we cannot guarantee access to the mutex.
- */
- static void qp_init_queue_mutex(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- /*
- * Only the host queue has shared state - the guest queues do not
- * need to synchronize access using a queue mutex.
- */
- if (produce_q->kernel_if->host) {
- produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
- consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
- mutex_init(produce_q->kernel_if->mutex);
- }
- }
- /*
- * Cleans up the mutex for the pair of queues.
- */
- static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- if (produce_q->kernel_if->host) {
- produce_q->kernel_if->mutex = NULL;
- consume_q->kernel_if->mutex = NULL;
- }
- }
- /*
- * Acquire the mutex for the queue. Note that the produce_q and
- * the consume_q share a mutex. So, only one of the two needs to
- * be passed in to this routine. Either will work just fine.
- */
- static void qp_acquire_queue_mutex(struct vmci_queue *queue)
- {
- if (queue->kernel_if->host)
- mutex_lock(queue->kernel_if->mutex);
- }
- /*
- * Release the mutex for the queue. Note that the produce_q and
- * the consume_q share a mutex. So, only one of the two needs to
- * be passed in to this routine. Either will work just fine.
- */
- static void qp_release_queue_mutex(struct vmci_queue *queue)
- {
- if (queue->kernel_if->host)
- mutex_unlock(queue->kernel_if->mutex);
- }
- /*
- * Helper function to release pages in the PageStoreAttachInfo
- * previously obtained using get_user_pages.
- */
- static void qp_release_pages(struct page **pages,
- u64 num_pages, bool dirty)
- {
- int i;
- for (i = 0; i < num_pages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
- page_cache_release(pages[i]);
- pages[i] = NULL;
- }
- }
- /*
- * Lock the user pages referenced by the {produce,consume}Buffer
- * struct into memory and populate the {produce,consume}Pages
- * arrays in the attach structure with them.
- */
- static int qp_host_get_user_memory(u64 produce_uva,
- u64 consume_uva,
- struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- int retval;
- int err = VMCI_SUCCESS;
- retval = get_user_pages_fast((uintptr_t) produce_uva,
- produce_q->kernel_if->num_pages, 1,
- produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
- pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
- retval);
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
- retval, false);
- err = VMCI_ERROR_NO_MEM;
- goto out;
- }
- retval = get_user_pages_fast((uintptr_t) consume_uva,
- consume_q->kernel_if->num_pages, 1,
- consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
- pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
- retval);
- qp_release_pages(consume_q->kernel_if->u.h.header_page,
- retval, false);
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
- produce_q->kernel_if->num_pages, false);
- err = VMCI_ERROR_NO_MEM;
- }
- out:
- return err;
- }
- /*
- * Registers the specification of the user pages used for backing a queue
- * pair. Enough information to map in pages is stored in the OS specific
- * part of the struct vmci_queue structure.
- */
- static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
- struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- u64 produce_uva;
- u64 consume_uva;
- /*
- * The new style and the old style mapping only differ in
- * whether we get a single UVA or two UVAs, so we split the
- * single UVA range at the appropriate spot.
- */
- produce_uva = page_store->pages;
- consume_uva = page_store->pages +
- produce_q->kernel_if->num_pages * PAGE_SIZE;
- return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
- consume_q);
- }
- /*
- * Releases and removes the references to user pages stored in the attach
- * struct. Pages are released from the page cache and may become
- * swappable again.
- */
- static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
- produce_q->kernel_if->num_pages, true);
- memset(produce_q->kernel_if->u.h.header_page, 0,
- sizeof(*produce_q->kernel_if->u.h.header_page) *
- produce_q->kernel_if->num_pages);
- qp_release_pages(consume_q->kernel_if->u.h.header_page,
- consume_q->kernel_if->num_pages, true);
- memset(consume_q->kernel_if->u.h.header_page, 0,
- sizeof(*consume_q->kernel_if->u.h.header_page) *
- consume_q->kernel_if->num_pages);
- }
- /*
- * Once qp_host_register_user_memory has been performed on a
- * queue, the queue pair headers can be mapped into the
- * kernel. Once mapped, they must be unmapped with
- * qp_host_unmap_queues prior to calling
- * qp_host_unregister_user_memory.
- * Pages are pinned.
- */
- static int qp_host_map_queues(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- int result;
- if (!produce_q->q_header || !consume_q->q_header) {
- struct page *headers[2];
- if (produce_q->q_header != consume_q->q_header)
- return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- if (produce_q->kernel_if->u.h.header_page == NULL ||
- *produce_q->kernel_if->u.h.header_page == NULL)
- return VMCI_ERROR_UNAVAILABLE;
- headers[0] = *produce_q->kernel_if->u.h.header_page;
- headers[1] = *consume_q->kernel_if->u.h.header_page;
- produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
- if (produce_q->q_header != NULL) {
- consume_q->q_header =
- (struct vmci_queue_header *)((u8 *)
- produce_q->q_header +
- PAGE_SIZE);
- result = VMCI_SUCCESS;
- } else {
- pr_warn("vmap failed\n");
- result = VMCI_ERROR_NO_MEM;
- }
- } else {
- result = VMCI_SUCCESS;
- }
- return result;
- }
- /*
- * Unmaps previously mapped queue pair headers from the kernel.
- * Pages are unpinned.
- */
- static int qp_host_unmap_queues(u32 gid,
- struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- if (produce_q->q_header) {
- if (produce_q->q_header < consume_q->q_header)
- vunmap(produce_q->q_header);
- else
- vunmap(consume_q->q_header);
- produce_q->q_header = NULL;
- consume_q->q_header = NULL;
- }
- return VMCI_SUCCESS;
- }
- /*
- * Finds the entry in the list corresponding to a given handle. Assumes
- * that the list is locked.
- */
- static struct qp_entry *qp_list_find(struct qp_list *qp_list,
- struct vmci_handle handle)
- {
- struct qp_entry *entry;
- if (vmci_handle_is_invalid(handle))
- return NULL;
- list_for_each_entry(entry, &qp_list->head, list_item) {
- if (vmci_handle_is_equal(entry->handle, handle))
- return entry;
- }
- return NULL;
- }
- /*
- * Finds the entry in the list corresponding to a given handle.
- */
- static struct qp_guest_endpoint *
- qp_guest_handle_to_entry(struct vmci_handle handle)
- {
- struct qp_guest_endpoint *entry;
- struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
- entry = qp ? container_of(
- qp, struct qp_guest_endpoint, qp) : NULL;
- return entry;
- }
- /*
- * Finds the entry in the list corresponding to a given handle.
- */
- static struct qp_broker_entry *
- qp_broker_handle_to_entry(struct vmci_handle handle)
- {
- struct qp_broker_entry *entry;
- struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
- entry = qp ? container_of(
- qp, struct qp_broker_entry, qp) : NULL;
- return entry;
- }
- /*
- * Dispatches a queue pair event message directly into the local event
- * queue.
- */
- static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
- {
- u32 context_id = vmci_get_context_id();
- struct vmci_event_qp ev;
- ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
- ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
- VMCI_CONTEXT_RESOURCE_ID);
- ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
- ev.msg.event_data.event =
- attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
- ev.payload.peer_id = context_id;
- ev.payload.handle = handle;
- return vmci_event_dispatch(&ev.msg.hdr);
- }
- /*
- * Allocates and initializes a qp_guest_endpoint structure.
- * Allocates a queue_pair rid (and handle) iff the given entry has
- * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
- * are reserved handles. Assumes that the QP list mutex is held
- * by the caller.
- */
- static struct qp_guest_endpoint *
- qp_guest_endpoint_create(struct vmci_handle handle,
- u32 peer,
- u32 flags,
- u64 produce_size,
- u64 consume_size,
- void *produce_q,
- void *consume_q)
- {
- int result;
- struct qp_guest_endpoint *entry;
- /* One page each for the queue headers. */
- const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
- DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
- if (vmci_handle_is_invalid(handle)) {
- u32 context_id = vmci_get_context_id();
- handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
- }
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry) {
- entry->qp.peer = peer;
- entry->qp.flags = flags;
- entry->qp.produce_size = produce_size;
- entry->qp.consume_size = consume_size;
- entry->qp.ref_count = 0;
- entry->num_ppns = num_ppns;
- entry->produce_q = produce_q;
- entry->consume_q = consume_q;
- INIT_LIST_HEAD(&entry->qp.list_item);
- /* Add resource obj */
- result = vmci_resource_add(&entry->resource,
- VMCI_RESOURCE_TYPE_QPAIR_GUEST,
- handle);
- entry->qp.handle = vmci_resource_handle(&entry->resource);
- if ((result != VMCI_SUCCESS) ||
- qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
- pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
- handle.context, handle.resource, result);
- kfree(entry);
- entry = NULL;
- }
- }
- return entry;
- }
- /*
- * Frees a qp_guest_endpoint structure.
- */
- static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
- {
- qp_free_ppn_set(&entry->ppn_set);
- qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
- qp_free_queue(entry->produce_q, entry->qp.produce_size);
- qp_free_queue(entry->consume_q, entry->qp.consume_size);
- /* Unlink from resource hash table and free callback */
- vmci_resource_remove(&entry->resource);
- kfree(entry);
- }
- /*
- * Helper to make a queue_pairAlloc hypercall when the driver is
- * supporting a guest device.
- */
- static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
- {
- struct vmci_qp_alloc_msg *alloc_msg;
- size_t msg_size;
- int result;
- if (!entry || entry->num_ppns <= 2)
- return VMCI_ERROR_INVALID_ARGS;
- msg_size = sizeof(*alloc_msg) +
- (size_t) entry->num_ppns * sizeof(u32);
- alloc_msg = kmalloc(msg_size, GFP_KERNEL);
- if (!alloc_msg)
- return VMCI_ERROR_NO_MEM;
- alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
- VMCI_QUEUEPAIR_ALLOC);
- alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
- alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
- alloc_msg->handle = entry->qp.handle;
- alloc_msg->peer = entry->qp.peer;
- alloc_msg->flags = entry->qp.flags;
- alloc_msg->produce_size = entry->qp.produce_size;
- alloc_msg->consume_size = entry->qp.consume_size;
- alloc_msg->num_ppns = entry->num_ppns;
- result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
- &entry->ppn_set);
- if (result == VMCI_SUCCESS)
- result = vmci_send_datagram(&alloc_msg->hdr);
- kfree(alloc_msg);
- return result;
- }
- /*
- * Helper to make a queue_pairDetach hypercall when the driver is
- * supporting a guest device.
- */
- static int qp_detatch_hypercall(struct vmci_handle handle)
- {
- struct vmci_qp_detach_msg detach_msg;
- detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
- VMCI_QUEUEPAIR_DETACH);
- detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
- detach_msg.hdr.payload_size = sizeof(handle);
- detach_msg.handle = handle;
- return vmci_send_datagram(&detach_msg.hdr);
- }
- /*
- * Adds the given entry to the list. Assumes that the list is locked.
- */
- static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
- {
- if (entry)
- list_add(&entry->list_item, &qp_list->head);
- }
- /*
- * Removes the given entry from the list. Assumes that the list is locked.
- */
- static void qp_list_remove_entry(struct qp_list *qp_list,
- struct qp_entry *entry)
- {
- if (entry)
- list_del(&entry->list_item);
- }
- /*
- * Helper for VMCI queue_pair detach interface. Frees the physical
- * pages for the queue pair.
- */
- static int qp_detatch_guest_work(struct vmci_handle handle)
- {
- int result;
- struct qp_guest_endpoint *entry;
- u32 ref_count = ~0; /* To avoid compiler warning below */
- mutex_lock(&qp_guest_endpoints.mutex);
- entry = qp_guest_handle_to_entry(handle);
- if (!entry) {
- mutex_unlock(&qp_guest_endpoints.mutex);
- return VMCI_ERROR_NOT_FOUND;
- }
- if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
- result = VMCI_SUCCESS;
- if (entry->qp.ref_count > 1) {
- result = qp_notify_peer_local(false, handle);
- /*
- * We can fail to notify a local queuepair
- * because we can't allocate. We still want
- * to release the entry if that happens, so
- * don't bail out yet.
- */
- }
- } else {
- result = qp_detatch_hypercall(handle);
- if (result < VMCI_SUCCESS) {
- /*
- * We failed to notify a non-local queuepair.
- * That other queuepair might still be
- * accessing the shared memory, so don't
- * release the entry yet. It will get cleaned
- * up by VMCIqueue_pair_Exit() if necessary
- * (assuming we are going away, otherwise why
- * did this fail?).
- */
- mutex_unlock(&qp_guest_endpoints.mutex);
- return result;
- }
- }
- /*
- * If we get here then we either failed to notify a local queuepair, or
- * we succeeded in all cases. Release the entry if required.
- */
- entry->qp.ref_count--;
- if (entry->qp.ref_count == 0)
- qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
- /* If we didn't remove the entry, this could change once we unlock. */
- if (entry)
- ref_count = entry->qp.ref_count;
- mutex_unlock(&qp_guest_endpoints.mutex);
- if (ref_count == 0)
- qp_guest_endpoint_destroy(entry);
- return result;
- }
- /*
- * This function handles the actual allocation of a VMCI queue
- * pair guest endpoint. Allocates physical pages for the queue
- * pair. It makes OS dependent calls through generic wrappers.
- */
- static int qp_alloc_guest_work(struct vmci_handle *handle,
- struct vmci_queue **produce_q,
- u64 produce_size,
- struct vmci_queue **consume_q,
- u64 consume_size,
- u32 peer,
- u32 flags,
- u32 priv_flags)
- {
- const u64 num_produce_pages =
- DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
- const u64 num_consume_pages =
- DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
- void *my_produce_q = NULL;
- void *my_consume_q = NULL;
- int result;
- struct qp_guest_endpoint *queue_pair_entry = NULL;
- if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
- return VMCI_ERROR_NO_ACCESS;
- mutex_lock(&qp_guest_endpoints.mutex);
- queue_pair_entry = qp_guest_handle_to_entry(*handle);
- if (queue_pair_entry) {
- if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
- /* Local attach case. */
- if (queue_pair_entry->qp.ref_count > 1) {
- pr_devel("Error attempting to attach more than once\n");
- result = VMCI_ERROR_UNAVAILABLE;
- goto error_keep_entry;
- }
- if (queue_pair_entry->qp.produce_size != consume_size ||
- queue_pair_entry->qp.consume_size !=
- produce_size ||
- queue_pair_entry->qp.flags !=
- (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
- pr_devel("Error mismatched queue pair in local attach\n");
- result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
- goto error_keep_entry;
- }
- /*
- * Do a local attach. We swap the consume and
- * produce queues for the attacher and deliver
- * an attach event.
- */
- result = qp_notify_peer_local(true, *handle);
- if (result < VMCI_SUCCESS)
- goto error_keep_entry;
- my_produce_q = queue_pair_entry->consume_q;
- my_consume_q = queue_pair_entry->produce_q;
- goto out;
- }
- result = VMCI_ERROR_ALREADY_EXISTS;
- goto error_keep_entry;
- }
- my_produce_q = qp_alloc_queue(produce_size, flags);
- if (!my_produce_q) {
- pr_warn("Error allocating pages for produce queue\n");
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- my_consume_q = qp_alloc_queue(consume_size, flags);
- if (!my_consume_q) {
- pr_warn("Error allocating pages for consume queue\n");
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
- produce_size, consume_size,
- my_produce_q, my_consume_q);
- if (!queue_pair_entry) {
- pr_warn("Error allocating memory in %s\n", __func__);
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
- num_consume_pages,
- &queue_pair_entry->ppn_set);
- if (result < VMCI_SUCCESS) {
- pr_warn("qp_alloc_ppn_set failed\n");
- goto error;
- }
- /*
- * It's only necessary to notify the host if this queue pair will be
- * attached to from another context.
- */
- if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
- /* Local create case. */
- u32 context_id = vmci_get_context_id();
- /*
- * Enforce similar checks on local queue pairs as we
- * do for regular ones. The handle's context must
- * match the creator or attacher context id (here they
- * are both the current context id) and the
- * attach-only flag cannot exist during create. We
- * also ensure specified peer is this context or an
- * invalid one.
- */
- if (queue_pair_entry->qp.handle.context != context_id ||
- (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
- queue_pair_entry->qp.peer != context_id)) {
- result = VMCI_ERROR_NO_ACCESS;
- goto error;
- }
- if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
- result = VMCI_ERROR_NOT_FOUND;
- goto error;
- }
- } else {
- result = qp_alloc_hypercall(queue_pair_entry);
- if (result < VMCI_SUCCESS) {
- pr_warn("qp_alloc_hypercall result = %d\n", result);
- goto error;
- }
- }
- qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
- (struct vmci_queue *)my_consume_q);
- qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
- out:
- queue_pair_entry->qp.ref_count++;
- *handle = queue_pair_entry->qp.handle;
- *produce_q = (struct vmci_queue *)my_produce_q;
- *consume_q = (struct vmci_queue *)my_consume_q;
- /*
- * We should initialize the queue pair header pages on a local
- * queue pair create. For non-local queue pairs, the
- * hypervisor initializes the header pages in the create step.
- */
- if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
- queue_pair_entry->qp.ref_count == 1) {
- vmci_q_header_init((*produce_q)->q_header, *handle);
- vmci_q_header_init((*consume_q)->q_header, *handle);
- }
- mutex_unlock(&qp_guest_endpoints.mutex);
- return VMCI_SUCCESS;
- error:
- mutex_unlock(&qp_guest_endpoints.mutex);
- if (queue_pair_entry) {
- /* The queues will be freed inside the destroy routine. */
- qp_guest_endpoint_destroy(queue_pair_entry);
- } else {
- qp_free_queue(my_produce_q, produce_size);
- qp_free_queue(my_consume_q, consume_size);
- }
- return result;
- error_keep_entry:
- /* This path should only be used when an existing entry was found. */
- mutex_unlock(&qp_guest_endpoints.mutex);
- return result;
- }
- /*
- * The first endpoint issuing a queue pair allocation will create the state
- * of the queue pair in the queue pair broker.
- *
- * If the creator is a guest, it will associate a VMX virtual address range
- * with the queue pair as specified by the page_store. For compatibility with
- * older VMX'en that use a separate step to set the VMX virtual
- * address range, the virtual address range can be registered later using
- * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
- * used.
- *
- * If the creator is the host, a page_store of NULL should be used as well,
- * since the host is not able to supply a page store for the queue pair.
- *
- * For older VMX and host callers, the queue pair will be created in the
- * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
- * created in the VMCIQPB_CREATED_MEM state.
- */
- static int qp_broker_create(struct vmci_handle handle,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- u64 produce_size,
- u64 consume_size,
- struct vmci_qp_page_store *page_store,
- struct vmci_ctx *context,
- vmci_event_release_cb wakeup_cb,
- void *client_data, struct qp_broker_entry **ent)
- {
- struct qp_broker_entry *entry = NULL;
- const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = flags & VMCI_QPFLAG_LOCAL;
- int result;
- u64 guest_produce_size;
- u64 guest_consume_size;
- /* Do not create if the caller asked not to. */
- if (flags & VMCI_QPFLAG_ATTACH_ONLY)
- return VMCI_ERROR_NOT_FOUND;
- /*
- * Creator's context ID should match handle's context ID or the creator
- * must allow the context in handle's context ID as the "peer".
- */
- if (handle.context != context_id && handle.context != peer)
- return VMCI_ERROR_NO_ACCESS;
- if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
- return VMCI_ERROR_DST_UNREACHABLE;
- /*
- * Creator's context ID for local queue pairs should match the
- * peer, if a peer is specified.
- */
- if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
- return VMCI_ERROR_NO_ACCESS;
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return VMCI_ERROR_NO_MEM;
- if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
- /*
- * The queue pair broker entry stores values from the guest
- * point of view, so a creating host side endpoint should swap
- * produce and consume values -- unless it is a local queue
- * pair, in which case no swapping is necessary, since the local
- * attacher will swap queues.
- */
- guest_produce_size = consume_size;
- guest_consume_size = produce_size;
- } else {
- guest_produce_size = produce_size;
- guest_consume_size = consume_size;
- }
- entry->qp.handle = handle;
- entry->qp.peer = peer;
- entry->qp.flags = flags;
- entry->qp.produce_size = guest_produce_size;
- entry->qp.consume_size = guest_consume_size;
- entry->qp.ref_count = 1;
- entry->create_id = context_id;
- entry->attach_id = VMCI_INVALID_ID;
- entry->state = VMCIQPB_NEW;
- entry->require_trusted_attach =
- !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
- entry->created_by_trusted =
- !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
- entry->vmci_page_files = false;
- entry->wakeup_cb = wakeup_cb;
- entry->client_data = client_data;
- entry->produce_q = qp_host_alloc_queue(guest_produce_size);
- if (entry->produce_q == NULL) {
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- entry->consume_q = qp_host_alloc_queue(guest_consume_size);
- if (entry->consume_q == NULL) {
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- qp_init_queue_mutex(entry->produce_q, entry->consume_q);
- INIT_LIST_HEAD(&entry->qp.list_item);
- if (is_local) {
- u8 *tmp;
- entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
- PAGE_SIZE, GFP_KERNEL);
- if (entry->local_mem == NULL) {
- result = VMCI_ERROR_NO_MEM;
- goto error;
- }
- entry->state = VMCIQPB_CREATED_MEM;
- entry->produce_q->q_header = entry->local_mem;
- tmp = (u8 *)entry->local_mem + PAGE_SIZE *
- (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
- entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
- } else if (page_store) {
- /*
- * The VMX already initialized the queue pair headers, so no
- * need for the kernel side to do that.
- */
- result = qp_host_register_user_memory(page_store,
- entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS)
- goto error;
- entry->state = VMCIQPB_CREATED_MEM;
- } else {
- /*
- * A create without a page_store may be either a host
- * side create (in which case we are waiting for the
- * guest side to supply the memory) or an old style
- * queue pair create (in which case we will expect a
- * set page store call as the next step).
- */
- entry->state = VMCIQPB_CREATED_NO_MEM;
- }
- qp_list_add_entry(&qp_broker_list, &entry->qp);
- if (ent != NULL)
- *ent = entry;
- /* Add to resource obj */
- result = vmci_resource_add(&entry->resource,
- VMCI_RESOURCE_TYPE_QPAIR_HOST,
- handle);
- if (result != VMCI_SUCCESS) {
- pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
- handle.context, handle.resource, result);
- goto error;
- }
- entry->qp.handle = vmci_resource_handle(&entry->resource);
- if (is_local) {
- vmci_q_header_init(entry->produce_q->q_header,
- entry->qp.handle);
- vmci_q_header_init(entry->consume_q->q_header,
- entry->qp.handle);
- }
- vmci_ctx_qp_create(context, entry->qp.handle);
- return VMCI_SUCCESS;
- error:
- if (entry != NULL) {
- qp_host_free_queue(entry->produce_q, guest_produce_size);
- qp_host_free_queue(entry->consume_q, guest_consume_size);
- kfree(entry);
- }
- return result;
- }
- /*
- * Enqueues an event datagram to notify the peer VM attached to
- * the given queue pair handle about an attach/detach event by the
- * given VM. Returns the payload size of the datagram enqueued on
- * success, or an error code otherwise.
- */
- static int qp_notify_peer(bool attach,
- struct vmci_handle handle,
- u32 my_id,
- u32 peer_id)
- {
- int rv;
- struct vmci_event_qp ev;
- if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
- peer_id == VMCI_INVALID_ID)
- return VMCI_ERROR_INVALID_ARGS;
- /*
- * In vmci_ctx_enqueue_datagram() we enforce the upper limit on the
- * number of pending events from the hypervisor to a given VM;
- * otherwise a rogue VM could do an arbitrary number of attach
- * and detach operations causing memory pressure in the host
- * kernel.
- */
- ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
- ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
- VMCI_CONTEXT_RESOURCE_ID);
- ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
- ev.msg.event_data.event = attach ?
- VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
- ev.payload.handle = handle;
- ev.payload.peer_id = my_id;
- rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
- &ev.msg.hdr, false);
- if (rv < VMCI_SUCCESS)
- pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
- attach ? "ATTACH" : "DETACH", peer_id);
- return rv;
- }
- /*
- * The second endpoint issuing a queue pair allocation will attach to
- * the queue pair registered with the queue pair broker.
- *
- * If the attacher is a guest, it will associate a VMX virtual address
- * range with the queue pair as specified by the page_store. At this
- * point, the already attached host endpoint may start using the queue
- * pair, and an attach event is sent to it. For compatibility with
- * older VMX'en that used a separate step to set the VMX virtual
- * address range, the virtual address range can be registered later
- * using vmci_qp_broker_set_page_store. In that case, a page_store of
- * NULL should be used, and the attach event will be generated once
- * the actual page store has been set.
- *
- * If the attacher is the host, a page_store of NULL should be used as
- * well, since the page store information is already set by the guest.
- *
- * For new VMX and host callers, the queue pair will be moved to the
- * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
- * moved to the VMCIQPB_ATTACHED_NO_MEM state.
- */
- static int qp_broker_attach(struct qp_broker_entry *entry,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- u64 produce_size,
- u64 consume_size,
- struct vmci_qp_page_store *page_store,
- struct vmci_ctx *context,
- vmci_event_release_cb wakeup_cb,
- void *client_data,
- struct qp_broker_entry **ent)
- {
- const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = flags & VMCI_QPFLAG_LOCAL;
- int result;
- if (entry->state != VMCIQPB_CREATED_NO_MEM &&
- entry->state != VMCIQPB_CREATED_MEM)
- return VMCI_ERROR_UNAVAILABLE;
- if (is_local) {
- if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
- context_id != entry->create_id) {
- return VMCI_ERROR_INVALID_ARGS;
- }
- } else if (context_id == entry->create_id ||
- context_id == entry->attach_id) {
- return VMCI_ERROR_ALREADY_EXISTS;
- }
- if (VMCI_CONTEXT_IS_VM(context_id) &&
- VMCI_CONTEXT_IS_VM(entry->create_id))
- return VMCI_ERROR_DST_UNREACHABLE;
- /*
- * If we are attaching from a restricted context then the queuepair
- * must have been created by a trusted endpoint.
- */
- if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
- !entry->created_by_trusted)
- return VMCI_ERROR_NO_ACCESS;
- /*
- * If we are attaching to a queuepair that was created by a restricted
- * context then we must be trusted.
- */
- if (entry->require_trusted_attach &&
- (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
- return VMCI_ERROR_NO_ACCESS;
- /*
- * If the creator specifies VMCI_INVALID_ID in "peer" field, access
- * control check is not performed.
- */
- if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
- return VMCI_ERROR_NO_ACCESS;
- if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
- /*
- * Do not attach if the caller doesn't support Host Queue Pairs
- * and a host created this queue pair.
- */
- if (!vmci_ctx_supports_host_qp(context))
- return VMCI_ERROR_INVALID_RESOURCE;
- } else if (context_id == VMCI_HOST_CONTEXT_ID) {
- struct vmci_ctx *create_context;
- bool supports_host_qp;
- /*
- * Do not attach a host to a user created queue pair if that
- * user doesn't support host queue pair end points.
- */
- create_context = vmci_ctx_get(entry->create_id);
- supports_host_qp = vmci_ctx_supports_host_qp(create_context);
- vmci_ctx_put(create_context);
- if (!supports_host_qp)
- return VMCI_ERROR_INVALID_RESOURCE;
- }
- if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
- return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- if (context_id != VMCI_HOST_CONTEXT_ID) {
- /*
- * The queue pair broker entry stores values from the guest
- * point of view, so an attaching guest should match the values
- * stored in the entry.
- */
- if (entry->qp.produce_size != produce_size ||
- entry->qp.consume_size != consume_size) {
- return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- }
- } else if (entry->qp.produce_size != consume_size ||
- entry->qp.consume_size != produce_size) {
- return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- }
- if (context_id != VMCI_HOST_CONTEXT_ID) {
- /*
- * If a guest attached to a queue pair, it will supply
- * the backing memory. If this is a pre NOVMVM vmx,
- * the backing memory will be supplied by calling
- * vmci_qp_broker_set_page_store() following the
- * return of the vmci_qp_broker_alloc() call. If it is
- * a vmx of version NOVMVM or later, the page store
- * must be supplied as part of the
- * vmci_qp_broker_alloc call. In all cases, the initially
- * created queue pair must not already have any memory
- * associated with it.
- */
- if (entry->state != VMCIQPB_CREATED_NO_MEM)
- return VMCI_ERROR_INVALID_ARGS;
- if (page_store != NULL) {
- /*
- * Patch up host state to point to guest
- * supplied memory. The VMX already
- * initialized the queue pair headers, so no
- * need for the kernel side to do that.
- */
- result = qp_host_register_user_memory(page_store,
- entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS)
- return result;
- entry->state = VMCIQPB_ATTACHED_MEM;
- } else {
- entry->state = VMCIQPB_ATTACHED_NO_MEM;
- }
- } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
- /*
- * The host side is attempting to attach to a queue
- * pair that doesn't have any memory associated with
- * it. This must be a pre NOVMVM vmx that hasn't set
- * the page store information yet, or a quiesced VM.
- */
- return VMCI_ERROR_UNAVAILABLE;
- } else {
- /* The host side has successfully attached to a queue pair. */
- entry->state = VMCIQPB_ATTACHED_MEM;
- }
- if (entry->state == VMCIQPB_ATTACHED_MEM) {
- result =
- qp_notify_peer(true, entry->qp.handle, context_id,
- entry->create_id);
- if (result < VMCI_SUCCESS)
- pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
- entry->create_id, entry->qp.handle.context,
- entry->qp.handle.resource);
- }
- entry->attach_id = context_id;
- entry->qp.ref_count++;
- if (wakeup_cb) {
- entry->wakeup_cb = wakeup_cb;
- entry->client_data = client_data;
- }
- /*
- * When attaching to local queue pairs, the context already has
- * an entry tracking the queue pair, so don't add another one.
- */
- if (!is_local)
- vmci_ctx_qp_create(context, entry->qp.handle);
- if (ent != NULL)
- *ent = entry;
- return VMCI_SUCCESS;
- }
- /*
- * Queue pair alloc for use when setting up queue pair endpoints
- * on the host.
- */
- static int qp_broker_alloc(struct vmci_handle handle,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- u64 produce_size,
- u64 consume_size,
- struct vmci_qp_page_store *page_store,
- struct vmci_ctx *context,
- vmci_event_release_cb wakeup_cb,
- void *client_data,
- struct qp_broker_entry **ent,
- bool *swap)
- {
- const u32 context_id = vmci_ctx_get_id(context);
- bool create;
- struct qp_broker_entry *entry = NULL;
- bool is_local = flags & VMCI_QPFLAG_LOCAL;
- int result;
- if (vmci_handle_is_invalid(handle) ||
- (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
- !(produce_size || consume_size) ||
- !context || context_id == VMCI_INVALID_ID ||
- handle.context == VMCI_INVALID_ID) {
- return VMCI_ERROR_INVALID_ARGS;
- }
- if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
- return VMCI_ERROR_INVALID_ARGS;
- /*
- * In the initial argument check, we ensure that non-vmkernel hosts
- * are not allowed to create local queue pairs.
- */
- mutex_lock(&qp_broker_list.mutex);
- if (!is_local && vmci_ctx_qp_exists(context, handle)) {
- pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
- context_id, handle.context, handle.resource);
- mutex_unlock(&qp_broker_list.mutex);
- return VMCI_ERROR_ALREADY_EXISTS;
- }
- if (handle.resource != VMCI_INVALID_ID)
- entry = qp_broker_handle_to_entry(handle);
- if (!entry) {
- create = true;
- result =
- qp_broker_create(handle, peer, flags, priv_flags,
- produce_size, consume_size, page_store,
- context, wakeup_cb, client_data, ent);
- } else {
- create = false;
- result =
- qp_broker_attach(entry, peer, flags, priv_flags,
- produce_size, consume_size, page_store,
- context, wakeup_cb, client_data, ent);
- }
- mutex_unlock(&qp_broker_list.mutex);
- if (swap)
- *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
- !(create && is_local);
- return result;
- }
- /*
- * This function implements the kernel API for allocating a queue
- * pair.
- */
- static int qp_alloc_host_work(struct vmci_handle *handle,
- struct vmci_queue **produce_q,
- u64 produce_size,
- struct vmci_queue **consume_q,
- u64 consume_size,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- vmci_event_release_cb wakeup_cb,
- void *client_data)
- {
- struct vmci_handle new_handle;
- struct vmci_ctx *context;
- struct qp_broker_entry *entry;
- int result;
- bool swap;
- if (vmci_handle_is_invalid(*handle)) {
- new_handle = vmci_make_handle(
- VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
- } else
- new_handle = *handle;
- context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
- entry = NULL;
- result =
- qp_broker_alloc(new_handle, peer, flags, priv_flags,
- produce_size, consume_size, NULL, context,
- wakeup_cb, client_data, &entry, &swap);
- if (result == VMCI_SUCCESS) {
- if (swap) {
- /*
- * If this is a local queue pair, the attacher
- * will swap around produce and consume
- * queues.
- */
- *produce_q = entry->consume_q;
- *consume_q = entry->produce_q;
- } else {
- *produce_q = entry->produce_q;
- *consume_q = entry->consume_q;
- }
- *handle = vmci_resource_handle(&entry->resource);
- } else {
- *handle = VMCI_INVALID_HANDLE;
- pr_devel("queue pair broker failed to alloc (result=%d)\n",
- result);
- }
- vmci_ctx_put(context);
- return result;
- }
- /*
- * Allocates a VMCI queue_pair. Only checks validity of input
- * arguments. The real work is done in the host or guest
- * specific function.
- */
- int vmci_qp_alloc(struct vmci_handle *handle,
- struct vmci_queue **produce_q,
- u64 produce_size,
- struct vmci_queue **consume_q,
- u64 consume_size,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- bool guest_endpoint,
- vmci_event_release_cb wakeup_cb,
- void *client_data)
- {
- if (!handle || !produce_q || !consume_q ||
- (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
- return VMCI_ERROR_INVALID_ARGS;
- if (guest_endpoint) {
- return qp_alloc_guest_work(handle, produce_q,
- produce_size, consume_q,
- consume_size, peer,
- flags, priv_flags);
- } else {
- return qp_alloc_host_work(handle, produce_q,
- produce_size, consume_q,
- consume_size, peer, flags,
- priv_flags, wakeup_cb, client_data);
- }
- }
- /*
- * This function implements the host kernel API for detaching from
- * a queue pair.
- */
- static int qp_detatch_host_work(struct vmci_handle handle)
- {
- int result;
- struct vmci_ctx *context;
- context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
- result = vmci_qp_broker_detach(handle, context);
- vmci_ctx_put(context);
- return result;
- }
- /*
- * Detaches from a VMCI queue_pair. Only checks validity of input argument.
- * Real work is done in the host or guest specific function.
- */
- static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
- {
- if (vmci_handle_is_invalid(handle))
- return VMCI_ERROR_INVALID_ARGS;
- if (guest_endpoint)
- return qp_detatch_guest_work(handle);
- else
- return qp_detatch_host_work(handle);
- }
- /*
- * Returns the entry from the head of the list. Assumes that the list is
- * locked.
- */
- static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
- {
- if (!list_empty(&qp_list->head)) {
- struct qp_entry *entry =
- list_first_entry(&qp_list->head, struct qp_entry,
- list_item);
- return entry;
- }
- return NULL;
- }
- void vmci_qp_broker_exit(void)
- {
- struct qp_entry *entry;
- struct qp_broker_entry *be;
- mutex_lock(&qp_broker_list.mutex);
- while ((entry = qp_list_get_head(&qp_broker_list))) {
- be = (struct qp_broker_entry *)entry;
- qp_list_remove_entry(&qp_broker_list, entry);
- kfree(be);
- }
- mutex_unlock(&qp_broker_list.mutex);
- }
- /*
- * Requests that a queue pair be allocated with the VMCI queue
- * pair broker. Allocates a queue pair entry if one does not
- * exist. Attaches to one if it exists, and retrieves the page
- * files backing that queue_pair. Assumes that the queue pair
- * broker lock is held.
- */
- int vmci_qp_broker_alloc(struct vmci_handle handle,
- u32 peer,
- u32 flags,
- u32 priv_flags,
- u64 produce_size,
- u64 consume_size,
- struct vmci_qp_page_store *page_store,
- struct vmci_ctx *context)
- {
- return qp_broker_alloc(handle, peer, flags, priv_flags,
- produce_size, consume_size,
- page_store, context, NULL, NULL, NULL, NULL);
- }
- /*
- * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
- * step to add the UVAs of the VMX mapping of the queue pair. This function
- * provides backwards compatibility with such VMX'en, and takes care of
- * registering the page store for a queue pair previously allocated by the
- * VMX during create or attach. This function will move the queue pair state
- * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
- * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
- * attached state with memory, the queue pair is ready to be used by the
- * host peer, and an attached event will be generated.
- *
- * Assumes that the queue pair broker lock is held.
- *
- * This function is only used by the hosted platform, since there is no
- * issue with backwards compatibility for vmkernel.
- */
- int vmci_qp_broker_set_page_store(struct vmci_handle handle,
- u64 produce_uva,
- u64 consume_uva,
- struct vmci_ctx *context)
- {
- struct qp_broker_entry *entry;
- int result;
- const u32 context_id = vmci_ctx_get_id(context);
- if (vmci_handle_is_invalid(handle) || !context ||
- context_id == VMCI_INVALID_ID)
- return VMCI_ERROR_INVALID_ARGS;
- /*
- * We only support guest to host queue pairs, so the VMX must
- * supply UVAs for the mapped page files.
- */
- if (produce_uva == 0 || consume_uva == 0)
- return VMCI_ERROR_INVALID_ARGS;
- mutex_lock(&qp_broker_list.mutex);
- if (!vmci_ctx_qp_exists(context, handle)) {
- pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- entry = qp_broker_handle_to_entry(handle);
- if (!entry) {
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- /*
- * If I'm the owner then I can set the page store.
- *
- * Or, if a host created the queue_pair and I'm the attached peer
- * then I can set the page store.
- */
- if (entry->create_id != context_id &&
- (entry->create_id != VMCI_HOST_CONTEXT_ID ||
- entry->attach_id != context_id)) {
- result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
- goto out;
- }
- if (entry->state != VMCIQPB_CREATED_NO_MEM &&
- entry->state != VMCIQPB_ATTACHED_NO_MEM) {
- result = VMCI_ERROR_UNAVAILABLE;
- goto out;
- }
- result = qp_host_get_user_memory(produce_uva, consume_uva,
- entry->produce_q, entry->consume_q);
- if (result < VMCI_SUCCESS)
- goto out;
- result = qp_host_map_queues(entry->produce_q, entry->consume_q);
- if (result < VMCI_SUCCESS) {
- qp_host_unregister_user_memory(entry->produce_q,
- entry->consume_q);
- goto out;
- }
- if (entry->state == VMCIQPB_CREATED_NO_MEM)
- entry->state = VMCIQPB_CREATED_MEM;
- else
- entry->state = VMCIQPB_ATTACHED_MEM;
- entry->vmci_page_files = true;
- if (entry->state == VMCIQPB_ATTACHED_MEM) {
- result =
- qp_notify_peer(true, handle, context_id, entry->create_id);
- if (result < VMCI_SUCCESS) {
- pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
- entry->create_id, entry->qp.handle.context,
- entry->qp.handle.resource);
- }
- }
- result = VMCI_SUCCESS;
- out:
- mutex_unlock(&qp_broker_list.mutex);
- return result;
- }
- /*
- * Resets saved queue headers for the given QP broker
- * entry. Should be used when guest memory becomes available
- * again, or the guest detaches.
- */
- static void qp_reset_saved_headers(struct qp_broker_entry *entry)
- {
- entry->produce_q->saved_header = NULL;
- entry->consume_q->saved_header = NULL;
- }
- /*
- * The main entry point for detaching from a queue pair registered with the
- * queue pair broker. If more than one endpoint is attached to the queue
- * pair, the first endpoint will mainly decrement a reference count and
- * generate a notification to its peer. The last endpoint will clean up
- * the queue pair state registered with the broker.
- *
- * When a guest endpoint detaches, it will unmap and unregister the guest
- * memory backing the queue pair. If the host is still attached, it will
- * no longer be able to access the queue pair content.
- *
- * If the queue pair is already in a state where there is no memory
- * registered for the queue pair (any *_NO_MEM state), it will transition to
- * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
- * endpoint is the first of two endpoints to detach. If the host endpoint is
- * the first out of two to detach, the queue pair will move to the
- * VMCIQPB_SHUTDOWN_MEM state.
- */
- int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
- {
- struct qp_broker_entry *entry;
- const u32 context_id = vmci_ctx_get_id(context);
- u32 peer_id;
- bool is_local = false;
- int result;
- if (vmci_handle_is_invalid(handle) || !context ||
- context_id == VMCI_INVALID_ID) {
- return VMCI_ERROR_INVALID_ARGS;
- }
- mutex_lock(&qp_broker_list.mutex);
- if (!vmci_ctx_qp_exists(context, handle)) {
- pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- entry = qp_broker_handle_to_entry(handle);
- if (!entry) {
- pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- if (context_id != entry->create_id && context_id != entry->attach_id) {
- result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
- goto out;
- }
- if (context_id == entry->create_id) {
- peer_id = entry->attach_id;
- entry->create_id = VMCI_INVALID_ID;
- } else {
- peer_id = entry->create_id;
- entry->attach_id = VMCI_INVALID_ID;
- }
- entry->qp.ref_count--;
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
- if (context_id != VMCI_HOST_CONTEXT_ID) {
- bool headers_mapped;
- /*
- * Pre NOVMVM vmx'en may detach from a queue pair
- * before setting the page store, and in that case
- * there is no user memory to detach from. Also, more
- * recent VMX'en may detach from a queue pair in the
- * quiesced state.
- */
- qp_acquire_queue_mutex(entry->produce_q);
- headers_mapped = entry->produce_q->q_header ||
- entry->consume_q->q_header;
- if (QPBROKERSTATE_HAS_MEM(entry)) {
- result =
- qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
- entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS)
- pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
- handle.context, handle.resource,
- result);
- qp_host_unregister_user_memory(entry->produce_q,
- entry->consume_q);
- }
- if (!headers_mapped)
- qp_reset_saved_headers(entry);
- qp_release_queue_mutex(entry->produce_q);
- if (!headers_mapped && entry->wakeup_cb)
- entry->wakeup_cb(entry->client_data);
- } else {
- if (entry->wakeup_cb) {
- entry->wakeup_cb = NULL;
- entry->client_data = NULL;
- }
- }
- if (entry->qp.ref_count == 0) {
- qp_list_remove_entry(&qp_broker_list, &entry->qp);
- if (is_local)
- kfree(entry->local_mem);
- qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
- qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
- qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
- /* Unlink from resource hash table and free callback */
- vmci_resource_remove(&entry->resource);
- kfree(entry);
- vmci_ctx_qp_destroy(context, handle);
- } else {
- qp_notify_peer(false, handle, context_id, peer_id);
- if (context_id == VMCI_HOST_CONTEXT_ID &&
- QPBROKERSTATE_HAS_MEM(entry)) {
- entry->state = VMCIQPB_SHUTDOWN_MEM;
- } else {
- entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
- }
- if (!is_local)
- vmci_ctx_qp_destroy(context, handle);
- }
- result = VMCI_SUCCESS;
- out:
- mutex_unlock(&qp_broker_list.mutex);
- return result;
- }
- /*
- * Establishes the necessary mappings for a queue pair given a
- * reference to the queue pair guest memory. This is usually
- * called when a guest is unquiesced and the VMX is allowed to
- * map guest memory once again.
- */
- int vmci_qp_broker_map(struct vmci_handle handle,
- struct vmci_ctx *context,
- u64 guest_mem)
- {
- struct qp_broker_entry *entry;
- const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
- int result;
- if (vmci_handle_is_invalid(handle) || !context ||
- context_id == VMCI_INVALID_ID)
- return VMCI_ERROR_INVALID_ARGS;
- mutex_lock(&qp_broker_list.mutex);
- if (!vmci_ctx_qp_exists(context, handle)) {
- pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- entry = qp_broker_handle_to_entry(handle);
- if (!entry) {
- pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- if (context_id != entry->create_id && context_id != entry->attach_id) {
- result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
- goto out;
- }
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
- result = VMCI_SUCCESS;
- if (context_id != VMCI_HOST_CONTEXT_ID) {
- struct vmci_qp_page_store page_store;
- page_store.pages = guest_mem;
- page_store.len = QPE_NUM_PAGES(entry->qp);
- qp_acquire_queue_mutex(entry->produce_q);
- qp_reset_saved_headers(entry);
- result =
- qp_host_register_user_memory(&page_store,
- entry->produce_q,
- entry->consume_q);
- qp_release_queue_mutex(entry->produce_q);
- if (result == VMCI_SUCCESS) {
- /* Move state from *_NO_MEM to *_MEM */
- entry->state++;
- if (entry->wakeup_cb)
- entry->wakeup_cb(entry->client_data);
- }
- }
- out:
- mutex_unlock(&qp_broker_list.mutex);
- return result;
- }
- /*
- * Saves a snapshot of the queue headers for the given QP broker
- * entry. Should be used when guest memory is unmapped.
- * Results:
- * VMCI_SUCCESS on success, appropriate error code if guest memory
- * can't be accessed.
- */
- static int qp_save_headers(struct qp_broker_entry *entry)
- {
- int result;
- if (entry->produce_q->saved_header != NULL &&
- entry->consume_q->saved_header != NULL) {
- /*
- * If the headers have already been saved, we don't need to do
- * it again, and we don't want to map in the headers
- * unnecessarily.
- */
- return VMCI_SUCCESS;
- }
- if (NULL == entry->produce_q->q_header ||
- NULL == entry->consume_q->q_header) {
- result = qp_host_map_queues(entry->produce_q, entry->consume_q);
- if (result < VMCI_SUCCESS)
- return result;
- }
- memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
- sizeof(entry->saved_produce_q));
- entry->produce_q->saved_header = &entry->saved_produce_q;
- memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
- sizeof(entry->saved_consume_q));
- entry->consume_q->saved_header = &entry->saved_consume_q;
- return VMCI_SUCCESS;
- }
- /*
- * Removes all references to the guest memory of a given queue pair, and
- * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
- * called when a VM is being quiesced and access to guest memory should
- * be avoided.
- */
- int vmci_qp_broker_unmap(struct vmci_handle handle,
- struct vmci_ctx *context,
- u32 gid)
- {
- struct qp_broker_entry *entry;
- const u32 context_id = vmci_ctx_get_id(context);
- bool is_local = false;
- int result;
- if (vmci_handle_is_invalid(handle) || !context ||
- context_id == VMCI_INVALID_ID)
- return VMCI_ERROR_INVALID_ARGS;
- mutex_lock(&qp_broker_list.mutex);
- if (!vmci_ctx_qp_exists(context, handle)) {
- pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- entry = qp_broker_handle_to_entry(handle);
- if (!entry) {
- pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
- context_id, handle.context, handle.resource);
- result = VMCI_ERROR_NOT_FOUND;
- goto out;
- }
- if (context_id != entry->create_id && context_id != entry->attach_id) {
- result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
- goto out;
- }
- is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
- if (context_id != VMCI_HOST_CONTEXT_ID) {
- qp_acquire_queue_mutex(entry->produce_q);
- result = qp_save_headers(entry);
- if (result < VMCI_SUCCESS)
- pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
- handle.context, handle.resource, result);
- qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
- /*
- * On hosted, when we unmap queue pairs, the VMX will also
- * unmap the guest memory, so we invalidate the previously
- * registered memory. If the queue pair is mapped again at a
- * later point in time, we will need to reregister the user
- * memory with a possibly new user VA.
- */
- qp_host_unregister_user_memory(entry->produce_q,
- entry->consume_q);
- /*
- * Move state from *_MEM to *_NO_MEM.
- */
- entry->state--;
- qp_release_queue_mutex(entry->produce_q);
- }
- result = VMCI_SUCCESS;
- out:
- mutex_unlock(&qp_broker_list.mutex);
- return result;
- }
- /*
- * Destroys all guest queue pair endpoints. If active guest queue
- * pairs still exist, hypercalls to attempt detach from these
- * queue pairs will be made. Any failure to detach is silently
- * ignored.
- */
- void vmci_qp_guest_endpoints_exit(void)
- {
- struct qp_entry *entry;
- struct qp_guest_endpoint *ep;
- mutex_lock(&qp_guest_endpoints.mutex);
- while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
- ep = (struct qp_guest_endpoint *)entry;
- /* Don't make a hypercall for local queue_pairs. */
- if (!(entry->flags & VMCI_QPFLAG_LOCAL))
- qp_detatch_hypercall(entry->handle);
- /* We cannot fail the exit, so let's reset ref_count. */
- entry->ref_count = 0;
- qp_list_remove_entry(&qp_guest_endpoints, entry);
- qp_guest_endpoint_destroy(ep);
- }
- mutex_unlock(&qp_guest_endpoints.mutex);
- }
- /*
- * Helper routine that will lock the queue pair before subsequent
- * operations.
- * Note: Non-blocking on the host side is currently only implemented in ESX.
- * Since non-blocking isn't yet implemented on the host personality, we
- * have no reason to acquire a spin lock. So, to avoid the use of an
- * unnecessary lock, only acquire the mutex if we can block.
- */
- static void qp_lock(const struct vmci_qp *qpair)
- {
- qp_acquire_queue_mutex(qpair->produce_q);
- }
- /*
- * Helper routine that unlocks the queue pair after calling
- * qp_lock.
- */
- static void qp_unlock(const struct vmci_qp *qpair)
- {
- qp_release_queue_mutex(qpair->produce_q);
- }
- /*
- * The queue headers may not be mapped at all times. If a queue is
- * currently not mapped, an attempt is made to map it here.
- */
- static int qp_map_queue_headers(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q)
- {
- int result;
- if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
- result = qp_host_map_queues(produce_q, consume_q);
- if (result < VMCI_SUCCESS)
- return (produce_q->saved_header &&
- consume_q->saved_header) ?
- VMCI_ERROR_QUEUEPAIR_NOT_READY :
- VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
- }
- return VMCI_SUCCESS;
- }
- /*
- * Helper routine that will retrieve the produce and consume
- * headers of a given queue pair. If the guest memory of the
- * queue pair is currently not available, the saved queue headers
- * will be returned, if these are available.
- */
- static int qp_get_queue_headers(const struct vmci_qp *qpair,
- struct vmci_queue_header **produce_q_header,
- struct vmci_queue_header **consume_q_header)
- {
- int result;
- result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
- if (result == VMCI_SUCCESS) {
- *produce_q_header = qpair->produce_q->q_header;
- *consume_q_header = qpair->consume_q->q_header;
- } else if (qpair->produce_q->saved_header &&
- qpair->consume_q->saved_header) {
- *produce_q_header = qpair->produce_q->saved_header;
- *consume_q_header = qpair->consume_q->saved_header;
- result = VMCI_SUCCESS;
- }
- return result;
- }
- /*
- * Callback from VMCI queue pair broker indicating that a queue
- * pair that was previously not ready, now either is ready or
- * gone forever.
- */
- static int qp_wakeup_cb(void *client_data)
- {
- struct vmci_qp *qpair = (struct vmci_qp *)client_data;
- qp_lock(qpair);
- while (qpair->blocked > 0) {
- qpair->blocked--;
- qpair->generation++;
- wake_up(&qpair->event);
- }
- qp_unlock(qpair);
- return VMCI_SUCCESS;
- }
- /*
- * Makes the calling thread wait for the queue pair to become
- * ready for host side access. Returns true when the thread is
- * woken up after a queue pair state change, false otherwise.
- */
- static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
- {
- unsigned int generation;
- qpair->blocked++;
- generation = qpair->generation;
- qp_unlock(qpair);
- wait_event(qpair->event, generation != qpair->generation);
- qp_lock(qpair);
- return true;
- }
- /*
- * Enqueues a given buffer to the produce queue using the provided
- * function. As many bytes as possible (space available in the queue)
- * are enqueued. Assumes the queue->mutex has been acquired. Returns
- * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
- * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
- * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
- * an error occurred when accessing the buffer,
- * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
- * available. Otherwise, the number of bytes written to the queue is
- * returned. Updates the tail pointer of the produce queue.
- */
- static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q,
- const u64 produce_q_size,
- const void *buf,
- size_t buf_size,
- vmci_memcpy_to_queue_func memcpy_to_queue)
- {
- s64 free_space;
- u64 tail;
- size_t written;
- ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q);
- if (unlikely(result != VMCI_SUCCESS))
- return result;
- free_space = vmci_q_header_free_space(produce_q->q_header,
- consume_q->q_header,
- produce_q_size);
- if (free_space == 0)
- return VMCI_ERROR_QUEUEPAIR_NOSPACE;
- if (free_space < VMCI_SUCCESS)
- return (ssize_t) free_space;
- written = (size_t) (free_space > buf_size ? buf_size : free_space);
- tail = vmci_q_header_producer_tail(produce_q->q_header);
- if (likely(tail + written < produce_q_size)) {
- result = memcpy_to_queue(produce_q, tail, buf, 0, written);
- } else {
- /* Tail pointer wraps around. */
- const size_t tmp = (size_t) (produce_q_size - tail);
- result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
- if (result >= VMCI_SUCCESS)
- result = memcpy_to_queue(produce_q, 0, buf, tmp,
- written - tmp);
- }
- if (result < VMCI_SUCCESS)
- return result;
- vmci_q_header_add_producer_tail(produce_q->q_header, written,
- produce_q_size);
- return written;
- }
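The split copy above is the standard ring-buffer wraparound. Below is a minimal, self-contained sketch of the same arithmetic in plain userspace C rather than the kernel helpers; struct ring, ring_enqueue() and the one-slot gap that keeps a full ring distinguishable from an empty one are illustrative assumptions, not taken from this driver.

#include <stdint.h>
#include <string.h>

/* Hypothetical ring mirroring the producer side of a queue pair. */
struct ring {
	uint8_t  *data;
	uint64_t  size;   /* produce_q_size */
	uint64_t  tail;   /* producer tail (this side writes) */
	uint64_t  head;   /* consumer head (the peer reads)   */
};

static size_t ring_enqueue(struct ring *r, const void *buf, size_t buf_size)
{
	/* Used bytes are (tail - head) modulo the queue size; keep one
	 * slot unused so head == tail always means "empty". */
	uint64_t used = (r->tail + r->size - r->head) % r->size;
	uint64_t free_space = r->size - used - 1;
	size_t written = buf_size < free_space ? buf_size : (size_t)free_space;

	if (r->tail + written < r->size) {
		memcpy(r->data + r->tail, buf, written);
	} else {
		/* Tail wraps: fill the end of the ring, then the start. */
		size_t tmp = (size_t)(r->size - r->tail);

		memcpy(r->data + r->tail, buf, tmp);
		memcpy(r->data, (const uint8_t *)buf + tmp, written - tmp);
	}
	r->tail = (r->tail + written) % r->size;
	return written;
}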
- /*
- * Dequeues data (if available) from the given consume queue. Writes data
- * to the user provided buffer using the provided function.
- * Assumes the queue->mutex has been acquired.
- * Results:
- * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
- * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
- * (as defined by the queue size).
- * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
- * Otherwise the number of bytes dequeued is returned.
- * Side effects:
- * Updates the head pointer of the consume queue.
- */
- static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q,
- const u64 consume_q_size,
- void *buf,
- size_t buf_size,
- vmci_memcpy_from_queue_func memcpy_from_queue,
- bool update_consumer)
- {
- s64 buf_ready;
- u64 head;
- size_t read;
- ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q);
- if (unlikely(result != VMCI_SUCCESS))
- return result;
- buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
- produce_q->q_header,
- consume_q_size);
- if (buf_ready == 0)
- return VMCI_ERROR_QUEUEPAIR_NODATA;
- if (buf_ready < VMCI_SUCCESS)
- return (ssize_t) buf_ready;
- read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
- head = vmci_q_header_consumer_head(produce_q->q_header);
- if (likely(head + read < consume_q_size)) {
- result = memcpy_from_queue(buf, 0, consume_q, head, read);
- } else {
- /* Head pointer wraps around. */
- const size_t tmp = (size_t) (consume_q_size - head);
- result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
- if (result >= VMCI_SUCCESS)
- result = memcpy_from_queue(buf, tmp, consume_q, 0,
- read - tmp);
- }
- if (result < VMCI_SUCCESS)
- return result;
- if (update_consumer)
- vmci_q_header_add_consumer_head(produce_q->q_header,
- read, consume_q_size);
- return read;
- }
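Continuing that sketch, the consumer side mirrors the producer except for the one detail the update_consumer flag captures: a peek copies data out but leaves the head index untouched. Again purely illustrative, reusing the hypothetical struct ring from the previous sketch.

/* Consumer-side counterpart to ring_enqueue() above. */
static size_t ring_dequeue(struct ring *r, void *buf, size_t buf_size,
			   int update_consumer)
{
	uint64_t buf_ready = (r->tail + r->size - r->head) % r->size;
	size_t read = buf_size < buf_ready ? buf_size : (size_t)buf_ready;

	if (r->head + read < r->size) {
		memcpy(buf, r->data + r->head, read);
	} else {
		/* Head wraps: drain the end of the ring, then the start. */
		size_t tmp = (size_t)(r->size - r->head);

		memcpy(buf, r->data + r->head, tmp);
		memcpy((uint8_t *)buf + tmp, r->data, read - tmp);
	}
	if (update_consumer)	/* false for a peek */
		r->head = (r->head + read) % r->size;
	return read;
}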
- /*
- * vmci_qpair_alloc() - Allocates a queue pair.
- * @qpair: Pointer for the new vmci_qp struct.
- * @handle: Handle to track the resource.
- * @produce_qsize: Desired size of the producer queue.
- * @consume_qsize: Desired size of the consumer queue.
- * @peer: ContextID of the peer.
- * @flags: VMCI flags.
- * @priv_flags: VMCI privilege flags.
- *
- * This is the client interface for allocating the memory for a
- * vmci_qp structure and then attaching to the underlying
- * queue. If an error occurs allocating the memory for the
- * vmci_qp structure no attempt is made to attach. If an
- * error occurs attaching, then the structure is freed.
- */
- int vmci_qpair_alloc(struct vmci_qp **qpair,
- struct vmci_handle *handle,
- u64 produce_qsize,
- u64 consume_qsize,
- u32 peer,
- u32 flags,
- u32 priv_flags)
- {
- struct vmci_qp *my_qpair;
- int retval;
- struct vmci_handle src = VMCI_INVALID_HANDLE;
- struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
- enum vmci_route route;
- vmci_event_release_cb wakeup_cb;
- void *client_data;
- /*
- * Restrict the size of a queuepair. The device already
- * enforces a limit on the total amount of memory that can be
- * allocated to queuepairs for a guest. However, we try to
- * allocate this memory before we make the queuepair
- * allocation hypercall. On Linux, we allocate each page
- * separately, which means rather than fail, the guest will
- * thrash while it tries to allocate, and will become
- * increasingly unresponsive to the point where it appears to
- * be hung. So we place a limit on the size of an individual
- * queuepair here, and leave the device to enforce the
- * restriction on total queuepair memory. (Note that this
- * doesn't prevent all cases; a user with only this much
- * physical memory could still get into trouble.) The error
- * used by the device is NO_RESOURCES, so use that here too.
- */
- if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
- produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
- return VMCI_ERROR_NO_RESOURCES;
- retval = vmci_route(&src, &dst, false, &route);
- if (retval < VMCI_SUCCESS)
- route = vmci_guest_code_active() ?
- VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
- if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
- pr_devel("NONBLOCK OR PINNED set");
- return VMCI_ERROR_INVALID_ARGS;
- }
- my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
- if (!my_qpair)
- return VMCI_ERROR_NO_MEM;
- my_qpair->produce_q_size = produce_qsize;
- my_qpair->consume_q_size = consume_qsize;
- my_qpair->peer = peer;
- my_qpair->flags = flags;
- my_qpair->priv_flags = priv_flags;
- wakeup_cb = NULL;
- client_data = NULL;
- if (VMCI_ROUTE_AS_HOST == route) {
- my_qpair->guest_endpoint = false;
- if (!(flags & VMCI_QPFLAG_LOCAL)) {
- my_qpair->blocked = 0;
- my_qpair->generation = 0;
- init_waitqueue_head(&my_qpair->event);
- wakeup_cb = qp_wakeup_cb;
- client_data = (void *)my_qpair;
- }
- } else {
- my_qpair->guest_endpoint = true;
- }
- retval = vmci_qp_alloc(handle,
- &my_qpair->produce_q,
- my_qpair->produce_q_size,
- &my_qpair->consume_q,
- my_qpair->consume_q_size,
- my_qpair->peer,
- my_qpair->flags,
- my_qpair->priv_flags,
- my_qpair->guest_endpoint,
- wakeup_cb, client_data);
- if (retval < VMCI_SUCCESS) {
- kfree(my_qpair);
- return retval;
- }
- *qpair = my_qpair;
- my_qpair->handle = *handle;
- return retval;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
- /*
- * vmci_qpair_detach() - Detaches the client from a queue pair.
- * @qpair: Reference of a pointer to the qpair struct.
- *
- * This is the client interface for detaching from a VMCIQPair.
- * Note that this routine will free the memory allocated for the
- * vmci_qp structure too.
- */
- int vmci_qpair_detach(struct vmci_qp **qpair)
- {
- int result;
- struct vmci_qp *old_qpair;
- if (!qpair || !(*qpair))
- return VMCI_ERROR_INVALID_ARGS;
- old_qpair = *qpair;
- result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
- /*
- * The guest can fail to detach for a number of reasons, and
- * if it does so, it will clean up the entry (if there is one).
- * The host can fail too, but it won't clean up the entry
- * immediately, it will do that later when the context is
- * freed. Either way, we need to release the qpair struct
- * here; there isn't much the caller can do, and we don't want
- * to leak.
- */
- memset(old_qpair, 0, sizeof(*old_qpair));
- old_qpair->handle = VMCI_INVALID_HANDLE;
- old_qpair->peer = VMCI_INVALID_ID;
- kfree(old_qpair);
- *qpair = NULL;
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_detach);
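Taken together, vmci_qpair_alloc() and vmci_qpair_detach() are the lifecycle calls a kernel client needs. The sketch below shows one plausible host-side caller; the peer context ID, the 64 KiB queue sizes, and the example_* helper names are assumptions for illustration, not part of this driver.

#include <linux/types.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

/* Hypothetical host-side client: set up a queue pair to a guest peer. */
static int example_qp_setup(u32 peer_cid, struct vmci_qp **qpair_out,
			    struct vmci_handle *handle_out)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	struct vmci_qp *qpair = NULL;
	int rv;

	rv = vmci_qpair_alloc(&qpair, &handle,
			      64 * 1024,	/* produce_qsize */
			      64 * 1024,	/* consume_qsize */
			      peer_cid,
			      0,		/* flags: blocking, non-local */
			      VMCI_NO_PRIVILEGE_FLAGS);
	if (rv < VMCI_SUCCESS)
		return rv;

	*qpair_out = qpair;
	*handle_out = handle;
	return VMCI_SUCCESS;
}

/* ...and tear it down again; this also frees the vmci_qp struct. */
static void example_qp_teardown(struct vmci_qp **qpair)
{
	if (*qpair)
		vmci_qpair_detach(qpair);
}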
- /*
- * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
- * @qpair: Pointer to the queue pair struct.
- * @producer_tail: Reference used for storing producer tail index.
- * @consumer_head: Reference used for storing the consumer head index.
- *
- * This is the client interface for getting the current indexes of the
- * QPair from the point of view of the caller as the producer.
- */
- int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
- u64 *producer_tail,
- u64 *consumer_head)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- int result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- vmci_q_header_get_pointers(produce_q_header, consume_q_header,
- producer_tail, consumer_head);
- qp_unlock(qpair);
- if (result == VMCI_SUCCESS &&
- ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
- (consumer_head && *consumer_head >= qpair->produce_q_size)))
- return VMCI_ERROR_INVALID_SIZE;
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
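A caller that only wants to know how much data is outstanding can derive it from the two indexes. A small hypothetical helper follows, assuming the usual "tail minus head, modulo queue size" convention and that the caller remembers the produce queue size it requested at allocation time.

/* Hypothetical: bytes currently queued, seen from the producer side. */
static s64 example_produce_bytes_outstanding(const struct vmci_qp *qpair,
					     u64 produce_q_size)
{
	u64 tail, head;
	int rv;

	rv = vmci_qpair_get_produce_indexes(qpair, &tail, &head);
	if (rv < VMCI_SUCCESS)
		return rv;

	return (s64)((tail + produce_q_size - head) % produce_q_size);
}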
- /*
- * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
- * @qpair: Pointer to the queue pair struct.
- * @consumer_tail: Reference used for storing consumer tail index.
- * @producer_head: Reference used for storing the producer head index.
- *
- * This is the client interface for getting the current indexes of the
- * QPair from the point of view of the caller as the consumer.
- */
- int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
- u64 *consumer_tail,
- u64 *producer_head)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- int result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- vmci_q_header_get_pointers(consume_q_header, produce_q_header,
- consumer_tail, producer_head);
- qp_unlock(qpair);
- if (result == VMCI_SUCCESS &&
- ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
- (producer_head && *producer_head >= qpair->consume_q_size)))
- return VMCI_ERROR_INVALID_SIZE;
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
- /*
- * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
- * @qpair: Pointer to the queue pair struct.
- *
- * This is the client interface for getting the amount of free
- * space in the QPair from the point of view of the caller as
- * the producer, which is the common case. Returns < 0 on error,
- * otherwise the number of bytes into which data can still be enqueued.
- */
- s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- s64 result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- result = vmci_q_header_free_space(produce_q_header,
- consume_q_header,
- qpair->produce_q_size);
- else
- result = 0;
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
- /*
- * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
- * @qpair: Pointer to the queue pair struct.
- *
- * This is the client interface for getting the amount of free
- * space in the QPair from the point of view of the caller as
- * the consumer, which is not the common case. Returns < 0 on error,
- * otherwise the number of bytes into which data can still be enqueued.
- */
- s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- s64 result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- result = vmci_q_header_free_space(consume_q_header,
- produce_q_header,
- qpair->consume_q_size);
- else
- result = 0;
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
- /*
- * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
- * producer queue.
- * @qpair: Pointer to the queue pair struct.
- *
- * This is the client interface for getting the amount of
- * enqueued data in the QPair from the point of view of the
- * caller as the producer, which is not the common case. Returns < 0 on
- * error, otherwise the number of bytes that may be read.
- */
- s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- s64 result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- result = vmci_q_header_buf_ready(produce_q_header,
- consume_q_header,
- qpair->produce_q_size);
- else
- result = 0;
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
- /*
- * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
- * consumer queue.
- * @qpair: Pointer to the queue pair struct.
- *
- * This is the client interface for getting the amount of
- * enqueued data in the QPair from the point of view of the
- * caller as the consumer, which is the normal case. Returns < 0 on
- * error, otherwise the number of bytes that may be read.
- */
- s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
- {
- struct vmci_queue_header *produce_q_header;
- struct vmci_queue_header *consume_q_header;
- s64 result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- result =
- qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
- if (result == VMCI_SUCCESS)
- result = vmci_q_header_buf_ready(consume_q_header,
- produce_q_header,
- qpair->consume_q_size);
- else
- result = 0;
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
- /*
- * vmci_qpair_enqueue() - Throw data on the queue.
- * @qpair: Pointer to the queue pair struct.
- * @buf: Pointer to buffer containing data
- * @buf_size: Length of buffer.
- * @buf_type: Buffer type (Unused).
- *
- * This is the client interface for enqueueing data into the queue.
- * Returns number of bytes enqueued or < 0 on error.
- */
- ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
- const void *buf,
- size_t buf_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair || !buf)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_enqueue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->produce_q_size,
- buf, buf_size,
- qp_memcpy_to_queue);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
- /*
- * vmci_qpair_dequeue() - Get data from the queue.
- * @qpair: Pointer to the queue pair struct.
- * @buf: Pointer to buffer for the data
- * @buf_size: Length of buffer.
- * @buf_type: Buffer type (Unused).
- *
- * This is the client interface for dequeueing data from the queue.
- * Returns number of bytes dequeued or < 0 on error.
- */
- ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
- void *buf,
- size_t buf_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair || !buf)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_dequeue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, true);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
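As a usage sketch, the two calls above combine into a simple blocking request/response exchange over an already-allocated queue pair. The framing, the zero buf_type, and the choice to hand any error straight back to the caller are illustrative, not the driver's policy.

/* Hypothetical exchange over a qpair set up as in the earlier sketch. */
static int example_send_recv(struct vmci_qp *qpair,
			     const void *req, size_t req_len,
			     void *resp, size_t resp_len)
{
	ssize_t n;

	n = vmci_qpair_enqueue(qpair, req, req_len, 0);
	if (n < 0)
		return (int)n;	/* e.g. VMCI_ERROR_QUEUEPAIR_NOSPACE */

	n = vmci_qpair_dequeue(qpair, resp, resp_len, 0);
	if (n < 0)
		return (int)n;	/* e.g. VMCI_ERROR_QUEUEPAIR_NODATA */

	/* Byte counts are discarded here for brevity. */
	return VMCI_SUCCESS;
}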
- /*
- * vmci_qpair_peek() - Peek at the data in the queue.
- * @qpair: Pointer to the queue pair struct.
- * @buf: Pointer to buffer for the data
- * @buf_size: Length of buffer.
- * @buf_type: Buffer type (Unused on Linux).
- *
- * This is the client interface for peeking into a queue. (I.e.,
- * copy data from the queue without updating the head pointer.)
- * Returns number of bytes dequeued or < 0 on error.
- */
- ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
- void *buf,
- size_t buf_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair || !buf)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_dequeue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->consume_q_size,
- buf, buf_size,
- qp_memcpy_from_queue, false);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_peek);
- /*
- * vmci_qpair_enquev() - Throw data on the queue using iov.
- * @qpair: Pointer to the queue pair struct.
- * @msg: Pointer to a struct msghdr containing the data to enqueue.
- * @iov_size: Number of bytes to enqueue.
- * @buf_type: Buffer type (Unused).
- *
- * This is the client interface for enqueueing data into the queue.
- * This function uses IO vectors to handle the work. Returns number
- * of bytes enqueued or < 0 on error.
- */
- ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
- struct msghdr *msg,
- size_t iov_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_enqueue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->produce_q_size,
- msg, iov_size,
- qp_memcpy_to_queue_iov);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
- /*
- * vmci_qpair_dequev() - Get data from the queue using iov.
- * @qpair: Pointer to the queue pair struct.
- * @msg: Pointer to a struct msghdr receiving the data.
- * @iov_size: Number of bytes to dequeue.
- * @buf_type: Buffer type (Unused).
- *
- * This is the client interface for dequeueing data from the queue.
- * This function uses IO vectors to handle the work. Returns number
- * of bytes dequeued or < 0 on error.
- */
- ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
- struct msghdr *msg,
- size_t iov_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_dequeue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- true);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
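The iov variants take a struct msghdr instead of a flat buffer, so in-kernel callers can pass scatter/gather data directly. A hedged sketch of building one around a single kvec follows; note that the direction argument to iov_iter_kvec() has changed across kernel versions (READ/WRITE versus ITER_DEST/ITER_SOURCE), so the constant used here is an assumption.

#include <linux/socket.h>
#include <linux/uio.h>

/* Hypothetical: enqueue a flat buffer through the msghdr-based variant. */
static ssize_t example_enquev(struct vmci_qp *qpair, void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { };

	iov_iter_kvec(&msg.msg_iter, WRITE, &vec, 1, len);
	return vmci_qpair_enquev(qpair, &msg, len, 0);
}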
- /*
- * vmci_qpair_peekv() - Peek at the data in the queue using iov.
- * @qpair: Pointer to the queue pair struct.
- * @msg: Pointer to a struct msghdr receiving the data.
- * @iov_size: Number of bytes to peek at.
- * @buf_type: Buffer type (Unused on Linux).
- *
- * This is the client interface for peeking into a queue. (I.e.,
- * copy data from the queue without updating the head pointer.)
- * This function uses IO vectors to handle the work. Returns number
- * of bytes peeked or < 0 on error.
- */
- ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
- struct msghdr *msg,
- size_t iov_size,
- int buf_type)
- {
- ssize_t result;
- if (!qpair)
- return VMCI_ERROR_INVALID_ARGS;
- qp_lock(qpair);
- do {
- result = qp_dequeue_locked(qpair->produce_q,
- qpair->consume_q,
- qpair->consume_q_size,
- msg, iov_size,
- qp_memcpy_from_queue_iov,
- false);
- if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
- !qp_wait_for_ready_queue(qpair))
- result = VMCI_ERROR_WOULD_BLOCK;
- } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
- qp_unlock(qpair);
- return result;
- }
- EXPORT_SYMBOL_GPL(vmci_qpair_peekv);