- \input texinfo @c -*-texinfo-*-
- @c %**start of header
- @setfilename libgomp.info
- @settitle GNU libgomp
- @c %**end of header
- @copying
- Copyright @copyright{} 2006-2015 Free Software Foundation, Inc.
- Permission is granted to copy, distribute and/or modify this document
- under the terms of the GNU Free Documentation License, Version 1.3 or
- any later version published by the Free Software Foundation; with the
- Invariant Sections being ``Funding Free Software'', the Front-Cover
- texts being (a) (see below), and with the Back-Cover Texts being (b)
- (see below). A copy of the license is included in the section entitled
- ``GNU Free Documentation License''.
- (a) The FSF's Front-Cover Text is:
- A GNU Manual
- (b) The FSF's Back-Cover Text is:
- You have freedom to copy and modify this GNU Manual, like GNU
- software. Copies published by the Free Software Foundation raise
- funds for GNU development.
- @end copying
- @ifinfo
- @dircategory GNU Libraries
- @direntry
- * libgomp: (libgomp). GNU Offloading and Multi Processing Runtime Library.
- @end direntry
- This manual documents libgomp, the GNU Offloading and Multi Processing
- Runtime library. This is the GNU implementation of the OpenMP and
- OpenACC APIs for parallel and accelerator programming in C/C++ and
- Fortran.
- Published by the Free Software Foundation
- 51 Franklin Street, Fifth Floor
- Boston, MA 02110-1301 USA
- @insertcopying
- @end ifinfo
- @setchapternewpage odd
- @titlepage
- @title GNU Offloading and Multi Processing Runtime Library
- @subtitle The GNU OpenMP and OpenACC Implementation
- @page
- @vskip 0pt plus 1filll
- @comment For the @value{version-GCC} Version*
- @sp 1
- Published by the Free Software Foundation @*
- 51 Franklin Street, Fifth Floor@*
- Boston, MA 02110-1301, USA@*
- @sp 1
- @insertcopying
- @end titlepage
- @summarycontents
- @contents
- @page
- @node Top
- @top Introduction
- @cindex Introduction
- This manual documents the usage of libgomp, the GNU Offloading and
- Multi Processing Runtime Library. This includes the GNU
- implementation of the @uref{http://www.openmp.org, OpenMP} Application
- Programming Interface (API) for multi-platform shared-memory parallel
- programming in C/C++ and Fortran, and the GNU implementation of the
- @uref{http://www.openacc.org/, OpenACC} Application Programming
- Interface (API) for offloading of code to accelerator devices in C/C++
- and Fortran.
- Originally, libgomp implemented the GNU OpenMP Runtime Library.  Based
- on this, support for OpenACC and offloading (both OpenACC and OpenMP
- 4's target construct) was added later on, and the library's name
- changed to GNU Offloading and Multi Processing Runtime Library.
- @comment
- @comment When you add a new menu item, please keep the right hand
- @comment aligned to the same column. Do not use tabs. This provides
- @comment better formatting.
- @comment
- @menu
- * Enabling OpenMP:: How to enable OpenMP for your applications.
- * Runtime Library Routines:: The OpenMP runtime application programming
- interface.
- * Environment Variables:: Influencing runtime behavior with environment
- variables.
- * The libgomp ABI:: Notes on the external ABI presented by libgomp.
- * Reporting Bugs:: How to report bugs in the GNU Offloading and
- Multi Processing Runtime Library.
- * Copying:: GNU General Public License says
- how you can copy and share libgomp.
- * GNU Free Documentation License::
- How you can copy and share this manual.
- * Funding:: How to help assure continued work for free
- software.
- * Library Index:: Index of this documentation.
- @end menu
- @c ---------------------------------------------------------------------
- @c Enabling OpenMP
- @c ---------------------------------------------------------------------
- @node Enabling OpenMP
- @chapter Enabling OpenMP
- To activate the OpenMP extensions for C/C++ and Fortran, the compile-time
- flag @command{-fopenmp} must be specified. This enables the OpenMP directive
- @code{#pragma omp} in C/C++ and @code{!$omp} directives in free form,
- @code{c$omp}, @code{*$omp} and @code{!$omp} directives in fixed form,
- @code{!$} conditional compilation sentinels in free form and @code{c$},
- @code{*$} and @code{!$} sentinels in fixed form, for Fortran. The flag also
- arranges for automatic linking of the OpenMP runtime library
- (@ref{Runtime Library Routines}).
- A complete description of all OpenMP directives accepted may be found in
- the @uref{http://www.openmp.org, OpenMP Application Program Interface} manual,
- version 4.0.
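- As a minimal illustration (not part of the OpenMP specification), the
- following C program uses a parallel loop; compiling it with
- @command{-fopenmp} enables the directive and links the runtime library
- automatically. The file and program names are arbitrary.
- @smallexample
- /* hello.c -- compile with:  gcc -fopenmp hello.c  */
- #include <stdio.h>
- int
- main (void)
- @{
-   int i;
-   #pragma omp parallel for
-   for (i = 0; i < 8; i++)
-     printf ("iteration %d\n", i);
-   return 0;
- @}
- @end smallexample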
- @c ---------------------------------------------------------------------
- @c Runtime Library Routines
- @c ---------------------------------------------------------------------
- @node Runtime Library Routines
- @chapter Runtime Library Routines
- The runtime routines described here are defined by Section 3 of the OpenMP
- specification in version 4.0. The routines are structured in the following
- three parts:
- @menu
- Control threads, processors and the parallel environment. They have C
- linkage, and do not throw exceptions.
- * omp_get_active_level:: Number of active parallel regions
- * omp_get_ancestor_thread_num:: Ancestor thread ID
- * omp_get_cancellation:: Whether cancellation support is enabled
- * omp_get_default_device:: Get the default device for target regions
- * omp_get_dynamic:: Dynamic teams setting
- * omp_get_level:: Number of parallel regions
- * omp_get_max_active_levels:: Maximum number of active regions
- * omp_get_max_threads:: Maximum number of threads of parallel region
- * omp_get_nested:: Nested parallel regions
- * omp_get_num_devices:: Number of target devices
- * omp_get_num_procs:: Number of processors online
- * omp_get_num_teams:: Number of teams
- * omp_get_num_threads:: Size of the active team
- * omp_get_proc_bind:: Whether threads may be moved between CPUs
- * omp_get_schedule:: Obtain the runtime scheduling method
- * omp_get_team_num:: Get team number
- * omp_get_team_size:: Number of threads in a team
- * omp_get_thread_limit:: Maximum number of threads
- * omp_get_thread_num:: Current thread ID
- * omp_in_parallel:: Whether a parallel region is active
- * omp_in_final:: Whether in final or included task region
- * omp_is_initial_device:: Whether executing on the host device
- * omp_set_default_device:: Set the default device for target regions
- * omp_set_dynamic:: Enable/disable dynamic teams
- * omp_set_max_active_levels:: Limits the number of active parallel regions
- * omp_set_nested:: Enable/disable nested parallel regions
- * omp_set_num_threads:: Set upper team size limit
- * omp_set_schedule:: Set the runtime scheduling method
- Initialize, set, test, unset and destroy simple and nested locks.
- * omp_init_lock:: Initialize simple lock
- * omp_set_lock:: Wait for and set simple lock
- * omp_test_lock:: Test and set simple lock if available
- * omp_unset_lock:: Unset simple lock
- * omp_destroy_lock:: Destroy simple lock
- * omp_init_nest_lock:: Initialize nested lock
- * omp_set_nest_lock:: Wait for and set nested lock
- * omp_test_nest_lock:: Test and set nested lock if available
- * omp_unset_nest_lock:: Unset nested lock
- * omp_destroy_nest_lock:: Destroy nested lock
- Portable, thread-based, wall clock timer.
- * omp_get_wtick:: Get timer precision.
- * omp_get_wtime:: Elapsed wall clock time.
- @end menu
- @node omp_get_active_level
- @section @code{omp_get_active_level} -- Number of active parallel regions
- @table @asis
- @item @emph{Description}:
- This function returns the nesting level of the active parallel blocks
- that enclose the call.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_active_level(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_active_level()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_level}, @ref{omp_get_max_active_levels}, @ref{omp_set_max_active_levels}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.20.
- @end table
- @node omp_get_ancestor_thread_num
- @section @code{omp_get_ancestor_thread_num} -- Ancestor thread ID
- @table @asis
- @item @emph{Description}:
- This function returns the thread identification number for the given
- nesting level of the current thread. For values of @var{level} outside
- the range zero to @code{omp_get_level}, -1 is returned; if @var{level}
- equals @code{omp_get_level}, the result is identical to @code{omp_get_thread_num}.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_ancestor_thread_num(int level);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_ancestor_thread_num(level)}
- @item @tab @code{integer level}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_level}, @ref{omp_get_thread_num}, @ref{omp_get_team_size}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.18.
- @end table
- @node omp_get_cancellation
- @section @code{omp_get_cancellation} -- Whether cancellation support is enabled
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if cancellation is activated, @code{false}
- otherwise. Here, @code{true} and @code{false} represent their language-specific
- counterparts. Unless @env{OMP_CANCELLATION} is set true, cancellations are
- deactivated.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_cancellation(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_get_cancellation()}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_CANCELLATION}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.9.
- @end table
- @node omp_get_default_device
- @section @code{omp_get_default_device} -- Get the default device for target regions
- @table @asis
- @item @emph{Description}:
- Get the default device for target regions without a device clause.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_default_device(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_default_device()}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_DEFAULT_DEVICE}, @ref{omp_set_default_device}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.24.
- @end table
- @node omp_get_dynamic
- @section @code{omp_get_dynamic} -- Dynamic teams setting
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if enabled, @code{false} otherwise.
- Here, @code{true} and @code{false} represent their language-specific
- counterparts.
- The dynamic team setting may be initialized at startup by the
- @env{OMP_DYNAMIC} environment variable or at runtime using
- @code{omp_set_dynamic}. If undefined, dynamic adjustment is
- disabled by default.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_dynamic(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_get_dynamic()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_dynamic}, @ref{OMP_DYNAMIC}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.8.
- @end table
- @node omp_get_level
- @section @code{omp_get_level} -- Obtain the current nesting level
- @table @asis
- @item @emph{Description}:
- This function returns the nesting level of the parallel blocks
- that enclose the call.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_level(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_level()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_active_level}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.17.
- @end table
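- The following sketch (not from the OpenMP specification) illustrates the
- difference between @code{omp_get_level} and @code{omp_get_active_level}
- when an inner parallel region is serialized because nested parallelism
- is disabled:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_set_nested (0);   /* Inner regions get teams of one thread.  */
-   #pragma omp parallel num_threads (2)
-   @{
-     #pragma omp parallel num_threads (2)
-     @{
-       #pragma omp single
-       printf ("level = %d, active level = %d\n",
-               omp_get_level (), omp_get_active_level ());
-       /* Each of the two outer threads prints "level = 2, active level = 1".  */
-     @}
-   @}
-   return 0;
- @}
- @end smallexample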
- @node omp_get_max_active_levels
- @section @code{omp_get_max_active_levels} -- Maximum number of active regions
- @table @asis
- @item @emph{Description}:
- This function obtains the maximum allowed number of nested, active parallel regions.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_max_active_levels(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_max_active_levels()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_max_active_levels}, @ref{omp_get_active_level}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.16.
- @end table
- @node omp_get_max_threads
- @section @code{omp_get_max_threads} -- Maximum number of threads of parallel region
- @table @asis
- @item @emph{Description}:
- Return the maximum number of threads that would be used for a parallel
- region that does not use the clause @code{num_threads}.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_max_threads(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_max_threads()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_num_threads}, @ref{omp_set_dynamic}, @ref{omp_get_thread_limit}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.3.
- @end table
- @node omp_get_nested
- @section @code{omp_get_nested} -- Nested parallel regions
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if nested parallel regions are
- enabled, @code{false} otherwise. Here, @code{true} and @code{false}
- represent their language-specific counterparts.
- Nested parallel regions may be initialized at startup by the
- @env{OMP_NESTED} environment variable or at runtime using
- @code{omp_set_nested}. If undefined, nested parallel regions are
- disabled by default.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_nested(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_get_nested()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_nested}, @ref{OMP_NESTED}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.11.
- @end table
- @node omp_get_num_devices
- @section @code{omp_get_num_devices} -- Number of target devices
- @table @asis
- @item @emph{Description}:
- Returns the number of target devices.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_num_devices(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_num_devices()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.25.
- @end table
- @node omp_get_num_procs
- @section @code{omp_get_num_procs} -- Number of processors online
- @table @asis
- @item @emph{Description}:
- Returns the number of processors online on the current device.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_num_procs(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_num_procs()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.5.
- @end table
- @node omp_get_num_teams
- @section @code{omp_get_num_teams} -- Number of teams
- @table @asis
- @item @emph{Description}:
- Returns the number of teams in the current team region.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_num_teams(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_num_teams()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.26.
- @end table
- @node omp_get_num_threads
- @section @code{omp_get_num_threads} -- Size of the active team
- @table @asis
- @item @emph{Description}:
- Returns the number of threads in the current team. In a sequential section of
- the program @code{omp_get_num_threads} returns 1.
- The default team size may be initialized at startup by the
- @env{OMP_NUM_THREADS} environment variable. At runtime, the size
- of the current team may be set either by the @code{num_threads}
- clause or by @code{omp_set_num_threads}. If none of the above were
- used to define a specific value and @env{OMP_DYNAMIC} is disabled,
- one thread per CPU online is used.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_num_threads(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_num_threads()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_max_threads}, @ref{omp_set_num_threads}, @ref{OMP_NUM_THREADS}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.2.
- @end table
- @node omp_get_proc_bind
- @section @code{omp_get_proc_bind} -- Whether threads may be moved between CPUs
- @table @asis
- @item @emph{Description}:
- This function returns the currently active thread affinity policy, which is
- set via @env{OMP_PROC_BIND}. Possible values are @code{omp_proc_bind_false},
- @code{omp_proc_bind_true}, @code{omp_proc_bind_master},
- @code{omp_proc_bind_close} and @code{omp_proc_bind_spread}.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{omp_proc_bind_t omp_get_proc_bind(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer(kind=omp_proc_bind_kind) function omp_get_proc_bind()}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_PROC_BIND}, @ref{OMP_PLACES}, @ref{GOMP_CPU_AFFINITY}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.22.
- @end table
- @node omp_get_schedule
- @section @code{omp_get_schedule} -- Obtain the runtime scheduling method
- @table @asis
- @item @emph{Description}:
- Obtain the runtime scheduling method. The @var{kind} argument will be
- set to the value @code{omp_sched_static}, @code{omp_sched_dynamic},
- @code{omp_sched_guided} or @code{omp_sched_auto}. The second argument,
- @var{modifier}, is set to the chunk size.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_get_schedule(omp_sched_t *kind, int *modifier);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_get_schedule(kind, modifier)}
- @item @tab @code{integer(kind=omp_sched_kind) kind}
- @item @tab @code{integer modifier}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_schedule}, @ref{OMP_SCHEDULE}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.13.
- @end table
- @node omp_get_team_num
- @section @code{omp_get_team_num} -- Get team number
- @table @asis
- @item @emph{Description}:
- Returns the team number of the calling thread.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_team_num(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_team_num()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.27.
- @end table
- @node omp_get_team_size
- @section @code{omp_get_team_size} -- Number of threads in a team
- @table @asis
- @item @emph{Description}:
- This function returns the number of threads in a thread team to which
- either the current thread or its ancestor belongs. For values of @var{level}
- outside the range zero to @code{omp_get_level}, -1 is returned; if
- @var{level} is zero, 1 is returned, and if @var{level} equals
- @code{omp_get_level}, the result is identical to @code{omp_get_num_threads}.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_team_size(int level);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_team_size(level)}
- @item @tab @code{integer level}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_num_threads}, @ref{omp_get_level}, @ref{omp_get_ancestor_thread_num}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.19.
- @end table
- @node omp_get_thread_limit
- @section @code{omp_get_thread_limit} -- Maximum number of threads
- @table @asis
- @item @emph{Description}:
- Return the maximum number of threads of the program.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_thread_limit(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_thread_limit()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_max_threads}, @ref{OMP_THREAD_LIMIT}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.14.
- @end table
- @node omp_get_thread_num
- @section @code{omp_get_thread_num} -- Current thread ID
- @table @asis
- @item @emph{Description}:
- Returns a unique thread identification number within the current team.
- In sequential parts of the program, @code{omp_get_thread_num}
- always returns 0. In parallel regions the return value varies
- from 0 to @code{omp_get_num_threads}-1 inclusive. The return
- value of the master thread of a team is always 0.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_get_thread_num(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{integer function omp_get_thread_num()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_num_threads}, @ref{omp_get_ancestor_thread_num}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.4.
- @end table
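- A minimal sketch (not from the specification) showing
- @code{omp_get_thread_num} together with @code{omp_get_num_threads}:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   /* Sequential part: always thread 0 of a team of size 1.  */
-   printf ("serial: thread %d of %d\n",
-           omp_get_thread_num (), omp_get_num_threads ());
-   #pragma omp parallel
-   printf ("parallel: thread %d of %d\n",
-           omp_get_thread_num (), omp_get_num_threads ());
-   return 0;
- @}
- @end smallexample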
- @node omp_in_parallel
- @section @code{omp_in_parallel} -- Whether a parallel region is active
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if currently running in parallel,
- @code{false} otherwise. Here, @code{true} and @code{false} represent
- their language-specific counterparts.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_in_parallel(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_in_parallel()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.6.
- @end table
- @node omp_in_final
- @section @code{omp_in_final} -- Whether in final or included task region
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if currently running in a final
- or included task region, @code{false} otherwise. Here, @code{true}
- and @code{false} represent their language-specific counterparts.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_in_final(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_in_final()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.21.
- @end table
- @node omp_is_initial_device
- @section @code{omp_is_initial_device} -- Whether executing on the host device
- @table @asis
- @item @emph{Description}:
- This function returns @code{true} if currently running on the host device,
- @code{false} otherwise. Here, @code{true} and @code{false} represent
- their language-specific counterparts.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_is_initial_device(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_is_initial_device()}
- @end multitable
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.28.
- @end table
- @node omp_set_default_device
- @section @code{omp_set_default_device} -- Set the default device for target regions
- @table @asis
- @item @emph{Description}:
- Set the default device for target regions without a device clause. The argument
- shall be a nonnegative device number.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_default_device(int device_num);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_default_device(device_num)}
- @item @tab @code{integer device_num}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_DEFAULT_DEVICE}, @ref{omp_get_default_device}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.23.
- @end table
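- The following sketch selects device 0 as the default device if any non-host
- device is available and then checks from inside a @code{target} region
- whether the code was offloaded.  It assumes an offload-capable build of
- libgomp; otherwise the region simply runs on the host.
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   int on_host = 1;
-   if (omp_get_num_devices () > 0)
-     omp_set_default_device (0);   /* Use the first device, if present.  */
-   /* No device clause: the region runs on the default device.  */
-   #pragma omp target map(from: on_host)
-   on_host = omp_is_initial_device ();
-   printf ("target region ran on the %s\n", on_host ? "host" : "device");
-   return 0;
- @}
- @end smallexample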
- @node omp_set_dynamic
- @section @code{omp_set_dynamic} -- Enable/disable dynamic teams
- @table @asis
- @item @emph{Description}:
- Enable or disable the dynamic adjustment of the number of threads
- within a team. The function takes the language-specific equivalent
- of @code{true} and @code{false}, where @code{true} enables dynamic
- adjustment of team sizes and @code{false} disables it.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_dynamic(int dynamic_threads);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_dynamic(dynamic_threads)}
- @item @tab @code{logical, intent(in) :: dynamic_threads}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_DYNAMIC}, @ref{omp_get_dynamic}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.7.
- @end table
- @node omp_set_max_active_levels
- @section @code{omp_set_max_active_levels} -- Limits the number of active parallel regions
- @table @asis
- @item @emph{Description}:
- This function limits the maximum allowed number of nested, active
- parallel regions.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_max_active_levels(int max_levels);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_max_active_levels(max_levels)}
- @item @tab @code{integer max_levels}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_max_active_levels}, @ref{omp_get_active_level}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.15.
- @end table
- @node omp_set_nested
- @section @code{omp_set_nested} -- Enable/disable nested parallel regions
- @table @asis
- @item @emph{Description}:
- Enable or disable nested parallel regions, i.e., whether team members
- are allowed to create new teams. The function takes the language-specific
- equivalent of @code{true} and @code{false}, where @code{true} enables
- nested parallel regions and @code{false} disables them.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_nested(int nested);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_nested(nested)}
- @item @tab @code{logical, intent(in) :: nested}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_NESTED}, @ref{omp_get_nested}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.10.
- @end table
- @node omp_set_num_threads
- @section @code{omp_set_num_threads} -- Set upper team size limit
- @table @asis
- @item @emph{Description}:
- Specifies the number of threads used by default in subsequent parallel
- sections, if those do not specify a @code{num_threads} clause. The
- argument of @code{omp_set_num_threads} shall be a positive integer.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_num_threads(int num_threads);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_num_threads(num_threads)}
- @item @tab @code{integer, intent(in) :: num_threads}
- @end multitable
- @item @emph{See also}:
- @ref{OMP_NUM_THREADS}, @ref{omp_get_num_threads}, @ref{omp_get_max_threads}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.1.
- @end table
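- A short sketch (not from the specification); the first region typically gets
- four threads (subject to thread limits and dynamic adjustment), while the
- second is overridden by its @code{num_threads} clause:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_set_num_threads (4);        /* Default size for subsequent teams.  */
-   #pragma omp parallel
-   #pragma omp single
-   printf ("team size: %d\n", omp_get_num_threads ());
-   #pragma omp parallel num_threads (2)   /* Clause overrides the setting.  */
-   #pragma omp single
-   printf ("team size: %d\n", omp_get_num_threads ());
-   return 0;
- @}
- @end smallexample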
- @node omp_set_schedule
- @section @code{omp_set_schedule} -- Set the runtime scheduling method
- @table @asis
- @item @emph{Description}:
- Sets the runtime scheduling method. The @var{kind} argument can have the
- value @code{omp_sched_static}, @code{omp_sched_dynamic},
- @code{omp_sched_guided} or @code{omp_sched_auto}. Except for
- @code{omp_sched_auto}, the chunk size is set to the value of
- @var{modifier} if positive, or to the default value if zero or negative.
- For @code{omp_sched_auto} the @var{modifier} argument is ignored.
- @item @emph{C/C++}
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_schedule(omp_sched_t kind, int modifier);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_schedule(kind, modifier)}
- @item @tab @code{integer(kind=omp_sched_kind) kind}
- @item @tab @code{integer modifier}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_schedule}
- @ref{OMP_SCHEDULE}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.2.12.
- @end table
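- As a sketch (not part of the specification), the following sets the runtime
- schedule and reads it back; loops using @code{schedule(runtime)} then use
- the selected method:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_sched_t kind;
-   int chunk;
-   omp_set_schedule (omp_sched_dynamic, 4);
-   omp_get_schedule (&kind, &chunk);
-   printf ("dynamic? %d  chunk = %d\n", kind == omp_sched_dynamic, chunk);
-   return 0;
- @}
- @end smallexample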
- @node omp_init_lock
- @section @code{omp_init_lock} -- Initialize simple lock
- @table @asis
- @item @emph{Description}:
- Initialize a simple lock. After initialization, the lock is in
- an unlocked state.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_init_lock(omp_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_init_lock(svar)}
- @item @tab @code{integer(omp_lock_kind), intent(out) :: svar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_destroy_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.1.
- @end table
- @node omp_set_lock
- @section @code{omp_set_lock} -- Wait for and set simple lock
- @table @asis
- @item @emph{Description}:
- Before setting a simple lock, the lock variable must be initialized by
- @code{omp_init_lock}. The calling thread is blocked until the lock
- is available. If the lock is already held by the current thread,
- a deadlock occurs.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_lock(omp_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_lock(svar)}
- @item @tab @code{integer(omp_lock_kind), intent(inout) :: svar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_lock}, @ref{omp_test_lock}, @ref{omp_unset_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.3.
- @end table
- @node omp_test_lock
- @section @code{omp_test_lock} -- Test and set simple lock if available
- @table @asis
- @item @emph{Description}:
- Before setting a simple lock, the lock variable must be initialized by
- @code{omp_init_lock}. Contrary to @code{omp_set_lock}, @code{omp_test_lock}
- does not block if the lock is not available. This function returns
- @code{true} upon success, @code{false} otherwise. Here, @code{true} and
- @code{false} represent their language-specific counterparts.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_test_lock(omp_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_test_lock(svar)}
- @item @tab @code{integer(omp_lock_kind), intent(inout) :: svar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_lock}, @ref{omp_set_lock}, @ref{omp_unset_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.5.
- @end table
- @node omp_unset_lock
- @section @code{omp_unset_lock} -- Unset simple lock
- @table @asis
- @item @emph{Description}:
- A simple lock about to be unset must have been locked by @code{omp_set_lock}
- or @code{omp_test_lock} before. In addition, the lock must be held by the
- thread calling @code{omp_unset_lock}. Then, the lock becomes unlocked. If one
- or more threads attempted to set the lock before, one of them is chosen to
- acquire the lock.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_unset_lock(omp_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_unset_lock(svar)}
- @item @tab @code{integer(omp_lock_kind), intent(inout) :: svar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_lock}, @ref{omp_test_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.4.
- @end table
- @node omp_destroy_lock
- @section @code{omp_destroy_lock} -- Destroy simple lock
- @table @asis
- @item @emph{Description}:
- Destroy a simple lock. In order to be destroyed, a simple lock must be
- in the unlocked state.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_destroy_lock(omp_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_destroy_lock(svar)}
- @item @tab @code{integer(omp_lock_kind), intent(inout) :: svar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.2.
- @end table
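- The following sketch (illustration only; a @code{critical} construct would
- often be simpler) shows the life cycle of a simple lock protecting a shared
- counter:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_lock_t lock;
-   int counter = 0;
-   omp_init_lock (&lock);
-   #pragma omp parallel
-   @{
-     omp_set_lock (&lock);     /* Block until the lock is acquired.  */
-     counter++;                /* Only one thread at a time runs this.  */
-     omp_unset_lock (&lock);
-   @}
-   omp_destroy_lock (&lock);
-   printf ("counter = %d\n", counter);
-   return 0;
- @}
- @end smallexample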
- @node omp_init_nest_lock
- @section @code{omp_init_nest_lock} -- Initialize nested lock
- @table @asis
- @item @emph{Description}:
- Initialize a nested lock. After initialization, the lock is in
- an unlocked state and the nesting count is set to zero.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_init_nest_lock(omp_nest_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_init_nest_lock(nvar)}
- @item @tab @code{integer(omp_nest_lock_kind), intent(out) :: nvar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_destroy_nest_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.1.
- @end table
- @node omp_set_nest_lock
- @section @code{omp_set_nest_lock} -- Wait for and set nested lock
- @table @asis
- @item @emph{Description}:
- Before setting a nested lock, the lock variable must be initialized by
- @code{omp_init_nest_lock}. The calling thread is blocked until the lock
- is available. If the lock is already held by the current thread, the
- nesting count for the lock is incremented.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_set_nest_lock(omp_nest_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_set_nest_lock(nvar)}
- @item @tab @code{integer(omp_nest_lock_kind), intent(inout) :: nvar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_nest_lock}, @ref{omp_unset_nest_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.3.
- @end table
- @node omp_test_nest_lock
- @section @code{omp_test_nest_lock} -- Test and set nested lock if available
- @table @asis
- @item @emph{Description}:
- Before setting a nested lock, the lock variable must be initialized by
- @code{omp_init_nest_lock}. Contrary to @code{omp_set_nest_lock},
- @code{omp_test_nest_lock} does not block if the lock is not available.
- If the lock is already held by the current thread, the new nesting count
- is returned. Otherwise, the return value equals zero.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{int omp_test_nest_lock(omp_nest_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{logical function omp_test_nest_lock(nvar)}
- @item @tab @code{integer(omp_nest_lock_kind), intent(inout) :: nvar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_nest_lock}, @ref{omp_set_nest_lock}, @ref{omp_unset_nest_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.5.
- @end table
- @node omp_unset_nest_lock
- @section @code{omp_unset_nest_lock} -- Unset nested lock
- @table @asis
- @item @emph{Description}:
- A nested lock about to be unset must have been locked by @code{omp_set_nest_lock}
- or @code{omp_test_nest_lock} before. In addition, the lock must be held by the
- thread calling @code{omp_unset_nest_lock}. If the nesting count drops to zero, the
- lock becomes unlocked. If one or more threads attempted to set the lock before,
- one of them is chosen to acquire the lock.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_unset_nest_lock(omp_nest_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_unset_nest_lock(nvar)}
- @item @tab @code{integer(omp_nest_lock_kind), intent(inout) :: nvar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_set_nest_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.4.
- @end table
- @node omp_destroy_nest_lock
- @section @code{omp_destroy_nest_lock} -- Destroy nested lock
- @table @asis
- @item @emph{Description}:
- Destroy a nested lock. In order to be destroyed, a nested lock must be
- in the unlocked state and its nesting count must equal zero.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{void omp_destroy_nest_lock(omp_nest_lock_t *lock);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{subroutine omp_destroy_nest_lock(nvar)}
- @item @tab @code{integer(omp_nest_lock_kind), intent(inout) :: nvar}
- @end multitable
- @item @emph{See also}:
- @ref{omp_init_nest_lock}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.3.2.
- @end table
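- A sketch (illustration only) of a nested lock being re-acquired by the
- thread that already holds it, here from a recursive function:
- @smallexample
- #include <omp.h>
- static omp_nest_lock_t lock;
- static int counter;
- static void
- increment (int n)
- @{
-   omp_set_nest_lock (&lock);   /* Re-entry just increments the nesting count.  */
-   counter += n;
-   if (n > 1)
-     increment (n - 1);         /* Recursive call re-acquires the same lock.  */
-   omp_unset_nest_lock (&lock);
- @}
- int
- main (void)
- @{
-   omp_init_nest_lock (&lock);
-   #pragma omp parallel
-   increment (3);
-   omp_destroy_nest_lock (&lock);
-   return 0;
- @}
- @end smallexample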
- @node omp_get_wtick
- @section @code{omp_get_wtick} -- Get timer precision
- @table @asis
- @item @emph{Description}:
- Gets the timer precision, i.e., the number of seconds between two
- successive clock ticks.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{double omp_get_wtick(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{double precision function omp_get_wtick()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_wtime}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.4.2.
- @end table
- @node omp_get_wtime
- @section @code{omp_get_wtime} -- Elapsed wall clock time
- @table @asis
- @item @emph{Description}:
- Elapsed wall clock time in seconds. The time is measured per thread; no
- guarantee can be made that two distinct threads measure the same time.
- Time is measured from some ``time in the past'', which is an arbitrary time
- guaranteed not to change during the execution of the program.
- @item @emph{C/C++}:
- @multitable @columnfractions .20 .80
- @item @emph{Prototype}: @tab @code{double omp_get_wtime(void);}
- @end multitable
- @item @emph{Fortran}:
- @multitable @columnfractions .20 .80
- @item @emph{Interface}: @tab @code{double precision function omp_get_wtime()}
- @end multitable
- @item @emph{See also}:
- @ref{omp_get_wtick}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 3.4.1.
- @end table
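- A sketch of the usual timing idiom (the loop body is arbitrary):
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   double sum = 0.0, start, elapsed;
-   int i;
-   start = omp_get_wtime ();
-   #pragma omp parallel for reduction(+:sum)
-   for (i = 0; i < 10000000; i++)
-     sum += i * 0.5;
-   elapsed = omp_get_wtime () - start;
-   printf ("sum = %g, %g seconds (tick: %g s)\n",
-           sum, elapsed, omp_get_wtick ());
-   return 0;
- @}
- @end smallexample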
- @c ---------------------------------------------------------------------
- @c Environment Variables
- @c ---------------------------------------------------------------------
- @node Environment Variables
- @chapter Environment Variables
- The environment variables beginning with @env{OMP_} are defined by
- Section 4 of the OpenMP specification in version 4.0, while those
- beginning with @env{GOMP_} are GNU extensions.
- @menu
- * OMP_CANCELLATION:: Set whether cancellation is activated
- * OMP_DISPLAY_ENV:: Show OpenMP version and environment variables
- * OMP_DEFAULT_DEVICE:: Set the device used in target regions
- * OMP_DYNAMIC:: Dynamic adjustment of threads
- * OMP_MAX_ACTIVE_LEVELS:: Set the maximum number of nested parallel regions
- * OMP_NESTED:: Nested parallel regions
- * OMP_NUM_THREADS:: Specifies the number of threads to use
- * OMP_PROC_BIND:: Whether threads may be moved between CPUs
- * OMP_PLACES:: Specifies on which CPUs the threads should be placed
- * OMP_STACKSIZE:: Set default thread stack size
- * OMP_SCHEDULE:: How threads are scheduled
- * OMP_THREAD_LIMIT:: Set the maximum number of threads
- * OMP_WAIT_POLICY:: How waiting threads are handled
- * GOMP_CPU_AFFINITY:: Bind threads to specific CPUs
- * GOMP_DEBUG:: Enable debugging output
- * GOMP_STACKSIZE:: Set default thread stack size
- * GOMP_SPINCOUNT:: Set the busy-wait spin count
- @end menu
- @node OMP_CANCELLATION
- @section @env{OMP_CANCELLATION} -- Set whether cancellation is activated
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- If set to @code{TRUE}, cancellation is activated. If set to @code{FALSE} or
- if unset, cancellation is disabled and the @code{cancel} construct is ignored.
- @item @emph{See also}:
- @ref{omp_get_cancellation}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.11
- @end table
- @node OMP_DISPLAY_ENV
- @section @env{OMP_DISPLAY_ENV} -- Show OpenMP version and environment variables
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- If set to @code{TRUE}, the OpenMP version number and the values
- associated with the OpenMP environment variables are printed to @code{stderr}.
- If set to @code{VERBOSE}, it additionally shows the value of the environment
- variables which are GNU extensions. If undefined or set to @code{FALSE},
- this information will not be shown.
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.12
- @end table
- @node OMP_DEFAULT_DEVICE
- @section @env{OMP_DEFAULT_DEVICE} -- Set the device used in target regions
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Set to choose the device which is used in a @code{target} region, unless the
- value is overridden by @code{omp_set_default_device} or by a @code{device}
- clause. The value shall be the nonnegative device number. If no device with
- the given device number exists, the code is executed on the host. If unset,
- device number 0 will be used.
- @item @emph{See also}:
- @ref{omp_get_default_device}, @ref{omp_set_default_device}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.13
- @end table
- @node OMP_DYNAMIC
- @section @env{OMP_DYNAMIC} -- Dynamic adjustment of threads
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Enable or disable the dynamic adjustment of the number of threads
- within a team. The value of this environment variable shall be
- @code{TRUE} or @code{FALSE}. If undefined, dynamic adjustment is
- disabled by default.
- @item @emph{See also}:
- @ref{omp_set_dynamic}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.3
- @end table
- @node OMP_MAX_ACTIVE_LEVELS
- @section @env{OMP_MAX_ACTIVE_LEVELS} -- Set the maximum number of nested parallel regions
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Specifies the initial value for the maximum number of nested parallel
- regions. The value of this variable shall be a positive integer.
- If undefined, the number of active levels is unlimited.
- @item @emph{See also}:
- @ref{omp_set_max_active_levels}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.9
- @end table
- @node OMP_NESTED
- @section @env{OMP_NESTED} -- Nested parallel regions
- @cindex Environment Variable
- @cindex Implementation specific setting
- @table @asis
- @item @emph{Description}:
- Enable or disable nested parallel regions, i.e., whether team members
- are allowed to create new teams. The value of this environment variable
- shall be @code{TRUE} or @code{FALSE}. If undefined, nested parallel
- regions are disabled by default.
- @item @emph{See also}:
- @ref{omp_set_nested}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.6
- @end table
- @node OMP_NUM_THREADS
- @section @env{OMP_NUM_THREADS} -- Specifies the number of threads to use
- @cindex Environment Variable
- @cindex Implementation specific setting
- @table @asis
- @item @emph{Description}:
- Specifies the default number of threads to use in parallel regions. The
- value of this variable shall be a comma-separated list of positive integers;
- each value specifies the number of threads to use for the corresponding
- nesting level. If undefined, one thread per CPU is used.
- @item @emph{See also}:
- @ref{omp_set_num_threads}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.2
- @end table
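- As an illustration (program and invocation are examples only), a
- comma-separated value such as @code{OMP_NUM_THREADS=4,2} gives the outer
- level four threads and each nested level two:
- @smallexample
- /* Run as, for instance:  OMP_NUM_THREADS=4,2 ./a.out  */
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_set_nested (1);              /* Allow the inner region to be active.  */
-   #pragma omp parallel             /* Outer level: 4 threads.  */
-   #pragma omp parallel             /* Nested level: 2 threads per team.  */
-   #pragma omp single
-   printf ("inner team size: %d\n", omp_get_num_threads ());
-   return 0;
- @}
- @end smallexample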
- @node OMP_PROC_BIND
- @section @env{OMP_PROC_BIND} -- Whether threads may be moved between CPUs
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Specifies whether threads may be moved between processors. If set to
- @code{TRUE}, OpenMP threads should not be moved; if set to @code{FALSE}
- they may be moved. Alternatively, a comma separated list with the
- values @code{MASTER}, @code{CLOSE} and @code{SPREAD} can be used to specify
- the thread affinity policy for the corresponding nesting level. With
- @code{MASTER} the worker threads are in the same place partition as the
- master thread. With @code{CLOSE} those are kept close to the master thread
- in contiguous place partitions. And with @code{SPREAD} a sparse distribution
- across the place partitions is used.
- When undefined, @env{OMP_PROC_BIND} defaults to @code{TRUE} when
- @env{OMP_PLACES} or @env{GOMP_CPU_AFFINITY} is set and @code{FALSE} otherwise.
- @item @emph{See also}:
- @ref{OMP_PLACES}, @ref{GOMP_CPU_AFFINITY}, @ref{omp_get_proc_bind}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.4
- @end table
- @node OMP_PLACES
- @section @env{OMP_PLACES} -- Specifies on which CPUs the threads should be placed
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- The thread placement can be specified either by an abstract name or by an
- explicit list of places. The abstract names @code{threads}, @code{cores}
- and @code{sockets} can be optionally followed by a positive number in
- parentheses, which denotes how many places shall be created. With
- @code{threads} each place corresponds to a single hardware thread; @code{cores}
- to a single core with the corresponding number of hardware threads; and with
- @code{sockets} the place corresponds to a single socket. The resulting
- placement can be shown by setting the @env{OMP_DISPLAY_ENV} environment
- variable.
- Alternatively, the placement can be specified explicitly as a comma-separated
- list of places. A place is specified by a set of nonnegative numbers in curly
- braces, denoting the hardware threads. The hardware threads
- belonging to a place can either be specified as a comma-separated list of
- nonnegative thread numbers or using an interval. Multiple places can also be
- either specified by a comma-separated list of places or by an interval. To
- specify an interval, a colon followed by the count is placed after
- the hardware thread number or the place. Optionally, the count can be
- followed by a colon and the stride number -- otherwise a unit stride is
- assumed. For instance, the following three settings specify the same places list:
- @code{"@{0,1,2@}, @{3,4,6@}, @{7,8,9@}, @{10,11,12@}"};
- @code{"@{0:3@}, @{3:3@}, @{7:3@}, @{10:3@}"}; and @code{"@{0:2@}:4:3"}.
- If @env{OMP_PLACES} and @env{GOMP_CPU_AFFINITY} are unset and
- @env{OMP_PROC_BIND} is either unset or @code{false}, threads may be moved
- between CPUs following no placement policy.
- @item @emph{See also}:
- @ref{OMP_PROC_BIND}, @ref{GOMP_CPU_AFFINITY}, @ref{omp_get_proc_bind},
- @ref{OMP_DISPLAY_ENV}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.5
- @end table
- @node OMP_STACKSIZE
- @section @env{OMP_STACKSIZE} -- Set default thread stack size
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Set the default thread stack size in kilobytes, unless the number
- is suffixed by @code{B}, @code{K}, @code{M} or @code{G}, in which
- case the size is, respectively, in bytes, kilobytes, megabytes
- or gigabytes. This is different from @code{pthread_attr_setstacksize}
- which gets the number of bytes as an argument. If the stack size cannot
- be set due to system constraints, an error is reported and the initial
- stack size is left unchanged. If undefined, the stack size is system
- dependent.
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.7
- @end table
- @node OMP_SCHEDULE
- @section @env{OMP_SCHEDULE} -- How threads are scheduled
- @cindex Environment Variable
- @cindex Implementation specific setting
- @table @asis
- @item @emph{Description}:
- Allows specifying the @code{schedule type} and @code{chunk size}.
- The value of the variable shall have the form @code{type[,chunk]}, where
- @code{type} is one of @code{static}, @code{dynamic}, @code{guided} or @code{auto}.
- The optional @code{chunk} size shall be a positive integer. If undefined,
- dynamic scheduling and a chunk size of 1 are used.
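- As an informal sketch, the schedule selected through this variable can be
- queried with @code{omp_get_schedule} and is honored by loops declared with
- @code{schedule(runtime)}:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   omp_sched_t kind;
-   int chunk, sum = 0;
-   omp_get_schedule (&kind, &chunk);
-   printf ("kind=%d chunk=%d\n", (int) kind, chunk);
-   #pragma omp parallel for schedule(runtime) reduction(+:sum)
-   for (int i = 0; i < 100; i++)
-     sum += i;
-   printf ("sum=%d\n", sum);
-   return 0;
- @}
- @end smallexample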
- @item @emph{See also}:
- @ref{omp_set_schedule}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Sections 2.7.1 and 4.1
- @end table
- @node OMP_THREAD_LIMIT
- @section @env{OMP_THREAD_LIMIT} -- Set the maximum number of threads
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Specifies the maximum number of threads to use for the whole program. The
- value of this variable shall be a positive integer. If undefined,
- the number of threads is not limited.
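- As an informal illustration, the limit in effect can be read back with
- the @code{omp_get_thread_limit} API routine:
- @smallexample
- #include <stdio.h>
- #include <omp.h>
- int
- main (void)
- @{
-   /* Prints the configured limit, or a very large number if
-      OMP_THREAD_LIMIT is undefined.  */
-   printf ("thread limit: %d\n", omp_get_thread_limit ());
-   return 0;
- @}
- @end smallexample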
- @item @emph{See also}:
- @ref{OMP_NUM_THREADS}, @ref{omp_get_thread_limit}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.10
- @end table
- @node OMP_WAIT_POLICY
- @section @env{OMP_WAIT_POLICY} -- How waiting threads are handled
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Specifies whether waiting threads should be active or passive. If
- the value is @code{PASSIVE}, waiting threads should not consume CPU
- power while waiting; if the value is @code{ACTIVE}, they should.
- If undefined, threads wait actively for a short time
- before waiting passively.
- @item @emph{See also}:
- @ref{GOMP_SPINCOUNT}
- @item @emph{Reference}:
- @uref{http://www.openmp.org/, OpenMP specification v4.0}, Section 4.8
- @end table
- @node GOMP_CPU_AFFINITY
- @section @env{GOMP_CPU_AFFINITY} -- Bind threads to specific CPUs
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Binds threads to specific CPUs. The variable should contain a space-separated
- or comma-separated list of CPUs. This list may contain different kinds of
- entries: either single CPU numbers in any order, a range of CPUs (M-N)
- or a range with some stride (M-N:S). CPU numbers are zero based. For example,
- @code{GOMP_CPU_AFFINITY="0 3 1-2 4-15:2"} will bind the initial thread
- to CPU 0, the second to CPU 3, the third to CPU 1, the fourth to
- CPU 2, the fifth to CPU 4, the sixth through tenth to CPUs 6, 8, 10, 12,
- and 14 respectively and then start assigning back from the beginning of
- the list. @code{GOMP_CPU_AFFINITY=0} binds all threads to CPU 0.
- There is no libgomp library routine to determine whether a CPU affinity
- specification is in effect. As a workaround, language-specific library
- functions, e.g., @code{getenv} in C or @code{GET_ENVIRONMENT_VARIABLE} in
- Fortran, may be used to query the setting of the @code{GOMP_CPU_AFFINITY}
- environment variable. A defined CPU affinity on startup cannot be changed
- or disabled during the runtime of the application.
- If both @env{GOMP_CPU_AFFINITY} and @env{OMP_PROC_BIND} are set,
- @env{OMP_PROC_BIND} has a higher precedence. If neither has been set and
- @env{OMP_PROC_BIND} is unset, or when @env{OMP_PROC_BIND} is set to
- @code{FALSE}, the host system will handle the assignment of threads to CPUs.
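- A minimal sketch of the @code{getenv}-based workaround mentioned above:
- @smallexample
- #include <stdio.h>
- #include <stdlib.h>
- int
- main (void)
- @{
-   /* There is no library routine for this query, so inspect the
-      environment directly.  */
-   const char *aff = getenv ("GOMP_CPU_AFFINITY");
-   if (aff)
-     printf ("GOMP_CPU_AFFINITY=%s\n", aff);
-   else
-     puts ("GOMP_CPU_AFFINITY is not set");
-   return 0;
- @}
- @end smallexample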
- @item @emph{See also}:
- @ref{OMP_PLACES}, @ref{OMP_PROC_BIND}
- @end table
- @node GOMP_DEBUG
- @section @env{GOMP_DEBUG} -- Enable debugging output
- @cindex Environment Variable
- @table @asis
- @item @emph{Description}:
- Enable debugging output. The variable should be set to @code{0}
- (disabled, also the default if not set), or @code{1} (enabled).
- If enabled, some debugging output will be printed during execution.
- This is currently not specified in more detail, and subject to change.
- @end table
- @node GOMP_STACKSIZE
- @section @env{GOMP_STACKSIZE} -- Set default thread stack size
- @cindex Environment Variable
- @cindex Implementation specific setting
- @table @asis
- @item @emph{Description}:
- Set the default thread stack size in kilobytes. This is different from
- @code{pthread_attr_setstacksize} which gets the number of bytes as an
- argument. If the stack size cannot be set due to system constraints, an
- error is reported and the initial stack size is left unchanged. If undefined,
- the stack size is system dependent.
- @item @emph{See also}:
- @ref{OMP_STACKSIZE}
- @item @emph{Reference}:
- @uref{http://gcc.gnu.org/ml/gcc-patches/2006-06/msg00493.html,
- GCC Patches Mailinglist},
- @uref{http://gcc.gnu.org/ml/gcc-patches/2006-06/msg00496.html,
- GCC Patches Mailinglist}
- @end table
- @node GOMP_SPINCOUNT
- @section @env{GOMP_SPINCOUNT} -- Set the busy-wait spin count
- @cindex Environment Variable
- @cindex Implementation specific setting
- @table @asis
- @item @emph{Description}:
- Determines how long a thread waits actively, consuming CPU power,
- before waiting passively without consuming CPU power. The value may be
- either @code{INFINITE} or @code{INFINITY} to always wait actively, or an
- integer which gives the number of spins of the busy-wait loop. The
- integer may optionally be followed by the following suffixes acting
- as multiplication factors: @code{k} (kilo, thousand), @code{M} (mega,
- million), @code{G} (giga, billion), or @code{T} (tera, trillion).
- If undefined, 0 is used when @env{OMP_WAIT_POLICY} is @code{PASSIVE},
- 300,000 is used when @env{OMP_WAIT_POLICY} is undefined and
- 30 billion is used when @env{OMP_WAIT_POLICY} is @code{ACTIVE}.
- If there are more OpenMP threads than available CPUs, 1000 and 100
- spins are used for @env{OMP_WAIT_POLICY} being @code{ACTIVE} or
- undefined, respectively; unless @env{GOMP_SPINCOUNT} is lower
- or @env{OMP_WAIT_POLICY} is @code{PASSIVE}.
- @item @emph{See also}:
- @ref{OMP_WAIT_POLICY}
- @end table
- @c ---------------------------------------------------------------------
- @c The libgomp ABI
- @c ---------------------------------------------------------------------
- @node The libgomp ABI
- @chapter The libgomp ABI
- The following sections present notes on the external ABI as
- presented by libgomp. Only maintainers should need them.
- @menu
- * Implementing MASTER construct::
- * Implementing CRITICAL construct::
- * Implementing ATOMIC construct::
- * Implementing FLUSH construct::
- * Implementing BARRIER construct::
- * Implementing THREADPRIVATE construct::
- * Implementing PRIVATE clause::
- * Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses::
- * Implementing REDUCTION clause::
- * Implementing PARALLEL construct::
- * Implementing FOR construct::
- * Implementing ORDERED construct::
- * Implementing SECTIONS construct::
- * Implementing SINGLE construct::
- @end menu
- @node Implementing MASTER construct
- @section Implementing MASTER construct
- @smallexample
- if (omp_get_thread_num () == 0)
-   block
- @end smallexample
- Alternately, we generate two copies of the parallel subfunction
- and only include this in the version run by the master thread.
- Surely this is not worthwhile though...
- @node Implementing CRITICAL construct
- @section Implementing CRITICAL construct
- Without a specified name, use
- @smallexample
- void GOMP_critical_start (void);
- void GOMP_critical_end (void);
- @end smallexample
- so that we don't get COPY relocations from libgomp to the main
- application.
- With a specified name, use omp_set_lock and omp_unset_lock with
- name being transformed into a variable declared like
- @smallexample
- omp_lock_t gomp_critical_user_<name> __attribute__((common))
- @end smallexample
- Ideally the ABI would specify that all zero is a valid unlocked
- state, and so we wouldn't need to initialize this at
- startup.
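- As a concrete sketch of the scheme above (an illustration, not the exact
- code the compiler emits), a region such as
- @code{#pragma omp critical (foo)}, with @code{foo} being an arbitrary
- name, could lower to uses of the shared lock variable:
- @smallexample
- omp_lock_t gomp_critical_user_foo __attribute__((common));
- omp_set_lock (&gomp_critical_user_foo);
- body;
- omp_unset_lock (&gomp_critical_user_foo);
- @end smallexample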
- @node Implementing ATOMIC construct
- @section Implementing ATOMIC construct
- The target should implement the @code{__sync} builtins.
- Failing that, we could add
- @smallexample
- void GOMP_atomic_enter (void)
- void GOMP_atomic_exit (void)
- @end smallexample
- which reuses the regular lock code, but with yet another lock
- object private to the library.
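- For illustration only, an atomic update such as @code{x += 1} could then
- be emitted either via the builtin or, failing that, bracketed by the
- library calls above:
- @smallexample
- /* with __sync support: */
- __sync_fetch_and_add (&x, 1);
- /* fallback using the library lock: */
- GOMP_atomic_enter ();
- x += 1;
- GOMP_atomic_exit ();
- @end smallexample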
- @node Implementing FLUSH construct
- @section Implementing FLUSH construct
- Expands to the @code{__sync_synchronize} builtin.
- @node Implementing BARRIER construct
- @section Implementing BARRIER construct
- @smallexample
- void GOMP_barrier (void)
- @end smallexample
- @node Implementing THREADPRIVATE construct
- @section Implementing THREADPRIVATE construct
- In _most_ cases we can map this directly to @code{__thread}, except
- that OMP allows constructors for C++ objects. We can either
- refuse to support this (how often is it used?) or we can
- implement something akin to .ctors.
- Even more ideally, this ctor feature is handled by extensions
- to the main pthreads library. Failing that, we can have a set
- of entry points to register ctor functions to be called.
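- For the simple, constructor-free case mentioned above, the mapping is
- direct; as an illustrative sketch:
- @smallexample
- /* source: */
- int tp;
- #pragma omp threadprivate (tp)
- /* straightforward lowering: */
- __thread int tp;
- @end smallexample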
- @node Implementing PRIVATE clause
- @section Implementing PRIVATE clause
- In association with a PARALLEL, or within the lexical extent
- of a PARALLEL block, the variable becomes a local variable in
- the parallel subfunction.
- In association with FOR or SECTIONS blocks, create a new
- automatic variable within the current function. This preserves
- the semantic of new variable creation.
- @node Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses
- @section Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses
- This seems simple enough for PARALLEL blocks. Create a private
- struct for communicating between the parent and subfunction.
- In the parent, copy in values for scalar and "small" structs;
- copy in addresses for other TREE_ADDRESSABLE types. In the
- subfunction, copy the value into the local variable.
- It is not clear what to do with bare FOR or SECTION blocks.
- The only thing I can figure is that we do something like:
- @smallexample
- #pragma omp for firstprivate(x) lastprivate(y)
- for (int i = 0; i < n; ++i)
- body;
- @end smallexample
- which becomes
- @smallexample
- @{
-   int x = x, y;
-   // for stuff
-   if (i == n)
-     y = y;
- @}
- @end smallexample
- where the "x=x" and "y=y" assignments actually have different
- uids for the two variables, i.e. not something you could write
- directly in C. Presumably this only makes sense if the "outer"
- x and y are global variables.
- COPYPRIVATE would work the same way, except the structure
- broadcast would have to happen via SINGLE machinery instead.
- @node Implementing REDUCTION clause
- @section Implementing REDUCTION clause
- The private struct mentioned in the previous section should have
- a pointer to an array of the type of the variable, indexed by the
- thread's @var{team_id}. The thread stores its final value into the
- array, and after the barrier, the master thread iterates over the
- array to collect the values.
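- As an illustrative sketch of this scheme (with hypothetical names, not
- the exact code emitted by the compiler), a @code{reduction(+:sum)}
- clause could be handled as:
- @smallexample
- struct reduction_data @{ long *sum_array; @};  /* indexed by team id */
- /* in the parallel subfunction; data points to the private struct
-    described in the previous section: */
- long partial = 0;
- /* ... accumulate into partial while running the loop ... */
- data->sum_array[omp_get_thread_num ()] = partial;
- GOMP_barrier ();
- if (omp_get_thread_num () == 0)
-   @{
-     int t;
-     for (t = 0; t < omp_get_num_threads (); t++)
-       sum += data->sum_array[t];
-   @}
- @end smallexample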
- @node Implementing PARALLEL construct
- @section Implementing PARALLEL construct
- @smallexample
- #pragma omp parallel
- @{
- body;
- @}
- @end smallexample
- becomes
- @smallexample
- void subfunction (void *data)
- @{
- use data;
- body;
- @}
- setup data;
- GOMP_parallel_start (subfunction, &data, num_threads);
- subfunction (&data);
- GOMP_parallel_end ();
- @end smallexample
- @smallexample
- void GOMP_parallel_start (void (*fn)(void *), void *data, unsigned num_threads)
- @end smallexample
- The @var{FN} argument is the subfunction to be run in parallel.
- The @var{DATA} argument is a pointer to a structure used to
- communicate data in and out of the subfunction, as discussed
- above with respect to FIRSTPRIVATE et al.
- The @var{NUM_THREADS} argument is 1 if an IF clause is present
- and false, or the value of the NUM_THREADS clause, if
- present, or 0.
- The function needs to create the appropriate number of
- threads and/or launch them from the dock. It needs to
- create the team structure and assign team ids.
- @smallexample
- void GOMP_parallel_end (void)
- @end smallexample
- Tears down the team and returns us to the previous @code{omp_in_parallel()} state.
- @node Implementing FOR construct
- @section Implementing FOR construct
- @smallexample
- #pragma omp parallel for
- for (i = lb; i <= ub; i++)
- body;
- @end smallexample
- becomes
- @smallexample
- void subfunction (void *data)
- @{
-   long _s0, _e0;
-   while (GOMP_loop_static_next (&_s0, &_e0))
-     @{
-       long _e1 = _e0, i;
-       for (i = _s0; i < _e1; i++)
-         body;
-     @}
-   GOMP_loop_end_nowait ();
- @}
- GOMP_parallel_loop_static (subfunction, NULL, 0, lb, ub+1, 1, 0);
- subfunction (NULL);
- GOMP_parallel_end ();
- @end smallexample
- @smallexample
- #pragma omp for schedule(runtime)
- for (i = 0; i < n; i++)
- body;
- @end smallexample
- becomes
- @smallexample
- @{
-   long i, _s0, _e0;
-   if (GOMP_loop_runtime_start (0, n, 1, &_s0, &_e0))
-     do @{
-       long _e1 = _e0;
-       for (i = _s0; i < _e1; i++)
-         body;
-     @} while (GOMP_loop_runtime_next (&_s0, &_e0));
-   GOMP_loop_end ();
- @}
- @end smallexample
- Note that while it looks like there is trickiness to propagating
- a non-constant STEP, there isn't really. We're explicitly allowed
- to evaluate it as many times as we want, and any variables involved
- should automatically be handled as PRIVATE or SHARED like any other
- variables. So the expression should remain evaluable in the
- subfunction. We can also pull it into a local variable if we like,
- but since it's supposed to remain unchanged, we can also choose not to if we like.
- If we have SCHEDULE(STATIC), and no ORDERED, then we ought to be
- able to get away with no work-sharing context at all, since we can
- simply perform the arithmetic directly in each thread to divide up
- the iterations, which would mean that we wouldn't need to call any
- of these routines.
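- A sketch of that direct computation, assuming a simple block distribution
- of the @var{n} iterations among the team (variable names are illustrative):
- @smallexample
- int nthr = omp_get_num_threads ();
- int tid = omp_get_thread_num ();
- long chunk = n / nthr, rest = n % nthr;
- long _s0 = tid * chunk + (tid < rest ? tid : rest);
- long _e0 = _s0 + chunk + (tid < rest ? 1 : 0);
- for (i = _s0; i < _e0; i++)
-   body;
- @end smallexample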
- There are separate routines for handling loops with an ORDERED
- clause. Bookkeeping for that is non-trivial...
- @node Implementing ORDERED construct
- @section Implementing ORDERED construct
- @smallexample
- void GOMP_ordered_start (void)
- void GOMP_ordered_end (void)
- @end smallexample
- @node Implementing SECTIONS construct
- @section Implementing SECTIONS construct
- A block such as
- @smallexample
- #pragma omp sections
- @{
- #pragma omp section
- stmt1;
- #pragma omp section
- stmt2;
- #pragma omp section
- stmt3;
- @}
- @end smallexample
- becomes
- @smallexample
- for (i = GOMP_sections_start (3); i != 0; i = GOMP_sections_next ())
-   switch (i)
-     @{
-     case 1:
-       stmt1;
-       break;
-     case 2:
-       stmt2;
-       break;
-     case 3:
-       stmt3;
-       break;
-     @}
- GOMP_barrier ();
- @end smallexample
- @node Implementing SINGLE construct
- @section Implementing SINGLE construct
- A block like
- @smallexample
- #pragma omp single
- @{
- body;
- @}
- @end smallexample
- becomes
- @smallexample
- if (GOMP_single_start ())
- body;
- GOMP_barrier ();
- @end smallexample
- while
- @smallexample
- #pragma omp single copyprivate(x)
- body;
- @end smallexample
- becomes
- @smallexample
- datap = GOMP_single_copy_start ();
- if (datap == NULL)
-   @{
-     body;
-     data.x = x;
-     GOMP_single_copy_end (&data);
-   @}
- else
-   x = datap->x;
- GOMP_barrier ();
- @end smallexample
- @c ---------------------------------------------------------------------
- @c Reporting Bugs
- @c ---------------------------------------------------------------------
- @node Reporting Bugs
- @chapter Reporting Bugs
- Bugs in the GNU Offloading and Multi Processing Runtime Library should
- be reported via @uref{http://gcc.gnu.org/bugzilla/, Bugzilla}. Please add
- "openacc", or "openmp", or both to the keywords field in the bug
- report, as appropriate.
- @c ---------------------------------------------------------------------
- @c GNU General Public License
- @c ---------------------------------------------------------------------
- @include gpl_v3.texi
- @c ---------------------------------------------------------------------
- @c GNU Free Documentation License
- @c ---------------------------------------------------------------------
- @include fdl.texi
- @c ---------------------------------------------------------------------
- @c Funding Free Software
- @c ---------------------------------------------------------------------
- @include funding.texi
- @c ---------------------------------------------------------------------
- @c Index
- @c ---------------------------------------------------------------------
- @node Library Index
- @unnumbered Library Index
- @printindex cp
- @bye